FreeBSD/Linux Kernel Cross Reference
sys/kern/vfs_bio.c

    1 /*
    2  * Copyright (c) 1994,1997 John S. Dyson
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice immediately at the beginning of the file, without modification,
   10  *    this list of conditions, and the following disclaimer.
   11  * 2. Absolutely no warranty of function or purpose is made by the author
   12  *              John S. Dyson.
   13  *
   14  * $FreeBSD: releng/5.0/sys/kern/vfs_bio.c 108006 2002-12-17 22:20:42Z mckusick $
   15  */
   16 
   17 /*
   18  * This file contains a new buffer I/O scheme implementing a coherent
   19  * VM object and buffer cache scheme.  Pains have been taken to make
   20  * sure that the performance degradation associated with schemes such
   21  * as this is not realized.
   22  *
   23  * Author:  John S. Dyson
   24  * Significant help during the development and debugging phases
   25  * was provided by David Greenman, also of the FreeBSD core team.
   26  *
   27  * see man buf(9) for more info.
   28  */
   29 
   30 #include <sys/param.h>
   31 #include <sys/systm.h>
   32 #include <sys/stdint.h>
   33 #include <sys/bio.h>
   34 #include <sys/buf.h>
   35 #include <sys/devicestat.h>
   36 #include <sys/eventhandler.h>
   37 #include <sys/lock.h>
   38 #include <sys/malloc.h>
   39 #include <sys/mount.h>
   40 #include <sys/mutex.h>
   41 #include <sys/kernel.h>
   42 #include <sys/kthread.h>
   43 #include <sys/proc.h>
   44 #include <sys/resourcevar.h>
   45 #include <sys/sysctl.h>
   46 #include <sys/vmmeter.h>
   47 #include <sys/vnode.h>
   48 #include <vm/vm.h>
   49 #include <vm/vm_param.h>
   50 #include <vm/vm_kern.h>
   51 #include <vm/vm_pageout.h>
   52 #include <vm/vm_page.h>
   53 #include <vm/vm_object.h>
   54 #include <vm/vm_extern.h>
   55 #include <vm/vm_map.h>
   56 
   57 static MALLOC_DEFINE(M_BIOBUF, "BIO buffer", "BIO buffer");
   58 
   59 struct  bio_ops bioops;         /* I/O operation notification */
   60 
   61 struct  buf_ops buf_ops_bio = {
   62         "buf_ops_bio",
   63         bwrite
   64 };
   65 
   66 /*
   67  * XXX buf is global because kern_shutdown.c and ffs_checkoverlap has
   68  * carnal knowledge of buffers.  This knowledge should be moved to vfs_bio.c.
   69  */
   70 struct buf *buf;                /* buffer header pool */
   71 struct mtx buftimelock;         /* Interlock on setting prio and timo */
   72 
   73 static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
   74                 vm_offset_t to);
   75 static void vm_hold_load_pages(struct buf * bp, vm_offset_t from,
   76                 vm_offset_t to);
   77 static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off,
   78                                int pageno, vm_page_t m);
   79 static void vfs_clean_pages(struct buf * bp);
   80 static void vfs_setdirty(struct buf *bp);
   81 static void vfs_vmio_release(struct buf *bp);
   82 static void vfs_backgroundwritedone(struct buf *bp);
   83 static int flushbufqueues(void);
   84 static void buf_daemon(void);
   85 
   86 int vmiodirenable = TRUE;
   87 SYSCTL_INT(_vfs, OID_AUTO, vmiodirenable, CTLFLAG_RW, &vmiodirenable, 0,
   88     "Use the VM system for directory writes");
   89 int runningbufspace;
   90 SYSCTL_INT(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD, &runningbufspace, 0,
   91     "Amount of presently outstanding async buffer io");
   92 static int bufspace;
   93 SYSCTL_INT(_vfs, OID_AUTO, bufspace, CTLFLAG_RD, &bufspace, 0,
   94     "KVA memory used for bufs");
   95 static int maxbufspace;
   96 SYSCTL_INT(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RD, &maxbufspace, 0,
   97     "Maximum allowed value of bufspace (including buf_daemon)");
   98 static int bufmallocspace;
   99 SYSCTL_INT(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD, &bufmallocspace, 0,
  100     "Amount of malloced memory for buffers");
  101 static int maxbufmallocspace;
  102 SYSCTL_INT(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW, &maxbufmallocspace, 0,
  103     "Maximum amount of malloced memory for buffers");
  104 static int lobufspace;
  105 SYSCTL_INT(_vfs, OID_AUTO, lobufspace, CTLFLAG_RD, &lobufspace, 0,
  106     "Minimum amount of buffers we want to have");
  107 static int hibufspace;
  108 SYSCTL_INT(_vfs, OID_AUTO, hibufspace, CTLFLAG_RD, &hibufspace, 0,
  109     "Maximum allowed value of bufspace (excluding buf_daemon)");
  110 static int bufreusecnt;
  111 SYSCTL_INT(_vfs, OID_AUTO, bufreusecnt, CTLFLAG_RW, &bufreusecnt, 0,
  112     "Number of times we have reused a buffer");
  113 static int buffreekvacnt;
  114 SYSCTL_INT(_vfs, OID_AUTO, buffreekvacnt, CTLFLAG_RW, &buffreekvacnt, 0,
  115     "Number of times we have freed the KVA space from some buffer");
  116 static int bufdefragcnt;
  117 SYSCTL_INT(_vfs, OID_AUTO, bufdefragcnt, CTLFLAG_RW, &bufdefragcnt, 0,
  118     "Number of times we have had to repeat buffer allocation to defragment");
  119 static int lorunningspace;
  120 SYSCTL_INT(_vfs, OID_AUTO, lorunningspace, CTLFLAG_RW, &lorunningspace, 0,
  121     "Minimum preferred space used for in-progress I/O");
  122 static int hirunningspace;
  123 SYSCTL_INT(_vfs, OID_AUTO, hirunningspace, CTLFLAG_RW, &hirunningspace, 0,
  124     "Maximum amount of space to use for in-progress I/O");
  125 static int numdirtybuffers;
  126 SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD, &numdirtybuffers, 0,
  127     "Number of buffers that are dirty (has unwritten changes) at the moment");
  128 static int lodirtybuffers;
  129 SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW, &lodirtybuffers, 0,
  130     "How many buffers we want to have free before bufdaemon can sleep");
  131 static int hidirtybuffers;
  132 SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW, &hidirtybuffers, 0,
  133     "When the number of dirty buffers is considered severe");
  134 static int numfreebuffers;
  135 SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD, &numfreebuffers, 0,
  136     "Number of free buffers");
  137 static int lofreebuffers;
  138 SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW, &lofreebuffers, 0,
  139    "XXX Unused");
  140 static int hifreebuffers;
  141 SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW, &hifreebuffers, 0,
  142    "XXX Complicatedly unused");
  143 static int getnewbufcalls;
  144 SYSCTL_INT(_vfs, OID_AUTO, getnewbufcalls, CTLFLAG_RW, &getnewbufcalls, 0,
  145    "Number of calls to getnewbuf");
  146 static int getnewbufrestarts;
  147 SYSCTL_INT(_vfs, OID_AUTO, getnewbufrestarts, CTLFLAG_RW, &getnewbufrestarts, 0,
  148     "Number of times getnewbuf has had to restart a buffer aquisition");
  149 static int dobkgrdwrite = 1;
  150 SYSCTL_INT(_debug, OID_AUTO, dobkgrdwrite, CTLFLAG_RW, &dobkgrdwrite, 0,
  151     "Do background writes (honoring the BX_BKGRDWRITE flag)?");
  152 
  153 /*
  154  * Wakeup point for bufdaemon, as well as indicator of whether it is already
  155  * active.  Set to 1 when the bufdaemon is already "on" the queue, 0 when it
  156  * is idling.
  157  */
  158 static int bd_request;
  159 
  160 /*
  161  * bogus page -- for I/O to/from partially complete buffers
  162  * this is a temporary solution to the problem, but it is not
  163  * really that bad.  It would be better to split the buffer
  164  * for input in the case of buffers partially already in memory,
  165  * but the code is intricate enough already.
  166  */
  167 vm_page_t bogus_page;
  168 
  169 /*
  170  * Offset for bogus_page.
  171  * XXX bogus_offset should be local to bufinit
  172  */
  173 static vm_offset_t bogus_offset;
  174 
  175 /*
  176  * Synchronization (sleep/wakeup) variable for active buffer space requests.
  177  * Set when wait starts, cleared prior to wakeup().
  178  * Used in runningbufwakeup() and waitrunningbufspace().
  179  */
  180 static int runningbufreq;
  181 
  182 /* 
  183  * Synchronization (sleep/wakeup) variable for buffer requests.
  184  * Can contain the VFS_BIO_NEED flags defined below; setting/clearing is done
  185  * by and/or.
  186  * Used in numdirtywakeup(), bufspacewakeup(), bufcountwakeup(), bwillwrite(),
  187  * getnewbuf(), and getblk().
  188  */
  189 static int needsbuffer;
  190 
  191 #ifdef USE_BUFHASH
  192 /*
  193  * Mask for index into the buffer hash table, which needs to be a power of 2 in
  194  * size.  Set in kern_vfs_bio_buffer_alloc.
  195  */
  196 static int bufhashmask;
  197 
  198 /*
  199  * Hash table for all buffers, with a linked list hanging from each table
  200  * entry.  Set in kern_vfs_bio_buffer_alloc, initialized in buf_init.
  201  */
  202 static LIST_HEAD(bufhashhdr, buf) *bufhashtbl;
  203 
  204 /*
  205  * Somewhere to store buffers when they are not in another list, to always
  206  * have them in a list (and thus be able to use the same set of operations
  207  * on them).
  208  */
  209 static struct bufhashhdr invalhash;
  210 
  211 #endif
  212 
  213 /*
  214  * Definitions for the buffer free lists.
  215  */
  216 #define BUFFER_QUEUES   6       /* number of free buffer queues */
  217 
  218 #define QUEUE_NONE      0       /* on no queue */
  219 #define QUEUE_LOCKED    1       /* locked buffers */
  220 #define QUEUE_CLEAN     2       /* non-B_DELWRI buffers */
  221 #define QUEUE_DIRTY     3       /* B_DELWRI buffers */
  222 #define QUEUE_EMPTYKVA  4       /* empty buffer headers w/KVA assignment */
  223 #define QUEUE_EMPTY     5       /* empty buffer headers */
  224 
  225 /* Queues for free buffers with various properties */
  226 static TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES] = { { 0 } };
  227 /*
  228  * Single global constant for BUF_WMESG, to avoid getting multiple references.
  229  * buf_wmesg is referred from macros.
  230  */
  231 const char *buf_wmesg = BUF_WMESG;
  232 
  233 #define VFS_BIO_NEED_ANY        0x01    /* any freeable buffer */
  234 #define VFS_BIO_NEED_DIRTYFLUSH 0x02    /* waiting for dirty buffer flush */
  235 #define VFS_BIO_NEED_FREE       0x04    /* wait for free bufs, hi hysteresis */
  236 #define VFS_BIO_NEED_BUFSPACE   0x08    /* wait for buf space, lo hysteresis */
  237 
  238 #ifdef USE_BUFHASH
  239 /*
  240  * Buffer hash table code.  Note that the logical block scans linearly, which
  241  * gives us some L1 cache locality.
  242  */
  243 
  244 static __inline 
  245 struct bufhashhdr *
  246 bufhash(struct vnode *vnp, daddr_t bn)
  247 {
  248         return(&bufhashtbl[(((uintptr_t)(vnp) >> 7) + (int)bn) & bufhashmask]);
  249 }
  250 
  251 #endif
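
A minimal userland sketch of the hash above, showing the locality property the
comment mentions: consecutive logical blocks of the same vnode land in
consecutive hash chains.  The vnode address and table size below are assumed
values chosen only for illustration.

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
        uintptr_t vnp = 0xc2f4b700;     /* assumed vnode address */
        int bufhashmask = 1024 - 1;     /* assumed table size (power of 2) - 1 */
        int bn;

        for (bn = 0; bn < 4; bn++)
                printf("lblkno %d -> chain %ld\n", bn,
                    (long)(((vnp >> 7) + bn) & bufhashmask));
        return (0);
}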
  252 
  253 /*
  254  *      numdirtywakeup:
  255  *
  256  *      If someone is blocked due to there being too many dirty buffers,
  257  *      and numdirtybuffers is now reasonable, wake them up.
  258  */
  259 
  260 static __inline void
  261 numdirtywakeup(int level)
  262 {
  263         if (numdirtybuffers <= level) {
  264                 if (needsbuffer & VFS_BIO_NEED_DIRTYFLUSH) {
  265                         needsbuffer &= ~VFS_BIO_NEED_DIRTYFLUSH;
  266                         wakeup(&needsbuffer);
  267                 }
  268         }
  269 }
  270 
  271 /*
  272  *      bufspacewakeup:
  273  *
  274  *      Called when buffer space is potentially available for recovery.
  275  *      getnewbuf() will block on this flag when it is unable to free 
  276  *      sufficient buffer space.  Buffer space becomes recoverable when 
  277  *      bp's get placed back in the queues.
  278  */
  279 
  280 static __inline void
  281 bufspacewakeup(void)
  282 {
  283         /*
  284          * If someone is waiting for BUF space, wake them up.  Even
  285          * though we haven't freed the kva space yet, the waiting
  286          * process will be able to now.
  287          */
  288         if (needsbuffer & VFS_BIO_NEED_BUFSPACE) {
  289                 needsbuffer &= ~VFS_BIO_NEED_BUFSPACE;
  290                 wakeup(&needsbuffer);
  291         }
  292 }
  293 
  294 /*
  295  * runningbufwakeup() - in-progress I/O accounting.
  296  *
  297  */
  298 static __inline void
  299 runningbufwakeup(struct buf *bp)
  300 {
  301         if (bp->b_runningbufspace) {
  302                 runningbufspace -= bp->b_runningbufspace;
  303                 bp->b_runningbufspace = 0;
  304                 if (runningbufreq && runningbufspace <= lorunningspace) {
  305                         runningbufreq = 0;
  306                         wakeup(&runningbufreq);
  307                 }
  308         }
  309 }
  310 
  311 /*
  312  *      bufcountwakeup:
  313  *
  314  *      Called when a buffer has been added to one of the free queues to
  315  *      account for the buffer and to wakeup anyone waiting for free buffers.
  316  *      This typically occurs when large amounts of metadata are being handled
  317  *      by the buffer cache ( else buffer space runs out first, usually ).
  318  */
  319 
  320 static __inline void
  321 bufcountwakeup(void) 
  322 {
  323         ++numfreebuffers;
  324         if (needsbuffer) {
  325                 needsbuffer &= ~VFS_BIO_NEED_ANY;
  326                 if (numfreebuffers >= hifreebuffers)
  327                         needsbuffer &= ~VFS_BIO_NEED_FREE;
  328                 wakeup(&needsbuffer);
  329         }
  330 }
  331 
  332 /*
  333  *      waitrunningbufspace()
  334  *
  335  *      runningbufspace is a measure of the amount of I/O currently
  336  *      running.  This routine is used in async-write situations to
  337  *      prevent creating huge backups of pending writes to a device.
  338  *      Only asynchronous writes are governed by this function.
  339  *
  340  *      Reads will adjust runningbufspace, but will not block based on it.
  341  *      The read load has a side effect of reducing the allowed write load.
  342  *
  343  *      This does NOT turn an async write into a sync write.  It waits  
  344  *      for earlier writes to complete and generally returns before the
  345  *      caller's write has reached the device.
  346  */
  347 static __inline void
  348 waitrunningbufspace(void)
  349 {
  350         /*
  351          * XXX race against wakeup interrupt, currently
  352          * protected by Giant.  FIXME!
  353          */
  354         while (runningbufspace > hirunningspace) {
  355                 ++runningbufreq;
  356                 tsleep(&runningbufreq, PVM, "wdrain", 0);
  357         }
  358 }
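
As an illustration of the policy above, here is a minimal, userland-only model
(not kernel code) of the issue/throttle/complete cycle: each write is charged
to runningbufspace as bwrite() does, the writer stalls once the high watermark
is exceeded, and completions drain the count back to the low watermark before
the writer resumes.  The per-write size and write count are arbitrary
assumptions.

#include <stdio.h>

#define LORUNNINGSPACE  (512 * 1024)    /* values assigned in bufinit() */
#define HIRUNNINGSPACE  (1024 * 1024)
#define WRITE_SIZE      (64 * 1024)     /* assumed size of each async write */

int
main(void)
{
        long runningbufspace = 0;       /* bytes of async write I/O in flight */
        int i, stalls = 0;

        for (i = 0; i < 64; i++) {
                /* bwrite(): account for the newly issued write. */
                runningbufspace += WRITE_SIZE;

                /* waitrunningbufspace(): stall while above the high mark. */
                if (runningbufspace > HIRUNNINGSPACE) {
                        stalls++;
                        /*
                         * Stand-in for tsleep(): completed writes keep
                         * calling runningbufwakeup() until the level is back
                         * at the low mark, where the waiter is woken.
                         */
                        while (runningbufspace > LORUNNINGSPACE)
                                runningbufspace -= WRITE_SIZE;
                }
        }
        printf("issued %d writes of %ldK, writer stalled %d times\n",
            64, (long)WRITE_SIZE / 1024, stalls);
        return (0);
}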
  359 
  360 
  361 /*
  362  *      vfs_buf_test_cache:
  363  *
  364  *      Called when a buffer is extended.  This function clears the B_CACHE
  365  *      bit if the newly extended portion of the buffer does not contain
  366  *      valid data.
  367  */
  368 static __inline__
  369 void
  370 vfs_buf_test_cache(struct buf *bp,
  371                   vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
  372                   vm_page_t m)
  373 {
  374         GIANT_REQUIRED;
  375 
  376         if (bp->b_flags & B_CACHE) {
  377                 int base = (foff + off) & PAGE_MASK;
  378                 if (vm_page_is_valid(m, base, size) == 0)
  379                         bp->b_flags &= ~B_CACHE;
  380         }
  381 }
  382 
  383 /* Wake up the buffer daemon if necessary */
  384 static __inline__
  385 void
  386 bd_wakeup(int dirtybuflevel)
  387 {
  388         if (bd_request == 0 && numdirtybuffers >= dirtybuflevel) {
  389                 bd_request = 1;
  390                 wakeup(&bd_request);
  391         }
  392 }
  393 
  394 /*
  395  * bd_speedup - speedup the buffer cache flushing code
  396  */
  397 
  398 static __inline__
  399 void
  400 bd_speedup(void)
  401 {
  402         bd_wakeup(1);
  403 }
  404 
  405 /*
  406  * Calculate buffer cache scaling values and reserve space for buffer
  407  * headers.  This is called during low-level kernel initialization and
  408  * may be called more than once.  We CANNOT write to the memory area
  409  * being reserved at this time.
  410  */
  411 caddr_t
  412 kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est)
  413 {
  414         /*
  415          * physmem_est is in pages.  Convert it to kilobytes (assumes
  416          * PAGE_SIZE is >= 1K)
  417          */
  418         physmem_est = physmem_est * (PAGE_SIZE / 1024);
  419 
  420         /*
  421          * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
  422          * For the first 64MB of ram nominally allocate sufficient buffers to
  423          * cover 1/4 of our ram.  Beyond the first 64MB allocate additional
  424          * buffers to cover 1/20 of our ram over 64MB.  When auto-sizing
  425          * the buffer cache we limit the eventual kva reservation to
  426          * maxbcache bytes.
  427          *
  428          * factor represents the 1/4 x ram conversion.
  429          */
  430         if (nbuf == 0) {
  431                 int factor = 4 * BKVASIZE / 1024;
  432 
  433                 nbuf = 50;
  434                 if (physmem_est > 4096)
  435                         nbuf += min((physmem_est - 4096) / factor,
  436                             65536 / factor);
  437                 if (physmem_est > 65536)
  438                         nbuf += (physmem_est - 65536) * 2 / (factor * 5);
  439 
  440                 if (maxbcache && nbuf > maxbcache / BKVASIZE)
  441                         nbuf = maxbcache / BKVASIZE;
  442         }
  443 
  444 #if 0
  445         /*
  446  * Do not allow the buffer_map to be more than 1/2 the size of the
  447          * kernel_map.
  448          */
  449         if (nbuf > (kernel_map->max_offset - kernel_map->min_offset) / 
  450             (BKVASIZE * 2)) {
  451                 nbuf = (kernel_map->max_offset - kernel_map->min_offset) / 
  452                     (BKVASIZE * 2);
  453                 printf("Warning: nbufs capped at %d\n", nbuf);
  454         }
  455 #endif
  456 
  457         /*
  458          * swbufs are used as temporary holders for I/O, such as paging I/O.
  459  * We have no fewer than 16 and no more than 256.
  460          */
  461         nswbuf = max(min(nbuf/4, 256), 16);
  462 
  463         /*
  464          * Reserve space for the buffer cache buffers
  465          */
  466         swbuf = (void *)v;
  467         v = (caddr_t)(swbuf + nswbuf);
  468         buf = (void *)v;
  469         v = (caddr_t)(buf + nbuf);
  470 
  471 #ifdef USE_BUFHASH
  472         /*
  473          * Calculate the hash table size and reserve space
  474          */
  475         for (bufhashmask = 8; bufhashmask < nbuf / 4; bufhashmask <<= 1)
  476                 ;
  477         bufhashtbl = (void *)v;
  478         v = (caddr_t)(bufhashtbl + bufhashmask);
  479         --bufhashmask;
  480 #endif
  481         return(v);
  482 }
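
To make the sizing rules above concrete, a standalone sketch of the same
arithmetic for a hypothetical machine with 256MB of RAM.  The 16K BKVASIZE and
the absence of a maxbcache cap are assumptions; both vary by release and
configuration.

#include <stdio.h>

int
main(void)
{
        long physmem_est = 256 * 1024;          /* physical memory, in KB */
        long bkvasize = 16 * 1024;              /* assumed BKVASIZE */
        long factor = 4 * bkvasize / 1024;      /* the 1/4-of-RAM conversion */
        long nbuf = 50, extra;

        if (physmem_est > 4096) {
                /*
                 * One buffer per 'factor' KB of RAM above 4MB, capped at the
                 * first 64MB's worth.
                 */
                extra = (physmem_est - 4096) / factor;
                if (extra > 65536 / factor)
                        extra = 65536 / factor;
                nbuf += extra;
        }
        if (physmem_est > 65536) {
                /* 2/5 of that rate for memory beyond 64MB. */
                nbuf += (physmem_est - 65536) * 2 / (factor * 5);
        }

        printf("nbuf = %ld buffers, ~%ldMB of buffer KVA\n",
            nbuf, nbuf * bkvasize / (1024 * 1024));
        return (0);
}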
  483 
  484 /* Initialize the buffer subsystem.  Called before use of any buffers. */
  485 void
  486 bufinit(void)
  487 {
  488         struct buf *bp;
  489         int i;
  490 
  491         GIANT_REQUIRED;
  492 
  493 #ifdef USE_BUFHASH
  494         LIST_INIT(&invalhash);
  495 #endif
  496         mtx_init(&buftimelock, "buftime lock", NULL, MTX_DEF);
  497 
  498 #ifdef USE_BUFHASH
  499         for (i = 0; i <= bufhashmask; i++)
  500                 LIST_INIT(&bufhashtbl[i]);
  501 #endif
  502 
  503         /* next, make a null set of free lists */
  504         for (i = 0; i < BUFFER_QUEUES; i++)
  505                 TAILQ_INIT(&bufqueues[i]);
  506 
  507         /* finally, initialize each buffer header and stick on empty q */
  508         for (i = 0; i < nbuf; i++) {
  509                 bp = &buf[i];
  510                 bzero(bp, sizeof *bp);
  511                 bp->b_flags = B_INVAL;  /* we're just an empty header */
  512                 bp->b_dev = NODEV;
  513                 bp->b_rcred = NOCRED;
  514                 bp->b_wcred = NOCRED;
  515                 bp->b_qindex = QUEUE_EMPTY;
  516                 bp->b_xflags = 0;
  517                 LIST_INIT(&bp->b_dep);
  518                 BUF_LOCKINIT(bp);
  519                 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
  520 #ifdef USE_BUFHASH
  521                 LIST_INSERT_HEAD(&invalhash, bp, b_hash);
  522 #endif
  523         }
  524 
  525         /*
  526          * maxbufspace is the absolute maximum amount of buffer space we are 
  527          * allowed to reserve in KVM and in real terms.  The absolute maximum
  528          * is nominally used by buf_daemon.  hibufspace is the nominal maximum
  529          * used by most other processes.  The differential is required to 
  530          * ensure that buf_daemon is able to run when other processes might 
  531          * be blocked waiting for buffer space.
  532          *
  533  * maxbufspace is based on BKVASIZE.  Allocating buffers larger than
  534          * this may result in KVM fragmentation which is not handled optimally
  535          * by the system.
  536          */
  537         maxbufspace = nbuf * BKVASIZE;
  538         hibufspace = imax(3 * maxbufspace / 4, maxbufspace - MAXBSIZE * 10);
  539         lobufspace = hibufspace - MAXBSIZE;
  540 
  541         lorunningspace = 512 * 1024;
  542         hirunningspace = 1024 * 1024;
  543 
  544 /*
  545  * Limit the amount of malloc memory since it is wired permanently into
  546  * the kernel space.  Even though this is accounted for in the buffer
  547  * allocation, we don't want the malloced region to grow uncontrolled.
  548  * The malloc scheme improves memory utilization significantly on average
  549  * (small) directories.
  550  */
  551         maxbufmallocspace = hibufspace / 20;
  552 
  553 /*
  554  * Reduce the chance of a deadlock occurring by limiting the number
  555  * of delayed-write dirty buffers we allow to stack up.
  556  */
  557         hidirtybuffers = nbuf / 4 + 20;
  558         numdirtybuffers = 0;
  559 /*
  560  * To support extreme low-memory systems, make sure hidirtybuffers cannot
  561  * eat up all available buffer space.  This occurs when our minimum cannot
  562  * be met.  We try to size hidirtybuffers to 3/4 our buffer space assuming
  563  * BKVASIZE'd (8K) buffers.
  564  */
  565         while (hidirtybuffers * BKVASIZE > 3 * hibufspace / 4) {
  566                 hidirtybuffers >>= 1;
  567         }
  568         lodirtybuffers = hidirtybuffers / 2;
  569 
  570 /*
  571  * Try to keep the number of free buffers in the specified range,
  572  * and give special processes (e.g. buf_daemon) access to an
  573  * emergency reserve.
  574  */
  575         lofreebuffers = nbuf / 18 + 5;
  576         hifreebuffers = 2 * lofreebuffers;
  577         numfreebuffers = nbuf;
  578 
  579 /*
  580  * Maximum number of async ops initiated per buf_daemon loop.  This is
  581  * somewhat of a hack at the moment, we really need to limit ourselves
  582  * based on the number of bytes of I/O in-transit that were initiated
  583  * from buf_daemon.
  584  */
  585 
  586         bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
  587         bogus_page = vm_page_alloc(kernel_object,
  588                         ((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
  589                         VM_ALLOC_NORMAL);
  590         cnt.v_wire_count++;
  591 }
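
Continuing the same worked example, a sketch of the watermark arithmetic
performed above, using the hypothetical nbuf of 2302 from the previous sketch
together with assumed BKVASIZE (16K) and MAXBSIZE (64K) values:

#include <stdio.h>

#define IMAX(a, b)      ((a) > (b) ? (a) : (b))

int
main(void)
{
        long nbuf = 2302;               /* from the previous sketch (assumed) */
        long bkvasize = 16 * 1024;      /* assumed BKVASIZE */
        long maxbsize = 64 * 1024;      /* assumed MAXBSIZE */
        long maxbufspace, hibufspace, lobufspace, hidirtybuffers;

        maxbufspace = nbuf * bkvasize;
        hibufspace = IMAX(3 * maxbufspace / 4, maxbufspace - maxbsize * 10);
        lobufspace = hibufspace - maxbsize;

        hidirtybuffers = nbuf / 4 + 20;
        while (hidirtybuffers * bkvasize > 3 * hibufspace / 4)
                hidirtybuffers >>= 1;

        printf("maxbufspace %ldK, hibufspace %ldK, lobufspace %ldK\n",
            maxbufspace / 1024, hibufspace / 1024, lobufspace / 1024);
        printf("hidirtybuffers %ld, lodirtybuffers %ld\n",
            hidirtybuffers, hidirtybuffers / 2);
        return (0);
}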
  592 
  593 /*
  594  * bfreekva() - free the kva allocation for a buffer.
  595  *
  596  *      Must be called at splbio() or higher as this is the only locking for
  597  *      buffer_map.
  598  *
  599  *      Since this call frees up buffer space, we call bufspacewakeup().
  600  */
  601 static void
  602 bfreekva(struct buf * bp)
  603 {
  604         GIANT_REQUIRED;
  605 
  606         if (bp->b_kvasize) {
  607                 ++buffreekvacnt;
  608                 bufspace -= bp->b_kvasize;
  609                 vm_map_delete(buffer_map,
  610                     (vm_offset_t) bp->b_kvabase,
  611                     (vm_offset_t) bp->b_kvabase + bp->b_kvasize
  612                 );
  613                 bp->b_kvasize = 0;
  614                 bufspacewakeup();
  615         }
  616 }
  617 
  618 /*
  619  *      bremfree:
  620  *
  621  *      Remove the buffer from the appropriate free list.
  622  */
  623 void
  624 bremfree(struct buf * bp)
  625 {
  626         int s = splbio();
  627         int old_qindex = bp->b_qindex;
  628 
  629         GIANT_REQUIRED;
  630 
  631         if (bp->b_qindex != QUEUE_NONE) {
  632                 KASSERT(BUF_REFCNT(bp) == 1, ("bremfree: bp %p not locked",bp));
  633                 TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
  634                 bp->b_qindex = QUEUE_NONE;
  635         } else {
  636                 if (BUF_REFCNT(bp) <= 1)
  637                         panic("bremfree: removing a buffer not on a queue");
  638         }
  639 
  640         /*
  641          * Fixup numfreebuffers count.  If the buffer is invalid or not
  642          * delayed-write, and it was on the EMPTY, LRU, or AGE queues,
  643          * the buffer was free and we must decrement numfreebuffers.
  644          */
  645         if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0) {
  646                 switch(old_qindex) {
  647                 case QUEUE_DIRTY:
  648                 case QUEUE_CLEAN:
  649                 case QUEUE_EMPTY:
  650                 case QUEUE_EMPTYKVA:
  651                         --numfreebuffers;
  652                         break;
  653                 default:
  654                         break;
  655                 }
  656         }
  657         splx(s);
  658 }
  659 
  660 
  661 /*
  662  * Get a buffer with the specified data.  Look in the cache first.  We
  663  * must clear BIO_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE
  664  * is set, the buffer is valid and we do not have to do anything ( see
  665  * getblk() ).  This is really just a special case of breadn().
  666  */
  667 int
  668 bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
  669     struct buf ** bpp)
  670 {
  671 
  672         return (breadn(vp, blkno, size, 0, 0, 0, cred, bpp));
  673 }
  674 
  675 /*
  676  * Operates like bread, but also starts asynchronous I/O on
  677  * read-ahead blocks.  We must clear BIO_ERROR and B_INVAL prior
  678  * to initiating I/O.  If B_CACHE is set, the buffer is valid
  679  * and we do not have to do anything.
  680  */
  681 int
  682 breadn(struct vnode * vp, daddr_t blkno, int size,
  683     daddr_t * rablkno, int *rabsize,
  684     int cnt, struct ucred * cred, struct buf ** bpp)
  685 {
  686         struct buf *bp, *rabp;
  687         int i;
  688         int rv = 0, readwait = 0;
  689 
  690         *bpp = bp = getblk(vp, blkno, size, 0, 0);
  691 
  692         /* if not found in cache, do some I/O */
  693         if ((bp->b_flags & B_CACHE) == 0) {
  694                 if (curthread != PCPU_GET(idlethread))
  695                         curthread->td_proc->p_stats->p_ru.ru_inblock++;
  696                 bp->b_iocmd = BIO_READ;
  697                 bp->b_flags &= ~B_INVAL;
  698                 bp->b_ioflags &= ~BIO_ERROR;
  699                 if (bp->b_rcred == NOCRED && cred != NOCRED)
  700                         bp->b_rcred = crhold(cred);
  701                 vfs_busy_pages(bp, 0);
  702                 VOP_STRATEGY(vp, bp);
  703                 ++readwait;
  704         }
  705 
  706         for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
  707                 if (inmem(vp, *rablkno))
  708                         continue;
  709                 rabp = getblk(vp, *rablkno, *rabsize, 0, 0);
  710 
  711                 if ((rabp->b_flags & B_CACHE) == 0) {
  712                         if (curthread != PCPU_GET(idlethread))
  713                                 curthread->td_proc->p_stats->p_ru.ru_inblock++;
  714                         rabp->b_flags |= B_ASYNC;
  715                         rabp->b_flags &= ~B_INVAL;
  716                         rabp->b_ioflags &= ~BIO_ERROR;
  717                         rabp->b_iocmd = BIO_READ;
  718                         if (rabp->b_rcred == NOCRED && cred != NOCRED)
  719                                 rabp->b_rcred = crhold(cred);
  720                         vfs_busy_pages(rabp, 0);
  721                         BUF_KERNPROC(rabp);
  722                         VOP_STRATEGY(vp, rabp);
  723                 } else {
  724                         brelse(rabp);
  725                 }
  726         }
  727 
  728         if (readwait) {
  729                 rv = bufwait(bp);
  730         }
  731         return (rv);
  732 }
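
For context, a schematic caller of bread() in the style of a filesystem read
path; the function is hypothetical and error handling is reduced to the
essentials.

static int
example_read_block(struct vnode *vp, daddr_t lbn, int size, struct ucred *cred)
{
        struct buf *bp;
        int error;

        error = bread(vp, lbn, size, cred, &bp);
        if (error) {
                brelse(bp);     /* a buffer is handed back even on error */
                return (error);
        }
        /* ... consume bp->b_data while the buffer is held ... */
        bqrelse(bp);            /* release it, keeping the contents cached */
        return (0);
}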
  733 
  734 /*
  735  * Write, release buffer on completion.  (Done by iodone
  736  * if async).  Do not bother writing anything if the buffer
  737  * is invalid.
  738  *
  739  * Note that we set B_CACHE here, indicating that buffer is
  740  * fully valid and thus cacheable.  This is true even of NFS
  741  * now so we set it generally.  This could be set either here 
  742  * or in biodone() since the I/O is synchronous.  We put it
  743  * here.
  744  */
  745 
  746 int
  747 bwrite(struct buf * bp)
  748 {
  749         int oldflags, s;
  750         struct buf *newbp;
  751 
  752         if (bp->b_flags & B_INVAL) {
  753                 brelse(bp);
  754                 return (0);
  755         }
  756 
  757         oldflags = bp->b_flags;
  758 
  759         if (BUF_REFCNT(bp) == 0)
  760                 panic("bwrite: buffer is not busy???");
  761         s = splbio();
  762         /*
  763          * If a background write is already in progress, delay
  764          * writing this block if it is asynchronous. Otherwise
  765          * wait for the background write to complete.
  766          */
  767         if (bp->b_xflags & BX_BKGRDINPROG) {
  768                 if (bp->b_flags & B_ASYNC) {
  769                         splx(s);
  770                         bdwrite(bp);
  771                         return (0);
  772                 }
  773                 bp->b_xflags |= BX_BKGRDWAIT;
  774                 tsleep(&bp->b_xflags, PRIBIO, "bwrbg", 0);
  775                 if (bp->b_xflags & BX_BKGRDINPROG)
  776                         panic("bwrite: still writing");
  777         }
  778 
  779         /* Mark the buffer clean */
  780         bundirty(bp);
  781 
  782         /*
  783          * If this buffer is marked for background writing and we
  784          * do not have to wait for it, make a copy and write the
  785          * copy so as to leave this buffer ready for further use.
  786          *
  787          * This optimization eats a lot of memory.  If we have a page
  788          * or buffer shortfall we can't do it.
  789          */
  790         if (dobkgrdwrite && (bp->b_xflags & BX_BKGRDWRITE) && 
  791             (bp->b_flags & B_ASYNC) &&
  792             !vm_page_count_severe() &&
  793             !buf_dirty_count_severe()) {
  794                 if (bp->b_iodone != NULL) {
  795                         printf("bp->b_iodone = %p\n", bp->b_iodone);
  796                         panic("bwrite: need chained iodone");
  797                 }
  798 
  799                 /* get a new block */
  800                 newbp = geteblk(bp->b_bufsize);
  801 
  802                 /*
  803                  * set it to be identical to the old block.  We have to
  804                  * set b_lblkno and BKGRDMARKER before calling bgetvp()
  805                  * to avoid confusing the splay tree and gbincore().
  806                  */
  807                 memcpy(newbp->b_data, bp->b_data, bp->b_bufsize);
  808                 newbp->b_lblkno = bp->b_lblkno;
  809                 newbp->b_xflags |= BX_BKGRDMARKER;
  810                 bgetvp(bp->b_vp, newbp);
  811                 newbp->b_blkno = bp->b_blkno;
  812                 newbp->b_offset = bp->b_offset;
  813                 newbp->b_iodone = vfs_backgroundwritedone;
  814                 newbp->b_flags |= B_ASYNC;
  815                 newbp->b_flags &= ~B_INVAL;
  816 
  817                 /* move over the dependencies */
  818                 if (LIST_FIRST(&bp->b_dep) != NULL)
  819                         buf_movedeps(bp, newbp);
  820 
  821                 /*
  822                  * Initiate write on the copy, release the original to
  823                  * the B_LOCKED queue so that it cannot go away until
  824                  * the background write completes. If not locked it could go
  825                  * away and then be reconstituted while it was being written.
  826                  * If the reconstituted buffer were written, we could end up
  827                  * with two background copies being written at the same time.
  828                  */
  829                 bp->b_xflags |= BX_BKGRDINPROG;
  830                 bp->b_flags |= B_LOCKED;
  831                 bqrelse(bp);
  832                 bp = newbp;
  833         }
  834 
  835         bp->b_flags &= ~B_DONE;
  836         bp->b_ioflags &= ~BIO_ERROR;
  837         bp->b_flags |= B_WRITEINPROG | B_CACHE;
  838         bp->b_iocmd = BIO_WRITE;
  839 
  840         VI_LOCK(bp->b_vp);
  841         bp->b_vp->v_numoutput++;
  842         VI_UNLOCK(bp->b_vp);
  843         vfs_busy_pages(bp, 1);
  844 
  845         /*
  846          * Normal bwrites pipeline writes
  847          */
  848         bp->b_runningbufspace = bp->b_bufsize;
  849         runningbufspace += bp->b_runningbufspace;
  850 
  851         if (curthread != PCPU_GET(idlethread))
  852                 curthread->td_proc->p_stats->p_ru.ru_oublock++;
  853         splx(s);
  854         if (oldflags & B_ASYNC)
  855                 BUF_KERNPROC(bp);
  856         BUF_STRATEGY(bp);
  857 
  858         if ((oldflags & B_ASYNC) == 0) {
  859                 int rtval = bufwait(bp);
  860                 brelse(bp);
  861                 return (rtval);
  862         } else if ((oldflags & B_NOWDRAIN) == 0) {
  863                 /*
  864                  * don't allow the async write to saturate the I/O
  865                  * system.  Deadlocks can occur only if a device strategy
  866                  * routine (like in MD) turns around and issues another
  867                  * high-level write, in which case B_NOWDRAIN is expected
  868                  * to be set.  Otherwise we will not deadlock here because
  869                  * we are blocking waiting for I/O that is already in-progress
  870                  * to complete.
  871                  */
  872                 waitrunningbufspace();
  873         }
  874 
  875         return (0);
  876 }
  877 
  878 /*
  879  * Complete a background write started from bwrite.
  880  */
  881 static void
  882 vfs_backgroundwritedone(bp)
  883         struct buf *bp;
  884 {
  885         struct buf *origbp;
  886 
  887         /*
  888          * Find the original buffer that we are writing.
  889          */
  890         VI_LOCK(bp->b_vp);
  891         if ((origbp = gbincore(bp->b_vp, bp->b_lblkno)) == NULL)
  892                 panic("backgroundwritedone: lost buffer");
  893         VI_UNLOCK(bp->b_vp);
  894         /*
  895          * Process dependencies then return any unfinished ones.
  896          */
  897         if (LIST_FIRST(&bp->b_dep) != NULL)
  898                 buf_complete(bp);
  899         if (LIST_FIRST(&bp->b_dep) != NULL)
  900                 buf_movedeps(bp, origbp);
  901         /*
  902          * Clear the BX_BKGRDINPROG flag in the original buffer
  903          * and awaken it if it is waiting for the write to complete.
  904          * If BX_BKGRDINPROG is not set in the original buffer it must
  905          * have been released and re-instantiated - which is not legal.
  906          */
  907         KASSERT((origbp->b_xflags & BX_BKGRDINPROG),
  908             ("backgroundwritedone: lost buffer2"));
  909         origbp->b_xflags &= ~BX_BKGRDINPROG;
  910         if (origbp->b_xflags & BX_BKGRDWAIT) {
  911                 origbp->b_xflags &= ~BX_BKGRDWAIT;
  912                 wakeup(&origbp->b_xflags);
  913         }
  914         /*
  915          * Clear the B_LOCKED flag and remove it from the locked
  916          * queue if it currently resides there.
  917          */
  918         origbp->b_flags &= ~B_LOCKED;
  919         if (BUF_LOCK(origbp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
  920                 bremfree(origbp);
  921                 bqrelse(origbp);
  922         }
  923         /*
  924          * This buffer is marked B_NOCACHE, so when it is released
  925          * by biodone, it will be tossed. We mark it with BIO_READ
  926          * to avoid biodone doing a second vwakeup.
  927          */
  928         bp->b_flags |= B_NOCACHE;
  929         bp->b_iocmd = BIO_READ;
  930         bp->b_flags &= ~(B_CACHE | B_DONE);
  931         bp->b_iodone = 0;
  932         bufdone(bp);
  933 }
  934 
  935 /*
  936  * Delayed write. (Buffer is marked dirty).  Do not bother writing
  937  * anything if the buffer is marked invalid.
  938  *
  939  * Note that since the buffer must be completely valid, we can safely
  940  * set B_CACHE.  In fact, we have to set B_CACHE here rather then in
  941  * biodone() in order to prevent getblk from writing the buffer
  942  * out synchronously.
  943  */
  944 void
  945 bdwrite(struct buf * bp)
  946 {
  947         GIANT_REQUIRED;
  948 
  949         if (BUF_REFCNT(bp) == 0)
  950                 panic("bdwrite: buffer is not busy");
  951 
  952         if (bp->b_flags & B_INVAL) {
  953                 brelse(bp);
  954                 return;
  955         }
  956         bdirty(bp);
  957 
  958         /*
  959          * Set B_CACHE, indicating that the buffer is fully valid.  This is
  960          * true even of NFS now.
  961          */
  962         bp->b_flags |= B_CACHE;
  963 
  964         /*
  965          * This bmap keeps the system from needing to do the bmap later,
  966          * perhaps when the system is attempting to do a sync.  Since it
  967  * is likely that the indirect block -- or whatever other data structure
  968  * the filesystem needs -- is still in memory now, it is a good
  969          * thing to do this.  Note also, that if the pageout daemon is
  970          * requesting a sync -- there might not be enough memory to do
  971          * the bmap then...  So, this is important to do.
  972          */
  973         if (bp->b_lblkno == bp->b_blkno) {
  974                 VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
  975         }
  976 
  977         /*
  978          * Set the *dirty* buffer range based upon the VM system dirty pages.
  979          */
  980         vfs_setdirty(bp);
  981 
  982         /*
  983          * We need to do this here to satisfy the vnode_pager and the
  984          * pageout daemon, so that it thinks that the pages have been
  985          * "cleaned".  Note that since the pages are in a delayed write
  986          * buffer -- the VFS layer "will" see that the pages get written
  987          * out on the next sync, or perhaps the cluster will be completed.
  988          */
  989         vfs_clean_pages(bp);
  990         bqrelse(bp);
  991 
  992         /*
  993          * Wakeup the buffer flushing daemon if we have a lot of dirty
  994          * buffers (midpoint between our recovery point and our stall
  995          * point).
  996          */
  997         bd_wakeup((lodirtybuffers + hidirtybuffers) / 2);
  998 
  999         /*
 1000          * note: we cannot initiate I/O from a bdwrite even if we wanted to,
 1001          * due to the softdep code.
 1002          */
 1003 }
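
A matching sketch of the delayed-write pattern described above, again with a
hypothetical caller: read the block through the cache, modify it in place, and
let bdwrite() mark it dirty to be flushed later.

static int
example_modify_block(struct vnode *vp, daddr_t lbn, int size,
    struct ucred *cred, int off, char byte)
{
        struct buf *bp;
        int error;

        error = bread(vp, lbn, size, cred, &bp);
        if (error) {
                brelse(bp);
                return (error);
        }
        bp->b_data[off] = byte;         /* update the cached copy */
        bdwrite(bp);                    /* mark dirty and release; written later */
        return (0);
}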
 1004 
 1005 /*
 1006  *      bdirty:
 1007  *
 1008  *      Turn buffer into delayed write request.  We must clear BIO_READ and
 1009  *      B_RELBUF, and we must set B_DELWRI.  We reassign the buffer to 
 1010  *      itself to properly update it in the dirty/clean lists.  We mark it
 1011  *      B_DONE to ensure that any asynchronization of the buffer properly
 1012  *      clears B_DONE ( else a panic will occur later ).  
 1013  *
 1014  *      bdirty() is kinda like bdwrite() - we have to clear B_INVAL which
 1015  *      might have been set pre-getblk().  Unlike bwrite/bdwrite, bdirty()
 1016  *      should only be called if the buffer is known-good.
 1017  *
 1018  *      Since the buffer is not on a queue, we do not update the numfreebuffers
 1019  *      count.
 1020  *
 1021  *      Must be called at splbio().
 1022  *      The buffer must be on QUEUE_NONE.
 1023  */
 1024 void
 1025 bdirty(bp)
 1026         struct buf *bp;
 1027 {
 1028         KASSERT(bp->b_qindex == QUEUE_NONE,
 1029             ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
 1030         bp->b_flags &= ~(B_RELBUF);
 1031         bp->b_iocmd = BIO_WRITE;
 1032 
 1033         if ((bp->b_flags & B_DELWRI) == 0) {
 1034                 bp->b_flags |= B_DONE | B_DELWRI;
 1035                 reassignbuf(bp, bp->b_vp);
 1036                 ++numdirtybuffers;
 1037                 bd_wakeup((lodirtybuffers + hidirtybuffers) / 2);
 1038         }
 1039 }
 1040 
 1041 /*
 1042  *      bundirty:
 1043  *
 1044  *      Clear B_DELWRI for buffer.
 1045  *
 1046  *      Since the buffer is not on a queue, we do not update the numfreebuffers
 1047  *      count.
 1048  *      
 1049  *      Must be called at splbio().
 1050  *      The buffer must be on QUEUE_NONE.
 1051  */
 1052 
 1053 void
 1054 bundirty(bp)
 1055         struct buf *bp;
 1056 {
 1057         KASSERT(bp->b_qindex == QUEUE_NONE,
 1058             ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex));
 1059 
 1060         if (bp->b_flags & B_DELWRI) {
 1061                 bp->b_flags &= ~B_DELWRI;
 1062                 reassignbuf(bp, bp->b_vp);
 1063                 --numdirtybuffers;
 1064                 numdirtywakeup(lodirtybuffers);
 1065         }
 1066         /*
 1067          * Since it is now being written, we can clear its deferred write flag.
 1068          */
 1069         bp->b_flags &= ~B_DEFERRED;
 1070 }
 1071 
 1072 /*
 1073  *      bawrite:
 1074  *
 1075  *      Asynchronous write.  Start output on a buffer, but do not wait for
 1076  *      it to complete.  The buffer is released when the output completes.
 1077  *
 1078  *      bwrite() ( or the VOP routine anyway ) is responsible for handling 
 1079  *      B_INVAL buffers.  Not us.
 1080  */
 1081 void
 1082 bawrite(struct buf * bp)
 1083 {
 1084         bp->b_flags |= B_ASYNC;
 1085         (void) BUF_WRITE(bp);
 1086 }
 1087 
 1088 /*
 1089  *      bwillwrite:
 1090  *
 1091  *      Called prior to the locking of any vnodes when we are expecting to
 1092  *      write.  We do not want to starve the buffer cache with too many
 1093  *      dirty buffers so we block here.  By blocking prior to the locking
 1094  *      of any vnodes we attempt to avoid the situation where a locked vnode
 1095  *      prevents the various system daemons from flushing related buffers.
 1096  */
 1097 
 1098 void
 1099 bwillwrite(void)
 1100 {
 1101         if (numdirtybuffers >= hidirtybuffers) {
 1102                 int s;
 1103 
 1104                 mtx_lock(&Giant);
 1105                 s = splbio();
 1106                 while (numdirtybuffers >= hidirtybuffers) {
 1107                         bd_wakeup(1);
 1108                         needsbuffer |= VFS_BIO_NEED_DIRTYFLUSH;
 1109                         tsleep(&needsbuffer, (PRIBIO + 4), "flswai", 0);
 1110                 }
 1111                 splx(s);
 1112                 mtx_unlock(&Giant);
 1113         }
 1114 }
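
A sketch of the calling convention described above, modeled loosely on the
generic file write path; the function is hypothetical and the locking details
are elided.

static void
example_write_prologue(struct vnode *vp)
{
        /*
         * Throttle on dirty buffers before any vnode lock is taken, so the
         * flushing daemons are never stalled behind the lock this caller is
         * about to acquire.
         */
        if (vp->v_type == VREG)
                bwillwrite();

        /* ... vn_lock(), VOP_WRITE(), VOP_UNLOCK() as usual ... */
}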
 1115 
 1116 /*
 1117  * Return true if we have too many dirty buffers.
 1118  */
 1119 int
 1120 buf_dirty_count_severe(void)
 1121 {
 1122         return(numdirtybuffers >= hidirtybuffers);
 1123 }
 1124 
 1125 /*
 1126  *      brelse:
 1127  *
 1128  *      Release a busy buffer and, if requested, free its resources.  The
 1129  *      buffer will be stashed in the appropriate bufqueue[] allowing it
 1130  *      to be accessed later as a cache entity or reused for other purposes.
 1131  */
 1132 void
 1133 brelse(struct buf * bp)
 1134 {
 1135         int s;
 1136 
 1137         GIANT_REQUIRED;
 1138 
 1139         KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
 1140             ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
 1141 
 1142         s = splbio();
 1143 
 1144         if (bp->b_flags & B_LOCKED)
 1145                 bp->b_ioflags &= ~BIO_ERROR;
 1146 
 1147         if (bp->b_iocmd == BIO_WRITE &&
 1148             (bp->b_ioflags & BIO_ERROR) &&
 1149             !(bp->b_flags & B_INVAL)) {
 1150                 /*
 1151                  * Failed write, redirty.  Must clear BIO_ERROR to prevent
 1152                  * pages from being scrapped.  If B_INVAL is set then
 1153                  * this case is not run and the next case is run to 
 1154                  * destroy the buffer.  B_INVAL can occur if the buffer
 1155                  * is outside the range supported by the underlying device.
 1156                  */
 1157                 bp->b_ioflags &= ~BIO_ERROR;
 1158                 bdirty(bp);
 1159         } else if ((bp->b_flags & (B_NOCACHE | B_INVAL)) ||
 1160             (bp->b_ioflags & BIO_ERROR) ||
 1161             bp->b_iocmd == BIO_DELETE || (bp->b_bufsize <= 0)) {
 1162                 /*
 1163                  * Either a failed I/O or we were asked to free or not
 1164                  * cache the buffer.
 1165                  */
 1166                 bp->b_flags |= B_INVAL;
 1167                 if (LIST_FIRST(&bp->b_dep) != NULL)
 1168                         buf_deallocate(bp);
 1169                 if (bp->b_flags & B_DELWRI) {
 1170                         --numdirtybuffers;
 1171                         numdirtywakeup(lodirtybuffers);
 1172                 }
 1173                 bp->b_flags &= ~(B_DELWRI | B_CACHE);
 1174                 if ((bp->b_flags & B_VMIO) == 0) {
 1175                         if (bp->b_bufsize)
 1176                                 allocbuf(bp, 0);
 1177                         if (bp->b_vp)
 1178                                 brelvp(bp);
 1179                 }
 1180         }
 1181 
 1182         /*
 1183          * We must clear B_RELBUF if B_DELWRI is set.  If vfs_vmio_release() 
 1184          * is called with B_DELWRI set, the underlying pages may wind up
 1185          * getting freed causing a previous write (bdwrite()) to get 'lost'
 1186          * because pages associated with a B_DELWRI bp are marked clean.
 1187          * 
 1188          * We still allow the B_INVAL case to call vfs_vmio_release(), even
 1189          * if B_DELWRI is set.
 1190          *
 1191          * If B_DELWRI is not set we may have to set B_RELBUF if we are low
 1192          * on pages to return pages to the VM page queues.
 1193          */
 1194         if (bp->b_flags & B_DELWRI)
 1195                 bp->b_flags &= ~B_RELBUF;
 1196         else if (vm_page_count_severe() && !(bp->b_xflags & BX_BKGRDINPROG))
 1197                 bp->b_flags |= B_RELBUF;
 1198 
 1199         /*
 1200          * VMIO buffer rundown.  It is not very necessary to keep a VMIO buffer
 1201  * constituted, not even NFS buffers now.  Two flags affect this.  If
 1202          * B_INVAL, the struct buf is invalidated but the VM object is kept
 1203          * around ( i.e. so it is trivial to reconstitute the buffer later ).
 1204          *
 1205          * If BIO_ERROR or B_NOCACHE is set, pages in the VM object will be
 1206          * invalidated.  BIO_ERROR cannot be set for a failed write unless the
 1207          * buffer is also B_INVAL because it hits the re-dirtying code above.
 1208          *
 1209          * Normally we can do this whether a buffer is B_DELWRI or not.  If
 1210          * the buffer is an NFS buffer, it is tracking piecemeal writes or
 1211          * the commit state and we cannot afford to lose the buffer. If the
 1212          * buffer has a background write in progress, we need to keep it
 1213          * around to prevent it from being reconstituted and starting a second
 1214          * background write.
 1215          */
 1216         if ((bp->b_flags & B_VMIO)
 1217             && !(bp->b_vp->v_mount != NULL &&
 1218                  (bp->b_vp->v_mount->mnt_vfc->vfc_flags & VFCF_NETWORK) != 0 &&
 1219                  !vn_isdisk(bp->b_vp, NULL) &&
 1220                  (bp->b_flags & B_DELWRI))
 1221             ) {
 1222 
 1223                 int i, j, resid;
 1224                 vm_page_t m;
 1225                 off_t foff;
 1226                 vm_pindex_t poff;
 1227                 vm_object_t obj;
 1228                 struct vnode *vp;
 1229 
 1230                 vp = bp->b_vp;
 1231                 obj = bp->b_object;
 1232 
 1233                 /*
 1234                  * Get the base offset and length of the buffer.  Note that 
 1235                  * in the VMIO case if the buffer block size is not
 1236                  * page-aligned then b_data pointer may not be page-aligned.
 1237                  * But our b_pages[] array *IS* page aligned.
 1238                  *
 1239                  * block sizes less than DEV_BSIZE (usually 512) are not
 1240                  * supported due to the page granularity bits (m->valid,
 1241                  * m->dirty, etc...). 
 1242                  *
 1243                  * See man buf(9) for more information
 1244                  */
 1245                 resid = bp->b_bufsize;
 1246                 foff = bp->b_offset;
 1247 
 1248                 for (i = 0; i < bp->b_npages; i++) {
 1249                         int had_bogus = 0;
 1250 
 1251                         m = bp->b_pages[i];
 1252                         vm_page_flag_clear(m, PG_ZERO);
 1253 
 1254                         /*
 1255                          * If we hit a bogus page, fixup *all* the bogus pages
 1256                          * now.
 1257                          */
 1258                         if (m == bogus_page) {
 1259                                 poff = OFF_TO_IDX(bp->b_offset);
 1260                                 had_bogus = 1;
 1261 
 1262                                 for (j = i; j < bp->b_npages; j++) {
 1263                                         vm_page_t mtmp;
 1264                                         mtmp = bp->b_pages[j];
 1265                                         if (mtmp == bogus_page) {
 1266                                                 mtmp = vm_page_lookup(obj, poff + j);
 1267                                                 if (!mtmp) {
 1268                                                         panic("brelse: page missing\n");
 1269                                                 }
 1270                                                 bp->b_pages[j] = mtmp;
 1271                                         }
 1272                                 }
 1273 
 1274                                 if ((bp->b_flags & B_INVAL) == 0) {
 1275                                         pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
 1276                                 }
 1277                                 m = bp->b_pages[i];
 1278                         }
 1279                         if ((bp->b_flags & B_NOCACHE) || (bp->b_ioflags & BIO_ERROR)) {
 1280                                 int poffset = foff & PAGE_MASK;
 1281                                 int presid = resid > (PAGE_SIZE - poffset) ?
 1282                                         (PAGE_SIZE - poffset) : resid;
 1283 
 1284                                 KASSERT(presid >= 0, ("brelse: extra page"));
 1285                                 vm_page_set_invalid(m, poffset, presid);
 1286                                 if (had_bogus)
 1287                                         printf("avoided corruption bug in bogus_page/brelse code\n");
 1288                         }
 1289                         resid -= PAGE_SIZE - (foff & PAGE_MASK);
 1290                         foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
 1291                 }
 1292 
 1293                 if (bp->b_flags & (B_INVAL | B_RELBUF))
 1294                         vfs_vmio_release(bp);
 1295 
 1296         } else if (bp->b_flags & B_VMIO) {
 1297 
 1298                 if (bp->b_flags & (B_INVAL | B_RELBUF)) {
 1299                         vfs_vmio_release(bp);
 1300                 }
 1301 
 1302         }
 1303                         
 1304         if (bp->b_qindex != QUEUE_NONE)
 1305                 panic("brelse: free buffer onto another queue???");
 1306         if (BUF_REFCNT(bp) > 1) {
 1307                 /* do not release to free list */
 1308                 BUF_UNLOCK(bp);
 1309                 splx(s);
 1310                 return;
 1311         }
 1312 
 1313         /* enqueue */
 1314 
 1315         /* buffers with no memory */
 1316         if (bp->b_bufsize == 0) {
 1317                 bp->b_flags |= B_INVAL;
 1318                 bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA);
 1319                 if (bp->b_xflags & BX_BKGRDINPROG)
 1320                         panic("losing buffer 1");
 1321                 if (bp->b_kvasize) {
 1322                         bp->b_qindex = QUEUE_EMPTYKVA;
 1323                 } else {
 1324                         bp->b_qindex = QUEUE_EMPTY;
 1325                 }
 1326                 TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist);
 1327 #ifdef USE_BUFHASH
 1328                 LIST_REMOVE(bp, b_hash);
 1329                 LIST_INSERT_HEAD(&invalhash, bp, b_hash);
 1330 #endif
 1331                 bp->b_dev = NODEV;
 1332         /* buffers with junk contents */
 1333         } else if (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF) ||
 1334             (bp->b_ioflags & BIO_ERROR)) {
 1335                 bp->b_flags |= B_INVAL;
 1336                 bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA);
 1337                 if (bp->b_xflags & BX_BKGRDINPROG)
 1338                         panic("losing buffer 2");
 1339                 bp->b_qindex = QUEUE_CLEAN;
 1340                 TAILQ_INSERT_HEAD(&bufqueues[QUEUE_CLEAN], bp, b_freelist);
 1341 #ifdef USE_BUFHASH
 1342                 LIST_REMOVE(bp, b_hash);
 1343                 LIST_INSERT_HEAD(&invalhash, bp, b_hash);
 1344 #endif
 1345                 bp->b_dev = NODEV;
 1346 
 1347         /* buffers that are locked */
 1348         } else if (bp->b_flags & B_LOCKED) {
 1349                 bp->b_qindex = QUEUE_LOCKED;
 1350                 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
 1351 
 1352         /* remaining buffers */
 1353         } else {
 1354                 if (bp->b_flags & B_DELWRI)
 1355                         bp->b_qindex = QUEUE_DIRTY;
 1356                 else
 1357                         bp->b_qindex = QUEUE_CLEAN;
 1358                 if (bp->b_flags & B_AGE)
 1359                         TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist);
 1360                 else
 1361                         TAILQ_INSERT_TAIL(&bufqueues[bp->b_qindex], bp, b_freelist);
 1362         }
 1363 
 1364         /*
 1365          * If B_INVAL and B_DELWRI is set, clear B_DELWRI.  We have already
 1366          * placed the buffer on the correct queue.  We must also disassociate
 1367          * the device and vnode for a B_INVAL buffer so gbincore() doesn't
 1368          * find it.
 1369          */
 1370         if (bp->b_flags & B_INVAL) {
 1371                 if (bp->b_flags & B_DELWRI)
 1372                         bundirty(bp);
 1373                 if (bp->b_vp)
 1374                         brelvp(bp);
 1375         }
 1376 
 1377         /*
 1378          * Fixup numfreebuffers count.  The bp is on an appropriate queue
 1379          * unless locked.  We then bump numfreebuffers if it is not B_DELWRI.
 1380          * We've already handled the B_INVAL case ( B_DELWRI will be clear
 1381          * if B_INVAL is set ).
 1382          */
 1383 
 1384         if ((bp->b_flags & B_LOCKED) == 0 && !(bp->b_flags & B_DELWRI))
 1385                 bufcountwakeup();
 1386 
 1387         /*
 1388          * Something we can maybe free or reuse
 1389          */
 1390         if (bp->b_bufsize || bp->b_kvasize)
 1391                 bufspacewakeup();
 1392 
 1393         /* unlock */
 1394         BUF_UNLOCK(bp);
 1395         bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF | 
 1396                         B_DIRECT | B_NOWDRAIN);
 1397         if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
 1398                 panic("brelse: not dirty");
 1399         splx(s);
 1400 }
 1401 
 1402 /*
 1403  * Release a buffer back to the appropriate queue but do not try to free
 1404  * it.  The buffer is expected to be used again soon.
 1405  *
 1406  * bqrelse() is used by bdwrite() to requeue a delayed write, and used by
 1407  * biodone() to requeue an async I/O on completion.  It is also used when
 1408  * known good buffers need to be requeued but we think we may need the data
 1409  * again soon.
 1410  *
 1411  * XXX we should be able to leave the B_RELBUF hint set on completion.
 1412  */
 1413 void
 1414 bqrelse(struct buf * bp)
 1415 {
 1416         int s;
 1417 
 1418         s = splbio();
 1419 
 1420         KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)), ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
 1421 
 1422         if (bp->b_qindex != QUEUE_NONE)
 1423                 panic("bqrelse: free buffer onto another queue???");
 1424         if (BUF_REFCNT(bp) > 1) {
 1425                 /* do not release to free list */
 1426                 BUF_UNLOCK(bp);
 1427                 splx(s);
 1428                 return;
 1429         }
 1430         if (bp->b_flags & B_LOCKED) {
 1431                 bp->b_ioflags &= ~BIO_ERROR;
 1432                 bp->b_qindex = QUEUE_LOCKED;
 1433                 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
 1434                 /* buffers with stale but valid contents */
 1435         } else if (bp->b_flags & B_DELWRI) {
 1436                 bp->b_qindex = QUEUE_DIRTY;
 1437                 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_DIRTY], bp, b_freelist);
 1438         } else if (vm_page_count_severe()) {
 1439                 /*
 1440                  * We are too low on memory; we have to try to free the
 1441                  * buffer (most importantly: the wired pages making up its
 1442                  * backing store) *now*.
 1443                  */
 1444                 splx(s);
 1445                 brelse(bp);
 1446                 return;
 1447         } else {
 1448                 bp->b_qindex = QUEUE_CLEAN;
 1449                 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_CLEAN], bp, b_freelist);
 1450         }
 1451 
 1452         if ((bp->b_flags & B_LOCKED) == 0 &&
 1453             ((bp->b_flags & B_INVAL) || !(bp->b_flags & B_DELWRI))) {
 1454                 bufcountwakeup();
 1455         }
 1456 
 1457         /*
 1458          * Something we can maybe free or reuse.
 1459          */
 1460         if (bp->b_bufsize && !(bp->b_flags & B_DELWRI))
 1461                 bufspacewakeup();
 1462 
 1463         /* unlock */
 1464         BUF_UNLOCK(bp);
 1465         bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
 1466         if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
 1467                 panic("bqrelse: not dirty");
 1468         splx(s);
 1469 }
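
/*
 * Illustrative sketch, not part of the original source: how a hypothetical
 * caller might choose between brelse() and bqrelse() once it is finished
 * with a buffer obtained from getblk().  The helper name and the
 * "reuse_soon" policy argument are assumptions made only for this example.
 */
#if 0
static void
example_release(struct buf *bp, int reuse_soon)
{
        if (reuse_soon) {
                /* Keep the contents cached; bqrelse() just requeues bp. */
                bqrelse(bp);
        } else {
                /* Hint that the data may be thrown out with the buffer. */
                bp->b_flags |= B_RELBUF;
                brelse(bp);
        }
}
#endif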
 1470 
 1471 /* Give pages used by the bp back to the VM system (where possible) */
 1472 static void
 1473 vfs_vmio_release(bp)
 1474         struct buf *bp;
 1475 {
 1476         int i;
 1477         vm_page_t m;
 1478 
 1479         GIANT_REQUIRED;
 1480         vm_page_lock_queues();
 1481         for (i = 0; i < bp->b_npages; i++) {
 1482                 m = bp->b_pages[i];
 1483                 bp->b_pages[i] = NULL;
 1484                 /*
 1485                  * In order to keep page LRU ordering consistent, put
 1486                  * everything on the inactive queue.
 1487                  */
 1488                 vm_page_unwire(m, 0);
 1489                 /*
 1490                  * We don't mess with busy pages, it is
 1491                  * the responsibility of the process that
 1492                  * busied the pages to deal with them.
 1493                  */
 1494                 if ((m->flags & PG_BUSY) || (m->busy != 0))
 1495                         continue;
 1496                         
 1497                 if (m->wire_count == 0) {
 1498                         vm_page_flag_clear(m, PG_ZERO);
 1499                         /*
 1500                          * Might as well free the page if we can and it has
 1501                          * no valid data.  We also free the page if the
 1502                          * buffer was used for direct I/O
 1503                          */
 1504                         if ((bp->b_flags & B_ASYNC) == 0 && !m->valid &&
 1505                             m->hold_count == 0) {
 1506                                 vm_page_busy(m);
 1507                                 pmap_remove_all(m);
 1508                                 vm_page_free(m);
 1509                         } else if (bp->b_flags & B_DIRECT) {
 1510                                 vm_page_try_to_free(m);
 1511                         } else if (vm_page_count_severe()) {
 1512                                 vm_page_try_to_cache(m);
 1513                         }
 1514                 }
 1515         }
 1516         vm_page_unlock_queues();
 1517         pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
 1518         
 1519         if (bp->b_bufsize) {
 1520                 bufspacewakeup();
 1521                 bp->b_bufsize = 0;
 1522         }
 1523         bp->b_npages = 0;
 1524         bp->b_flags &= ~B_VMIO;
 1525         if (bp->b_vp)
 1526                 brelvp(bp);
 1527 }
 1528 
 1529 #ifdef USE_BUFHASH
 1530 /*
 1531  * XXX MOVED TO VFS_SUBR.C
 1532  *
 1533  * Check to see if a block is currently memory resident.
 1534  */
 1535 struct buf *
 1536 gbincore(struct vnode * vp, daddr_t blkno)
 1537 {
 1538         struct buf *bp;
 1539         struct bufhashhdr *bh;
 1540 
 1541         bh = bufhash(vp, blkno);
 1542 
 1543         /* Search hash chain */
 1544         LIST_FOREACH(bp, bh, b_hash) {
 1545                 /* hit */
 1546                 if (bp->b_vp == vp && bp->b_lblkno == blkno &&
 1547                     (bp->b_flags & B_INVAL) == 0) {
 1548                         break;
 1549                 }
 1550         }
 1551         return (bp);
 1552 }
 1553 #endif
 1554 
 1555 /*
 1556  *      vfs_bio_awrite:
 1557  *
 1558  *      Implement clustered async writes for clearing out B_DELWRI buffers.
 1559  *      This is much better than the old way of writing only one buffer at
 1560  *      a time.  Note that we may not be presented with the buffers in the 
 1561  *      correct order, so we search for the cluster in both directions.
 1562  */
 1563 int
 1564 vfs_bio_awrite(struct buf * bp)
 1565 {
 1566         int i;
 1567         int j;
 1568         daddr_t lblkno = bp->b_lblkno;
 1569         struct vnode *vp = bp->b_vp;
 1570         int s;
 1571         int ncl;
 1572         struct buf *bpa;
 1573         int nwritten;
 1574         int size;
 1575         int maxcl;
 1576 
 1577         s = splbio();
 1578         /*
 1579          * right now we support clustered writing only to regular files.  If
 1580          * we find a clusterable block we could be in the middle of a cluster
 1581          * rather than at the beginning.
 1582          */
 1583         if ((vp->v_type == VREG) && 
 1584             (vp->v_mount != 0) && /* Only on nodes that have the size info */
 1585             (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
 1586 
 1587                 size = vp->v_mount->mnt_stat.f_iosize;
 1588                 maxcl = MAXPHYS / size;
 1589 
 1590                 VI_LOCK(vp);
 1591                 for (i = 1; i < maxcl; i++) {
 1592                         if ((bpa = gbincore(vp, lblkno + i)) &&
 1593                             BUF_REFCNT(bpa) == 0 &&
 1594                             ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
 1595                             (B_DELWRI | B_CLUSTEROK)) &&
 1596                             (bpa->b_bufsize == size)) {
 1597                                 if ((bpa->b_blkno == bpa->b_lblkno) ||
 1598                                     (bpa->b_blkno !=
 1599                                      bp->b_blkno + ((i * size) >> DEV_BSHIFT)))
 1600                                         break;
 1601                         } else {
 1602                                 break;
 1603                         }
 1604                 }
 1605                 for (j = 1; i + j <= maxcl && j <= lblkno; j++) {
 1606                         if ((bpa = gbincore(vp, lblkno - j)) &&
 1607                             BUF_REFCNT(bpa) == 0 &&
 1608                             ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
 1609                             (B_DELWRI | B_CLUSTEROK)) &&
 1610                             (bpa->b_bufsize == size)) {
 1611                                 if ((bpa->b_blkno == bpa->b_lblkno) ||
 1612                                     (bpa->b_blkno !=
 1613                                      bp->b_blkno - ((j * size) >> DEV_BSHIFT)))
 1614                                         break;
 1615                         } else {
 1616                                 break;
 1617                         }
 1618                 }
 1619                 VI_UNLOCK(vp);
 1620                 --j;
 1621                 ncl = i + j;
 1622                 /*
 1623                  * this is a possible cluster write
 1624                  */
 1625                 if (ncl != 1) {
 1626                         nwritten = cluster_wbuild(vp, size, lblkno - j, ncl);
 1627                         splx(s);
 1628                         return nwritten;
 1629                 }
 1630         }
 1631 
 1632         BUF_LOCK(bp, LK_EXCLUSIVE);
 1633         bremfree(bp);
 1634         bp->b_flags |= B_ASYNC;
 1635 
 1636         splx(s);
 1637         /*
 1638          * default (old) behavior, writing out only one block
 1639          *
 1640          * XXX returns b_bufsize instead of b_bcount for nwritten?
 1641          */
 1642         nwritten = bp->b_bufsize;
 1643         (void) BUF_WRITE(bp);
 1644 
 1645         return nwritten;
 1646 }
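
/*
 * Illustrative sketch, not part of the original source: the contiguity test
 * used by the forward scan loop above, factored into a predicate (the
 * backward loop applies the mirrored test with the offset subtracted).  A
 * candidate delayed-write buffer 'i' logical blocks ahead of 'bp' may join
 * the cluster only if it has a translated disk address (b_blkno !=
 * b_lblkno) and that address sits exactly i*size bytes, expressed in
 * DEV_BSIZE sectors, past bp's disk address.  The function name is an
 * assumption made only for this example.
 */
#if 0
static __inline int
example_cluster_contiguous(struct buf *bp, struct buf *bpa, int i, int size)
{
        return (bpa->b_blkno != bpa->b_lblkno &&
            bpa->b_blkno == bp->b_blkno + ((i * size) >> DEV_BSHIFT));
}
#endif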
 1647 
 1648 /*
 1649  *      getnewbuf:
 1650  *
 1651  *      Find and initialize a new buffer header, freeing up existing buffers 
 1652  *      in the bufqueues as necessary.  The new buffer is returned locked.
 1653  *
 1654  *      Important:  B_INVAL is not set.  If the caller wishes to throw the
 1655  *      buffer away, the caller must set B_INVAL prior to calling brelse().
 1656  *
 1657  *      We block if:
 1658  *              We have insufficient buffer headers
 1659  *              We have insufficient buffer space
 1660  *              buffer_map is too fragmented ( space reservation fails )
 1661  *              We have to flush dirty buffers ( but we try to avoid this )
 1662  *
 1663  *      To avoid VFS layer recursion we do not flush dirty buffers ourselves.
 1664  *      Instead we ask the buf daemon to do it for us.  We attempt to
 1665  *      avoid piecemeal wakeups of the pageout daemon.
 1666  */
 1667 
 1668 static struct buf *
 1669 getnewbuf(int slpflag, int slptimeo, int size, int maxsize)
 1670 {
 1671         struct buf *bp;
 1672         struct buf *nbp;
 1673         int defrag = 0;
 1674         int nqindex;
 1675         static int flushingbufs;
 1676 
 1677         GIANT_REQUIRED;
 1678 
 1679         /*
 1680          * We can't afford to block since we might be holding a vnode lock,
 1681          * which may prevent system daemons from running.  We deal with
 1682          * low-memory situations by proactively returning memory and running
 1683          * async I/O rather than sync I/O.
 1684          */
 1685 
 1686         ++getnewbufcalls;
 1687         --getnewbufrestarts;
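        /*
         * The pre-decrement above cancels the increment just past the label,
         * so only passes that actually jump back to "restart" are counted as
         * restarts.
         */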
 1688 restart:
 1689         ++getnewbufrestarts;
 1690 
 1691         /*
 1692          * Setup for scan.  If we do not have enough free buffers,
 1693          * we set up a degenerate case that immediately fails.  Note
 1694          * that if we are a specially marked process, we are allowed to
 1695          * dip into our reserves.
 1696          *
 1697          * The scanning sequence is nominally:  EMPTY->EMPTYKVA->CLEAN
 1698          *
 1699          * We start with EMPTYKVA.  If the list is empty we back up to EMPTY.
 1700          * However, there are a number of cases (defragging, reusing, ...)
 1701          * where we cannot back up.
 1702          */
 1703         nqindex = QUEUE_EMPTYKVA;
 1704         nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTYKVA]);
 1705 
 1706         if (nbp == NULL) {
 1707                 /*
 1708                  * If no EMPTYKVA buffers and we are either
 1709                  * defragging or reusing, locate a CLEAN buffer
 1710          * to free or reuse.  If bufspace usage is low
 1711                  * skip this step so we can allocate a new buffer.
 1712                  */
 1713                 if (defrag || bufspace >= lobufspace) {
 1714                         nqindex = QUEUE_CLEAN;
 1715                         nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN]);
 1716                 }
 1717 
 1718                 /*
 1719                  * If we could not find or were not allowed to reuse a
 1720                  * CLEAN buffer, check to see if it is ok to use an EMPTY
 1721                  * buffer.  We can only use an EMPTY buffer if allocating
 1722                  * its KVA would not otherwise run us out of buffer space.
 1723                  */
 1724                 if (nbp == NULL && defrag == 0 &&
 1725                     bufspace + maxsize < hibufspace) {
 1726                         nqindex = QUEUE_EMPTY;
 1727                         nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]);
 1728                 }
 1729         }
 1730 
 1731         /*
 1732          * Run scan, possibly freeing data and/or kva mappings on the fly
 1733          * depending.
 1734          */
 1735 
 1736         while ((bp = nbp) != NULL) {
 1737                 int qindex = nqindex;
 1738 
 1739                 /*
 1740                  * Calculate next bp ( we can only use it if we do not block
 1741                  * or do other fancy things ).
 1742                  */
 1743                 if ((nbp = TAILQ_NEXT(bp, b_freelist)) == NULL) {
 1744                         switch(qindex) {
 1745                         case QUEUE_EMPTY:
 1746                                 nqindex = QUEUE_EMPTYKVA;
 1747                                 if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTYKVA])))
 1748                                         break;
 1749                                 /* FALLTHROUGH */
 1750                         case QUEUE_EMPTYKVA:
 1751                                 nqindex = QUEUE_CLEAN;
 1752                                 if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN])))
 1753                                         break;
 1754                                 /* FALLTHROUGH */
 1755                         case QUEUE_CLEAN:
 1756                                 /*
 1757                                  * nbp is NULL. 
 1758                                  */
 1759                                 break;
 1760                         }
 1761                 }
 1762 
 1763                 /*
 1764                  * Sanity Checks
 1765                  */
 1766                 KASSERT(bp->b_qindex == qindex, ("getnewbuf: inconsistent queue %d bp %p", qindex, bp));
 1767 
 1768                 /*
 1769                  * Note: we no longer distinguish between VMIO and non-VMIO
 1770                  * buffers.
 1771                  */
 1772 
 1773                 KASSERT((bp->b_flags & B_DELWRI) == 0, ("delwri buffer %p found in queue %d", bp, qindex));
 1774 
 1775                 /*
 1776                  * If we are defragging then we need a buffer with 
 1777                  * b_kvasize != 0.  XXX this situation should no longer
 1778                  * occur, if defrag is non-zero the buffer's b_kvasize
 1779                  * should also be non-zero at this point.  XXX
 1780                  */
 1781                 if (defrag && bp->b_kvasize == 0) {
 1782                         printf("Warning: defrag empty buffer %p\n", bp);
 1783                         continue;
 1784                 }
 1785 
 1786                 /*
 1787                  * Start freeing the bp.  This is somewhat involved.  nbp
 1788                  * remains valid only for QUEUE_EMPTY[KVA] bp's.
 1789                  */
 1790 
 1791                 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) != 0)
 1792                         panic("getnewbuf: locked buf");
 1793                 bremfree(bp);
 1794 
 1795                 if (qindex == QUEUE_CLEAN) {
 1796                         if (bp->b_flags & B_VMIO) {
 1797                                 bp->b_flags &= ~B_ASYNC;
 1798                                 vfs_vmio_release(bp);
 1799                         }
 1800                         if (bp->b_vp)
 1801                                 brelvp(bp);
 1802                 }
 1803 
 1804                 /*
 1805                  * NOTE:  nbp is now entirely invalid.  We can only restart
 1806                  * the scan from this point on.
 1807                  *
 1808                  * Get the rest of the buffer freed up.  b_kva* is still
 1809                  * valid after this operation.
 1810                  */
 1811 
 1812                 if (bp->b_rcred != NOCRED) {
 1813                         crfree(bp->b_rcred);
 1814                         bp->b_rcred = NOCRED;
 1815                 }
 1816                 if (bp->b_wcred != NOCRED) {
 1817                         crfree(bp->b_wcred);
 1818                         bp->b_wcred = NOCRED;
 1819                 }
 1820                 if (LIST_FIRST(&bp->b_dep) != NULL)
 1821                         buf_deallocate(bp);
 1822                 if (bp->b_xflags & BX_BKGRDINPROG)
 1823                         panic("losing buffer 3");
 1824 #ifdef USE_BUFHASH
 1825                 LIST_REMOVE(bp, b_hash);
 1826                 LIST_INSERT_HEAD(&invalhash, bp, b_hash);
 1827 #endif
 1828 
 1829                 if (bp->b_bufsize)
 1830                         allocbuf(bp, 0);
 1831 
 1832                 bp->b_flags = 0;
 1833                 bp->b_ioflags = 0;
 1834                 bp->b_xflags = 0;
 1835                 bp->b_dev = NODEV;
 1836                 bp->b_vp = NULL;
 1837                 bp->b_blkno = bp->b_lblkno = 0;
 1838                 bp->b_offset = NOOFFSET;
 1839                 bp->b_iodone = 0;
 1840                 bp->b_error = 0;
 1841                 bp->b_resid = 0;
 1842                 bp->b_bcount = 0;
 1843                 bp->b_npages = 0;
 1844                 bp->b_dirtyoff = bp->b_dirtyend = 0;
 1845                 bp->b_magic = B_MAGIC_BIO;
 1846                 bp->b_op = &buf_ops_bio;
 1847                 bp->b_object = NULL;
 1848 
 1849                 LIST_INIT(&bp->b_dep);
 1850 
 1851                 /*
 1852                  * If we are defragging then free the buffer.
 1853                  */
 1854                 if (defrag) {
 1855                         bp->b_flags |= B_INVAL;
 1856                         bfreekva(bp);
 1857                         brelse(bp);
 1858                         defrag = 0;
 1859                         goto restart;
 1860                 }
 1861 
 1862                 /*
 1863          * If we are overcommitted then recover the buffer and its
 1864                  * KVM space.  This occurs in rare situations when multiple
 1865                  * processes are blocked in getnewbuf() or allocbuf().
 1866                  */
 1867                 if (bufspace >= hibufspace)
 1868                         flushingbufs = 1;
 1869                 if (flushingbufs && bp->b_kvasize != 0) {
 1870                         bp->b_flags |= B_INVAL;
 1871                         bfreekva(bp);
 1872                         brelse(bp);
 1873                         goto restart;
 1874                 }
 1875                 if (bufspace < lobufspace)
 1876                         flushingbufs = 0;
 1877                 break;
 1878         }
 1879 
 1880         /*
 1881          * If we exhausted our list, sleep as appropriate.  We may have to
 1882          * wakeup various daemons and write out some dirty buffers.
 1883          *
 1884          * Generally we are sleeping due to insufficient buffer space.
 1885          */
 1886 
 1887         if (bp == NULL) {
 1888                 int flags;
 1889                 char *waitmsg;
 1890 
 1891                 if (defrag) {
 1892                         flags = VFS_BIO_NEED_BUFSPACE;
 1893                         waitmsg = "nbufkv";
 1894                 } else if (bufspace >= hibufspace) {
 1895                         waitmsg = "nbufbs";
 1896                         flags = VFS_BIO_NEED_BUFSPACE;
 1897                 } else {
 1898                         waitmsg = "newbuf";
 1899                         flags = VFS_BIO_NEED_ANY;
 1900                 }
 1901 
 1902                 bd_speedup();   /* heeeelp */
 1903 
 1904                 needsbuffer |= flags;
 1905                 while (needsbuffer & flags) {
 1906                         if (tsleep(&needsbuffer, (PRIBIO + 4) | slpflag,
 1907                             waitmsg, slptimeo))
 1908                                 return (NULL);
 1909                 }
 1910         } else {
 1911                 /*
 1912                  * We finally have a valid bp.  We aren't quite out of the
 1913                  * woods, we still have to reserve kva space.  In order
 1914                  * to keep fragmentation sane we only allocate kva in
 1915                  * BKVASIZE chunks.
 1916                  */
 1917                 maxsize = (maxsize + BKVAMASK) & ~BKVAMASK;
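                /*
                 * Example (illustrative, assuming the default BKVASIZE of
                 * 16384, so BKVAMASK == 0x3fff): a 9000-byte request rounds
                 * up to 16384 and a 20000-byte request rounds up to 32768,
                 * keeping every mapping a whole number of BKVASIZE chunks.
                 */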
 1918 
 1919                 if (maxsize != bp->b_kvasize) {
 1920                         vm_offset_t addr = 0;
 1921 
 1922                         bfreekva(bp);
 1923 
 1924                         if (vm_map_findspace(buffer_map,
 1925                                 vm_map_min(buffer_map), maxsize, &addr)) {
 1926                                 /*
 1927                                  * Uh oh.  Buffer map is too fragmented.  We
 1928                                  * must defragment the map.
 1929                                  */
 1930                                 ++bufdefragcnt;
 1931                                 defrag = 1;
 1932                                 bp->b_flags |= B_INVAL;
 1933                                 brelse(bp);
 1934                                 goto restart;
 1935                         }
 1936                         if (addr) {
 1937                                 vm_map_insert(buffer_map, NULL, 0,
 1938                                         addr, addr + maxsize,
 1939                                         VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
 1940 
 1941                                 bp->b_kvabase = (caddr_t) addr;
 1942                                 bp->b_kvasize = maxsize;
 1943                                 bufspace += bp->b_kvasize;
 1944                                 ++bufreusecnt;
 1945                         }
 1946                 }
 1947                 bp->b_data = bp->b_kvabase;
 1948         }
 1949         return(bp);
 1950 }
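
/*
 * Illustrative sketch, not part of the original source: the caller contract
 * stated in the getnewbuf() comment above.  The returned buffer is locked
 * and not marked B_INVAL, so a caller that decides to throw it away must set
 * B_INVAL itself before brelse(), otherwise the stale contents would be
 * requeued as if they were valid.  The helper name is an assumption;
 * getblk() below does exactly this when it must discard a duplicate buffer.
 */
#if 0
static void
example_discard_new_buf(struct buf *bp)
{
        bp->b_flags |= B_INVAL;         /* throw the contents away */
        brelse(bp);                     /* lands on an EMPTY/CLEAN queue */
}
#endif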
 1951 
 1952 /*
 1953  *      buf_daemon:
 1954  *
 1955  *      buffer flushing daemon.  Buffers are normally flushed by the
 1956  *      update daemon but if it cannot keep up this process starts to
 1957  *      take the load in an attempt to prevent getnewbuf() from blocking.
 1958  */
 1959 
 1960 static struct proc *bufdaemonproc;
 1961 
 1962 static struct kproc_desc buf_kp = {
 1963         "bufdaemon",
 1964         buf_daemon,
 1965         &bufdaemonproc
 1966 };
 1967 SYSINIT(bufdaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, kproc_start, &buf_kp)
 1968 
 1969 static void
 1970 buf_daemon()
 1971 {
 1972         int s;
 1973 
 1974         mtx_lock(&Giant);
 1975 
 1976         /*
 1977          * This process needs to be suspended prior to shutdown sync.
 1978          */
 1979         EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, bufdaemonproc,
 1980             SHUTDOWN_PRI_LAST);
 1981 
 1982         /*
 1983          * This process is allowed to take the buffer cache to the limit
 1984          */
 1985         s = splbio();
 1986 
 1987         for (;;) {
 1988                 kthread_suspend_check(bufdaemonproc);
 1989 
 1990                 bd_request = 0;
 1991 
 1992                 /*
 1993                  * Do the flush.  Limit the amount of in-transit I/O we
 1994                  * allow to build up, otherwise we would completely saturate
 1995                  * the I/O system.  Wakeup any waiting processes before we
 1996                  * normally would so they can run in parallel with our drain.
 1997                  */
 1998                 while (numdirtybuffers > lodirtybuffers) {
 1999                         if (flushbufqueues() == 0)
 2000                                 break;
 2001                         waitrunningbufspace();
 2002                         numdirtywakeup((lodirtybuffers + hidirtybuffers) / 2);
 2003                 }
 2004 
 2005                 /*
 2006                  * Only clear bd_request if we have reached our low water
 2007                  * mark.  The buf_daemon normally waits 1 second and
 2008                  * then incrementally flushes any dirty buffers that have
 2009                  * built up, within reason.
 2010                  *
 2011                  * If we were unable to hit our low water mark and couldn't
 2012                  * find any flushable buffers, we sleep half a second.
 2013                  * Otherwise we loop immediately.
 2014                  */
 2015                 if (numdirtybuffers <= lodirtybuffers) {
 2016                         /*
 2017                          * We reached our low water mark, reset the
 2018                          * request and sleep until we are needed again.
 2019                          * The sleep is just so the suspend code works.
 2020                          */
 2021                         bd_request = 0;
 2022                         tsleep(&bd_request, PVM, "psleep", hz);
 2023                 } else {
 2024                         /*
 2025                          * We couldn't find any flushable dirty buffers but
 2026                          * still have too many dirty buffers, we
 2027                          * have to sleep and try again.  (rare)
 2028                          */
 2029                         tsleep(&bd_request, PVM, "qsleep", hz / 10);
 2030                 }
 2031         }
 2032 }
 2033 
 2034 /*
 2035  *      flushbufqueues:
 2036  *
 2037  *      Try to flush a buffer in the dirty queue.  We must be careful to
 2038  *      free up B_INVAL buffers instead of writing them, which NFS is
 2039  *      particularly sensitive to.
 2040  */
 2041 int flushwithdeps = 0;
 2042 SYSCTL_INT(_vfs, OID_AUTO, flushwithdeps, CTLFLAG_RW, &flushwithdeps,
 2043     0, "Number of buffers flushed with dependencies that require rollbacks");
 2044 static int
 2045 flushbufqueues(void)
 2046 {
 2047         struct thread *td = curthread;
 2048         struct vnode *vp;
 2049         struct buf *bp;
 2050 
 2051         TAILQ_FOREACH(bp, &bufqueues[QUEUE_DIRTY], b_freelist) {
 2052                 KASSERT((bp->b_flags & B_DELWRI),
 2053                     ("unexpected clean buffer %p", bp));
 2054                 if ((bp->b_xflags & BX_BKGRDINPROG) != 0)
 2055                         continue;
 2056                 if (bp->b_flags & B_INVAL) {
 2057                         if (BUF_LOCK(bp, LK_EXCLUSIVE) != 0)
 2058                                 panic("flushbufqueues: locked buf");
 2059                         bremfree(bp);
 2060                         brelse(bp);
 2061                         return (1);
 2062                 }
 2063                 if (LIST_FIRST(&bp->b_dep) != NULL && buf_countdeps(bp, 0))
 2064                         continue;
 2065                 /*
 2066                  * We must hold the lock on a vnode before writing
 2067                  * one of its buffers. Otherwise we may confuse, or
 2068                  * in the case of a snapshot vnode, deadlock the
 2069                  * system.
 2070                  */
 2071                 if ((vp = bp->b_vp) == NULL ||
 2072                     vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT, td) == 0) {
 2073                         vfs_bio_awrite(bp);
 2074                         if (vp != NULL)
 2075                                 VOP_UNLOCK(vp, 0, td);
 2076                         return (1);
 2077                 }
 2078         }
 2079         /*
 2080          * Could not find any buffers without rollback dependencies,
 2081          * so just write the first one in the hopes of eventually
 2082          * making progress.
 2083          */
 2084         TAILQ_FOREACH(bp, &bufqueues[QUEUE_DIRTY], b_freelist) {
 2085                 KASSERT((bp->b_flags & B_DELWRI),
 2086                     ("unexpected clean buffer %p", bp));
 2087                 if ((bp->b_xflags & BX_BKGRDINPROG) != 0)
 2088                         continue;
 2089                 if (bp->b_flags & B_INVAL) {
 2090                         if (BUF_LOCK(bp, LK_EXCLUSIVE) != 0)
 2091                                 panic("flushbufqueues: locked buf");
 2092                         bremfree(bp);
 2093                         brelse(bp);
 2094                         return (1);
 2095                 }
 2096                 /*
 2097                  * We must hold the lock on a vnode before writing
 2098                  * one of its buffers. Otherwise we may confuse, or
 2099                  * in the case of a snapshot vnode, deadlock the
 2100                  * system.
 2101                  */
 2102                 if ((vp = bp->b_vp) == NULL ||
 2103                     vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT, td) == 0) {
 2104                         vfs_bio_awrite(bp);
 2105                         if (vp != NULL)
 2106                                 VOP_UNLOCK(vp, 0, td);
 2107                         flushwithdeps += 1;
 2108                         return (0);
 2109                 }
 2110         }
 2111         return (0);
 2112 }
 2113 
 2114 /*
 2115  * Check to see if a block is currently memory resident.
 2116  */
 2117 struct buf *
 2118 incore(struct vnode * vp, daddr_t blkno)
 2119 {
 2120         struct buf *bp;
 2121 
 2122         int s = splbio();
 2123         VI_LOCK(vp);
 2124         bp = gbincore(vp, blkno);
 2125         VI_UNLOCK(vp);
 2126         splx(s);
 2127         return (bp);
 2128 }
 2129 
 2130 /*
 2131  * Returns true if no I/O is needed to access the
 2132  * associated VM object.  This is like incore except
 2133  * it also hunts around in the VM system for the data.
 2134  */
 2135 
 2136 int
 2137 inmem(struct vnode * vp, daddr_t blkno)
 2138 {
 2139         vm_object_t obj;
 2140         vm_offset_t toff, tinc, size;
 2141         vm_page_t m;
 2142         vm_ooffset_t off;
 2143 
 2144         GIANT_REQUIRED;
 2145         ASSERT_VOP_LOCKED(vp, "inmem");
 2146 
 2147         if (incore(vp, blkno))
 2148                 return 1;
 2149         if (vp->v_mount == NULL)
 2150                 return 0;
 2151         if (VOP_GETVOBJECT(vp, &obj) != 0 || (vp->v_vflag & VV_OBJBUF) == 0)
 2152                 return 0;
 2153 
 2154         size = PAGE_SIZE;
 2155         if (size > vp->v_mount->mnt_stat.f_iosize)
 2156                 size = vp->v_mount->mnt_stat.f_iosize;
 2157         off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;
 2158 
 2159         for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
 2160                 m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
 2161                 if (!m)
 2162                         goto notinmem;
 2163                 tinc = size;
 2164                 if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK))
 2165                         tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK);
 2166                 if (vm_page_is_valid(m,
 2167                     (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
 2168                         goto notinmem;
 2169         }
 2170         return 1;
 2171 
 2172 notinmem:
 2173         return (0);
 2174 }
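
/*
 * Illustrative sketch, not part of the original source: how a caller might
 * distinguish the two residency checks above (the vnode is assumed locked,
 * as inmem() asserts).  The helper name and return convention are
 * assumptions made only for this example.
 */
#if 0
static int
example_block_residency(struct vnode *vp, daddr_t blkno)
{
        if (incore(vp, blkno) != NULL)
                return (1);     /* a buffer for the block is cached */
        if (inmem(vp, blkno))
                return (2);     /* valid data resident in the VM object only */
        return (0);             /* a physical read would be required */
}
#endif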
 2175 
 2176 /*
 2177  *      vfs_setdirty:
 2178  *
 2179  *      Sets the dirty range for a buffer based on the status of the dirty
 2180  *      bits in the pages comprising the buffer.
 2181  *
 2182  *      The range is limited to the size of the buffer.
 2183  *
 2184  *      This routine is primarily used by NFS, but is generalized for the
 2185  *      B_VMIO case.
 2186  */
 2187 static void
 2188 vfs_setdirty(struct buf *bp) 
 2189 {
 2190         int i;
 2191         vm_object_t object;
 2192 
 2193         GIANT_REQUIRED;
 2194         /*
 2195          * Degenerate case - empty buffer
 2196          */
 2197 
 2198         if (bp->b_bufsize == 0)
 2199                 return;
 2200 
 2201         /*
 2202          * We qualify the scan for modified pages on whether the
 2203          * object has been flushed yet.  The OBJ_WRITEABLE flag
 2204          * is not cleared simply by protecting pages off.
 2205          */
 2206 
 2207         if ((bp->b_flags & B_VMIO) == 0)
 2208                 return;
 2209 
 2210         object = bp->b_pages[0]->object;
 2211 
 2212         if ((object->flags & OBJ_WRITEABLE) && !(object->flags & OBJ_MIGHTBEDIRTY))
 2213                 printf("Warning: object %p writeable but not mightbedirty\n", object);
 2214         if (!(object->flags & OBJ_WRITEABLE) && (object->flags & OBJ_MIGHTBEDIRTY))
 2215                 printf("Warning: object %p mightbedirty but not writeable\n", object);
 2216 
 2217         if (object->flags & (OBJ_MIGHTBEDIRTY|OBJ_CLEANING)) {
 2218                 vm_offset_t boffset;
 2219                 vm_offset_t eoffset;
 2220 
 2221                 /*
 2222                  * test the pages to see if they have been modified directly
 2223                  * by users through the VM system.
 2224                  */
 2225                 for (i = 0; i < bp->b_npages; i++) {
 2226                         vm_page_flag_clear(bp->b_pages[i], PG_ZERO);
 2227                         vm_page_test_dirty(bp->b_pages[i]);
 2228                 }
 2229 
 2230                 /*
 2231                  * Calculate the encompassing dirty range, boffset and eoffset,
 2232                  * (eoffset - boffset) bytes.
 2233                  */
 2234 
 2235                 for (i = 0; i < bp->b_npages; i++) {
 2236                         if (bp->b_pages[i]->dirty)
 2237                                 break;
 2238                 }
 2239                 boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
 2240 
 2241                 for (i = bp->b_npages - 1; i >= 0; --i) {
 2242                         if (bp->b_pages[i]->dirty) {
 2243                                 break;
 2244                         }
 2245                 }
 2246                 eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
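                /*
                 * Illustrative example, not in the original source: with 4 KB
                 * pages and b_offset 0x1200, (b_offset & PAGE_MASK) is 512.
                 * If the first dirty page is i == 1 and the last is i == 2,
                 * then boffset = 4096 - 512 = 3584 and
                 * eoffset = 3 * 4096 - 512 = 11776, both relative to the
                 * start of the buffer, before eoffset is clamped to b_bcount
                 * below.
                 */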
 2247 
 2248                 /*
 2249                  * Fit it to the buffer.
 2250                  */
 2251 
 2252                 if (eoffset > bp->b_bcount)
 2253                         eoffset = bp->b_bcount;
 2254 
 2255                 /*
 2256                  * If we have a good dirty range, merge with the existing
 2257                  * dirty range.
 2258                  */
 2259 
 2260                 if (boffset < eoffset) {
 2261                         if (bp->b_dirtyoff > boffset)
 2262                                 bp->b_dirtyoff = boffset;
 2263                         if (bp->b_dirtyend < eoffset)
 2264                                 bp->b_dirtyend = eoffset;
 2265                 }
 2266         }
 2267 }
 2268 
 2269 /*
 2270  *      getblk:
 2271  *
 2272  *      Get a block given a specified block and offset into a file/device.
 2273  *      The buffers B_DONE bit will be cleared on return, making it almost
 2274  *      ready for an I/O initiation.  B_INVAL may or may not be set on 
 2275  *      return.  The caller should clear B_INVAL prior to initiating a
 2276  *      READ.
 2277  *
 2278  *      For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for
 2279  *      an existing buffer.
 2280  *
 2281  *      For a VMIO buffer, B_CACHE is modified according to the backing VM.
 2282  *      If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set
 2283  *      and then cleared based on the backing VM.  If the previous buffer is
 2284  *      non-0-sized but invalid, B_CACHE will be cleared.
 2285  *
 2286  *      If getblk() must create a new buffer, the new buffer is returned with
 2287  *      both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which
 2288  *      case it is returned with B_INVAL clear and B_CACHE set based on the
 2289  *      backing VM.
 2290  *
 2291  *      getblk() also forces a BUF_WRITE() for any B_DELWRI buffer whose
 2292  *      B_CACHE bit is clear.
 2293  *      
 2294  *      What this means, basically, is that the caller should use B_CACHE to
 2295  *      determine whether the buffer is fully valid or not and should clear
 2296  *      B_INVAL prior to issuing a read.  If the caller intends to validate
 2297  *      the buffer by loading its data area with something, the caller needs
 2298  *      to clear B_INVAL.  If the caller does this without issuing an I/O, 
 2299  *      the caller should set B_CACHE ( as an optimization ), else the caller
 2300  *      should issue the I/O and biodone() will set B_CACHE if the I/O was
 2301  *      a write attempt or if it was a successful read.  If the caller
 2302  *      intends to issue a READ, the caller must clear B_INVAL and BIO_ERROR
 2303  *      prior to issuing the READ.  biodone() will *not* clear B_INVAL.
 2304  */
 2305 struct buf *
 2306 getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
 2307 {
 2308         struct buf *bp;
 2309         int s;
 2310 #ifdef USE_BUFHASH
 2311         struct bufhashhdr *bh;
 2312 #endif
 2313         ASSERT_VOP_LOCKED(vp, "getblk");
 2314 
 2315         if (size > MAXBSIZE)
 2316                 panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE);
 2317 
 2318         s = splbio();
 2319 loop:
 2320         /*
 2321          * Block if we are low on buffers.   Certain processes are allowed
 2322          * to completely exhaust the buffer cache.
 2323          *
 2324          * If this check ever becomes a bottleneck it may be better to
 2325          * move it into the else, when gbincore() fails.  At the moment
 2326          * it isn't a problem.
 2327          *
 2328          * XXX remove if 0 sections (clean this up after it's proven)
 2329          */
 2330         if (numfreebuffers == 0) {
 2331                 if (curthread == PCPU_GET(idlethread))
 2332                         return NULL;
 2333                 needsbuffer |= VFS_BIO_NEED_ANY;
 2334         }
 2335 
 2336         VI_LOCK(vp);
 2337         if ((bp = gbincore(vp, blkno))) {
 2338                 VI_UNLOCK(vp);
 2339                 /*
 2340                  * Buffer is in-core.  If the buffer is not busy, it must
 2341                  * be on a queue.
 2342                  */
 2343 
 2344                 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
 2345                         if (BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL,
 2346                             "getblk", slpflag, slptimeo) == ENOLCK)
 2347                                 goto loop;
 2348                         splx(s);
 2349                         return (struct buf *) NULL;
 2350                 }
 2351 
 2352                 /*
 2353                  * The buffer is locked.  B_CACHE is cleared if the buffer is 
 2354                  * invalid.  Otherwise, for a non-VMIO buffer, B_CACHE is set
 2355                  * and for a VMIO buffer B_CACHE is adjusted according to the
 2356                  * backing VM cache.
 2357                  */
 2358                 if (bp->b_flags & B_INVAL)
 2359                         bp->b_flags &= ~B_CACHE;
 2360                 else if ((bp->b_flags & (B_VMIO | B_INVAL)) == 0)
 2361                         bp->b_flags |= B_CACHE;
 2362                 bremfree(bp);
 2363 
 2364                 /*
 2365                  * check for size inconsistencies for the non-VMIO case.
 2366                  */
 2367 
 2368                 if (bp->b_bcount != size) {
 2369                         if ((bp->b_flags & B_VMIO) == 0 ||
 2370                             (size > bp->b_kvasize)) {
 2371                                 if (bp->b_flags & B_DELWRI) {
 2372                                         bp->b_flags |= B_NOCACHE;
 2373                                         BUF_WRITE(bp);
 2374                                 } else {
 2375                                         if ((bp->b_flags & B_VMIO) &&
 2376                                            (LIST_FIRST(&bp->b_dep) == NULL)) {
 2377                                                 bp->b_flags |= B_RELBUF;
 2378                                                 brelse(bp);
 2379                                         } else {
 2380                                                 bp->b_flags |= B_NOCACHE;
 2381                                                 BUF_WRITE(bp);
 2382                                         }
 2383                                 }
 2384                                 goto loop;
 2385                         }
 2386                 }
 2387 
 2388                 /*
 2389                  * If the size is inconsistent in the VMIO case, we can resize
 2390                  * the buffer.  This might lead to B_CACHE getting set or
 2391                  * cleared.  If the size has not changed, B_CACHE remains
 2392                  * unchanged from its previous state.
 2393                  */
 2394 
 2395                 if (bp->b_bcount != size)
 2396                         allocbuf(bp, size);
 2397 
 2398                 KASSERT(bp->b_offset != NOOFFSET, 
 2399                     ("getblk: no buffer offset"));
 2400 
 2401                 /*
 2402                  * A buffer with B_DELWRI set and B_CACHE clear must
 2403                  * be committed before we can return the buffer in
 2404                  * order to prevent the caller from issuing a read
 2405                  * ( due to B_CACHE not being set ) and overwriting
 2406                  * it.
 2407                  *
 2408                  * Most callers, including NFS and FFS, need this to
 2409                  * operate properly either because they assume they
 2410                  * can issue a read if B_CACHE is not set, or because
 2411                  * ( for example ) an uncached B_DELWRI might loop due 
 2412                  * to softupdates re-dirtying the buffer.  In the latter
 2413                  * case, B_CACHE is set after the first write completes,
 2414                  * preventing further loops.
 2415                  * NOTE!  b*write() sets B_CACHE.  If we cleared B_CACHE
 2416                  * above while extending the buffer, we cannot allow the
 2417                  * buffer to remain with B_CACHE set after the write
 2418                  * completes or it will represent a corrupt state.  To
 2419                  * deal with this we set B_NOCACHE to scrap the buffer
 2420                  * after the write.
 2421                  *
 2422                  * We might be able to do something fancy, like setting
 2423                  * B_CACHE in bwrite() except if B_DELWRI is already set,
 2424                  * so the below call doesn't set B_CACHE, but that gets real
 2425                  * confusing.  This is much easier.
 2426                  */
 2427 
 2428                 if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) {
 2429                         bp->b_flags |= B_NOCACHE;
 2430                         BUF_WRITE(bp);
 2431                         goto loop;
 2432                 }
 2433 
 2434                 splx(s);
 2435                 bp->b_flags &= ~B_DONE;
 2436         } else {
 2437                 int bsize, maxsize, vmio;
 2438                 off_t offset;
 2439 
 2440                 /*
 2441                  * Buffer is not in-core, create new buffer.  The buffer
 2442                  * returned by getnewbuf() is locked.  Note that the returned
 2443                  * buffer is also considered valid (not marked B_INVAL).
 2444                  */
 2445                 VI_UNLOCK(vp);
 2446                 if (vn_isdisk(vp, NULL))
 2447                         bsize = DEV_BSIZE;
 2448                 else if (vp->v_mountedhere)
 2449                         bsize = vp->v_mountedhere->mnt_stat.f_iosize;
 2450                 else if (vp->v_mount)
 2451                         bsize = vp->v_mount->mnt_stat.f_iosize;
 2452                 else
 2453                         bsize = size;
 2454 
 2455                 offset = blkno * bsize;
 2456                 vmio = (VOP_GETVOBJECT(vp, NULL) == 0) &&
 2457                     (vp->v_vflag & VV_OBJBUF);
 2458                 maxsize = vmio ? size + (offset & PAGE_MASK) : size;
 2459                 maxsize = imax(maxsize, bsize);
 2460 
 2461                 if ((bp = getnewbuf(slpflag, slptimeo, size, maxsize)) == NULL) {
 2462                         if (slpflag || slptimeo) {
 2463                                 splx(s);
 2464                                 return NULL;
 2465                         }
 2466                         goto loop;
 2467                 }
 2468 
 2469                 /*
 2470                  * This code is used to make sure that a buffer is not
 2471                  * created while the getnewbuf routine is blocked.
 2472                  * This can be a problem whether the vnode is locked or not.
 2473                  * If the buffer is created out from under us, we have to
 2474                  * throw away the one we just created.  There is no race
 2475                  * window because we are safely running at splbio() from the
 2476                  * point of the duplicate buffer creation through to here,
 2477                  * and we've locked the buffer.
 2478                  *
 2479                  * Note: this must occur before we associate the buffer
 2480                  * with the vp especially considering limitations in
 2481                  * the splay tree implementation when dealing with duplicate
 2482                  * lblkno's.
 2483                  */
 2484                 VI_LOCK(vp);
 2485                 if (gbincore(vp, blkno)) {
 2486                         VI_UNLOCK(vp);
 2487                         bp->b_flags |= B_INVAL;
 2488                         brelse(bp);
 2489                         goto loop;
 2490                 }
 2491                 VI_UNLOCK(vp);
 2492 
 2493                 /*
 2494                  * Insert the buffer into the hash, so that it can
 2495                  * be found by incore.
 2496                  */
 2497                 bp->b_blkno = bp->b_lblkno = blkno;
 2498                 bp->b_offset = offset;
 2499 
 2500                 bgetvp(vp, bp);
 2501 #ifdef USE_BUFHASH
 2502                 LIST_REMOVE(bp, b_hash);
 2503                 bh = bufhash(vp, blkno);
 2504                 LIST_INSERT_HEAD(bh, bp, b_hash);
 2505 #endif
 2506 
 2507                 /*
 2508                  * set B_VMIO bit.  allocbuf() the buffer bigger.  Since the
 2509                  * buffer size starts out as 0, B_CACHE will be set by
 2510                  * allocbuf() for the VMIO case prior to it testing the
 2511                  * backing store for validity.
 2512                  */
 2513 
 2514                 if (vmio) {
 2515                         bp->b_flags |= B_VMIO;
 2516 #if defined(VFS_BIO_DEBUG)
 2517                         if (vp->v_type != VREG)
 2518                                 printf("getblk: vmioing file type %d???\n", vp->v_type);
 2519 #endif
 2520                         VOP_GETVOBJECT(vp, &bp->b_object);
 2521                 } else {
 2522                         bp->b_flags &= ~B_VMIO;
 2523                         bp->b_object = NULL;
 2524                 }
 2525 
 2526                 allocbuf(bp, size);
 2527 
 2528                 splx(s);
 2529                 bp->b_flags &= ~B_DONE;
 2530         }
 2531         KASSERT(BUF_REFCNT(bp) == 1, ("getblk: bp %p not locked",bp));
 2532         return (bp);
 2533 }
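
/*
 * Illustrative sketch, not part of the original source: the canonical way a
 * caller consumes the B_CACHE contract documented above getblk() -- in
 * substance what bread() does.  Only the flag handling discussed in that
 * comment is shown; building and issuing the actual READ and waiting for
 * biodone() are elided, and the helper name is an assumption.
 */
#if 0
static struct buf *
example_get_valid_block(struct vnode *vp, daddr_t blkno, int size)
{
        struct buf *bp;

        bp = getblk(vp, blkno, size, 0, 0);
        if ((bp->b_flags & B_CACHE) == 0) {
                /* Not fully valid: clear B_INVAL and BIO_ERROR, then read. */
                bp->b_flags &= ~B_INVAL;
                bp->b_ioflags &= ~BIO_ERROR;
                /* ... issue the read here and wait for it to complete ... */
        }
        return (bp);
}
#endif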
 2534 
 2535 /*
 2536  * Get an empty, disassociated buffer of given size.  The buffer is initially
 2537  * set to B_INVAL.
 2538  */
 2539 struct buf *
 2540 geteblk(int size)
 2541 {
 2542         struct buf *bp;
 2543         int s;
 2544         int maxsize;
 2545 
 2546         maxsize = (size + BKVAMASK) & ~BKVAMASK;
 2547 
 2548         s = splbio();
 2549         while ((bp = getnewbuf(0, 0, size, maxsize)) == 0)
 2550                 continue;
 2551         splx(s);
 2552         allocbuf(bp, size);
 2553         bp->b_flags |= B_INVAL; /* b_dep cleared by getnewbuf() */
 2554         KASSERT(BUF_REFCNT(bp) == 1, ("geteblk: bp %p not locked",bp));
 2555         return (bp);
 2556 }
 2557 
 2558 
 2559 /*
 2560  * This code constructs the buffer memory from either anonymous system
 2561  * memory (in the case of non-VMIO operations) or from an associated
 2562  * VM object (in the case of VMIO operations).  This code is able to
 2563  * resize a buffer up or down.
 2564  *
 2565  * Note that this code is tricky, and has many complications to resolve
 2566  * deadlock or inconsistent data situations.  Tread lightly!!!
 2567  * There are B_CACHE and B_DELWRI interactions that must be dealt with by 
 2568  * the caller.  Calling this code willy nilly can result in the loss of data.
 2569  *
 2570  * allocbuf() only adjusts B_CACHE for VMIO buffers.  getblk() deals with
 2571  * B_CACHE for the non-VMIO case.
 2572  */
 2573 
 2574 int
 2575 allocbuf(struct buf *bp, int size)
 2576 {
 2577         int newbsize, mbsize;
 2578         int i;
 2579 
 2580         GIANT_REQUIRED;
 2581 
 2582         if (BUF_REFCNT(bp) == 0)
 2583                 panic("allocbuf: buffer not busy");
 2584 
 2585         if (bp->b_kvasize < size)
 2586                 panic("allocbuf: buffer too small");
 2587 
 2588         if ((bp->b_flags & B_VMIO) == 0) {
 2589                 caddr_t origbuf;
 2590                 int origbufsize;
 2591                 /*
 2592                  * Just get anonymous memory from the kernel.  Don't
 2593                  * mess with B_CACHE.
 2594                  */
 2595                 mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
 2596                 if (bp->b_flags & B_MALLOC)
 2597                         newbsize = mbsize;
 2598                 else
 2599                         newbsize = round_page(size);
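                /*
                 * Example (illustrative): with DEV_BSIZE 512 and 4 KB pages,
                 * a request for size 3000 gives mbsize 3072, while a
                 * page-backed (non-B_MALLOC) buffer is sized to
                 * round_page(3000) == 4096.
                 */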
 2600 
 2601                 if (newbsize < bp->b_bufsize) {
 2602                         /*
 2603                          * malloced buffers are not shrunk
 2604                          */
 2605                         if (bp->b_flags & B_MALLOC) {
 2606                                 if (newbsize) {
 2607                                         bp->b_bcount = size;
 2608                                 } else {
 2609                                         free(bp->b_data, M_BIOBUF);
 2610                                         if (bp->b_bufsize) {
 2611                                                 bufmallocspace -= bp->b_bufsize;
 2612                                                 bufspacewakeup();
 2613                                                 bp->b_bufsize = 0;
 2614                                         }
 2615                                         bp->b_data = bp->b_kvabase;
 2616                                         bp->b_bcount = 0;
 2617                                         bp->b_flags &= ~B_MALLOC;
 2618                                 }
 2619                                 return 1;
 2620                         }               
 2621                         vm_hold_free_pages(
 2622                             bp,
 2623                             (vm_offset_t) bp->b_data + newbsize,
 2624                             (vm_offset_t) bp->b_data + bp->b_bufsize);
 2625                 } else if (newbsize > bp->b_bufsize) {
 2626                         /*
 2627                          * We only use malloced memory on the first allocation,
 2628                          * and revert to page-allocated memory when the buffer
 2629                          * grows.
 2630                          */
 2631                         if ( (bufmallocspace < maxbufmallocspace) &&
 2632                                 (bp->b_bufsize == 0) &&
 2633                                 (mbsize <= PAGE_SIZE/2)) {
 2634 
 2635                                 bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
 2636                                 bp->b_bufsize = mbsize;
 2637                                 bp->b_bcount = size;
 2638                                 bp->b_flags |= B_MALLOC;
 2639                                 bufmallocspace += mbsize;
 2640                                 return 1;
 2641                         }
 2642                         origbuf = NULL;
 2643                         origbufsize = 0;
 2644                         /*
 2645                          * If the buffer is growing on its other-than-first allocation,
 2646                          * then we revert to the page-allocation scheme.
 2647                          */
 2648                         if (bp->b_flags & B_MALLOC) {
 2649                                 origbuf = bp->b_data;
 2650                                 origbufsize = bp->b_bufsize;
 2651                                 bp->b_data = bp->b_kvabase;
 2652                                 if (bp->b_bufsize) {
 2653                                         bufmallocspace -= bp->b_bufsize;
 2654                                         bufspacewakeup();
 2655                                         bp->b_bufsize = 0;
 2656                                 }
 2657                                 bp->b_flags &= ~B_MALLOC;
 2658                                 newbsize = round_page(newbsize);
 2659                         }
 2660                         vm_hold_load_pages(
 2661                             bp,
 2662                             (vm_offset_t) bp->b_data + bp->b_bufsize,
 2663                             (vm_offset_t) bp->b_data + newbsize);
 2664                         if (origbuf) {
 2665                                 bcopy(origbuf, bp->b_data, origbufsize);
 2666                                 free(origbuf, M_BIOBUF);
 2667                         }
 2668                 }
 2669         } else {
 2670                 int desiredpages;
 2671 
 2672                 newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
 2673                 desiredpages = (size == 0) ? 0 :
 2674                         num_pages((bp->b_offset & PAGE_MASK) + newbsize);
 2675 
 2676                 if (bp->b_flags & B_MALLOC)
 2677                         panic("allocbuf: VMIO buffer can't be malloced");
 2678                 /*
 2679                  * Set B_CACHE initially if buffer is 0 length or will become
 2680                  * 0-length.
 2681                  */
 2682                 if (size == 0 || bp->b_bufsize == 0)
 2683                         bp->b_flags |= B_CACHE;
 2684 
 2685                 if (newbsize < bp->b_bufsize) {
 2686                         /*
 2687                          * DEV_BSIZE aligned new buffer size is less than the
 2688                          * DEV_BSIZE aligned existing buffer size.  Figure out
 2689                          * if we have to remove any pages.
 2690                          */
 2691                         if (desiredpages < bp->b_npages) {
 2692                                 vm_page_t m;
 2693 
 2694                                 vm_page_lock_queues();
 2695                                 for (i = desiredpages; i < bp->b_npages; i++) {
 2696                                         /*
 2697                                          * the page is not freed here -- it
 2698                                          * is the responsibility of 
 2699                                          * vnode_pager_setsize
 2700                                          */
 2701                                         m = bp->b_pages[i];
 2702                                         KASSERT(m != bogus_page,
 2703                                             ("allocbuf: bogus page found"));
 2704                                         while (vm_page_sleep_if_busy(m, TRUE, "biodep"))
 2705                                                 vm_page_lock_queues();
 2706 
 2707                                         bp->b_pages[i] = NULL;
 2708                                         vm_page_unwire(m, 0);
 2709                                 }
 2710                                 vm_page_unlock_queues();
 2711                                 pmap_qremove((vm_offset_t) trunc_page((vm_offset_t)bp->b_data) +
 2712                                     (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
 2713                                 bp->b_npages = desiredpages;
 2714                         }
 2715                 } else if (size > bp->b_bcount) {
 2716                         /*
 2717                          * We are growing the buffer, possibly in a 
 2718                          * byte-granular fashion.
 2719                          */
 2720                         struct vnode *vp;
 2721                         vm_object_t obj;
 2722                         vm_offset_t toff;
 2723                         vm_offset_t tinc;
 2724 
 2725                         /*
 2726                          * Step 1, bring in the VM pages from the object, 
 2727                          * allocating them if necessary.  We must clear
 2728                          * B_CACHE if these pages are not valid for the 
 2729                          * range covered by the buffer.
 2730                          */
 2731 
 2732                         vp = bp->b_vp;
 2733                         obj = bp->b_object;
 2734 
 2735                         while (bp->b_npages < desiredpages) {
 2736                                 vm_page_t m;
 2737                                 vm_pindex_t pi;
 2738 
 2739                                 pi = OFF_TO_IDX(bp->b_offset) + bp->b_npages;
 2740                                 if ((m = vm_page_lookup(obj, pi)) == NULL) {
 2741                                         /*
 2742                                          * note: must allocate system pages
 2743                                          * since blocking here could interfere
 2744                                          * with paging I/O, no matter which
 2745                                          * process we are.
 2746                                          */
 2747                                         m = vm_page_alloc(obj, pi,
 2748                                             VM_ALLOC_SYSTEM | VM_ALLOC_WIRED);
 2749                                         if (m == NULL) {
 2750                                                 VM_WAIT;
 2751                                                 vm_pageout_deficit += desiredpages - bp->b_npages;
 2752                                         } else {
 2753                                                 vm_page_lock_queues();
 2754                                                 vm_page_wakeup(m);
 2755                                                 vm_page_unlock_queues();
 2756                                                 bp->b_flags &= ~B_CACHE;
 2757                                                 bp->b_pages[bp->b_npages] = m;
 2758                                                 ++bp->b_npages;
 2759                                         }
 2760                                         continue;
 2761                                 }
 2762 
 2763                                 /*
 2764                                  * We found a page.  If we have to sleep on it,
 2765                                  * retry because it might have gotten freed out
 2766                                  * from under us.
 2767                                  *
 2768                                  * We can only test PG_BUSY here.  Blocking on
 2769                                  * m->busy might lead to a deadlock:
 2770                                  *
 2771                                  *  vm_fault->getpages->cluster_read->allocbuf
 2772                                  *
 2773                                  */
 2774                                 vm_page_lock_queues();
 2775                                 if (vm_page_sleep_if_busy(m, FALSE, "pgtblk"))
 2776                                         continue;
 2777 
 2778                                 /*
 2779                                  * We have a good page.  Should we wakeup the
 2780                                  * page daemon?
 2781                                  */
 2782                                 if ((curproc != pageproc) &&
 2783                                     ((m->queue - m->pc) == PQ_CACHE) &&
 2784                                     ((cnt.v_free_count + cnt.v_cache_count) <
 2785                                         (cnt.v_free_min + cnt.v_cache_min))) {
 2786                                         pagedaemon_wakeup();
 2787                                 }
 2788                                 vm_page_flag_clear(m, PG_ZERO);
 2789                                 vm_page_wire(m);
 2790                                 vm_page_unlock_queues();
 2791                                 bp->b_pages[bp->b_npages] = m;
 2792                                 ++bp->b_npages;
 2793                         }
 2794 
 2795                         /*
 2796                          * Step 2.  We've loaded the pages into the buffer,
 2797                          * we have to figure out if we can still have B_CACHE
 2798                          * set.  Note that B_CACHE is set according to the
 2799                          * byte-granular range ( bcount and size ), not the
 2800                          * aligned range ( newbsize ).
 2801                          *
 2802                          * The VM test is against m->valid, which is DEV_BSIZE
 2803                          * aligned.  Needless to say, the validity of the data
 2804                          * needs to also be DEV_BSIZE aligned.  Note that this
 2805                          * fails with NFS if the server or some other client
 2806                          * extends the file's EOF.  If our buffer is resized, 
 2807                          * B_CACHE may remain set! XXX
 2808                          */
 2809 
 2810                         toff = bp->b_bcount;
 2811                         tinc = PAGE_SIZE - ((bp->b_offset + toff) & PAGE_MASK);
 2812 
 2813                         while ((bp->b_flags & B_CACHE) && toff < size) {
 2814                                 vm_pindex_t pi;
 2815 
 2816                                 if (tinc > (size - toff))
 2817                                         tinc = size - toff;
 2818 
 2819                                 pi = ((bp->b_offset & PAGE_MASK) + toff) >> 
 2820                                     PAGE_SHIFT;
 2821 
 2822                                 vfs_buf_test_cache(
 2823                                     bp, 
 2824                                     bp->b_offset,
 2825                                     toff, 
 2826                                     tinc, 
 2827                                     bp->b_pages[pi]
 2828                                 );
 2829                                 toff += tinc;
 2830                                 tinc = PAGE_SIZE;
 2831                         }
 2832 
 2833                         /*
 2834                          * Step 3, fixup the KVM pmap.  Remember that
 2835                          * bp->b_data is relative to bp->b_offset, but 
 2836                          * bp->b_offset may be offset into the first page.
 2837                          */
 2838 
 2839                         bp->b_data = (caddr_t)
 2840                             trunc_page((vm_offset_t)bp->b_data);
 2841                         pmap_qenter(
 2842                             (vm_offset_t)bp->b_data,
 2843                             bp->b_pages, 
 2844                             bp->b_npages
 2845                         );
 2846                         
 2847                         bp->b_data = (caddr_t)((vm_offset_t)bp->b_data | 
 2848                             (vm_offset_t)(bp->b_offset & PAGE_MASK));
 2849                 }
 2850         }
 2851         if (newbsize < bp->b_bufsize)
 2852                 bufspacewakeup();
 2853         bp->b_bufsize = newbsize;       /* actual buffer allocation     */
 2854         bp->b_bcount = size;            /* requested buffer size        */
 2855         return 1;
 2856 }
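
/*
 * Illustrative sketch (editor's example, hypothetical caller): filesystems
 * normally reach allocbuf() through getblk(), and may later call it again
 * to grow an already-instantiated buffer, as a network filesystem might
 * when a write extends a cached block.  The vnode and block number below
 * are made up.
 */
#if 0
static int
example_grow_block(struct vnode *vp, daddr_t lbn)
{
        struct buf *bp;

        /* Instantiate the block with an initial one-sector allocation. */
        bp = getblk(vp, lbn, DEV_BSIZE, 0, 0);
        if (bp == NULL)                 /* defensive; cannot happen with slpflag 0 */
                return (EWOULDBLOCK);

        /*
         * Grow the same buffer to a full page.  For a VMIO buffer this is
         * the Step 1-3 path above: pages are looked up or allocated, the
         * B_CACHE state is re-evaluated, and the KVM map is re-entered.
         */
        allocbuf(bp, PAGE_SIZE);

        brelse(bp);
        return (0);
}
#endif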
 2857 
 2858 void
 2859 biodone(struct bio *bp)
 2860 {
 2861         bp->bio_flags |= BIO_DONE;
 2862         if (bp->bio_done != NULL)
 2863                 bp->bio_done(bp);
 2864         else
 2865                 wakeup(bp);
 2866 }
 2867 
 2868 /*
 2869  * Wait for a BIO to finish.
 2870  *
 2871  * XXX: resort to a timeout for now.  The optimal locking (if any) for this
 2872  * case is not yet clear.
 2873  */
 2874 int
 2875 biowait(struct bio *bp, const char *wchan)
 2876 {
 2877 
 2878         while ((bp->bio_flags & BIO_DONE) == 0)
 2879                 msleep(bp, NULL, PRIBIO, wchan, hz / 10);
 2880         if (bp->bio_error != 0)
 2881                 return (bp->bio_error);
 2882         if (!(bp->bio_flags & BIO_ERROR))
 2883                 return (0);
 2884         return (EIO);
 2885 }
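
/*
 * Illustrative sketch (editor's example): the synchronous hand-off that
 * biodone()/biowait() implement.  The submitter leaves bio_done NULL so
 * biodone() falls through to wakeup(bp), and waits with biowait().  The
 * dispatch routine example_dev_strategy() is hypothetical; the field
 * names are assumed to follow struct bio from <sys/bio.h>.
 */
#if 0
extern void example_dev_strategy(struct bio *);         /* hypothetical */

static int
example_sync_read(dev_t dev, daddr_t blkno, caddr_t data, long bcount)
{
        struct bio bio;

        bzero(&bio, sizeof(bio));
        bio.bio_cmd = BIO_READ;
        bio.bio_dev = dev;
        bio.bio_blkno = blkno;
        bio.bio_bcount = bcount;
        bio.bio_data = data;
        bio.bio_done = NULL;            /* biodone() will wakeup(&bio) */

        example_dev_strategy(&bio);
        return (biowait(&bio, "exread"));
}
#endif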
 2886 
 2887 void
 2888 biofinish(struct bio *bp, struct devstat *stat, int error)
 2889 {
 2890         
 2891         if (error) {
 2892                 bp->bio_error = error;
 2893                 bp->bio_flags |= BIO_ERROR;
 2894         }
 2895         if (stat != NULL)
 2896                 devstat_end_transaction_bio(stat, bp);
 2897         biodone(bp);
 2898 }
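
/*
 * Illustrative sketch (editor's example): a driver completion path using
 * biofinish() to fold error marking, optional devstat accounting and the
 * final biodone() into one call.  The softc layout is hypothetical.
 */
#if 0
struct example_softc {
        struct devstat sc_stats;        /* per-device statistics */
        /* ... other driver state ... */
};

static void
example_dev_complete(struct example_softc *sc, struct bio *bp, int error)
{
        /* error == 0 completes normally; otherwise BIO_ERROR is set. */
        biofinish(bp, &sc->sc_stats, error);
}
#endif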
 2899 
 2900 void
 2901 bioq_init(struct bio_queue_head *head)
 2902 {
 2903         TAILQ_INIT(&head->queue);
 2904         head->last_pblkno = 0;
 2905         head->insert_point = NULL;
 2906         head->switch_point = NULL;
 2907 }
 2908 
 2909 void
 2910 bioq_remove(struct bio_queue_head *head, struct bio *bp)
 2911 {
 2912         if (bp == head->switch_point)
 2913                 head->switch_point = TAILQ_NEXT(bp, bio_queue);
 2914         if (bp == head->insert_point) {
 2915                 head->insert_point = TAILQ_PREV(bp, bio_queue, bio_queue);
 2916                 if (head->insert_point == NULL)
 2917                         head->last_pblkno = 0;
 2918         } else if (bp == TAILQ_FIRST(&head->queue))
 2919                 head->last_pblkno = bp->bio_pblkno;
 2920         TAILQ_REMOVE(&head->queue, bp, bio_queue);
 2921         if (TAILQ_FIRST(&head->queue) == head->switch_point)
 2922                 head->switch_point = NULL;
 2923 }
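
/*
 * Illustrative sketch (editor's example): minimal FIFO use of the
 * bio_queue_head helpers above.  Real drivers usually insert with
 * bioq_disksort() to keep the queue sorted by block number; plain
 * TAILQ insertion is used here only to keep the sketch self-contained.
 */
#if 0
static struct bio_queue_head example_bioq;

static void
example_bioq_setup(void)
{
        bioq_init(&example_bioq);
}

static void
example_bioq_enqueue(struct bio *bp)
{
        TAILQ_INSERT_TAIL(&example_bioq.queue, bp, bio_queue);
}

static struct bio *
example_bioq_dequeue(void)
{
        struct bio *bp;

        bp = TAILQ_FIRST(&example_bioq.queue);
        if (bp != NULL)
                bioq_remove(&example_bioq, bp);
        return (bp);
}
#endif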
 2924 
 2925 /*
 2926  *      bufwait:
 2927  *
 2928  *      Wait for buffer I/O completion, returning error status.  The buffer
 2929  *      is left locked and B_DONE on return.  B_EINTR is converted into a EINTR
 2930  *      error and cleared.
 2931  */
 2932 int
 2933 bufwait(register struct buf * bp)
 2934 {
 2935         int s;
 2936 
 2937         s = splbio();
 2938         while ((bp->b_flags & B_DONE) == 0) {
 2939                 if (bp->b_iocmd == BIO_READ)
 2940                         tsleep(bp, PRIBIO, "biord", 0);
 2941                 else
 2942                         tsleep(bp, PRIBIO, "biowr", 0);
 2943         }
 2944         splx(s);
 2945         if (bp->b_flags & B_EINTR) {
 2946                 bp->b_flags &= ~B_EINTR;
 2947                 return (EINTR);
 2948         }
 2949         if (bp->b_ioflags & BIO_ERROR) {
 2950                 return (bp->b_error ? bp->b_error : EIO);
 2951         } else {
 2952                 return (0);
 2953         }
 2954 }
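
/*
 * Illustrative sketch (editor's example): the synchronous read pattern
 * built on bufwait(), essentially what bread() in this file does.  The
 * vnode, block number and size are hypothetical; error handling is
 * reduced to the minimum.
 */
#if 0
static int
example_read_block(struct vnode *vp, daddr_t blkno, int size, struct buf **bpp)
{
        struct buf *bp;
        int error;

        bp = getblk(vp, blkno, size, 0, 0);
        if ((bp->b_flags & B_CACHE) == 0) {
                /* Not valid in the cache: schedule the read and wait. */
                bp->b_iocmd = BIO_READ;
                bp->b_flags &= ~B_INVAL;
                bp->b_ioflags &= ~BIO_ERROR;
                vfs_busy_pages(bp, 0);
                VOP_STRATEGY(vp, bp);
                error = bufwait(bp);
                if (error) {
                        brelse(bp);
                        return (error);
                }
        }
        *bpp = bp;
        return (0);
}
#endif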
 2955 
 2956  /*
 2957   * Call back function from struct bio back up to struct buf.
 2958   * The corresponding initialization lives in sys/conf.h:DEV_STRATEGY().
 2959   */
 2960 void
 2961 bufdonebio(struct bio *bp)
 2962 {
 2963         bufdone(bp->bio_caller2);
 2964 }
 2965 
 2966 /*
 2967  *      bufdone:
 2968  *
 2969  *      Finish I/O on a buffer, optionally calling a completion function.
 2970  *      This is usually called from an interrupt so process blocking is
 2971  *      not allowed.
 2972  *
 2973  *      biodone is also responsible for setting B_CACHE in a B_VMIO bp.
 2974  *      In a non-VMIO bp, B_CACHE will be set on the next getblk() 
 2975  *      assuming B_INVAL is clear.
 2976  *
 2977  *      For the VMIO case, we set B_CACHE if the op was a read and no
 2978  *      read error occurred, or if the op was a write.  B_CACHE is never
 2979  *      set if the buffer is invalid or otherwise uncacheable.
 2980  *
 2981  *      biodone does not mess with B_INVAL, allowing the I/O routine or the
 2982  *      initiator to leave B_INVAL set to brelse the buffer out of existence
 2983  *      in the biodone routine.
 2984  */
 2985 void
 2986 bufdone(struct buf *bp)
 2987 {
 2988         int s;
 2989         void    (*biodone)(struct buf *);
 2990 
 2991         GIANT_REQUIRED;
 2992 
 2993         s = splbio();
 2994 
 2995         KASSERT(BUF_REFCNT(bp) > 0, ("biodone: bp %p not busy %d", bp, BUF_REFCNT(bp)));
 2996         KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp));
 2997 
 2998         bp->b_flags |= B_DONE;
 2999         runningbufwakeup(bp);
 3000 
 3001         if (bp->b_iocmd == BIO_DELETE) {
 3002                 brelse(bp);
 3003                 splx(s);
 3004                 return;
 3005         }
 3006 
 3007         if (bp->b_iocmd == BIO_WRITE) {
 3008                 vwakeup(bp);
 3009         }
 3010 
 3011         /* call optional completion function if requested */
 3012         if (bp->b_iodone != NULL) {
 3013                 biodone = bp->b_iodone;
 3014                 bp->b_iodone = NULL;
 3015                 (*biodone) (bp);
 3016                 splx(s);
 3017                 return;
 3018         }
 3019         if (LIST_FIRST(&bp->b_dep) != NULL)
 3020                 buf_complete(bp);
 3021 
 3022         if (bp->b_flags & B_VMIO) {
 3023                 int i;
 3024                 vm_ooffset_t foff;
 3025                 vm_page_t m;
 3026                 vm_object_t obj;
 3027                 int iosize;
 3028                 struct vnode *vp = bp->b_vp;
 3029 
 3030                 obj = bp->b_object;
 3031 
 3032 #if defined(VFS_BIO_DEBUG)
 3033                 mp_fixme("usecount and vflag accessed without locks.");
 3034                 if (vp->v_usecount == 0) {
 3035                         panic("biodone: zero vnode ref count");
 3036                 }
 3037 
 3038                 if ((vp->v_vflag & VV_OBJBUF) == 0) {
 3039                         panic("biodone: vnode is not setup for merged cache");
 3040                 }
 3041 #endif
 3042 
 3043                 foff = bp->b_offset;
 3044                 KASSERT(bp->b_offset != NOOFFSET,
 3045                     ("biodone: no buffer offset"));
 3046 
 3047 #if defined(VFS_BIO_DEBUG)
 3048                 if (obj->paging_in_progress < bp->b_npages) {
 3049                         printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
 3050                             obj->paging_in_progress, bp->b_npages);
 3051                 }
 3052 #endif
 3053 
 3054                 /*
 3055                  * Set B_CACHE if the op was a normal read and no error
 3056                  * occurred.  B_CACHE is set for writes in the b*write()
 3057                  * routines.
 3058                  */
 3059                 iosize = bp->b_bcount - bp->b_resid;
 3060                 if (bp->b_iocmd == BIO_READ &&
 3061                     !(bp->b_flags & (B_INVAL|B_NOCACHE)) &&
 3062                     !(bp->b_ioflags & BIO_ERROR)) {
 3063                         bp->b_flags |= B_CACHE;
 3064                 }
 3065                 vm_page_lock_queues();
 3066                 for (i = 0; i < bp->b_npages; i++) {
 3067                         int bogusflag = 0;
 3068                         int resid;
 3069 
 3070                         resid = ((foff + PAGE_SIZE) & ~(off_t)PAGE_MASK) - foff;
 3071                         if (resid > iosize)
 3072                                 resid = iosize;
 3073 
 3074                         /*
 3075                          * cleanup bogus pages, restoring the originals
 3076                          */
 3077                         m = bp->b_pages[i];
 3078                         if (m == bogus_page) {
 3079                                 bogusflag = 1;
 3080                                 m = vm_page_lookup(obj, OFF_TO_IDX(foff));
 3081                                 if (m == NULL)
 3082                                         panic("biodone: page disappeared!");
 3083                                 bp->b_pages[i] = m;
 3084                                 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
 3085                         }
 3086 #if defined(VFS_BIO_DEBUG)
 3087                         if (OFF_TO_IDX(foff) != m->pindex) {
 3088                                 printf(
 3089 "biodone: foff(%jd)/m->pindex(%ju) mismatch\n",
 3090                                     (intmax_t)foff, (uintmax_t)m->pindex);
 3091                         }
 3092 #endif
 3093 
 3094                         /*
 3095                          * In the write case, the valid and clean bits are
 3096                          * already changed correctly ( see bdwrite() ), so we 
 3097                          * only need to do this here in the read case.
 3098                          */
 3099                         if ((bp->b_iocmd == BIO_READ) && !bogusflag && resid > 0) {
 3100                                 vfs_page_set_valid(bp, foff, i, m);
 3101                         }
 3102                         vm_page_flag_clear(m, PG_ZERO);
 3103 
 3104                         /*
 3105                          * When debugging new filesystems or buffer I/O methods, this
 3106                          * is the most common error that pops up.  If you see it, you
 3107                          * have not set the page busy flag correctly.
 3108                          */
 3109                         if (m->busy == 0) {
 3110                                 printf("biodone: page busy < 0, "
 3111                                     "pindex: %d, foff: 0x(%x,%x), "
 3112                                     "resid: %d, index: %d\n",
 3113                                     (int) m->pindex, (int)(foff >> 32),
 3114                                                 (int) foff & 0xffffffff, resid, i);
 3115                                 if (!vn_isdisk(vp, NULL))
 3116                                         printf(" iosize: %ld, lblkno: %jd, flags: 0x%lx, npages: %d\n",
 3117                                             bp->b_vp->v_mount->mnt_stat.f_iosize,
 3118                                             (intmax_t) bp->b_lblkno,
 3119                                             bp->b_flags, bp->b_npages);
 3120                                 else
 3121                                         printf(" VDEV, lblkno: %jd, flags: 0x%lx, npages: %d\n",
 3122                                             (intmax_t) bp->b_lblkno,
 3123                                             bp->b_flags, bp->b_npages);
 3124                                 printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n",
 3125                                     m->valid, m->dirty, m->wire_count);
 3126                                 panic("biodone: page busy < 0\n");
 3127                         }
 3128                         vm_page_io_finish(m);
 3129                         vm_object_pip_subtract(obj, 1);
 3130                         foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
 3131                         iosize -= resid;
 3132                 }
 3133                 vm_page_unlock_queues();
 3134                 if (obj)
 3135                         vm_object_pip_wakeupn(obj, 0);
 3136         }
 3137 
 3138         /*
 3139          * For asynchronous completions, release the buffer now. The brelse
 3140          * will do a wakeup there if necessary - so no need to do a wakeup
 3141          * here in the async case. The sync case always needs to do a wakeup.
 3142          */
 3143 
 3144         if (bp->b_flags & B_ASYNC) {
 3145                 if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_RELBUF)) || (bp->b_ioflags & BIO_ERROR))
 3146                         brelse(bp);
 3147                 else
 3148                         bqrelse(bp);
 3149         } else {
 3150                 wakeup(bp);
 3151         }
 3152         splx(s);
 3153 }
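
/*
 * Illustrative sketch (editor's example): the asynchronous path described
 * above.  A caller that does not need to wait issues bawrite(); when the
 * device finishes, bufdone() unbusies the VMIO pages and, because B_ASYNC
 * is set, releases the buffer itself, so the caller never touches bp again.
 * Had b_iodone been set, bufdone() would instead hand the buffer to that
 * callback and do nothing further.
 */
#if 0
static void
example_update_block(struct buf *bp, const void *src, int len)
{
        bcopy(src, bp->b_data, len);
        bawrite(bp);            /* ownership passes; bufdone() releases bp */
}
#endif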
 3154 
 3155 /*
 3156  * This routine is called in lieu of bufdone() in the case of
 3157  * incomplete I/O.  It keeps the busy status of the pages
 3158  * consistent.
 3159  */
 3160 void
 3161 vfs_unbusy_pages(struct buf * bp)
 3162 {
 3163         int i;
 3164 
 3165         GIANT_REQUIRED;
 3166 
 3167         runningbufwakeup(bp);
 3168         if (bp->b_flags & B_VMIO) {
 3169                 vm_object_t obj;
 3170 
 3171                 obj = bp->b_object;
 3172                 vm_page_lock_queues();
 3173                 for (i = 0; i < bp->b_npages; i++) {
 3174                         vm_page_t m = bp->b_pages[i];
 3175 
 3176                         if (m == bogus_page) {
 3177                                 m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i);
 3178                                 if (!m) {
 3179                                         panic("vfs_unbusy_pages: page missing\n");
 3180                                 }
 3181                                 bp->b_pages[i] = m;
 3182                                 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
 3183                         }
 3184                         vm_object_pip_subtract(obj, 1);
 3185                         vm_page_flag_clear(m, PG_ZERO);
 3186                         vm_page_io_finish(m);
 3187                 }
 3188                 vm_page_unlock_queues();
 3189                 vm_object_pip_wakeupn(obj, 0);
 3190         }
 3191 }
 3192 
 3193 /*
 3194  * vfs_page_set_valid:
 3195  *
 3196  *      Set the valid bits in a page based on the supplied offset.   The
 3197  *      range is restricted to the buffer's size.
 3198  *
 3199  *      This routine is typically called after a read completes.
 3200  */
 3201 static void
 3202 vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m)
 3203 {
 3204         vm_ooffset_t soff, eoff;
 3205 
 3206         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 3207         /*
 3208          * Start and end offsets in buffer.  eoff - soff may not cross a
 3209          * page boundary or cross the end of the buffer.  The end of the
 3210          * buffer, in this case, is our file EOF, not the allocation size
 3211          * of the buffer.
 3212          */
 3213         soff = off;
 3214         eoff = (off + PAGE_SIZE) & ~(off_t)PAGE_MASK;
 3215         if (eoff > bp->b_offset + bp->b_bcount)
 3216                 eoff = bp->b_offset + bp->b_bcount;
 3217 
 3218         /*
 3219          * Set valid range.  This is typically the entire buffer and thus the
 3220          * entire page.
 3221          */
 3222         if (eoff > soff) {
 3223                 vm_page_set_validclean(
 3224                     m,
 3225                    (vm_offset_t) (soff & PAGE_MASK),
 3226                    (vm_offset_t) (eoff - soff)
 3227                 );
 3228         }
 3229 }
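
/*
 * Worked example (editor's illustration) of the clipping above: with
 * PAGE_SIZE = 4096, b_offset = 0x1200 and b_bcount = 0xa00, the call for
 * the first page gets soff = 0x1200 and a page-rounded end of 0x2000,
 * which is clipped to b_offset + b_bcount = 0x1c00.  The page is then
 * marked valid and clean from byte offset 0x200 for 0xa00 bytes.
 */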
 3230 
 3231 /*
 3232  * This routine is called before a device strategy routine.
 3233  * It is used to tell the VM system that paging I/O is in
 3234  * progress, and treat the pages associated with the buffer
 3235  * almost as being PG_BUSY.  Also the object paging_in_progress
 3236  * flag is handled to make sure that the object doesn't become
 3237  * inconsistent.
 3238  *
 3239  * Since I/O has not been initiated yet, certain buffer flags
 3240  * such as BIO_ERROR or B_INVAL may be in an inconsistent state
 3241  * and should be ignored.
 3242  */
 3243 void
 3244 vfs_busy_pages(struct buf * bp, int clear_modify)
 3245 {
 3246         int i, bogus;
 3247 
 3248         if (bp->b_flags & B_VMIO) {
 3249                 vm_object_t obj;
 3250                 vm_ooffset_t foff;
 3251 
 3252                 obj = bp->b_object;
 3253                 foff = bp->b_offset;
 3254                 KASSERT(bp->b_offset != NOOFFSET,
 3255                     ("vfs_busy_pages: no buffer offset"));
 3256                 vfs_setdirty(bp);
 3257 retry:
 3258                 vm_page_lock_queues();
 3259                 for (i = 0; i < bp->b_npages; i++) {
 3260                         vm_page_t m = bp->b_pages[i];
 3261 
 3262                         if (vm_page_sleep_if_busy(m, FALSE, "vbpage"))
 3263                                 goto retry;
 3264                 }
 3265                 bogus = 0;
 3266                 for (i = 0; i < bp->b_npages; i++) {
 3267                         vm_page_t m = bp->b_pages[i];
 3268 
 3269                         vm_page_flag_clear(m, PG_ZERO);
 3270                         if ((bp->b_flags & B_CLUSTER) == 0) {
 3271                                 vm_object_pip_add(obj, 1);
 3272                                 vm_page_io_start(m);
 3273                         }
 3274                         /*
 3275                          * When readying a buffer for a read ( i.e.
 3276                          * clear_modify == 0 ), it is important to do
 3277                          * bogus_page replacement for valid pages in 
 3278                          * partially instantiated buffers.  Partially 
 3279                          * instantiated buffers can, in turn, occur when
 3280                          * reconstituting a buffer from its VM backing store
 3281                          * base.  We only have to do this if B_CACHE is
 3282                          * clear ( which causes the I/O to occur in the
 3283                          * first place ).  The replacement prevents the read
 3284                          * I/O from overwriting potentially dirty VM-backed
 3285                          * pages.  XXX bogus page replacement is, uh, bogus.
 3286                          * It may not work properly with small-block devices.
 3287                          * We need to find a better way.
 3288                          */
 3289                         pmap_remove_all(m);
 3290                         if (clear_modify)
 3291                                 vfs_page_set_valid(bp, foff, i, m);
 3292                         else if (m->valid == VM_PAGE_BITS_ALL &&
 3293                                 (bp->b_flags & B_CACHE) == 0) {
 3294                                 bp->b_pages[i] = bogus_page;
 3295                                 bogus++;
 3296                         }
 3297                         foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
 3298                 }
 3299                 vm_page_unlock_queues();
 3300                 if (bogus)
 3301                         pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
 3302         }
 3303 }
 3304 
 3305 /*
 3306  * Tell the VM system that the pages associated with this buffer
 3307  * are clean.  This is used for delayed writes where the data is
 3308  * going to go to disk eventually without additional VM intervention.
 3309  *
 3310  * Note that while we only really need to clean through to b_bcount, we
 3311  * just go ahead and clean through to b_bufsize.
 3312  */
 3313 static void
 3314 vfs_clean_pages(struct buf * bp)
 3315 {
 3316         int i;
 3317 
 3318         GIANT_REQUIRED;
 3319 
 3320         if (bp->b_flags & B_VMIO) {
 3321                 vm_ooffset_t foff;
 3322 
 3323                 foff = bp->b_offset;
 3324                 KASSERT(bp->b_offset != NOOFFSET,
 3325                     ("vfs_clean_pages: no buffer offset"));
 3326                 vm_page_lock_queues();
 3327                 for (i = 0; i < bp->b_npages; i++) {
 3328                         vm_page_t m = bp->b_pages[i];
 3329                         vm_ooffset_t noff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
 3330                         vm_ooffset_t eoff = noff;
 3331 
 3332                         if (eoff > bp->b_offset + bp->b_bufsize)
 3333                                 eoff = bp->b_offset + bp->b_bufsize;
 3334                         vfs_page_set_valid(bp, foff, i, m);
 3335                         /* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */
 3336                         foff = noff;
 3337                 }
 3338                 vm_page_unlock_queues();
 3339         }
 3340 }
 3341 
 3342 /*
 3343  *      vfs_bio_set_validclean:
 3344  *
 3345  *      Set the range within the buffer to valid and clean.  The range is 
 3346  *      relative to the beginning of the buffer, b_offset.  Note that b_offset
 3347  *      itself may be offset from the beginning of the first page.
 3348  *
 3349  */
 3350 
 3351 void   
 3352 vfs_bio_set_validclean(struct buf *bp, int base, int size)
 3353 {
 3354         if (bp->b_flags & B_VMIO) {
 3355                 int i;
 3356                 int n;
 3357 
 3358                 /*
 3359                  * Fixup base to be relative to beginning of first page.
 3360                  * Set initial n to be the maximum number of bytes in the
 3361                  * first page that can be validated.
 3362                  */
 3363 
 3364                 base += (bp->b_offset & PAGE_MASK);
 3365                 n = PAGE_SIZE - (base & PAGE_MASK);
 3366 
 3367                 vm_page_lock_queues();
 3368                 for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
 3369                         vm_page_t m = bp->b_pages[i];
 3370 
 3371                         if (n > size)
 3372                                 n = size;
 3373 
 3374                         vm_page_set_validclean(m, base & PAGE_MASK, n);
 3375                         base += n;
 3376                         size -= n;
 3377                         n = PAGE_SIZE;
 3378                 }
 3379                 vm_page_unlock_queues();
 3380         }
 3381 }
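
/*
 * Illustrative sketch (editor's example, hypothetical helper): after
 * copying data into part of a buffer from some external source, a
 * filesystem can mark just that byte range valid and clean.  The offset
 * is relative to the start of the buffer, as described above.
 */
#if 0
static void
example_partial_fill(struct buf *bp, const void *src, int off, int len)
{
        bcopy(src, bp->b_data + off, len);
        vfs_bio_set_validclean(bp, off, len);
}
#endif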
 3382 
 3383 /*
 3384  *      vfs_bio_clrbuf:
 3385  *
 3386  *      clear a buffer.  This routine essentially fakes an I/O, so we need
 3387  *      to clear BIO_ERROR and B_INVAL.
 3388  *
 3389  *      Note that while we only theoretically need to clear through b_bcount,
 3390  *      we go ahead and clear through b_bufsize.
 3391  */
 3392 
 3393 void
 3394 vfs_bio_clrbuf(struct buf *bp) 
 3395 {
 3396         int i, mask = 0;
 3397         caddr_t sa, ea;
 3398 
 3399         GIANT_REQUIRED;
 3400 
 3401         if ((bp->b_flags & (B_VMIO | B_MALLOC)) == B_VMIO) {
 3402                 bp->b_flags &= ~B_INVAL;
 3403                 bp->b_ioflags &= ~BIO_ERROR;
 3404                 if( (bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
 3405                     (bp->b_offset & PAGE_MASK) == 0) {
 3406                         mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
 3407                         if ((bp->b_pages[0]->valid & mask) == mask) {
 3408                                 bp->b_resid = 0;
 3409                                 return;
 3410                         }
 3411                         if (((bp->b_pages[0]->flags & PG_ZERO) == 0) &&
 3412                             ((bp->b_pages[0]->valid & mask) == 0)) {
 3413                                 bzero(bp->b_data, bp->b_bufsize);
 3414                                 bp->b_pages[0]->valid |= mask;
 3415                                 bp->b_resid = 0;
 3416                                 return;
 3417                         }
 3418                 }
 3419                 ea = sa = bp->b_data;
 3420                 for(i=0;i<bp->b_npages;i++,sa=ea) {
 3421                         int j = ((vm_offset_t)sa & PAGE_MASK) / DEV_BSIZE;
 3422                         ea = (caddr_t)trunc_page((vm_offset_t)sa + PAGE_SIZE);
 3423                         ea = (caddr_t)(vm_offset_t)ulmin(
 3424                             (u_long)(vm_offset_t)ea,
 3425                             (u_long)(vm_offset_t)bp->b_data + bp->b_bufsize);
 3426                         mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j;
 3427                         if ((bp->b_pages[i]->valid & mask) == mask)
 3428                                 continue;
 3429                         if ((bp->b_pages[i]->valid & mask) == 0) {
 3430                                 if ((bp->b_pages[i]->flags & PG_ZERO) == 0) {
 3431                                         bzero(sa, ea - sa);
 3432                                 }
 3433                         } else {
 3434                                 for (; sa < ea; sa += DEV_BSIZE, j++) {
 3435                                         if (((bp->b_pages[i]->flags & PG_ZERO) == 0) &&
 3436                                                 (bp->b_pages[i]->valid & (1<<j)) == 0)
 3437                                                 bzero(sa, DEV_BSIZE);
 3438                                 }
 3439                         }
 3440                         bp->b_pages[i]->valid |= mask;
 3441                         vm_page_flag_clear(bp->b_pages[i], PG_ZERO);
 3442                 }
 3443                 bp->b_resid = 0;
 3444         } else {
 3445                 clrbuf(bp);
 3446         }
 3447 }
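
/*
 * Illustrative sketch (editor's example): a filesystem allocating a new
 * block typically calls vfs_bio_clrbuf() so that only the DEV_BSIZE
 * chunks that are not already valid get zeroed.  In the single-page fast
 * path above, a 3*DEV_BSIZE buffer starting on a page boundary yields
 * mask = (1 << 3) - 1 = 0x7, i.e. the first three DEV_BSIZE chunks.
 */
#if 0
static void
example_new_block(struct vnode *vp, daddr_t lbn, int size)
{
        struct buf *bp;

        bp = getblk(vp, lbn, size, 0, 0);
        vfs_bio_clrbuf(bp);     /* zero only the not-yet-valid portions */
        bdwrite(bp);            /* delayed write of the freshly cleared block */
}
#endif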
 3448 
 3449 /*
 3450  * vm_hold_load_pages and vm_hold_free_pages move pages into and out of
 3451  * a buffer's address space.  The pages are anonymous and are
 3452  * not associated with a file object.
 3453  */
 3454 static void
 3455 vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
 3456 {
 3457         vm_offset_t pg;
 3458         vm_page_t p;
 3459         int index;
 3460 
 3461         GIANT_REQUIRED;
 3462 
 3463         to = round_page(to);
 3464         from = round_page(from);
 3465         index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
 3466 
 3467         for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
 3468 tryagain:
 3469                 /*
 3470                  * note: must allocate system pages since blocking here
 3471                  * could interfere with paging I/O, no matter which
 3472                  * process we are.
 3473                  */
 3474                 p = vm_page_alloc(kernel_object,
 3475                         ((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
 3476                     VM_ALLOC_SYSTEM | VM_ALLOC_WIRED);
 3477                 if (!p) {
 3478                         vm_pageout_deficit += (to - from) >> PAGE_SHIFT;
 3479                         VM_WAIT;
 3480                         goto tryagain;
 3481                 }
 3482                 vm_page_lock_queues();
 3483                 p->valid = VM_PAGE_BITS_ALL;
 3484                 vm_page_flag_clear(p, PG_ZERO);
 3485                 vm_page_unlock_queues();
 3486                 pmap_qenter(pg, &p, 1);
 3487                 bp->b_pages[index] = p;
 3488                 vm_page_wakeup(p);
 3489         }
 3490         bp->b_npages = index;
 3491 }
 3492 
 3493 /* Return pages associated with this buf to the vm system */
 3494 static void
 3495 vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
 3496 {
 3497         vm_offset_t pg;
 3498         vm_page_t p;
 3499         int index, newnpages;
 3500 
 3501         GIANT_REQUIRED;
 3502 
 3503         from = round_page(from);
 3504         to = round_page(to);
 3505         newnpages = index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
 3506 
 3507         for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
 3508                 p = bp->b_pages[index];
 3509                 if (p && (index < bp->b_npages)) {
 3510                         if (p->busy) {
 3511                                 printf(
 3512                             "vm_hold_free_pages: blkno: %jd, lblkno: %jd\n",
 3513                                     (intmax_t)bp->b_blkno,
 3514                                     (intmax_t)bp->b_lblkno);
 3515                         }
 3516                         bp->b_pages[index] = NULL;
 3517                         pmap_qremove(pg, 1);
 3518                         vm_page_lock_queues();
 3519                         vm_page_busy(p);
 3520                         vm_page_unwire(p, 0);
 3521                         vm_page_free(p);
 3522                         vm_page_unlock_queues();
 3523                 }
 3524         }
 3525         bp->b_npages = newnpages;
 3526 }
 3527 
 3528 
 3529 #include "opt_ddb.h"
 3530 #ifdef DDB
 3531 #include <ddb/ddb.h>
 3532 
 3533 /* DDB command to show buffer data */
 3534 DB_SHOW_COMMAND(buffer, db_show_buffer)
 3535 {
 3536         /* get args */
 3537         struct buf *bp = (struct buf *)addr;
 3538 
 3539         if (!have_addr) {
 3540                 db_printf("usage: show buffer <addr>\n");
 3541                 return;
 3542         }
 3543 
 3544         db_printf("b_flags = 0x%b\n", (u_int)bp->b_flags, PRINT_BUF_FLAGS);
 3545         db_printf(
 3546             "b_error = %d, b_bufsize = %ld, b_bcount = %ld, b_resid = %ld\n"
 3547             "b_dev = (%d,%d), b_data = %p, b_blkno = %jd, b_pblkno = %jd\n",
 3548             bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
 3549             major(bp->b_dev), minor(bp->b_dev), bp->b_data,
 3550             (intmax_t)bp->b_blkno, (intmax_t)bp->b_pblkno);
 3551         if (bp->b_npages) {
 3552                 int i;
 3553                 db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
 3554                 for (i = 0; i < bp->b_npages; i++) {
 3555                         vm_page_t m;
 3556                         m = bp->b_pages[i];
 3557                         db_printf("(%p, 0x%lx, 0x%lx)", (void *)m->object,
 3558                             (u_long)m->pindex, (u_long)VM_PAGE_TO_PHYS(m));
 3559                         if ((i + 1) < bp->b_npages)
 3560                                 db_printf(",");
 3561                 }
 3562                 db_printf("\n");
 3563         }
 3564 }
 3565 #endif /* DDB */
