The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/kern/vfs_bio.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * Copyright (c) 2004 Poul-Henning Kamp
    3  * Copyright (c) 1994,1997 John S. Dyson
    4  * All rights reserved.
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions
    8  * are met:
    9  * 1. Redistributions of source code must retain the above copyright
   10  *    notice, this list of conditions and the following disclaimer.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  *
   15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   25  * SUCH DAMAGE.
   26  */
   27 
   28 /*
   29  * this file contains a new buffer I/O scheme implementing a coherent
   30  * VM object and buffer cache scheme.  Pains have been taken to make
   31  * sure that the performance degradation associated with schemes such
   32  * as this is not realized.
   33  *
   34  * Author:  John S. Dyson
   35  * Significant help during the development and debugging phases
   36  * had been provided by David Greenman, also of the FreeBSD core team.
   37  *
   38  * see man buf(9) for more info.
   39  */
   40 
   41 #include <sys/cdefs.h>
   42 __FBSDID("$FreeBSD$");
   43 
   44 #include <sys/param.h>
   45 #include <sys/systm.h>
   46 #include <sys/bio.h>
   47 #include <sys/conf.h>
   48 #include <sys/buf.h>
   49 #include <sys/devicestat.h>
   50 #include <sys/eventhandler.h>
   51 #include <sys/limits.h>
   52 #include <sys/lock.h>
   53 #include <sys/malloc.h>
   54 #include <sys/mount.h>
   55 #include <sys/mutex.h>
   56 #include <sys/kernel.h>
   57 #include <sys/kthread.h>
   58 #include <sys/proc.h>
   59 #include <sys/resourcevar.h>
   60 #include <sys/sysctl.h>
   61 #include <sys/vmmeter.h>
   62 #include <sys/vnode.h>
   63 #include <geom/geom.h>
   64 #include <vm/vm.h>
   65 #include <vm/vm_param.h>
   66 #include <vm/vm_kern.h>
   67 #include <vm/vm_pageout.h>
   68 #include <vm/vm_page.h>
   69 #include <vm/vm_object.h>
   70 #include <vm/vm_extern.h>
   71 #include <vm/vm_map.h>
   72 #include "opt_directio.h"
   73 #include "opt_swap.h"
   74 
   75 static MALLOC_DEFINE(M_BIOBUF, "biobuf", "BIO buffer");
   76 
   77 struct  bio_ops bioops;         /* I/O operation notification */
   78 
   79 struct  buf_ops buf_ops_bio = {
   80         .bop_name       =       "buf_ops_bio",
   81         .bop_write      =       bufwrite,
   82         .bop_strategy   =       bufstrategy,
   83         .bop_sync       =       bufsync,
   84         .bop_bdflush    =       bufbdflush,
   85 };
   86 
   87 /*
   88  * XXX buf is global because kern_shutdown.c and ffs_checkoverlap has
   89  * carnal knowledge of buffers.  This knowledge should be moved to vfs_bio.c.
   90  */
   91 struct buf *buf;                /* buffer header pool */
   92 
   93 static struct proc *bufdaemonproc;
   94 
   95 static int inmem(struct vnode *vp, daddr_t blkno);
   96 static void vm_hold_free_pages(struct buf *bp, vm_offset_t from,
   97                 vm_offset_t to);
   98 static void vm_hold_load_pages(struct buf *bp, vm_offset_t from,
   99                 vm_offset_t to);
  100 static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off,
  101                                int pageno, vm_page_t m);
  102 static void vfs_clean_pages(struct buf *bp);
  103 static void vfs_setdirty(struct buf *bp);
  104 static void vfs_setdirty_locked_object(struct buf *bp);
  105 static void vfs_vmio_release(struct buf *bp);
  106 static int vfs_bio_clcheck(struct vnode *vp, int size,
  107                 daddr_t lblkno, daddr_t blkno);
  108 static int flushbufqueues(int, int);
  109 static void buf_daemon(void);
  110 static void bremfreel(struct buf *bp);
  111 
  112 int vmiodirenable = TRUE;
  113 SYSCTL_INT(_vfs, OID_AUTO, vmiodirenable, CTLFLAG_RW, &vmiodirenable, 0,
  114     "Use the VM system for directory writes");
  115 int runningbufspace;
  116 SYSCTL_INT(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD, &runningbufspace, 0,
  117     "Amount of presently outstanding async buffer io");
  118 static int bufspace;
  119 SYSCTL_INT(_vfs, OID_AUTO, bufspace, CTLFLAG_RD, &bufspace, 0,
  120     "Virtual memory used for buffers");
  121 static int maxbufspace;
  122 SYSCTL_INT(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RD, &maxbufspace, 0,
  123     "Maximum allowed value of bufspace (including buf_daemon)");
  124 static int bufmallocspace;
  125 SYSCTL_INT(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD, &bufmallocspace, 0,
  126     "Amount of malloced memory for buffers");
  127 static int maxbufmallocspace;
  128 SYSCTL_INT(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW, &maxbufmallocspace, 0,
  129     "Maximum amount of malloced memory for buffers");
  130 static int lobufspace;
  131 SYSCTL_INT(_vfs, OID_AUTO, lobufspace, CTLFLAG_RD, &lobufspace, 0,
  132     "Minimum amount of buffers we want to have");
  133 int hibufspace;
  134 SYSCTL_INT(_vfs, OID_AUTO, hibufspace, CTLFLAG_RD, &hibufspace, 0,
  135     "Maximum allowed value of bufspace (excluding buf_daemon)");
  136 static int bufreusecnt;
  137 SYSCTL_INT(_vfs, OID_AUTO, bufreusecnt, CTLFLAG_RW, &bufreusecnt, 0,
  138     "Number of times we have reused a buffer");
  139 static int buffreekvacnt;
  140 SYSCTL_INT(_vfs, OID_AUTO, buffreekvacnt, CTLFLAG_RW, &buffreekvacnt, 0,
  141     "Number of times we have freed the KVA space from some buffer");
  142 static int bufdefragcnt;
  143 SYSCTL_INT(_vfs, OID_AUTO, bufdefragcnt, CTLFLAG_RW, &bufdefragcnt, 0,
  144     "Number of times we have had to repeat buffer allocation to defragment");
  145 static int lorunningspace;
  146 SYSCTL_INT(_vfs, OID_AUTO, lorunningspace, CTLFLAG_RW, &lorunningspace, 0,
  147     "Minimum preferred space used for in-progress I/O");
  148 static int hirunningspace;
  149 SYSCTL_INT(_vfs, OID_AUTO, hirunningspace, CTLFLAG_RW, &hirunningspace, 0,
  150     "Maximum amount of space to use for in-progress I/O");
  151 int dirtybufferflushes;
  152 SYSCTL_INT(_vfs, OID_AUTO, dirtybufferflushes, CTLFLAG_RW, &dirtybufferflushes,
  153     0, "Number of bdwrite to bawrite conversions to limit dirty buffers");
  154 int bdwriteskip;
  155 SYSCTL_INT(_vfs, OID_AUTO, bdwriteskip, CTLFLAG_RW, &bdwriteskip,
  156     0, "Number of buffers supplied to bdwrite with snapshot deadlock risk");
  157 int altbufferflushes;
  158 SYSCTL_INT(_vfs, OID_AUTO, altbufferflushes, CTLFLAG_RW, &altbufferflushes,
  159     0, "Number of fsync flushes to limit dirty buffers");
  160 static int recursiveflushes;
  161 SYSCTL_INT(_vfs, OID_AUTO, recursiveflushes, CTLFLAG_RW, &recursiveflushes,
  162     0, "Number of flushes skipped due to being recursive");
  163 static int numdirtybuffers;
  164 SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD, &numdirtybuffers, 0,
  165     "Number of buffers that are dirty (has unwritten changes) at the moment");
  166 static int lodirtybuffers;
  167 SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW, &lodirtybuffers, 0,
  168     "How many buffers we want to have free before bufdaemon can sleep");
  169 static int hidirtybuffers;
  170 SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW, &hidirtybuffers, 0,
  171     "When the number of dirty buffers is considered severe");
  172 int dirtybufthresh;
  173 SYSCTL_INT(_vfs, OID_AUTO, dirtybufthresh, CTLFLAG_RW, &dirtybufthresh,
  174     0, "Number of bdwrite to bawrite conversions to clear dirty buffers");
  175 static int numfreebuffers;
  176 SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD, &numfreebuffers, 0,
  177     "Number of free buffers");
  178 static int lofreebuffers;
  179 SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW, &lofreebuffers, 0,
  180    "XXX Unused");
  181 static int hifreebuffers;
  182 SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW, &hifreebuffers, 0,
  183    "XXX Complicatedly unused");
  184 static int getnewbufcalls;
  185 SYSCTL_INT(_vfs, OID_AUTO, getnewbufcalls, CTLFLAG_RW, &getnewbufcalls, 0,
  186    "Number of calls to getnewbuf");
  187 static int getnewbufrestarts;
  188 SYSCTL_INT(_vfs, OID_AUTO, getnewbufrestarts, CTLFLAG_RW, &getnewbufrestarts, 0,
  189     "Number of times getnewbuf has had to restart a buffer aquisition");
  190 
  191 /*
  192  * Wakeup point for bufdaemon, as well as indicator of whether it is already
  193  * active.  Set to 1 when the bufdaemon is already "on" the queue, 0 when it
  194  * is idling.
  195  */
  196 static int bd_request;
  197 
  198 /*
  199  * This lock synchronizes access to bd_request.
  200  */
  201 static struct mtx bdlock;
  202 
  203 /*
  204  * bogus page -- for I/O to/from partially complete buffers
  205  * this is a temporary solution to the problem, but it is not
  206  * really that bad.  it would be better to split the buffer
  207  * for input in the case of buffers partially already in memory,
  208  * but the code is intricate enough already.
  209  */
  210 vm_page_t bogus_page;
  211 
  212 /*
  213  * Synchronization (sleep/wakeup) variable for active buffer space requests.
  214  * Set when wait starts, cleared prior to wakeup().
  215  * Used in runningbufwakeup() and waitrunningbufspace().
  216  */
  217 static int runningbufreq;
  218 
  219 /*
  220  * This lock protects the runningbufreq and synchronizes runningbufwakeup and
  221  * waitrunningbufspace().
  222  */
  223 static struct mtx rbreqlock;
  224 
  225 /* 
  226  * Synchronization (sleep/wakeup) variable for buffer requests.
  227  * Can contain the VFS_BIO_NEED flags defined below; setting/clearing is done
  228  * by and/or.
  229  * Used in numdirtywakeup(), bufspacewakeup(), bufcountwakeup(), bwillwrite(),
  230  * getnewbuf(), and getblk().
  231  */
  232 static int needsbuffer;
  233 
  234 /*
  235  * Lock that protects needsbuffer and the sleeps/wakeups surrounding it.
  236  */
  237 static struct mtx nblock;
  238 
  239 /*
  240  * Lock that protects against bwait()/bdone()/B_DONE races.
  241  */
  242 
  243 static struct mtx bdonelock;
  244 
  245 /*
  246  * Lock that protects against bwait()/bdone()/B_DONE races.
  247  */
  248 static struct mtx bpinlock;
  249 
  250 /*
  251  * Definitions for the buffer free lists.
  252  */
  253 #define BUFFER_QUEUES   6       /* number of free buffer queues */
  254 
  255 #define QUEUE_NONE      0       /* on no queue */
  256 #define QUEUE_CLEAN     1       /* non-B_DELWRI buffers */
  257 #define QUEUE_DIRTY     2       /* B_DELWRI buffers */
  258 #define QUEUE_DIRTY_GIANT 3     /* B_DELWRI buffers that need giant */
  259 #define QUEUE_EMPTYKVA  4       /* empty buffer headers w/KVA assignment */
  260 #define QUEUE_EMPTY     5       /* empty buffer headers */
  261 
  262 /* Queues for free buffers with various properties */
  263 static TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES] = { { 0 } };
  264 
  265 /* Lock for the bufqueues */
  266 static struct mtx bqlock;
  267 
  268 /*
  269  * Single global constant for BUF_WMESG, to avoid getting multiple references.
  270  * buf_wmesg is referred from macros.
  271  */
  272 const char *buf_wmesg = BUF_WMESG;
  273 
  274 #define VFS_BIO_NEED_ANY        0x01    /* any freeable buffer */
  275 #define VFS_BIO_NEED_DIRTYFLUSH 0x02    /* waiting for dirty buffer flush */
  276 #define VFS_BIO_NEED_FREE       0x04    /* wait for free bufs, hi hysteresis */
  277 #define VFS_BIO_NEED_BUFSPACE   0x08    /* wait for buf space, lo hysteresis */
  278 
  279 #ifdef DIRECTIO
  280 extern void ffs_rawread_setup(void);
  281 #endif /* DIRECTIO */
  282 /*
  283  *      numdirtywakeup:
  284  *
  285  *      If someone is blocked due to there being too many dirty buffers,
  286  *      and numdirtybuffers is now reasonable, wake them up.
  287  */
  288 
  289 static __inline void
  290 numdirtywakeup(int level)
  291 {
  292 
  293         if (numdirtybuffers <= level) {
  294                 mtx_lock(&nblock);
  295                 if (needsbuffer & VFS_BIO_NEED_DIRTYFLUSH) {
  296                         needsbuffer &= ~VFS_BIO_NEED_DIRTYFLUSH;
  297                         wakeup(&needsbuffer);
  298                 }
  299                 mtx_unlock(&nblock);
  300         }
  301 }
  302 
  303 /*
  304  *      bufspacewakeup:
  305  *
  306  *      Called when buffer space is potentially available for recovery.
  307  *      getnewbuf() will block on this flag when it is unable to free 
  308  *      sufficient buffer space.  Buffer space becomes recoverable when 
  309  *      bp's get placed back in the queues.
  310  */
  311 
  312 static __inline void
  313 bufspacewakeup(void)
  314 {
  315 
  316         /*
  317          * If someone is waiting for BUF space, wake them up.  Even
  318          * though we haven't freed the kva space yet, the waiting
  319          * process will be able to now.
  320          */
  321         mtx_lock(&nblock);
  322         if (needsbuffer & VFS_BIO_NEED_BUFSPACE) {
  323                 needsbuffer &= ~VFS_BIO_NEED_BUFSPACE;
  324                 wakeup(&needsbuffer);
  325         }
  326         mtx_unlock(&nblock);
  327 }
  328 
  329 /*
  330  * runningbufwakeup() - in-progress I/O accounting.
  331  *
  332  */
  333 void
  334 runningbufwakeup(struct buf *bp)
  335 {
  336 
  337         if (bp->b_runningbufspace) {
  338                 atomic_subtract_int(&runningbufspace, bp->b_runningbufspace);
  339                 bp->b_runningbufspace = 0;
  340                 mtx_lock(&rbreqlock);
  341                 if (runningbufreq && runningbufspace <= lorunningspace) {
  342                         runningbufreq = 0;
  343                         wakeup(&runningbufreq);
  344                 }
  345                 mtx_unlock(&rbreqlock);
  346         }
  347 }
  348 
  349 /*
  350  *      bufcountwakeup:
  351  *
  352  *      Called when a buffer has been added to one of the free queues to
  353  *      account for the buffer and to wakeup anyone waiting for free buffers.
  354  *      This typically occurs when large amounts of metadata are being handled
  355  *      by the buffer cache ( else buffer space runs out first, usually ).
  356  */
  357 
  358 static __inline void
  359 bufcountwakeup(void) 
  360 {
  361 
  362         atomic_add_int(&numfreebuffers, 1);
  363         mtx_lock(&nblock);
  364         if (needsbuffer) {
  365                 needsbuffer &= ~VFS_BIO_NEED_ANY;
  366                 if (numfreebuffers >= hifreebuffers)
  367                         needsbuffer &= ~VFS_BIO_NEED_FREE;
  368                 wakeup(&needsbuffer);
  369         }
  370         mtx_unlock(&nblock);
  371 }
  372 
  373 /*
  374  *      waitrunningbufspace()
  375  *
  376  *      runningbufspace is a measure of the amount of I/O currently
  377  *      running.  This routine is used in async-write situations to
  378  *      prevent creating huge backups of pending writes to a device.
  379  *      Only asynchronous writes are governed by this function.
  380  *
  381  *      Reads will adjust runningbufspace, but will not block based on it.
  382  *      The read load has a side effect of reducing the allowed write load.
  383  *
  384  *      This does NOT turn an async write into a sync write.  It waits  
  385  *      for earlier writes to complete and generally returns before the
  386  *      caller's write has reached the device.
  387  */
  388 void
  389 waitrunningbufspace(void)
  390 {
  391 
  392         mtx_lock(&rbreqlock);
  393         while (runningbufspace > hirunningspace) {
  394                 ++runningbufreq;
  395                 msleep(&runningbufreq, &rbreqlock, PVM, "wdrain", 0);
  396         }
  397         mtx_unlock(&rbreqlock);
  398 }
  399 
  400 
  401 /*
  402  *      vfs_buf_test_cache:
  403  *
  404  *      Called when a buffer is extended.  This function clears the B_CACHE
  405  *      bit if the newly extended portion of the buffer does not contain
  406  *      valid data.
  407  */
  408 static __inline
  409 void
  410 vfs_buf_test_cache(struct buf *bp,
  411                   vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
  412                   vm_page_t m)
  413 {
  414 
  415         VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
  416         if (bp->b_flags & B_CACHE) {
  417                 int base = (foff + off) & PAGE_MASK;
  418                 if (vm_page_is_valid(m, base, size) == 0)
  419                         bp->b_flags &= ~B_CACHE;
  420         }
  421 }
  422 
  423 /* Wake up the buffer daemon if necessary */
  424 static __inline
  425 void
  426 bd_wakeup(int dirtybuflevel)
  427 {
  428 
  429         mtx_lock(&bdlock);
  430         if (bd_request == 0 && numdirtybuffers >= dirtybuflevel) {
  431                 bd_request = 1;
  432                 wakeup(&bd_request);
  433         }
  434         mtx_unlock(&bdlock);
  435 }
  436 
  437 /*
  438  * bd_speedup - speedup the buffer cache flushing code
  439  */
  440 
  441 static __inline
  442 void
  443 bd_speedup(void)
  444 {
  445 
  446         bd_wakeup(1);
  447 }
  448 
  449 /*
  450  * Calculating buffer cache scaling values and reserve space for buffer
  451  * headers.  This is called during low level kernel initialization and
  452  * may be called more then once.  We CANNOT write to the memory area
  453  * being reserved at this time.
  454  */
  455 caddr_t
  456 kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est)
  457 {
  458         int maxbuf;
  459 
  460         /*
  461          * physmem_est is in pages.  Convert it to kilobytes (assumes
  462          * PAGE_SIZE is >= 1K)
  463          */
  464         physmem_est = physmem_est * (PAGE_SIZE / 1024);
  465 
  466         /*
  467          * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
  468          * For the first 64MB of ram nominally allocate sufficient buffers to
  469          * cover 1/4 of our ram.  Beyond the first 64MB allocate additional
  470          * buffers to cover 1/10 of our ram over 64MB.  When auto-sizing
  471          * the buffer cache we limit the eventual kva reservation to
  472          * maxbcache bytes.
  473          *
  474          * factor represents the 1/4 x ram conversion.
  475          */
  476         if (nbuf == 0) {
  477                 int factor = 4 * BKVASIZE / 1024;
  478 
  479                 nbuf = 50;
  480                 if (physmem_est > 4096)
  481                         nbuf += min((physmem_est - 4096) / factor,
  482                             65536 / factor);
  483                 if (physmem_est > 65536)
  484                         nbuf += (physmem_est - 65536) * 2 / (factor * 5);
  485 
  486                 if (maxbcache && nbuf > maxbcache / BKVASIZE)
  487                         nbuf = maxbcache / BKVASIZE;
  488 
  489                 /* XXX Avoid integer overflows later on with maxbufspace. */
  490                 maxbuf = (INT_MAX / 3) / BKVASIZE;
  491                 if (nbuf > maxbuf)
  492                         nbuf = maxbuf;
  493         }
  494 
  495 #if 0
  496         /*
  497          * Do not allow the buffer_map to be more then 1/2 the size of the
  498          * kernel_map.
  499          */
  500         if (nbuf > (kernel_map->max_offset - kernel_map->min_offset) / 
  501             (BKVASIZE * 2)) {
  502                 nbuf = (kernel_map->max_offset - kernel_map->min_offset) / 
  503                     (BKVASIZE * 2);
  504                 printf("Warning: nbufs capped at %d\n", nbuf);
  505         }
  506 #endif
  507 
  508         /*
  509          * swbufs are used as temporary holders for I/O, such as paging I/O.
  510          * We have no less then 16 and no more then 256.
  511          */
  512         nswbuf = max(min(nbuf/4, 256), 16);
  513 #ifdef NSWBUF_MIN
  514         if (nswbuf < NSWBUF_MIN)
  515                 nswbuf = NSWBUF_MIN;
  516 #endif
  517 #ifdef DIRECTIO
  518         ffs_rawread_setup();
  519 #endif
  520 
  521         /*
  522          * Reserve space for the buffer cache buffers
  523          */
  524         swbuf = (void *)v;
  525         v = (caddr_t)(swbuf + nswbuf);
  526         buf = (void *)v;
  527         v = (caddr_t)(buf + nbuf);
  528 
  529         return(v);
  530 }
  531 
  532 /* Initialize the buffer subsystem.  Called before use of any buffers. */
  533 void
  534 bufinit(void)
  535 {
  536         struct buf *bp;
  537         int i;
  538 
  539         mtx_init(&bqlock, "buf queue lock", NULL, MTX_DEF);
  540         mtx_init(&rbreqlock, "runningbufspace lock", NULL, MTX_DEF);
  541         mtx_init(&nblock, "needsbuffer lock", NULL, MTX_DEF);
  542         mtx_init(&bdlock, "buffer daemon lock", NULL, MTX_DEF);
  543         mtx_init(&bdonelock, "bdone lock", NULL, MTX_DEF);
  544         mtx_init(&bpinlock, "bpin lock", NULL, MTX_DEF);
  545 
  546         /* next, make a null set of free lists */
  547         for (i = 0; i < BUFFER_QUEUES; i++)
  548                 TAILQ_INIT(&bufqueues[i]);
  549 
  550         /* finally, initialize each buffer header and stick on empty q */
  551         for (i = 0; i < nbuf; i++) {
  552                 bp = &buf[i];
  553                 bzero(bp, sizeof *bp);
  554                 bp->b_flags = B_INVAL;  /* we're just an empty header */
  555                 bp->b_rcred = NOCRED;
  556                 bp->b_wcred = NOCRED;
  557                 bp->b_qindex = QUEUE_EMPTY;
  558                 bp->b_vflags = 0;
  559                 bp->b_xflags = 0;
  560                 LIST_INIT(&bp->b_dep);
  561                 BUF_LOCKINIT(bp);
  562                 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
  563         }
  564 
  565         /*
  566          * maxbufspace is the absolute maximum amount of buffer space we are 
  567          * allowed to reserve in KVM and in real terms.  The absolute maximum
  568          * is nominally used by buf_daemon.  hibufspace is the nominal maximum
  569          * used by most other processes.  The differential is required to 
  570          * ensure that buf_daemon is able to run when other processes might 
  571          * be blocked waiting for buffer space.
  572          *
  573          * maxbufspace is based on BKVASIZE.  Allocating buffers larger then
  574          * this may result in KVM fragmentation which is not handled optimally
  575          * by the system.
  576          */
  577         maxbufspace = nbuf * BKVASIZE;
  578         hibufspace = imax(3 * maxbufspace / 4, maxbufspace - MAXBSIZE * 10);
  579         lobufspace = hibufspace - MAXBSIZE;
  580 
  581         lorunningspace = 512 * 1024;
  582         hirunningspace = 1024 * 1024;
  583 
  584 /*
  585  * Limit the amount of malloc memory since it is wired permanently into
  586  * the kernel space.  Even though this is accounted for in the buffer
  587  * allocation, we don't want the malloced region to grow uncontrolled.
  588  * The malloc scheme improves memory utilization significantly on average
  589  * (small) directories.
  590  */
  591         maxbufmallocspace = hibufspace / 20;
  592 
  593 /*
  594  * Reduce the chance of a deadlock occuring by limiting the number
  595  * of delayed-write dirty buffers we allow to stack up.
  596  */
  597         hidirtybuffers = nbuf / 4 + 20;
  598         dirtybufthresh = hidirtybuffers * 9 / 10;
  599         numdirtybuffers = 0;
  600 /*
  601  * To support extreme low-memory systems, make sure hidirtybuffers cannot
  602  * eat up all available buffer space.  This occurs when our minimum cannot
  603  * be met.  We try to size hidirtybuffers to 3/4 our buffer space assuming
  604  * BKVASIZE'd (8K) buffers.
  605  */
  606         while (hidirtybuffers * BKVASIZE > 3 * hibufspace / 4) {
  607                 hidirtybuffers >>= 1;
  608         }
  609         lodirtybuffers = hidirtybuffers / 2;
  610 
  611 /*
  612  * Try to keep the number of free buffers in the specified range,
  613  * and give special processes (e.g. like buf_daemon) access to an 
  614  * emergency reserve.
  615  */
  616         lofreebuffers = nbuf / 18 + 5;
  617         hifreebuffers = 2 * lofreebuffers;
  618         numfreebuffers = nbuf;
  619 
  620 /*
  621  * Maximum number of async ops initiated per buf_daemon loop.  This is
  622  * somewhat of a hack at the moment, we really need to limit ourselves
  623  * based on the number of bytes of I/O in-transit that were initiated
  624  * from buf_daemon.
  625  */
  626 
  627         bogus_page = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ |
  628             VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
  629 }
  630 
  631 /*
  632  * bfreekva() - free the kva allocation for a buffer.
  633  *
  634  *      Since this call frees up buffer space, we call bufspacewakeup().
  635  */
  636 static void
  637 bfreekva(struct buf *bp)
  638 {
  639 
  640         if (bp->b_kvasize) {
  641                 atomic_add_int(&buffreekvacnt, 1);
  642                 atomic_subtract_int(&bufspace, bp->b_kvasize);
  643                 vm_map_remove(buffer_map, (vm_offset_t) bp->b_kvabase,
  644                     (vm_offset_t) bp->b_kvabase + bp->b_kvasize);
  645                 bp->b_kvasize = 0;
  646                 bufspacewakeup();
  647         }
  648 }
  649 
  650 /*
  651  *      bremfree:
  652  *
  653  *      Mark the buffer for removal from the appropriate free list in brelse.
  654  *      
  655  */
  656 void
  657 bremfree(struct buf *bp)
  658 {
  659 
  660         CTR3(KTR_BUF, "bremfree(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
  661         KASSERT(BUF_REFCNT(bp), ("bremfree: buf must be locked."));
  662         KASSERT((bp->b_flags & B_REMFREE) == 0,
  663             ("bremfree: buffer %p already marked for delayed removal.", bp));
  664         KASSERT(bp->b_qindex != QUEUE_NONE,
  665             ("bremfree: buffer %p not on a queue.", bp));
  666 
  667         bp->b_flags |= B_REMFREE;
  668         /* Fixup numfreebuffers count.  */
  669         if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0)
  670                 atomic_subtract_int(&numfreebuffers, 1);
  671 }
  672 
  673 /*
  674  *      bremfreef:
  675  *
  676  *      Force an immediate removal from a free list.  Used only in nfs when
  677  *      it abuses the b_freelist pointer.
  678  */
  679 void
  680 bremfreef(struct buf *bp)
  681 {
  682         mtx_lock(&bqlock);
  683         bremfreel(bp);
  684         mtx_unlock(&bqlock);
  685 }
  686 
  687 /*
  688  *      bremfreel:
  689  *
  690  *      Removes a buffer from the free list, must be called with the
  691  *      bqlock held.
  692  */
  693 static void
  694 bremfreel(struct buf *bp)
  695 {
  696         CTR3(KTR_BUF, "bremfreel(%p) vp %p flags %X",
  697             bp, bp->b_vp, bp->b_flags);
  698         KASSERT(BUF_REFCNT(bp), ("bremfreel: buffer %p not locked.", bp));
  699         KASSERT(bp->b_qindex != QUEUE_NONE,
  700             ("bremfreel: buffer %p not on a queue.", bp));
  701         mtx_assert(&bqlock, MA_OWNED);
  702 
  703         TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
  704         bp->b_qindex = QUEUE_NONE;
  705         /*
  706          * If this was a delayed bremfree() we only need to remove the buffer
  707          * from the queue and return the stats are already done.
  708          */
  709         if (bp->b_flags & B_REMFREE) {
  710                 bp->b_flags &= ~B_REMFREE;
  711                 return;
  712         }
  713         /*
  714          * Fixup numfreebuffers count.  If the buffer is invalid or not
  715          * delayed-write, the buffer was free and we must decrement
  716          * numfreebuffers.
  717          */
  718         if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0)
  719                 atomic_subtract_int(&numfreebuffers, 1);
  720 }
  721 
  722 
  723 /*
  724  * Get a buffer with the specified data.  Look in the cache first.  We
  725  * must clear BIO_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE
  726  * is set, the buffer is valid and we do not have to do anything ( see
  727  * getblk() ).  This is really just a special case of breadn().
  728  */
  729 int
  730 bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
  731     struct buf **bpp)
  732 {
  733 
  734         return (breadn(vp, blkno, size, 0, 0, 0, cred, bpp));
  735 }
  736 
  737 /*
  738  * Attempt to initiate asynchronous I/O on read-ahead blocks.  We must
  739  * clear BIO_ERROR and B_INVAL prior to initiating I/O . If B_CACHE is set,
  740  * the buffer is valid and we do not have to do anything.
  741  */
  742 void
  743 breada(struct vnode * vp, daddr_t * rablkno, int * rabsize,
  744     int cnt, struct ucred * cred)
  745 {
  746         struct buf *rabp;
  747         int i;
  748 
  749         for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
  750                 if (inmem(vp, *rablkno))
  751                         continue;
  752                 rabp = getblk(vp, *rablkno, *rabsize, 0, 0, 0);
  753 
  754                 if ((rabp->b_flags & B_CACHE) == 0) {
  755                         if (!TD_IS_IDLETHREAD(curthread))
  756                                 curthread->td_ru.ru_inblock++;
  757                         rabp->b_flags |= B_ASYNC;
  758                         rabp->b_flags &= ~B_INVAL;
  759                         rabp->b_ioflags &= ~BIO_ERROR;
  760                         rabp->b_iocmd = BIO_READ;
  761                         if (rabp->b_rcred == NOCRED && cred != NOCRED)
  762                                 rabp->b_rcred = crhold(cred);
  763                         vfs_busy_pages(rabp, 0);
  764                         BUF_KERNPROC(rabp);
  765                         rabp->b_iooffset = dbtob(rabp->b_blkno);
  766                         bstrategy(rabp);
  767                 } else {
  768                         brelse(rabp);
  769                 }
  770         }
  771 }
  772 
  773 /*
  774  * Operates like bread, but also starts asynchronous I/O on
  775  * read-ahead blocks.
  776  */
  777 int
  778 breadn(struct vnode * vp, daddr_t blkno, int size,
  779     daddr_t * rablkno, int *rabsize,
  780     int cnt, struct ucred * cred, struct buf **bpp)
  781 {
  782         struct buf *bp;
  783         int rv = 0, readwait = 0;
  784 
  785         CTR3(KTR_BUF, "breadn(%p, %jd, %d)", vp, blkno, size);
  786         *bpp = bp = getblk(vp, blkno, size, 0, 0, 0);
  787 
  788         /* if not found in cache, do some I/O */
  789         if ((bp->b_flags & B_CACHE) == 0) {
  790                 if (!TD_IS_IDLETHREAD(curthread))
  791                         curthread->td_ru.ru_inblock++;
  792                 bp->b_iocmd = BIO_READ;
  793                 bp->b_flags &= ~B_INVAL;
  794                 bp->b_ioflags &= ~BIO_ERROR;
  795                 if (bp->b_rcred == NOCRED && cred != NOCRED)
  796                         bp->b_rcred = crhold(cred);
  797                 vfs_busy_pages(bp, 0);
  798                 bp->b_iooffset = dbtob(bp->b_blkno);
  799                 bstrategy(bp);
  800                 ++readwait;
  801         }
  802 
  803         breada(vp, rablkno, rabsize, cnt, cred);
  804 
  805         if (readwait) {
  806                 rv = bufwait(bp);
  807         }
  808         return (rv);
  809 }
  810 
  811 /*
  812  * Write, release buffer on completion.  (Done by iodone
  813  * if async).  Do not bother writing anything if the buffer
  814  * is invalid.
  815  *
  816  * Note that we set B_CACHE here, indicating that buffer is
  817  * fully valid and thus cacheable.  This is true even of NFS
  818  * now so we set it generally.  This could be set either here 
  819  * or in biodone() since the I/O is synchronous.  We put it
  820  * here.
  821  */
  822 int
  823 bufwrite(struct buf *bp)
  824 {
  825         int oldflags;
  826         struct vnode *vp;
  827         int vp_md;
  828 
  829         CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
  830         if (bp->b_flags & B_INVAL) {
  831                 brelse(bp);
  832                 return (0);
  833         }
  834 
  835         oldflags = bp->b_flags;
  836 
  837         if (BUF_REFCNT(bp) == 0)
  838                 panic("bufwrite: buffer is not busy???");
  839 
  840         if (bp->b_pin_count > 0)
  841                 bunpin_wait(bp);
  842 
  843         KASSERT(!(bp->b_vflags & BV_BKGRDINPROG),
  844             ("FFS background buffer should not get here %p", bp));
  845 
  846         vp = bp->b_vp;
  847         if (vp)
  848                 vp_md = vp->v_vflag & VV_MD;
  849         else
  850                 vp_md = 0;
  851 
  852         /* Mark the buffer clean */
  853         bundirty(bp);
  854 
  855         bp->b_flags &= ~B_DONE;
  856         bp->b_ioflags &= ~BIO_ERROR;
  857         bp->b_flags |= B_CACHE;
  858         bp->b_iocmd = BIO_WRITE;
  859 
  860         bufobj_wref(bp->b_bufobj);
  861         vfs_busy_pages(bp, 1);
  862 
  863         /*
  864          * Normal bwrites pipeline writes
  865          */
  866         bp->b_runningbufspace = bp->b_bufsize;
  867         atomic_add_int(&runningbufspace, bp->b_runningbufspace);
  868 
  869         if (!TD_IS_IDLETHREAD(curthread))
  870                 curthread->td_ru.ru_oublock++;
  871         if (oldflags & B_ASYNC)
  872                 BUF_KERNPROC(bp);
  873         bp->b_iooffset = dbtob(bp->b_blkno);
  874         bstrategy(bp);
  875 
  876         if ((oldflags & B_ASYNC) == 0) {
  877                 int rtval = bufwait(bp);
  878                 brelse(bp);
  879                 return (rtval);
  880         } else {
  881                 /*
  882                  * don't allow the async write to saturate the I/O
  883                  * system.  We will not deadlock here because
  884                  * we are blocking waiting for I/O that is already in-progress
  885                  * to complete. We do not block here if it is the update
  886                  * or syncer daemon trying to clean up as that can lead
  887                  * to deadlock.
  888                  */
  889                 if ((curthread->td_pflags & TDP_NORUNNINGBUF) == 0 && !vp_md)
  890                         waitrunningbufspace();
  891         }
  892 
  893         return (0);
  894 }
  895 
  896 void
  897 bufbdflush(struct bufobj *bo, struct buf *bp)
  898 {
  899         struct buf *nbp;
  900 
  901         if (bo->bo_dirty.bv_cnt > dirtybufthresh + 10) {
  902                 (void) VOP_FSYNC(bp->b_vp, MNT_NOWAIT, curthread);
  903                 altbufferflushes++;
  904         } else if (bo->bo_dirty.bv_cnt > dirtybufthresh) {
  905                 BO_LOCK(bo);
  906                 /*
  907                  * Try to find a buffer to flush.
  908                  */
  909                 TAILQ_FOREACH(nbp, &bo->bo_dirty.bv_hd, b_bobufs) {
  910                         if ((nbp->b_vflags & BV_BKGRDINPROG) ||
  911                             BUF_LOCK(nbp,
  912                                      LK_EXCLUSIVE | LK_NOWAIT, NULL))
  913                                 continue;
  914                         if (bp == nbp)
  915                                 panic("bdwrite: found ourselves");
  916                         BO_UNLOCK(bo);
  917                         /* Don't countdeps with the bo lock held. */
  918                         if (buf_countdeps(nbp, 0)) {
  919                                 BO_LOCK(bo);
  920                                 BUF_UNLOCK(nbp);
  921                                 continue;
  922                         }
  923                         if (nbp->b_flags & B_CLUSTEROK) {
  924                                 vfs_bio_awrite(nbp);
  925                         } else {
  926                                 bremfree(nbp);
  927                                 bawrite(nbp);
  928                         }
  929                         dirtybufferflushes++;
  930                         break;
  931                 }
  932                 if (nbp == NULL)
  933                         BO_UNLOCK(bo);
  934         }
  935 }
  936 
  937 /*
  938  * Delayed write. (Buffer is marked dirty).  Do not bother writing
  939  * anything if the buffer is marked invalid.
  940  *
  941  * Note that since the buffer must be completely valid, we can safely
  942  * set B_CACHE.  In fact, we have to set B_CACHE here rather then in
  943  * biodone() in order to prevent getblk from writing the buffer
  944  * out synchronously.
  945  */
  946 void
  947 bdwrite(struct buf *bp)
  948 {
  949         struct thread *td = curthread;
  950         struct vnode *vp;
  951         struct bufobj *bo;
  952 
  953         CTR3(KTR_BUF, "bdwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
  954         KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
  955         KASSERT(BUF_REFCNT(bp) != 0, ("bdwrite: buffer is not busy"));
  956 
  957         if (bp->b_flags & B_INVAL) {
  958                 brelse(bp);
  959                 return;
  960         }
  961 
  962         /*
  963          * If we have too many dirty buffers, don't create any more.
  964          * If we are wildly over our limit, then force a complete
  965          * cleanup. Otherwise, just keep the situation from getting
  966          * out of control. Note that we have to avoid a recursive
  967          * disaster and not try to clean up after our own cleanup!
  968          */
  969         vp = bp->b_vp;
  970         bo = bp->b_bufobj;
  971         if ((td->td_pflags & (TDP_COWINPROGRESS|TDP_INBDFLUSH)) == 0) {
  972                 td->td_pflags |= TDP_INBDFLUSH;
  973                 BO_BDFLUSH(bo, bp);
  974                 td->td_pflags &= ~TDP_INBDFLUSH;
  975         } else
  976                 recursiveflushes++;
  977 
  978         bdirty(bp);
  979         /*
  980          * Set B_CACHE, indicating that the buffer is fully valid.  This is
  981          * true even of NFS now.
  982          */
  983         bp->b_flags |= B_CACHE;
  984 
  985         /*
  986          * This bmap keeps the system from needing to do the bmap later,
  987          * perhaps when the system is attempting to do a sync.  Since it
  988          * is likely that the indirect block -- or whatever other datastructure
  989          * that the filesystem needs is still in memory now, it is a good
  990          * thing to do this.  Note also, that if the pageout daemon is
  991          * requesting a sync -- there might not be enough memory to do
  992          * the bmap then...  So, this is important to do.
  993          */
  994         if (vp->v_type != VCHR && bp->b_lblkno == bp->b_blkno) {
  995                 VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
  996         }
  997 
  998         /*
  999          * Set the *dirty* buffer range based upon the VM system dirty pages.
 1000          */
 1001         vfs_setdirty(bp);
 1002 
 1003         /*
 1004          * We need to do this here to satisfy the vnode_pager and the
 1005          * pageout daemon, so that it thinks that the pages have been
 1006          * "cleaned".  Note that since the pages are in a delayed write
 1007          * buffer -- the VFS layer "will" see that the pages get written
 1008          * out on the next sync, or perhaps the cluster will be completed.
 1009          */
 1010         vfs_clean_pages(bp);
 1011         bqrelse(bp);
 1012 
 1013         /*
 1014          * Wakeup the buffer flushing daemon if we have a lot of dirty
 1015          * buffers (midpoint between our recovery point and our stall
 1016          * point).
 1017          */
 1018         bd_wakeup((lodirtybuffers + hidirtybuffers) / 2);
 1019 
 1020         /*
 1021          * note: we cannot initiate I/O from a bdwrite even if we wanted to,
 1022          * due to the softdep code.
 1023          */
 1024 }
 1025 
 1026 /*
 1027  *      bdirty:
 1028  *
 1029  *      Turn buffer into delayed write request.  We must clear BIO_READ and
 1030  *      B_RELBUF, and we must set B_DELWRI.  We reassign the buffer to 
 1031  *      itself to properly update it in the dirty/clean lists.  We mark it
 1032  *      B_DONE to ensure that any asynchronization of the buffer properly
 1033  *      clears B_DONE ( else a panic will occur later ).  
 1034  *
 1035  *      bdirty() is kinda like bdwrite() - we have to clear B_INVAL which
 1036  *      might have been set pre-getblk().  Unlike bwrite/bdwrite, bdirty()
 1037  *      should only be called if the buffer is known-good.
 1038  *
 1039  *      Since the buffer is not on a queue, we do not update the numfreebuffers
 1040  *      count.
 1041  *
 1042  *      The buffer must be on QUEUE_NONE.
 1043  */
 1044 void
 1045 bdirty(struct buf *bp)
 1046 {
 1047 
 1048         CTR3(KTR_BUF, "bdirty(%p) vp %p flags %X",
 1049             bp, bp->b_vp, bp->b_flags);
 1050         KASSERT(BUF_REFCNT(bp) == 1, ("bdirty: bp %p not locked",bp));
 1051         KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
 1052         KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
 1053             ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
 1054         bp->b_flags &= ~(B_RELBUF);
 1055         bp->b_iocmd = BIO_WRITE;
 1056 
 1057         if ((bp->b_flags & B_DELWRI) == 0) {
 1058                 bp->b_flags |= /* XXX B_DONE | */ B_DELWRI;
 1059                 reassignbuf(bp);
 1060                 atomic_add_int(&numdirtybuffers, 1);
 1061                 bd_wakeup((lodirtybuffers + hidirtybuffers) / 2);
 1062         }
 1063 }
 1064 
 1065 /*
 1066  *      bundirty:
 1067  *
 1068  *      Clear B_DELWRI for buffer.
 1069  *
 1070  *      Since the buffer is not on a queue, we do not update the numfreebuffers
 1071  *      count.
 1072  *      
 1073  *      The buffer must be on QUEUE_NONE.
 1074  */
 1075 
 1076 void
 1077 bundirty(struct buf *bp)
 1078 {
 1079 
 1080         CTR3(KTR_BUF, "bundirty(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
 1081         KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
 1082         KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
 1083             ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex));
 1084         KASSERT(BUF_REFCNT(bp) == 1, ("bundirty: bp %p not locked",bp));
 1085 
 1086         if (bp->b_flags & B_DELWRI) {
 1087                 bp->b_flags &= ~B_DELWRI;
 1088                 reassignbuf(bp);
 1089                 atomic_subtract_int(&numdirtybuffers, 1);
 1090                 numdirtywakeup(lodirtybuffers);
 1091         }
 1092         /*
 1093          * Since it is now being written, we can clear its deferred write flag.
 1094          */
 1095         bp->b_flags &= ~B_DEFERRED;
 1096 }
 1097 
 1098 /*
 1099  *      bawrite:
 1100  *
 1101  *      Asynchronous write.  Start output on a buffer, but do not wait for
 1102  *      it to complete.  The buffer is released when the output completes.
 1103  *
 1104  *      bwrite() ( or the VOP routine anyway ) is responsible for handling 
 1105  *      B_INVAL buffers.  Not us.
 1106  */
 1107 void
 1108 bawrite(struct buf *bp)
 1109 {
 1110 
 1111         bp->b_flags |= B_ASYNC;
 1112         (void) bwrite(bp);
 1113 }
 1114 
 1115 /*
 1116  *      bwillwrite:
 1117  *
 1118  *      Called prior to the locking of any vnodes when we are expecting to
 1119  *      write.  We do not want to starve the buffer cache with too many
 1120  *      dirty buffers so we block here.  By blocking prior to the locking
 1121  *      of any vnodes we attempt to avoid the situation where a locked vnode
 1122  *      prevents the various system daemons from flushing related buffers.
 1123  */
 1124 
 1125 void
 1126 bwillwrite(void)
 1127 {
 1128 
 1129         if (numdirtybuffers >= hidirtybuffers) {
 1130                 mtx_lock(&nblock);
 1131                 while (numdirtybuffers >= hidirtybuffers) {
 1132                         bd_wakeup(1);
 1133                         needsbuffer |= VFS_BIO_NEED_DIRTYFLUSH;
 1134                         msleep(&needsbuffer, &nblock,
 1135                             (PRIBIO + 4), "flswai", 0);
 1136                 }
 1137                 mtx_unlock(&nblock);
 1138         }
 1139 }
 1140 
 1141 /*
 1142  * Return true if we have too many dirty buffers.
 1143  */
 1144 int
 1145 buf_dirty_count_severe(void)
 1146 {
 1147 
 1148         return(numdirtybuffers >= hidirtybuffers);
 1149 }
 1150 
 1151 /*
 1152  *      brelse:
 1153  *
 1154  *      Release a busy buffer and, if requested, free its resources.  The
 1155  *      buffer will be stashed in the appropriate bufqueue[] allowing it
 1156  *      to be accessed later as a cache entity or reused for other purposes.
 1157  */
 1158 void
 1159 brelse(struct buf *bp)
 1160 {
 1161         CTR3(KTR_BUF, "brelse(%p) vp %p flags %X",
 1162             bp, bp->b_vp, bp->b_flags);
 1163         KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
 1164             ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
 1165 
 1166         if (bp->b_flags & B_MANAGED) {
 1167                 bqrelse(bp);
 1168                 return;
 1169         }
 1170 
 1171         if (bp->b_iocmd == BIO_WRITE &&
 1172             (bp->b_ioflags & BIO_ERROR) &&
 1173             bp->b_error != ENXIO &&
 1174             !(bp->b_flags & B_INVAL)) {
 1175                 /*
 1176                  * Failed write, redirty.  Must clear BIO_ERROR to prevent
 1177                  * pages from being scrapped.  If B_INVAL is set then
 1178                  * this case is not run and the next case is run to 
 1179                  * destroy the buffer.  B_INVAL can occur if the buffer
 1180                  * is outside the range supported by the underlying device.
 1181                  * If the error is that the device went away (ENXIO), we
 1182                  * shouldn't redirty the buffer either, but discard the
 1183                  * data too.
 1184                  */
 1185                 bp->b_ioflags &= ~BIO_ERROR;
 1186                 bdirty(bp);
 1187         } else if ((bp->b_flags & (B_NOCACHE | B_INVAL)) ||
 1188             (bp->b_ioflags & BIO_ERROR) || (bp->b_bufsize <= 0)) {
 1189                 /*
 1190                  * Either a failed I/O or we were asked to free or not
 1191                  * cache the buffer.
 1192                  */
 1193                 bp->b_flags |= B_INVAL;
 1194                 if (!LIST_EMPTY(&bp->b_dep))
 1195                         buf_deallocate(bp);
 1196                 if (bp->b_flags & B_DELWRI) {
 1197                         atomic_subtract_int(&numdirtybuffers, 1);
 1198                         numdirtywakeup(lodirtybuffers);
 1199                 }
 1200                 bp->b_flags &= ~(B_DELWRI | B_CACHE);
 1201                 if ((bp->b_flags & B_VMIO) == 0) {
 1202                         if (bp->b_bufsize)
 1203                                 allocbuf(bp, 0);
 1204                         if (bp->b_vp)
 1205                                 brelvp(bp);
 1206                 }
 1207         }
 1208 
 1209         /*
 1210          * We must clear B_RELBUF if B_DELWRI is set.  If vfs_vmio_release() 
 1211          * is called with B_DELWRI set, the underlying pages may wind up
 1212          * getting freed causing a previous write (bdwrite()) to get 'lost'
 1213          * because pages associated with a B_DELWRI bp are marked clean.
 1214          * 
 1215          * We still allow the B_INVAL case to call vfs_vmio_release(), even
 1216          * if B_DELWRI is set.
 1217          *
 1218          * If B_DELWRI is not set we may have to set B_RELBUF if we are low
 1219          * on pages to return pages to the VM page queues.
 1220          */
 1221         if (bp->b_flags & B_DELWRI)
 1222                 bp->b_flags &= ~B_RELBUF;
 1223         else if (vm_page_count_severe()) {
 1224                 /*
 1225                  * XXX This lock may not be necessary since BKGRDINPROG
 1226                  * cannot be set while we hold the buf lock, it can only be
 1227                  * cleared if it is already pending.
 1228                  */
 1229                 if (bp->b_vp) {
 1230                         BO_LOCK(bp->b_bufobj);
 1231                         if (!(bp->b_vflags & BV_BKGRDINPROG))
 1232                                 bp->b_flags |= B_RELBUF;
 1233                         BO_UNLOCK(bp->b_bufobj);
 1234                 } else
 1235                         bp->b_flags |= B_RELBUF;
 1236         }
 1237 
 1238         /*
 1239          * VMIO buffer rundown.  It is not very necessary to keep a VMIO buffer
 1240          * constituted, not even NFS buffers now.  Two flags effect this.  If
 1241          * B_INVAL, the struct buf is invalidated but the VM object is kept
 1242          * around ( i.e. so it is trivial to reconstitute the buffer later ).
 1243          *
 1244          * If BIO_ERROR or B_NOCACHE is set, pages in the VM object will be
 1245          * invalidated.  BIO_ERROR cannot be set for a failed write unless the
 1246          * buffer is also B_INVAL because it hits the re-dirtying code above.
 1247          *
 1248          * Normally we can do this whether a buffer is B_DELWRI or not.  If
 1249          * the buffer is an NFS buffer, it is tracking piecemeal writes or
 1250          * the commit state and we cannot afford to lose the buffer. If the
 1251          * buffer has a background write in progress, we need to keep it
 1252          * around to prevent it from being reconstituted and starting a second
 1253          * background write.
 1254          */
 1255         if ((bp->b_flags & B_VMIO)
 1256             && !(bp->b_vp->v_mount != NULL &&
 1257                  (bp->b_vp->v_mount->mnt_vfc->vfc_flags & VFCF_NETWORK) != 0 &&
 1258                  !vn_isdisk(bp->b_vp, NULL) &&
 1259                  (bp->b_flags & B_DELWRI))
 1260             ) {
 1261 
 1262                 int i, j, resid;
 1263                 vm_page_t m;
 1264                 off_t foff;
 1265                 vm_pindex_t poff;
 1266                 vm_object_t obj;
 1267 
 1268                 obj = bp->b_bufobj->bo_object;
 1269 
 1270                 /*
 1271                  * Get the base offset and length of the buffer.  Note that 
 1272                  * in the VMIO case if the buffer block size is not
 1273                  * page-aligned then b_data pointer may not be page-aligned.
 1274                  * But our b_pages[] array *IS* page aligned.
 1275                  *
 1276                  * block sizes less then DEV_BSIZE (usually 512) are not 
 1277                  * supported due to the page granularity bits (m->valid,
 1278                  * m->dirty, etc...). 
 1279                  *
 1280                  * See man buf(9) for more information
 1281                  */
 1282                 resid = bp->b_bufsize;
 1283                 foff = bp->b_offset;
 1284                 VM_OBJECT_LOCK(obj);
 1285                 for (i = 0; i < bp->b_npages; i++) {
 1286                         int had_bogus = 0;
 1287 
 1288                         m = bp->b_pages[i];
 1289 
 1290                         /*
 1291                          * If we hit a bogus page, fixup *all* the bogus pages
 1292                          * now.
 1293                          */
 1294                         if (m == bogus_page) {
 1295                                 poff = OFF_TO_IDX(bp->b_offset);
 1296                                 had_bogus = 1;
 1297 
 1298                                 for (j = i; j < bp->b_npages; j++) {
 1299                                         vm_page_t mtmp;
 1300                                         mtmp = bp->b_pages[j];
 1301                                         if (mtmp == bogus_page) {
 1302                                                 mtmp = vm_page_lookup(obj, poff + j);
 1303                                                 if (!mtmp) {
 1304                                                         panic("brelse: page missing\n");
 1305                                                 }
 1306                                                 bp->b_pages[j] = mtmp;
 1307                                         }
 1308                                 }
 1309 
 1310                                 if ((bp->b_flags & B_INVAL) == 0) {
 1311                                         pmap_qenter(
 1312                                             trunc_page((vm_offset_t)bp->b_data),
 1313                                             bp->b_pages, bp->b_npages);
 1314                                 }
 1315                                 m = bp->b_pages[i];
 1316                         }
 1317                         if ((bp->b_flags & B_NOCACHE) ||
 1318                             (bp->b_ioflags & BIO_ERROR)) {
 1319                                 int poffset = foff & PAGE_MASK;
 1320                                 int presid = resid > (PAGE_SIZE - poffset) ?
 1321                                         (PAGE_SIZE - poffset) : resid;
 1322 
 1323                                 KASSERT(presid >= 0, ("brelse: extra page"));
 1324                                 vm_page_lock_queues();
 1325                                 vm_page_set_invalid(m, poffset, presid);
 1326                                 vm_page_unlock_queues();
 1327                                 if (had_bogus)
 1328                                         printf("avoided corruption bug in bogus_page/brelse code\n");
 1329                         }
 1330                         resid -= PAGE_SIZE - (foff & PAGE_MASK);
 1331                         foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
 1332                 }
 1333                 VM_OBJECT_UNLOCK(obj);
 1334                 if (bp->b_flags & (B_INVAL | B_RELBUF))
 1335                         vfs_vmio_release(bp);
 1336 
 1337         } else if (bp->b_flags & B_VMIO) {
 1338 
 1339                 if (bp->b_flags & (B_INVAL | B_RELBUF)) {
 1340                         vfs_vmio_release(bp);
 1341                 }
 1342 
 1343         } else if ((bp->b_flags & (B_INVAL | B_RELBUF)) != 0) {
 1344                 if (bp->b_bufsize != 0)
 1345                         allocbuf(bp, 0);
 1346                 if (bp->b_vp != NULL)
 1347                         brelvp(bp);
 1348         }
 1349                         
 1350         if (BUF_REFCNT(bp) > 1) {
 1351                 /* do not release to free list */
 1352                 BUF_UNLOCK(bp);
 1353                 return;
 1354         }
 1355 
 1356         /* enqueue */
 1357         mtx_lock(&bqlock);
 1358         /* Handle delayed bremfree() processing. */
 1359         if (bp->b_flags & B_REMFREE)
 1360                 bremfreel(bp);
 1361         if (bp->b_qindex != QUEUE_NONE)
 1362                 panic("brelse: free buffer onto another queue???");
 1363 
 1364         /* buffers with no memory */
 1365         if (bp->b_bufsize == 0) {
 1366                 bp->b_flags |= B_INVAL;
 1367                 bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA);
 1368                 if (bp->b_vflags & BV_BKGRDINPROG)
 1369                         panic("losing buffer 1");
 1370                 if (bp->b_kvasize) {
 1371                         bp->b_qindex = QUEUE_EMPTYKVA;
 1372                 } else {
 1373                         bp->b_qindex = QUEUE_EMPTY;
 1374                 }
 1375                 TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist);
 1376         /* buffers with junk contents */
 1377         } else if (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF) ||
 1378             (bp->b_ioflags & BIO_ERROR)) {
 1379                 bp->b_flags |= B_INVAL;
 1380                 bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA);
 1381                 if (bp->b_vflags & BV_BKGRDINPROG)
 1382                         panic("losing buffer 2");
 1383                 bp->b_qindex = QUEUE_CLEAN;
 1384                 TAILQ_INSERT_HEAD(&bufqueues[QUEUE_CLEAN], bp, b_freelist);
 1385         /* remaining buffers */
 1386         } else {
 1387                 if ((bp->b_flags & (B_DELWRI|B_NEEDSGIANT)) ==
 1388                     (B_DELWRI|B_NEEDSGIANT))
 1389                         bp->b_qindex = QUEUE_DIRTY_GIANT;
 1390                 else if (bp->b_flags & B_DELWRI)
 1391                         bp->b_qindex = QUEUE_DIRTY;
 1392                 else
 1393                         bp->b_qindex = QUEUE_CLEAN;
 1394                 if (bp->b_flags & B_AGE)
 1395                         TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist);
 1396                 else
 1397                         TAILQ_INSERT_TAIL(&bufqueues[bp->b_qindex], bp, b_freelist);
 1398         }
 1399         mtx_unlock(&bqlock);
 1400 
 1401         /*
 1402          * If B_INVAL and B_DELWRI is set, clear B_DELWRI.  We have already
 1403          * placed the buffer on the correct queue.  We must also disassociate
 1404          * the device and vnode for a B_INVAL buffer so gbincore() doesn't
 1405          * find it.
 1406          */
 1407         if (bp->b_flags & B_INVAL) {
 1408                 if (bp->b_flags & B_DELWRI)
 1409                         bundirty(bp);
 1410                 if (bp->b_vp)
 1411                         brelvp(bp);
 1412         }
 1413 
 1414         /*
 1415          * Fixup numfreebuffers count.  The bp is on an appropriate queue
 1416          * unless locked.  We then bump numfreebuffers if it is not B_DELWRI.
 1417          * We've already handled the B_INVAL case ( B_DELWRI will be clear
 1418          * if B_INVAL is set ).
 1419          */
 1420 
 1421         if (!(bp->b_flags & B_DELWRI))
 1422                 bufcountwakeup();
 1423 
 1424         /*
 1425          * Something we can maybe free or reuse
 1426          */
 1427         if (bp->b_bufsize || bp->b_kvasize)
 1428                 bufspacewakeup();
 1429 
 1430         bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF | B_DIRECT);
 1431         if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
 1432                 panic("brelse: not dirty");
 1433         /* unlock */
 1434         BUF_UNLOCK(bp);
 1435 }
 1436 
 1437 /*
 1438  * Release a buffer back to the appropriate queue but do not try to free
 1439  * it.  The buffer is expected to be used again soon.
 1440  *
 1441  * bqrelse() is used by bdwrite() to requeue a delayed write, and used by
 1442  * biodone() to requeue an async I/O on completion.  It is also used when
 1443  * known good buffers need to be requeued but we think we may need the data
 1444  * again soon.
 1445  *
 1446  * XXX we should be able to leave the B_RELBUF hint set on completion.
 1447  */
 1448 void
 1449 bqrelse(struct buf *bp)
 1450 {
 1451         CTR3(KTR_BUF, "bqrelse(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
 1452         KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
 1453             ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
 1454 
 1455         if (BUF_REFCNT(bp) > 1) {
 1456                 /* do not release to free list */
 1457                 BUF_UNLOCK(bp);
 1458                 return;
 1459         }
 1460 
 1461         if (bp->b_flags & B_MANAGED) {
 1462                 if (bp->b_flags & B_REMFREE) {
 1463                         mtx_lock(&bqlock);
 1464                         bremfreel(bp);
 1465                         mtx_unlock(&bqlock);
 1466                 }
 1467                 bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
 1468                 BUF_UNLOCK(bp);
 1469                 return;
 1470         }
 1471 
 1472         mtx_lock(&bqlock);
 1473         /* Handle delayed bremfree() processing. */
 1474         if (bp->b_flags & B_REMFREE)
 1475                 bremfreel(bp);
 1476         if (bp->b_qindex != QUEUE_NONE)
 1477                 panic("bqrelse: free buffer onto another queue???");
 1478         /* buffers with stale but valid contents */
 1479         if (bp->b_flags & B_DELWRI) {
 1480                 if (bp->b_flags & B_NEEDSGIANT)
 1481                         bp->b_qindex = QUEUE_DIRTY_GIANT;
 1482                 else
 1483                         bp->b_qindex = QUEUE_DIRTY;
 1484                 TAILQ_INSERT_TAIL(&bufqueues[bp->b_qindex], bp, b_freelist);
 1485         } else {
 1486                 /*
 1487                  * XXX This lock may not be necessary since BKGRDINPROG
 1488                  * cannot be set while we hold the buf lock, it can only be
 1489                  * cleared if it is already pending.
 1490                  */
 1491                 BO_LOCK(bp->b_bufobj);
 1492                 if (!vm_page_count_severe() || bp->b_vflags & BV_BKGRDINPROG) {
 1493                         BO_UNLOCK(bp->b_bufobj);
 1494                         bp->b_qindex = QUEUE_CLEAN;
 1495                         TAILQ_INSERT_TAIL(&bufqueues[QUEUE_CLEAN], bp,
 1496                             b_freelist);
 1497                 } else {
 1498                         /*
 1499                          * We are too low on memory, we have to try to free
 1500                          * the buffer (most importantly: the wired pages
 1501                          * making up its backing store) *now*.
 1502                          */
 1503                         BO_UNLOCK(bp->b_bufobj);
 1504                         mtx_unlock(&bqlock);
 1505                         brelse(bp);
 1506                         return;
 1507                 }
 1508         }
 1509         mtx_unlock(&bqlock);
 1510 
 1511         if ((bp->b_flags & B_INVAL) || !(bp->b_flags & B_DELWRI))
 1512                 bufcountwakeup();
 1513 
 1514         /*
 1515          * Something we can maybe free or reuse.
 1516          */
 1517         if (bp->b_bufsize && !(bp->b_flags & B_DELWRI))
 1518                 bufspacewakeup();
 1519 
 1520         bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
 1521         if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
 1522                 panic("bqrelse: not dirty");
 1523         /* unlock */
 1524         BUF_UNLOCK(bp);
 1525 }
 1526 
 1527 /* Give pages used by the bp back to the VM system (where possible) */
 1528 static void
 1529 vfs_vmio_release(struct buf *bp)
 1530 {
 1531         int i;
 1532         vm_page_t m;
 1533 
 1534         VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
 1535         vm_page_lock_queues();
 1536         for (i = 0; i < bp->b_npages; i++) {
 1537                 m = bp->b_pages[i];
 1538                 bp->b_pages[i] = NULL;
 1539                 /*
 1540                  * In order to keep page LRU ordering consistent, put
 1541                  * everything on the inactive queue.
 1542                  */
 1543                 vm_page_unwire(m, 0);
 1544                 /*
 1545                  * We don't mess with busy pages, it is
 1546                  * the responsibility of the process that
 1547                  * busied the pages to deal with them.
 1548                  */
 1549                 if ((m->oflags & VPO_BUSY) || (m->busy != 0))
 1550                         continue;
 1551                         
 1552                 if (m->wire_count == 0) {
 1553                         /*
 1554                          * Might as well free the page if we can and it has
 1555                          * no valid data.  We also free the page if the
 1556                          * buffer was used for direct I/O
 1557                          */
 1558                         if ((bp->b_flags & B_ASYNC) == 0 && !m->valid &&
 1559                             m->hold_count == 0) {
 1560                                 vm_page_free(m);
 1561                         } else if (bp->b_flags & B_DIRECT) {
 1562                                 vm_page_try_to_free(m);
 1563                         } else if (vm_page_count_severe()) {
 1564                                 vm_page_try_to_cache(m);
 1565                         }
 1566                 }
 1567         }
 1568         vm_page_unlock_queues();
 1569         VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
 1570         pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
 1571         
 1572         if (bp->b_bufsize) {
 1573                 bufspacewakeup();
 1574                 bp->b_bufsize = 0;
 1575         }
 1576         bp->b_npages = 0;
 1577         bp->b_flags &= ~B_VMIO;
 1578         if (bp->b_vp)
 1579                 brelvp(bp);
 1580 }
 1581 
 1582 /*
 1583  * Check to see if a block at a particular lbn is available for a clustered
 1584  * write.
 1585  */
 1586 static int
 1587 vfs_bio_clcheck(struct vnode *vp, int size, daddr_t lblkno, daddr_t blkno)
 1588 {
 1589         struct buf *bpa;
 1590         int match;
 1591 
 1592         match = 0;
 1593 
 1594         /* If the buf isn't in core skip it */
 1595         if ((bpa = gbincore(&vp->v_bufobj, lblkno)) == NULL)
 1596                 return (0);
 1597 
 1598         /* If the buf is busy we don't want to wait for it */
 1599         if (BUF_LOCK(bpa, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
 1600                 return (0);
 1601 
 1602         /* Only cluster with valid clusterable delayed write buffers */
 1603         if ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) !=
 1604             (B_DELWRI | B_CLUSTEROK))
 1605                 goto done;
 1606 
 1607         if (bpa->b_bufsize != size)
 1608                 goto done;
 1609 
 1610         /*
 1611          * Check to see if it is in the expected place on disk and that the
 1612          * block has been mapped.
 1613          */
 1614         if ((bpa->b_blkno != bpa->b_lblkno) && (bpa->b_blkno == blkno))
 1615                 match = 1;
 1616 done:
 1617         BUF_UNLOCK(bpa);
 1618         return (match);
 1619 }
 1620 
 1621 /*
 1622  *      vfs_bio_awrite:
 1623  *
 1624  *      Implement clustered async writes for clearing out B_DELWRI buffers.
 1625  *      This is much better then the old way of writing only one buffer at
 1626  *      a time.  Note that we may not be presented with the buffers in the 
 1627  *      correct order, so we search for the cluster in both directions.
 1628  */
 1629 int
 1630 vfs_bio_awrite(struct buf *bp)
 1631 {
 1632         int i;
 1633         int j;
 1634         daddr_t lblkno = bp->b_lblkno;
 1635         struct vnode *vp = bp->b_vp;
 1636         int ncl;
 1637         int nwritten;
 1638         int size;
 1639         int maxcl;
 1640 
 1641         /*
 1642          * right now we support clustered writing only to regular files.  If
 1643          * we find a clusterable block we could be in the middle of a cluster
 1644          * rather then at the beginning.
 1645          */
 1646         if ((vp->v_type == VREG) && 
 1647             (vp->v_mount != 0) && /* Only on nodes that have the size info */
 1648             (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
 1649 
 1650                 size = vp->v_mount->mnt_stat.f_iosize;
 1651                 maxcl = MAXPHYS / size;
 1652 
 1653                 VI_LOCK(vp);
 1654                 for (i = 1; i < maxcl; i++)
 1655                         if (vfs_bio_clcheck(vp, size, lblkno + i,
 1656                             bp->b_blkno + ((i * size) >> DEV_BSHIFT)) == 0)
 1657                                 break;
 1658 
 1659                 for (j = 1; i + j <= maxcl && j <= lblkno; j++) 
 1660                         if (vfs_bio_clcheck(vp, size, lblkno - j,
 1661                             bp->b_blkno - ((j * size) >> DEV_BSHIFT)) == 0)
 1662                                 break;
 1663 
 1664                 VI_UNLOCK(vp);
 1665                 --j;
 1666                 ncl = i + j;
 1667                 /*
 1668                  * this is a possible cluster write
 1669                  */
 1670                 if (ncl != 1) {
 1671                         BUF_UNLOCK(bp);
 1672                         nwritten = cluster_wbuild(vp, size, lblkno - j, ncl);
 1673                         return nwritten;
 1674                 }
 1675         }
 1676         bremfree(bp);
 1677         bp->b_flags |= B_ASYNC;
 1678         /*
 1679          * default (old) behavior, writing out only one block
 1680          *
 1681          * XXX returns b_bufsize instead of b_bcount for nwritten?
 1682          */
 1683         nwritten = bp->b_bufsize;
 1684         (void) bwrite(bp);
 1685 
 1686         return nwritten;
 1687 }
 1688 
 1689 /*
 1690  *      getnewbuf:
 1691  *
 1692  *      Find and initialize a new buffer header, freeing up existing buffers 
 1693  *      in the bufqueues as necessary.  The new buffer is returned locked.
 1694  *
 1695  *      Important:  B_INVAL is not set.  If the caller wishes to throw the
 1696  *      buffer away, the caller must set B_INVAL prior to calling brelse().
 1697  *
 1698  *      We block if:
 1699  *              We have insufficient buffer headers
 1700  *              We have insufficient buffer space
 1701  *              buffer_map is too fragmented ( space reservation fails )
 1702  *              If we have to flush dirty buffers ( but we try to avoid this )
 1703  *
 1704  *      To avoid VFS layer recursion we do not flush dirty buffers ourselves.
 1705  *      Instead we ask the buf daemon to do it for us.  We attempt to
 1706  *      avoid piecemeal wakeups of the pageout daemon.
 1707  */
 1708 
 1709 static struct buf *
 1710 getnewbuf(int slpflag, int slptimeo, int size, int maxsize)
 1711 {
 1712         struct buf *bp;
 1713         struct buf *nbp;
 1714         int defrag = 0;
 1715         int nqindex;
 1716         static int flushingbufs;
 1717 
 1718         /*
 1719          * We can't afford to block since we might be holding a vnode lock,
 1720          * which may prevent system daemons from running.  We deal with
 1721          * low-memory situations by proactively returning memory and running
 1722          * async I/O rather then sync I/O.
 1723          */
 1724 
 1725         atomic_add_int(&getnewbufcalls, 1);
 1726         atomic_subtract_int(&getnewbufrestarts, 1);
 1727 restart:
 1728         atomic_add_int(&getnewbufrestarts, 1);
 1729 
 1730         /*
 1731          * Setup for scan.  If we do not have enough free buffers,
 1732          * we setup a degenerate case that immediately fails.  Note
 1733          * that if we are specially marked process, we are allowed to
 1734          * dip into our reserves.
 1735          *
 1736          * The scanning sequence is nominally:  EMPTY->EMPTYKVA->CLEAN
 1737          *
 1738          * We start with EMPTYKVA.  If the list is empty we backup to EMPTY.
 1739          * However, there are a number of cases (defragging, reusing, ...)
 1740          * where we cannot backup.
 1741          */
 1742         mtx_lock(&bqlock);
 1743         nqindex = QUEUE_EMPTYKVA;
 1744         nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTYKVA]);
 1745 
 1746         if (nbp == NULL) {
 1747                 /*
 1748                  * If no EMPTYKVA buffers and we are either
 1749                  * defragging or reusing, locate a CLEAN buffer
 1750                  * to free or reuse.  If bufspace useage is low
 1751                  * skip this step so we can allocate a new buffer.
 1752                  */
 1753                 if (defrag || bufspace >= lobufspace) {
 1754                         nqindex = QUEUE_CLEAN;
 1755                         nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN]);
 1756                 }
 1757 
 1758                 /*
 1759                  * If we could not find or were not allowed to reuse a
 1760                  * CLEAN buffer, check to see if it is ok to use an EMPTY
 1761                  * buffer.  We can only use an EMPTY buffer if allocating
 1762                  * its KVA would not otherwise run us out of buffer space.
 1763                  */
 1764                 if (nbp == NULL && defrag == 0 &&
 1765                     bufspace + maxsize < hibufspace) {
 1766                         nqindex = QUEUE_EMPTY;
 1767                         nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]);
 1768                 }
 1769         }
 1770 
 1771         /*
 1772          * Run scan, possibly freeing data and/or kva mappings on the fly
 1773          * depending.
 1774          */
 1775 
 1776         while ((bp = nbp) != NULL) {
 1777                 int qindex = nqindex;
 1778 
 1779                 /*
 1780                  * Calculate next bp ( we can only use it if we do not block
 1781                  * or do other fancy things ).
 1782                  */
 1783                 if ((nbp = TAILQ_NEXT(bp, b_freelist)) == NULL) {
 1784                         switch(qindex) {
 1785                         case QUEUE_EMPTY:
 1786                                 nqindex = QUEUE_EMPTYKVA;
 1787                                 if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTYKVA])))
 1788                                         break;
 1789                                 /* FALLTHROUGH */
 1790                         case QUEUE_EMPTYKVA:
 1791                                 nqindex = QUEUE_CLEAN;
 1792                                 if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN])))
 1793                                         break;
 1794                                 /* FALLTHROUGH */
 1795                         case QUEUE_CLEAN:
 1796                                 /*
 1797                                  * nbp is NULL. 
 1798                                  */
 1799                                 break;
 1800                         }
 1801                 }
 1802                 /*
 1803                  * If we are defragging then we need a buffer with 
 1804                  * b_kvasize != 0.  XXX this situation should no longer
 1805                  * occur, if defrag is non-zero the buffer's b_kvasize
 1806                  * should also be non-zero at this point.  XXX
 1807                  */
 1808                 if (defrag && bp->b_kvasize == 0) {
 1809                         printf("Warning: defrag empty buffer %p\n", bp);
 1810                         continue;
 1811                 }
 1812 
 1813                 /*
 1814                  * Start freeing the bp.  This is somewhat involved.  nbp
 1815                  * remains valid only for QUEUE_EMPTY[KVA] bp's.
 1816                  */
 1817                 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
 1818                         continue;
 1819                 if (bp->b_vp) {
 1820                         BO_LOCK(bp->b_bufobj);
 1821                         if (bp->b_vflags & BV_BKGRDINPROG) {
 1822                                 BO_UNLOCK(bp->b_bufobj);
 1823                                 BUF_UNLOCK(bp);
 1824                                 continue;
 1825                         }
 1826                         BO_UNLOCK(bp->b_bufobj);
 1827                 }
 1828                 CTR6(KTR_BUF,
 1829                     "getnewbuf(%p) vp %p flags %X kvasize %d bufsize %d "
 1830                     "queue %d (recycling)", bp, bp->b_vp, bp->b_flags,
 1831                     bp->b_kvasize, bp->b_bufsize, qindex);
 1832 
 1833                 /*
 1834                  * Sanity Checks
 1835                  */
 1836                 KASSERT(bp->b_qindex == qindex, ("getnewbuf: inconsistant queue %d bp %p", qindex, bp));
 1837 
 1838                 /*
 1839                  * Note: we no longer distinguish between VMIO and non-VMIO
 1840                  * buffers.
 1841                  */
 1842 
 1843                 KASSERT((bp->b_flags & B_DELWRI) == 0, ("delwri buffer %p found in queue %d", bp, qindex));
 1844 
 1845                 bremfreel(bp);
 1846                 mtx_unlock(&bqlock);
 1847 
 1848                 if (qindex == QUEUE_CLEAN) {
 1849                         if (bp->b_flags & B_VMIO) {
 1850                                 bp->b_flags &= ~B_ASYNC;
 1851                                 vfs_vmio_release(bp);
 1852                         }
 1853                         if (bp->b_vp)
 1854                                 brelvp(bp);
 1855                 }
 1856 
 1857                 /*
 1858                  * NOTE:  nbp is now entirely invalid.  We can only restart
 1859                  * the scan from this point on.
 1860                  *
 1861                  * Get the rest of the buffer freed up.  b_kva* is still
 1862                  * valid after this operation.
 1863                  */
 1864 
 1865                 if (bp->b_rcred != NOCRED) {
 1866                         crfree(bp->b_rcred);
 1867                         bp->b_rcred = NOCRED;
 1868                 }
 1869                 if (bp->b_wcred != NOCRED) {
 1870                         crfree(bp->b_wcred);
 1871                         bp->b_wcred = NOCRED;
 1872                 }
 1873                 if (!LIST_EMPTY(&bp->b_dep))
 1874                         buf_deallocate(bp);
 1875                 if (bp->b_vflags & BV_BKGRDINPROG)
 1876                         panic("losing buffer 3");
 1877                 KASSERT(bp->b_vp == NULL,
 1878                     ("bp: %p still has vnode %p.  qindex: %d",
 1879                     bp, bp->b_vp, qindex));
 1880                 KASSERT((bp->b_xflags & (BX_VNCLEAN|BX_VNDIRTY)) == 0,
 1881                    ("bp: %p still on a buffer list. xflags %X",
 1882                     bp, bp->b_xflags));
 1883 
 1884                 if (bp->b_bufsize)
 1885                         allocbuf(bp, 0);
 1886 
 1887                 bp->b_flags = 0;
 1888                 bp->b_ioflags = 0;
 1889                 bp->b_xflags = 0;
 1890                 bp->b_vflags = 0;
 1891                 bp->b_vp = NULL;
 1892                 bp->b_blkno = bp->b_lblkno = 0;
 1893                 bp->b_offset = NOOFFSET;
 1894                 bp->b_iodone = 0;
 1895                 bp->b_error = 0;
 1896                 bp->b_resid = 0;
 1897                 bp->b_bcount = 0;
 1898                 bp->b_npages = 0;
 1899                 bp->b_dirtyoff = bp->b_dirtyend = 0;
 1900                 bp->b_bufobj = NULL;
 1901                 bp->b_pin_count = 0;
 1902                 bp->b_fsprivate1 = NULL;
 1903                 bp->b_fsprivate2 = NULL;
 1904                 bp->b_fsprivate3 = NULL;
 1905 
 1906                 LIST_INIT(&bp->b_dep);
 1907 
 1908                 /*
 1909                  * If we are defragging then free the buffer.
 1910                  */
 1911                 if (defrag) {
 1912                         bp->b_flags |= B_INVAL;
 1913                         bfreekva(bp);
 1914                         brelse(bp);
 1915                         defrag = 0;
 1916                         goto restart;
 1917                 }
 1918 
 1919                 /*
 1920                  * Notify any waiters for the buffer lock about
 1921                  * identity change by freeing the buffer.
 1922                  */
 1923                 if (qindex == QUEUE_CLEAN && BUF_LOCKWAITERS(bp) > 0) {
 1924                         bp->b_flags |= B_INVAL;
 1925                         bfreekva(bp);
 1926                         brelse(bp);
 1927                         goto restart;
 1928                 }
 1929 
 1930                 /*
 1931                  * If we are overcomitted then recover the buffer and its
 1932                  * KVM space.  This occurs in rare situations when multiple
 1933                  * processes are blocked in getnewbuf() or allocbuf().
 1934                  */
 1935                 if (bufspace >= hibufspace)
 1936                         flushingbufs = 1;
 1937                 if (flushingbufs && bp->b_kvasize != 0) {
 1938                         bp->b_flags |= B_INVAL;
 1939                         bfreekva(bp);
 1940                         brelse(bp);
 1941                         goto restart;
 1942                 }
 1943                 if (bufspace < lobufspace)
 1944                         flushingbufs = 0;
 1945                 break;
 1946         }
 1947 
 1948         /*
 1949          * If we exhausted our list, sleep as appropriate.  We may have to
 1950          * wakeup various daemons and write out some dirty buffers.
 1951          *
 1952          * Generally we are sleeping due to insufficient buffer space.
 1953          */
 1954 
 1955         if (bp == NULL) {
 1956                 int flags;
 1957                 char *waitmsg;
 1958 
 1959                 if (defrag) {
 1960                         flags = VFS_BIO_NEED_BUFSPACE;
 1961                         waitmsg = "nbufkv";
 1962                 } else if (bufspace >= hibufspace) {
 1963                         waitmsg = "nbufbs";
 1964                         flags = VFS_BIO_NEED_BUFSPACE;
 1965                 } else {
 1966                         waitmsg = "newbuf";
 1967                         flags = VFS_BIO_NEED_ANY;
 1968                 }
 1969                 mtx_lock(&nblock);
 1970                 needsbuffer |= flags;
 1971                 mtx_unlock(&nblock);
 1972                 mtx_unlock(&bqlock);
 1973 
 1974                 bd_speedup();   /* heeeelp */
 1975 
 1976                 mtx_lock(&nblock);
 1977                 while (needsbuffer & flags) {
 1978                         if (msleep(&needsbuffer, &nblock,
 1979                             (PRIBIO + 4) | slpflag, waitmsg, slptimeo)) {
 1980                                 mtx_unlock(&nblock);
 1981                                 return (NULL);
 1982                         }
 1983                 }
 1984                 mtx_unlock(&nblock);
 1985         } else {
 1986                 /*
 1987                  * We finally have a valid bp.  We aren't quite out of the
 1988                  * woods, we still have to reserve kva space.  In order
 1989                  * to keep fragmentation sane we only allocate kva in
 1990                  * BKVASIZE chunks.
 1991                  */
 1992                 maxsize = (maxsize + BKVAMASK) & ~BKVAMASK;
 1993 
 1994                 if (maxsize != bp->b_kvasize) {
 1995                         vm_offset_t addr = 0;
 1996 
 1997                         bfreekva(bp);
 1998 
 1999                         vm_map_lock(buffer_map);
 2000                         if (vm_map_findspace(buffer_map,
 2001                                 vm_map_min(buffer_map), maxsize, &addr)) {
 2002                                 /*
 2003                                  * Uh oh.  Buffer map is to fragmented.  We
 2004                                  * must defragment the map.
 2005                                  */
 2006                                 atomic_add_int(&bufdefragcnt, 1);
 2007                                 vm_map_unlock(buffer_map);
 2008                                 defrag = 1;
 2009                                 bp->b_flags |= B_INVAL;
 2010                                 brelse(bp);
 2011                                 goto restart;
 2012                         }
 2013                         if (addr) {
 2014                                 vm_map_insert(buffer_map, NULL, 0,
 2015                                         addr, addr + maxsize,
 2016                                         VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
 2017 
 2018                                 bp->b_kvabase = (caddr_t) addr;
 2019                                 bp->b_kvasize = maxsize;
 2020                                 atomic_add_int(&bufspace, bp->b_kvasize);
 2021                                 atomic_add_int(&bufreusecnt, 1);
 2022                         }
 2023                         vm_map_unlock(buffer_map);
 2024                 }
 2025                 bp->b_saveaddr = bp->b_kvabase;
 2026                 bp->b_data = bp->b_saveaddr;
 2027         }
 2028         return(bp);
 2029 }
 2030 
 2031 /*
 2032  *      buf_daemon:
 2033  *
 2034  *      buffer flushing daemon.  Buffers are normally flushed by the
 2035  *      update daemon but if it cannot keep up this process starts to
 2036  *      take the load in an attempt to prevent getnewbuf() from blocking.
 2037  */
 2038 
 2039 static struct kproc_desc buf_kp = {
 2040         "bufdaemon",
 2041         buf_daemon,
 2042         &bufdaemonproc
 2043 };
 2044 SYSINIT(bufdaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, kproc_start, &buf_kp);
 2045 
 2046 static void
 2047 buf_daemon()
 2048 {
 2049 
 2050         /*
 2051          * This process needs to be suspended prior to shutdown sync.
 2052          */
 2053         EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, bufdaemonproc,
 2054             SHUTDOWN_PRI_LAST);
 2055 
 2056         /*
 2057          * This process is allowed to take the buffer cache to the limit
 2058          */
 2059         curthread->td_pflags |= TDP_NORUNNINGBUF;
 2060         mtx_lock(&bdlock);
 2061         for (;;) {
 2062                 bd_request = 0;
 2063                 mtx_unlock(&bdlock);
 2064 
 2065                 kthread_suspend_check(bufdaemonproc);
 2066 
 2067                 /*
 2068                  * Do the flush.  Limit the amount of in-transit I/O we
 2069                  * allow to build up, otherwise we would completely saturate
 2070                  * the I/O system.  Wakeup any waiting processes before we
 2071                  * normally would so they can run in parallel with our drain.
 2072                  */
 2073                 while (numdirtybuffers > lodirtybuffers) {
 2074                         int flushed;
 2075 
 2076                         flushed = flushbufqueues(QUEUE_DIRTY, 0);
 2077                         /* The list empty check here is slightly racy */
 2078                         if (!TAILQ_EMPTY(&bufqueues[QUEUE_DIRTY_GIANT])) {
 2079                                 mtx_lock(&Giant);
 2080                                 flushed += flushbufqueues(QUEUE_DIRTY_GIANT, 0);
 2081                                 mtx_unlock(&Giant);
 2082                         }
 2083                         if (flushed == 0) {
 2084                                 /*
 2085                                  * Could not find any buffers without rollback
 2086                                  * dependencies, so just write the first one
 2087                                  * in the hopes of eventually making progress.
 2088                                  */
 2089                                 flushbufqueues(QUEUE_DIRTY, 1);
 2090                                 if (!TAILQ_EMPTY(
 2091                                     &bufqueues[QUEUE_DIRTY_GIANT])) {
 2092                                         mtx_lock(&Giant);
 2093                                         flushbufqueues(QUEUE_DIRTY_GIANT, 1);
 2094                                         mtx_unlock(&Giant);
 2095                                 }
 2096                                 break;
 2097                         }
 2098                         uio_yield();
 2099                 }
 2100 
 2101                 /*
 2102                  * Only clear bd_request if we have reached our low water
 2103                  * mark.  The buf_daemon normally waits 1 second and
 2104                  * then incrementally flushes any dirty buffers that have
 2105                  * built up, within reason.
 2106                  *
 2107                  * If we were unable to hit our low water mark and couldn't
 2108                  * find any flushable buffers, we sleep half a second.
 2109                  * Otherwise we loop immediately.
 2110                  */
 2111                 mtx_lock(&bdlock);
 2112                 if (numdirtybuffers <= lodirtybuffers) {
 2113                         /*
 2114                          * We reached our low water mark, reset the
 2115                          * request and sleep until we are needed again.
 2116                          * The sleep is just so the suspend code works.
 2117                          */
 2118                         bd_request = 0;
 2119                         msleep(&bd_request, &bdlock, PVM, "psleep", hz);
 2120                 } else {
 2121                         /*
 2122                          * We couldn't find any flushable dirty buffers but
 2123                          * still have too many dirty buffers, we
 2124                          * have to sleep and try again.  (rare)
 2125                          */
 2126                         msleep(&bd_request, &bdlock, PVM, "qsleep", hz / 10);
 2127                 }
 2128         }
 2129 }
 2130 
 2131 /*
 2132  *      flushbufqueues:
 2133  *
 2134  *      Try to flush a buffer in the dirty queue.  We must be careful to
 2135  *      free up B_INVAL buffers instead of write them, which NFS is 
 2136  *      particularly sensitive to.
 2137  */
 2138 static int flushwithdeps = 0;
 2139 SYSCTL_INT(_vfs, OID_AUTO, flushwithdeps, CTLFLAG_RW, &flushwithdeps,
 2140     0, "Number of buffers flushed with dependecies that require rollbacks");
 2141 
 2142 static int
 2143 flushbufqueues(int queue, int flushdeps)
 2144 {
 2145         struct thread *td = curthread;
 2146         struct buf sentinel;
 2147         struct vnode *vp;
 2148         struct mount *mp;
 2149         struct buf *bp;
 2150         int hasdeps;
 2151         int flushed;
 2152         int target;
 2153 
 2154         target = numdirtybuffers - lodirtybuffers;
 2155         if (flushdeps && target > 2)
 2156                 target /= 2;
 2157         flushed = 0;
 2158         bp = NULL;
 2159         mtx_lock(&bqlock);
 2160         TAILQ_INSERT_TAIL(&bufqueues[queue], &sentinel, b_freelist);
 2161         while (flushed != target) {
 2162                 bp = TAILQ_FIRST(&bufqueues[queue]);
 2163                 if (bp == &sentinel)
 2164                         break;
 2165                 TAILQ_REMOVE(&bufqueues[queue], bp, b_freelist);
 2166                 TAILQ_INSERT_TAIL(&bufqueues[queue], bp, b_freelist);
 2167 
 2168                 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
 2169                         continue;
 2170                 if (bp->b_pin_count > 0) {
 2171                         BUF_UNLOCK(bp);
 2172                         continue;
 2173                 }
 2174                 BO_LOCK(bp->b_bufobj);
 2175                 if ((bp->b_vflags & BV_BKGRDINPROG) != 0 ||
 2176                     (bp->b_flags & B_DELWRI) == 0) {
 2177                         BO_UNLOCK(bp->b_bufobj);
 2178                         BUF_UNLOCK(bp);
 2179                         continue;
 2180                 }
 2181                 BO_UNLOCK(bp->b_bufobj);
 2182                 if (bp->b_flags & B_INVAL) {
 2183                         bremfreel(bp);
 2184                         mtx_unlock(&bqlock);
 2185                         brelse(bp);
 2186                         flushed++;
 2187                         numdirtywakeup((lodirtybuffers + hidirtybuffers) / 2);
 2188                         mtx_lock(&bqlock);
 2189                         continue;
 2190                 }
 2191 
 2192                 if (!LIST_EMPTY(&bp->b_dep) && buf_countdeps(bp, 0)) {
 2193                         if (flushdeps == 0) {
 2194                                 BUF_UNLOCK(bp);
 2195                                 continue;
 2196                         }
 2197                         hasdeps = 1;
 2198                 } else
 2199                         hasdeps = 0;
 2200                 /*
 2201                  * We must hold the lock on a vnode before writing
 2202                  * one of its buffers. Otherwise we may confuse, or
 2203                  * in the case of a snapshot vnode, deadlock the
 2204                  * system.
 2205                  *
 2206                  * The lock order here is the reverse of the normal
 2207                  * of vnode followed by buf lock.  This is ok because
 2208                  * the NOWAIT will prevent deadlock.
 2209                  */
 2210                 vp = bp->b_vp;
 2211                 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
 2212                         BUF_UNLOCK(bp);
 2213                         continue;
 2214                 }
 2215                 if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT, td) == 0) {
 2216                         mtx_unlock(&bqlock);
 2217                         CTR3(KTR_BUF, "flushbufqueue(%p) vp %p flags %X",
 2218                             bp, bp->b_vp, bp->b_flags);
 2219                         vfs_bio_awrite(bp);
 2220                         vn_finished_write(mp);
 2221                         VOP_UNLOCK(vp, 0, td);
 2222                         flushwithdeps += hasdeps;
 2223                         flushed++;
 2224                         waitrunningbufspace();
 2225                         numdirtywakeup((lodirtybuffers + hidirtybuffers) / 2);
 2226                         mtx_lock(&bqlock);
 2227                         continue;
 2228                 }
 2229                 vn_finished_write(mp);
 2230                 BUF_UNLOCK(bp);
 2231         }
 2232         TAILQ_REMOVE(&bufqueues[queue], &sentinel, b_freelist);
 2233         mtx_unlock(&bqlock);
 2234         return (flushed);
 2235 }
 2236 
 2237 /*
 2238  * Check to see if a block is currently memory resident.
 2239  */
 2240 struct buf *
 2241 incore(struct bufobj *bo, daddr_t blkno)
 2242 {
 2243         struct buf *bp;
 2244 
 2245         BO_LOCK(bo);
 2246         bp = gbincore(bo, blkno);
 2247         BO_UNLOCK(bo);
 2248         return (bp);
 2249 }
 2250 
 2251 /*
 2252  * Returns true if no I/O is needed to access the
 2253  * associated VM object.  This is like incore except
 2254  * it also hunts around in the VM system for the data.
 2255  */
 2256 
 2257 static int
 2258 inmem(struct vnode * vp, daddr_t blkno)
 2259 {
 2260         vm_object_t obj;
 2261         vm_offset_t toff, tinc, size;
 2262         vm_page_t m;
 2263         vm_ooffset_t off;
 2264 
 2265         ASSERT_VOP_LOCKED(vp, "inmem");
 2266 
 2267         if (incore(&vp->v_bufobj, blkno))
 2268                 return 1;
 2269         if (vp->v_mount == NULL)
 2270                 return 0;
 2271         obj = vp->v_object;
 2272         if (obj == NULL)
 2273                 return (0);
 2274 
 2275         size = PAGE_SIZE;
 2276         if (size > vp->v_mount->mnt_stat.f_iosize)
 2277                 size = vp->v_mount->mnt_stat.f_iosize;
 2278         off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;
 2279 
 2280         VM_OBJECT_LOCK(obj);
 2281         for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
 2282                 m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
 2283                 if (!m)
 2284                         goto notinmem;
 2285                 tinc = size;
 2286                 if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK))
 2287                         tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK);
 2288                 if (vm_page_is_valid(m,
 2289                     (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
 2290                         goto notinmem;
 2291         }
 2292         VM_OBJECT_UNLOCK(obj);
 2293         return 1;
 2294 
 2295 notinmem:
 2296         VM_OBJECT_UNLOCK(obj);
 2297         return (0);
 2298 }
 2299 
 2300 /*
 2301  *      vfs_setdirty:
 2302  *
 2303  *      Sets the dirty range for a buffer based on the status of the dirty
 2304  *      bits in the pages comprising the buffer.
 2305  *
 2306  *      The range is limited to the size of the buffer.
 2307  *
 2308  *      This routine is primarily used by NFS, but is generalized for the
 2309  *      B_VMIO case.
 2310  */
 2311 static void
 2312 vfs_setdirty(struct buf *bp) 
 2313 {
 2314 
 2315         /*
 2316          * Degenerate case - empty buffer
 2317          */
 2318 
 2319         if (bp->b_bufsize == 0)
 2320                 return;
 2321 
 2322         /*
 2323          * We qualify the scan for modified pages on whether the
 2324          * object has been flushed yet.
 2325          */
 2326 
 2327         if ((bp->b_flags & B_VMIO) == 0)
 2328                 return;
 2329 
 2330         VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
 2331         vfs_setdirty_locked_object(bp);
 2332         VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
 2333 }
 2334 
 2335 static void
 2336 vfs_setdirty_locked_object(struct buf *bp)
 2337 {
 2338         vm_object_t object;
 2339         int i;
 2340 
 2341         object = bp->b_bufobj->bo_object;
 2342         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
 2343         if (object->flags & (OBJ_MIGHTBEDIRTY|OBJ_CLEANING)) {
 2344                 vm_offset_t boffset;
 2345                 vm_offset_t eoffset;
 2346 
 2347                 vm_page_lock_queues();
 2348                 /*
 2349                  * test the pages to see if they have been modified directly
 2350                  * by users through the VM system.
 2351                  */
 2352                 for (i = 0; i < bp->b_npages; i++)
 2353                         vm_page_test_dirty(bp->b_pages[i]);
 2354 
 2355                 /*
 2356                  * Calculate the encompassing dirty range, boffset and eoffset,
 2357                  * (eoffset - boffset) bytes.
 2358                  */
 2359 
 2360                 for (i = 0; i < bp->b_npages; i++) {
 2361                         if (bp->b_pages[i]->dirty)
 2362                                 break;
 2363                 }
 2364                 boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
 2365 
 2366                 for (i = bp->b_npages - 1; i >= 0; --i) {
 2367                         if (bp->b_pages[i]->dirty) {
 2368                                 break;
 2369                         }
 2370                 }
 2371                 eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
 2372 
 2373                 vm_page_unlock_queues();
 2374                 /*
 2375                  * Fit it to the buffer.
 2376                  */
 2377 
 2378                 if (eoffset > bp->b_bcount)
 2379                         eoffset = bp->b_bcount;
 2380 
 2381                 /*
 2382                  * If we have a good dirty range, merge with the existing
 2383                  * dirty range.
 2384                  */
 2385 
 2386                 if (boffset < eoffset) {
 2387                         if (bp->b_dirtyoff > boffset)
 2388                                 bp->b_dirtyoff = boffset;
 2389                         if (bp->b_dirtyend < eoffset)
 2390                                 bp->b_dirtyend = eoffset;
 2391                 }
 2392         }
 2393 }
 2394 
 2395 /*
 2396  *      getblk:
 2397  *
 2398  *      Get a block given a specified block and offset into a file/device.
 2399  *      The buffers B_DONE bit will be cleared on return, making it almost
 2400  *      ready for an I/O initiation.  B_INVAL may or may not be set on 
 2401  *      return.  The caller should clear B_INVAL prior to initiating a
 2402  *      READ.
 2403  *
 2404  *      For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for
 2405  *      an existing buffer.
 2406  *
 2407  *      For a VMIO buffer, B_CACHE is modified according to the backing VM.
 2408  *      If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set
 2409  *      and then cleared based on the backing VM.  If the previous buffer is
 2410  *      non-0-sized but invalid, B_CACHE will be cleared.
 2411  *
 2412  *      If getblk() must create a new buffer, the new buffer is returned with
 2413  *      both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which
 2414  *      case it is returned with B_INVAL clear and B_CACHE set based on the
 2415  *      backing VM.
 2416  *
 2417  *      getblk() also forces a bwrite() for any B_DELWRI buffer whos
 2418  *      B_CACHE bit is clear.
 2419  *      
 2420  *      What this means, basically, is that the caller should use B_CACHE to
 2421  *      determine whether the buffer is fully valid or not and should clear
 2422  *      B_INVAL prior to issuing a read.  If the caller intends to validate
 2423  *      the buffer by loading its data area with something, the caller needs
 2424  *      to clear B_INVAL.  If the caller does this without issuing an I/O, 
 2425  *      the caller should set B_CACHE ( as an optimization ), else the caller
 2426  *      should issue the I/O and biodone() will set B_CACHE if the I/O was
 2427  *      a write attempt or if it was a successfull read.  If the caller 
 2428  *      intends to issue a READ, the caller must clear B_INVAL and BIO_ERROR
 2429  *      prior to issuing the READ.  biodone() will *not* clear B_INVAL.
 2430  */
 2431 struct buf *
 2432 getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo,
 2433     int flags)
 2434 {
 2435         struct buf *bp;
 2436         struct bufobj *bo;
 2437         int error;
 2438 
 2439         CTR3(KTR_BUF, "getblk(%p, %ld, %d)", vp, (long)blkno, size);
 2440         ASSERT_VOP_LOCKED(vp, "getblk");
 2441         if (size > MAXBSIZE)
 2442                 panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE);
 2443 
 2444         bo = &vp->v_bufobj;
 2445 loop:
 2446         /*
 2447          * Block if we are low on buffers.   Certain processes are allowed
 2448          * to completely exhaust the buffer cache.
 2449          *
 2450          * If this check ever becomes a bottleneck it may be better to
 2451          * move it into the else, when gbincore() fails.  At the moment
 2452          * it isn't a problem.
 2453          *
 2454          * XXX remove if 0 sections (clean this up after its proven)
 2455          */
 2456         if (numfreebuffers == 0) {
 2457                 if (TD_IS_IDLETHREAD(curthread))
 2458                         return NULL;
 2459                 mtx_lock(&nblock);
 2460                 needsbuffer |= VFS_BIO_NEED_ANY;
 2461                 mtx_unlock(&nblock);
 2462         }
 2463 
 2464         BO_LOCK(bo);
 2465         bp = gbincore(bo, blkno);
 2466         if (bp != NULL) {
 2467                 int lockflags;
 2468                 /*
 2469                  * Buffer is in-core.  If the buffer is not busy, it must
 2470                  * be on a queue.
 2471                  */
 2472                 lockflags = LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK;
 2473 
 2474                 if (flags & GB_LOCK_NOWAIT)
 2475                         lockflags |= LK_NOWAIT;
 2476 
 2477                 error = BUF_TIMELOCK(bp, lockflags,
 2478                     VI_MTX(vp), "getblk", slpflag, slptimeo);
 2479 
 2480                 /*
 2481                  * If we slept and got the lock we have to restart in case
 2482                  * the buffer changed identities.
 2483                  */
 2484                 if (error == ENOLCK)
 2485                         goto loop;
 2486                 /* We timed out or were interrupted. */
 2487                 else if (error)
 2488                         return (NULL);
 2489 
 2490                 /*
 2491                  * The buffer is locked.  B_CACHE is cleared if the buffer is 
 2492                  * invalid.  Otherwise, for a non-VMIO buffer, B_CACHE is set
 2493                  * and for a VMIO buffer B_CACHE is adjusted according to the
 2494                  * backing VM cache.
 2495                  */
 2496                 if (bp->b_flags & B_INVAL)
 2497                         bp->b_flags &= ~B_CACHE;
 2498                 else if ((bp->b_flags & (B_VMIO | B_INVAL)) == 0)
 2499                         bp->b_flags |= B_CACHE;
 2500                 bremfree(bp);
 2501 
 2502                 /*
 2503                  * check for size inconsistancies for non-VMIO case.
 2504                  */
 2505 
 2506                 if (bp->b_bcount != size) {
 2507                         if ((bp->b_flags & B_VMIO) == 0 ||
 2508                             (size > bp->b_kvasize)) {
 2509                                 if (bp->b_flags & B_DELWRI) {
 2510                                         /*
 2511                                          * If buffer is pinned and caller does
 2512                                          * not want sleep  waiting for it to be
 2513                                          * unpinned, bail out
 2514                                          * */
 2515                                         if (bp->b_pin_count > 0) {
 2516                                                 if (flags & GB_LOCK_NOWAIT) {
 2517                                                         bqrelse(bp);
 2518                                                         return (NULL);
 2519                                                 } else {
 2520                                                         bunpin_wait(bp);
 2521                                                 }
 2522                                         }
 2523                                         bp->b_flags |= B_NOCACHE;
 2524                                         bwrite(bp);
 2525                                 } else {
 2526                                         if (LIST_EMPTY(&bp->b_dep)) {
 2527                                                 bp->b_flags |= B_RELBUF;
 2528                                                 brelse(bp);
 2529                                         } else {
 2530                                                 bp->b_flags |= B_NOCACHE;
 2531                                                 bwrite(bp);
 2532                                         }
 2533                                 }
 2534                                 goto loop;
 2535                         }
 2536                 }
 2537 
 2538                 /*
 2539                  * If the size is inconsistant in the VMIO case, we can resize
 2540                  * the buffer.  This might lead to B_CACHE getting set or
 2541                  * cleared.  If the size has not changed, B_CACHE remains
 2542                  * unchanged from its previous state.
 2543                  */
 2544 
 2545                 if (bp->b_bcount != size)
 2546                         allocbuf(bp, size);
 2547 
 2548                 KASSERT(bp->b_offset != NOOFFSET, 
 2549                     ("getblk: no buffer offset"));
 2550 
 2551                 /*
 2552                  * A buffer with B_DELWRI set and B_CACHE clear must
 2553                  * be committed before we can return the buffer in
 2554                  * order to prevent the caller from issuing a read
 2555                  * ( due to B_CACHE not being set ) and overwriting
 2556                  * it.
 2557                  *
 2558                  * Most callers, including NFS and FFS, need this to
 2559                  * operate properly either because they assume they
 2560                  * can issue a read if B_CACHE is not set, or because
 2561                  * ( for example ) an uncached B_DELWRI might loop due 
 2562                  * to softupdates re-dirtying the buffer.  In the latter
 2563                  * case, B_CACHE is set after the first write completes,
 2564                  * preventing further loops.
 2565                  * NOTE!  b*write() sets B_CACHE.  If we cleared B_CACHE
 2566                  * above while extending the buffer, we cannot allow the
 2567                  * buffer to remain with B_CACHE set after the write
 2568                  * completes or it will represent a corrupt state.  To
 2569                  * deal with this we set B_NOCACHE to scrap the buffer
 2570                  * after the write.
 2571                  *
 2572                  * We might be able to do something fancy, like setting
 2573                  * B_CACHE in bwrite() except if B_DELWRI is already set,
 2574                  * so the below call doesn't set B_CACHE, but that gets real
 2575                  * confusing.  This is much easier.
 2576                  */
 2577 
 2578                 if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) {
 2579                         bp->b_flags |= B_NOCACHE;
 2580                         bwrite(bp);
 2581                         goto loop;
 2582                 }
 2583                 bp->b_flags &= ~B_DONE;
 2584         } else {
 2585                 int bsize, maxsize, vmio;
 2586                 off_t offset;
 2587 
 2588                 /*
 2589                  * Buffer is not in-core, create new buffer.  The buffer
 2590                  * returned by getnewbuf() is locked.  Note that the returned
 2591                  * buffer is also considered valid (not marked B_INVAL).
 2592                  */
 2593                 BO_UNLOCK(bo);
 2594                 /*
 2595                  * If the user does not want us to create the buffer, bail out
 2596                  * here.
 2597                  */
 2598                 if (flags & GB_NOCREAT)
 2599                         return NULL;
 2600                 bsize = bo->bo_bsize;
 2601                 offset = blkno * bsize;
 2602                 vmio = vp->v_object != NULL;
 2603                 maxsize = vmio ? size + (offset & PAGE_MASK) : size;
 2604                 maxsize = imax(maxsize, bsize);
 2605 
 2606                 bp = getnewbuf(slpflag, slptimeo, size, maxsize);
 2607                 if (bp == NULL) {
 2608                         if (slpflag || slptimeo)
 2609                                 return NULL;
 2610                         goto loop;
 2611                 }
 2612 
 2613                 /*
 2614                  * This code is used to make sure that a buffer is not
 2615                  * created while the getnewbuf routine is blocked.
 2616                  * This can be a problem whether the vnode is locked or not.
 2617                  * If the buffer is created out from under us, we have to
 2618                  * throw away the one we just created.
 2619                  *
 2620                  * Note: this must occur before we associate the buffer
 2621                  * with the vp especially considering limitations in
 2622                  * the splay tree implementation when dealing with duplicate
 2623                  * lblkno's.
 2624                  */
 2625                 BO_LOCK(bo);
 2626                 if (gbincore(bo, blkno)) {
 2627                         BO_UNLOCK(bo);
 2628                         bp->b_flags |= B_INVAL;
 2629                         brelse(bp);
 2630                         goto loop;
 2631                 }
 2632 
 2633                 /*
 2634                  * Insert the buffer into the hash, so that it can
 2635                  * be found by incore.
 2636                  */
 2637                 bp->b_blkno = bp->b_lblkno = blkno;
 2638                 bp->b_offset = offset;
 2639                 bgetvp(vp, bp);
 2640                 BO_UNLOCK(bo);
 2641 
 2642                 /*
 2643                  * set B_VMIO bit.  allocbuf() the buffer bigger.  Since the
 2644                  * buffer size starts out as 0, B_CACHE will be set by
 2645                  * allocbuf() for the VMIO case prior to it testing the
 2646                  * backing store for validity.
 2647                  */
 2648 
 2649                 if (vmio) {
 2650                         bp->b_flags |= B_VMIO;
 2651 #if defined(VFS_BIO_DEBUG)
 2652                         if (vn_canvmio(vp) != TRUE)
 2653                                 printf("getblk: VMIO on vnode type %d\n",
 2654                                         vp->v_type);
 2655 #endif
 2656                         KASSERT(vp->v_object == bp->b_bufobj->bo_object,
 2657                             ("ARGH! different b_bufobj->bo_object %p %p %p\n",
 2658                             bp, vp->v_object, bp->b_bufobj->bo_object));
 2659                 } else {
 2660                         bp->b_flags &= ~B_VMIO;
 2661                         KASSERT(bp->b_bufobj->bo_object == NULL,
 2662                             ("ARGH! has b_bufobj->bo_object %p %p\n",
 2663                             bp, bp->b_bufobj->bo_object));
 2664                 }
 2665 
 2666                 allocbuf(bp, size);
 2667                 bp->b_flags &= ~B_DONE;
 2668         }
 2669         CTR4(KTR_BUF, "getblk(%p, %ld, %d) = %p", vp, (long)blkno, size, bp);
 2670         KASSERT(BUF_REFCNT(bp) == 1, ("getblk: bp %p not locked",bp));
 2671         KASSERT(bp->b_bufobj == bo,
 2672             ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
 2673         return (bp);
 2674 }
 2675 
 2676 /*
 2677  * Get an empty, disassociated buffer of given size.  The buffer is initially
 2678  * set to B_INVAL.
 2679  */
 2680 struct buf *
 2681 geteblk(int size)
 2682 {
 2683         struct buf *bp;
 2684         int maxsize;
 2685 
 2686         maxsize = (size + BKVAMASK) & ~BKVAMASK;
 2687         while ((bp = getnewbuf(0, 0, size, maxsize)) == 0)
 2688                 continue;
 2689         allocbuf(bp, size);
 2690         bp->b_flags |= B_INVAL; /* b_dep cleared by getnewbuf() */
 2691         KASSERT(BUF_REFCNT(bp) == 1, ("geteblk: bp %p not locked",bp));
 2692         return (bp);
 2693 }
 2694 
 2695 
 2696 /*
 2697  * This code constitutes the buffer memory from either anonymous system
 2698  * memory (in the case of non-VMIO operations) or from an associated
 2699  * VM object (in the case of VMIO operations).  This code is able to
 2700  * resize a buffer up or down.
 2701  *
 2702  * Note that this code is tricky, and has many complications to resolve
 2703  * deadlock or inconsistant data situations.  Tread lightly!!! 
 2704  * There are B_CACHE and B_DELWRI interactions that must be dealt with by 
 2705  * the caller.  Calling this code willy nilly can result in the loss of data.
 2706  *
 2707  * allocbuf() only adjusts B_CACHE for VMIO buffers.  getblk() deals with
 2708  * B_CACHE for the non-VMIO case.
 2709  */
 2710 
 2711 int
 2712 allocbuf(struct buf *bp, int size)
 2713 {
 2714         int newbsize, mbsize;
 2715         int i;
 2716 
 2717         if (BUF_REFCNT(bp) == 0)
 2718                 panic("allocbuf: buffer not busy");
 2719 
 2720         if (bp->b_kvasize < size)
 2721                 panic("allocbuf: buffer too small");
 2722 
 2723         if ((bp->b_flags & B_VMIO) == 0) {
 2724                 caddr_t origbuf;
 2725                 int origbufsize;
 2726                 /*
 2727                  * Just get anonymous memory from the kernel.  Don't
 2728                  * mess with B_CACHE.
 2729                  */
 2730                 mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
 2731                 if (bp->b_flags & B_MALLOC)
 2732                         newbsize = mbsize;
 2733                 else
 2734                         newbsize = round_page(size);
 2735 
 2736                 if (newbsize < bp->b_bufsize) {
 2737                         /*
 2738                          * malloced buffers are not shrunk
 2739                          */
 2740                         if (bp->b_flags & B_MALLOC) {
 2741                                 if (newbsize) {
 2742                                         bp->b_bcount = size;
 2743                                 } else {
 2744                                         free(bp->b_data, M_BIOBUF);
 2745                                         if (bp->b_bufsize) {
 2746                                                 atomic_subtract_int(
 2747                                                     &bufmallocspace,
 2748                                                     bp->b_bufsize);
 2749                                                 bufspacewakeup();
 2750                                                 bp->b_bufsize = 0;
 2751                                         }
 2752                                         bp->b_saveaddr = bp->b_kvabase;
 2753                                         bp->b_data = bp->b_saveaddr;
 2754                                         bp->b_bcount = 0;
 2755                                         bp->b_flags &= ~B_MALLOC;
 2756                                 }
 2757                                 return 1;
 2758                         }               
 2759                         vm_hold_free_pages(
 2760                             bp,
 2761                             (vm_offset_t) bp->b_data + newbsize,
 2762                             (vm_offset_t) bp->b_data + bp->b_bufsize);
 2763                 } else if (newbsize > bp->b_bufsize) {
 2764                         /*
 2765                          * We only use malloced memory on the first allocation.
 2766                          * and revert to page-allocated memory when the buffer
 2767                          * grows.
 2768                          */
 2769                         /*
 2770                          * There is a potential smp race here that could lead
 2771                          * to bufmallocspace slightly passing the max.  It
 2772                          * is probably extremely rare and not worth worrying
 2773                          * over.
 2774                          */
 2775                         if ( (bufmallocspace < maxbufmallocspace) &&
 2776                                 (bp->b_bufsize == 0) &&
 2777                                 (mbsize <= PAGE_SIZE/2)) {
 2778 
 2779                                 bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
 2780                                 bp->b_bufsize = mbsize;
 2781                                 bp->b_bcount = size;
 2782                                 bp->b_flags |= B_MALLOC;
 2783                                 atomic_add_int(&bufmallocspace, mbsize);
 2784                                 return 1;
 2785                         }
 2786                         origbuf = NULL;
 2787                         origbufsize = 0;
 2788                         /*
 2789                          * If the buffer is growing on its other-than-first allocation,
 2790                          * then we revert to the page-allocation scheme.
 2791                          */
 2792                         if (bp->b_flags & B_MALLOC) {
 2793                                 origbuf = bp->b_data;
 2794                                 origbufsize = bp->b_bufsize;
 2795                                 bp->b_data = bp->b_kvabase;
 2796                                 if (bp->b_bufsize) {
 2797                                         atomic_subtract_int(&bufmallocspace,
 2798                                             bp->b_bufsize);
 2799                                         bufspacewakeup();
 2800                                         bp->b_bufsize = 0;
 2801                                 }
 2802                                 bp->b_flags &= ~B_MALLOC;
 2803                                 newbsize = round_page(newbsize);
 2804                         }
 2805                         vm_hold_load_pages(
 2806                             bp,
 2807                             (vm_offset_t) bp->b_data + bp->b_bufsize,
 2808                             (vm_offset_t) bp->b_data + newbsize);
 2809                         if (origbuf) {
 2810                                 bcopy(origbuf, bp->b_data, origbufsize);
 2811                                 free(origbuf, M_BIOBUF);
 2812                         }
 2813                 }
 2814         } else {
 2815                 int desiredpages;
 2816 
 2817                 newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
 2818                 desiredpages = (size == 0) ? 0 :
 2819                         num_pages((bp->b_offset & PAGE_MASK) + newbsize);
 2820 
 2821                 if (bp->b_flags & B_MALLOC)
 2822                         panic("allocbuf: VMIO buffer can't be malloced");
 2823                 /*
 2824                  * Set B_CACHE initially if buffer is 0 length or will become
 2825                  * 0-length.
 2826                  */
 2827                 if (size == 0 || bp->b_bufsize == 0)
 2828                         bp->b_flags |= B_CACHE;
 2829 
 2830                 if (newbsize < bp->b_bufsize) {
 2831                         /*
 2832                          * DEV_BSIZE aligned new buffer size is less then the
 2833                          * DEV_BSIZE aligned existing buffer size.  Figure out
 2834                          * if we have to remove any pages.
 2835                          */
 2836                         if (desiredpages < bp->b_npages) {
 2837                                 vm_page_t m;
 2838 
 2839                                 VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
 2840                                 vm_page_lock_queues();
 2841                                 for (i = desiredpages; i < bp->b_npages; i++) {
 2842                                         /*
 2843                                          * the page is not freed here -- it
 2844                                          * is the responsibility of 
 2845                                          * vnode_pager_setsize
 2846                                          */
 2847                                         m = bp->b_pages[i];
 2848                                         KASSERT(m != bogus_page,
 2849                                             ("allocbuf: bogus page found"));
 2850                                         while (vm_page_sleep_if_busy(m, TRUE, "biodep"))
 2851                                                 vm_page_lock_queues();
 2852 
 2853                                         bp->b_pages[i] = NULL;
 2854                                         vm_page_unwire(m, 0);
 2855                                 }
 2856                                 vm_page_unlock_queues();
 2857                                 VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
 2858                                 pmap_qremove((vm_offset_t) trunc_page((vm_offset_t)bp->b_data) +
 2859                                     (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
 2860                                 bp->b_npages = desiredpages;
 2861                         }
 2862                 } else if (size > bp->b_bcount) {
 2863                         /*
 2864                          * We are growing the buffer, possibly in a 
 2865                          * byte-granular fashion.
 2866                          */
 2867                         struct vnode *vp;
 2868                         vm_object_t obj;
 2869                         vm_offset_t toff;
 2870                         vm_offset_t tinc;
 2871 
 2872                         /*
 2873                          * Step 1, bring in the VM pages from the object, 
 2874                          * allocating them if necessary.  We must clear
 2875                          * B_CACHE if these pages are not valid for the 
 2876                          * range covered by the buffer.
 2877                          */
 2878 
 2879                         vp = bp->b_vp;
 2880                         obj = bp->b_bufobj->bo_object;
 2881 
 2882                         VM_OBJECT_LOCK(obj);
 2883                         while (bp->b_npages < desiredpages) {
 2884                                 vm_page_t m;
 2885                                 vm_pindex_t pi;
 2886 
 2887                                 pi = OFF_TO_IDX(bp->b_offset) + bp->b_npages;
 2888                                 if ((m = vm_page_lookup(obj, pi)) == NULL) {
 2889                                         /*
 2890                                          * note: must allocate system pages
 2891                                          * since blocking here could intefere
 2892                                          * with paging I/O, no matter which
 2893                                          * process we are.
 2894                                          */
 2895                                         m = vm_page_alloc(obj, pi,
 2896                                             VM_ALLOC_NOBUSY | VM_ALLOC_SYSTEM |
 2897                                             VM_ALLOC_WIRED);
 2898                                         if (m == NULL) {
 2899                                                 atomic_add_int(&vm_pageout_deficit,
 2900                                                     desiredpages - bp->b_npages);
 2901                                                 VM_OBJECT_UNLOCK(obj);
 2902                                                 VM_WAIT;
 2903                                                 VM_OBJECT_LOCK(obj);
 2904                                         } else {
 2905                                                 if (m->valid == 0)
 2906                                                         bp->b_flags &= ~B_CACHE;
 2907                                                 bp->b_pages[bp->b_npages] = m;
 2908                                                 ++bp->b_npages;
 2909                                         }
 2910                                         continue;
 2911                                 }
 2912 
 2913                                 /*
 2914                                  * We found a page.  If we have to sleep on it,
 2915                                  * retry because it might have gotten freed out
 2916                                  * from under us.
 2917                                  *
 2918                                  * We can only test VPO_BUSY here.  Blocking on
 2919                                  * m->busy might lead to a deadlock:
 2920                                  *
 2921                                  *  vm_fault->getpages->cluster_read->allocbuf
 2922                                  *
 2923                                  */
 2924                                 if (vm_page_sleep_if_busy(m, FALSE, "pgtblk"))
 2925                                         continue;
 2926 
 2927                                 /*
 2928                                  * We have a good page.
 2929                                  */
 2930                                 vm_page_lock_queues();
 2931                                 vm_page_wire(m);
 2932                                 vm_page_unlock_queues();
 2933                                 bp->b_pages[bp->b_npages] = m;
 2934                                 ++bp->b_npages;
 2935                         }
 2936 
 2937                         /*
 2938                          * Step 2.  We've loaded the pages into the buffer,
 2939                          * we have to figure out if we can still have B_CACHE
 2940                          * set.  Note that B_CACHE is set according to the
 2941                          * byte-granular range ( bcount and size ), new the
 2942                          * aligned range ( newbsize ).
 2943                          *
 2944                          * The VM test is against m->valid, which is DEV_BSIZE
 2945                          * aligned.  Needless to say, the validity of the data
 2946                          * needs to also be DEV_BSIZE aligned.  Note that this
 2947                          * fails with NFS if the server or some other client
 2948                          * extends the file's EOF.  If our buffer is resized, 
 2949                          * B_CACHE may remain set! XXX
 2950                          */
 2951 
 2952                         toff = bp->b_bcount;
 2953                         tinc = PAGE_SIZE - ((bp->b_offset + toff) & PAGE_MASK);
 2954 
 2955                         while ((bp->b_flags & B_CACHE) && toff < size) {
 2956                                 vm_pindex_t pi;
 2957 
 2958                                 if (tinc > (size - toff))
 2959                                         tinc = size - toff;
 2960 
 2961                                 pi = ((bp->b_offset & PAGE_MASK) + toff) >> 
 2962                                     PAGE_SHIFT;
 2963 
 2964                                 vfs_buf_test_cache(
 2965                                     bp, 
 2966                                     bp->b_offset,
 2967                                     toff, 
 2968                                     tinc, 
 2969                                     bp->b_pages[pi]
 2970                                 );
 2971                                 toff += tinc;
 2972                                 tinc = PAGE_SIZE;
 2973                         }
 2974                         VM_OBJECT_UNLOCK(obj);
 2975 
 2976                         /*
 2977                          * Step 3, fixup the KVM pmap.  Remember that
 2978                          * bp->b_data is relative to bp->b_offset, but 
 2979                          * bp->b_offset may be offset into the first page.
 2980                          */
 2981 
 2982                         bp->b_data = (caddr_t)
 2983                             trunc_page((vm_offset_t)bp->b_data);
 2984                         pmap_qenter(
 2985                             (vm_offset_t)bp->b_data,
 2986                             bp->b_pages, 
 2987                             bp->b_npages
 2988                         );
 2989                         
 2990                         bp->b_data = (caddr_t)((vm_offset_t)bp->b_data | 
 2991                             (vm_offset_t)(bp->b_offset & PAGE_MASK));
 2992                 }
 2993         }
 2994         if (newbsize < bp->b_bufsize)
 2995                 bufspacewakeup();
 2996         bp->b_bufsize = newbsize;       /* actual buffer allocation     */
 2997         bp->b_bcount = size;            /* requested buffer size        */
 2998         return 1;
 2999 }
 3000 
 3001 void
 3002 biodone(struct bio *bp)
 3003 {
 3004         void (*done)(struct bio *);
 3005 
 3006         mtx_lock(&bdonelock);
 3007         bp->bio_flags |= BIO_DONE;
 3008         done = bp->bio_done;
 3009         if (done == NULL)
 3010                 wakeup(bp);
 3011         mtx_unlock(&bdonelock);
 3012         if (done != NULL)
 3013                 done(bp);
 3014 }
 3015 
 3016 /*
 3017  * Wait for a BIO to finish.
 3018  *
 3019  * XXX: resort to a timeout for now.  The optimal locking (if any) for this
 3020  * case is not yet clear.
 3021  */
 3022 int
 3023 biowait(struct bio *bp, const char *wchan)
 3024 {
 3025 
 3026         mtx_lock(&bdonelock);
 3027         while ((bp->bio_flags & BIO_DONE) == 0)
 3028                 msleep(bp, &bdonelock, PRIBIO, wchan, hz / 10);
 3029         mtx_unlock(&bdonelock);
 3030         if (bp->bio_error != 0)
 3031                 return (bp->bio_error);
 3032         if (!(bp->bio_flags & BIO_ERROR))
 3033                 return (0);
 3034         return (EIO);
 3035 }
 3036 
 3037 void
 3038 biofinish(struct bio *bp, struct devstat *stat, int error)
 3039 {
 3040         
 3041         if (error) {
 3042                 bp->bio_error = error;
 3043                 bp->bio_flags |= BIO_ERROR;
 3044         }
 3045         if (stat != NULL)
 3046                 devstat_end_transaction_bio(stat, bp);
 3047         biodone(bp);
 3048 }
 3049 
 3050 /*
 3051  *      bufwait:
 3052  *
 3053  *      Wait for buffer I/O completion, returning error status.  The buffer
 3054  *      is left locked and B_DONE on return.  B_EINTR is converted into an EINTR
 3055  *      error and cleared.
 3056  */
 3057 int
 3058 bufwait(struct buf *bp)
 3059 {
 3060         if (bp->b_iocmd == BIO_READ)
 3061                 bwait(bp, PRIBIO, "biord");
 3062         else
 3063                 bwait(bp, PRIBIO, "biowr");
 3064         if (bp->b_flags & B_EINTR) {
 3065                 bp->b_flags &= ~B_EINTR;
 3066                 return (EINTR);
 3067         }
 3068         if (bp->b_ioflags & BIO_ERROR) {
 3069                 return (bp->b_error ? bp->b_error : EIO);
 3070         } else {
 3071                 return (0);
 3072         }
 3073 }
 3074 
 3075  /*
 3076   * Call back function from struct bio back up to struct buf.
 3077   */
 3078 static void
 3079 bufdonebio(struct bio *bip)
 3080 {
 3081         struct buf *bp;
 3082 
 3083         bp = bip->bio_caller2;
 3084         bp->b_resid = bp->b_bcount - bip->bio_completed;
 3085         bp->b_resid = bip->bio_resid;   /* XXX: remove */
 3086         bp->b_ioflags = bip->bio_flags;
 3087         bp->b_error = bip->bio_error;
 3088         if (bp->b_error)
 3089                 bp->b_ioflags |= BIO_ERROR;
 3090         bufdone(bp);
 3091         g_destroy_bio(bip);
 3092 }
 3093 
 3094 void
 3095 dev_strategy(struct cdev *dev, struct buf *bp)
 3096 {
 3097         struct cdevsw *csw;
 3098         struct bio *bip;
 3099 
 3100         if ((!bp->b_iocmd) || (bp->b_iocmd & (bp->b_iocmd - 1)))
 3101                 panic("b_iocmd botch");
 3102         for (;;) {
 3103                 bip = g_new_bio();
 3104                 if (bip != NULL)
 3105                         break;
 3106                 /* Try again later */
 3107                 tsleep(&bp, PRIBIO, "dev_strat", hz/10);
 3108         }
 3109         bip->bio_cmd = bp->b_iocmd;
 3110         bip->bio_offset = bp->b_iooffset;
 3111         bip->bio_length = bp->b_bcount;
 3112         bip->bio_bcount = bp->b_bcount; /* XXX: remove */
 3113         bip->bio_data = bp->b_data;
 3114         bip->bio_done = bufdonebio;
 3115         bip->bio_caller2 = bp;
 3116         bip->bio_dev = dev;
 3117         KASSERT(dev->si_refcount > 0,
 3118             ("dev_strategy on un-referenced struct cdev *(%s)",
 3119             devtoname(dev)));
 3120         csw = dev_refthread(dev);
 3121         if (csw == NULL) {
 3122                 g_destroy_bio(bip);
 3123                 bp->b_error = ENXIO;
 3124                 bp->b_ioflags = BIO_ERROR;
 3125                 bufdone(bp);
 3126                 return;
 3127         }
 3128         (*csw->d_strategy)(bip);
 3129         dev_relthread(dev);
 3130 }
 3131 
 3132 /*
 3133  *      bufdone:
 3134  *
 3135  *      Finish I/O on a buffer, optionally calling a completion function.
 3136  *      This is usually called from an interrupt so process blocking is
 3137  *      not allowed.
 3138  *
 3139  *      biodone is also responsible for setting B_CACHE in a B_VMIO bp.
 3140  *      In a non-VMIO bp, B_CACHE will be set on the next getblk() 
 3141  *      assuming B_INVAL is clear.
 3142  *
 3143  *      For the VMIO case, we set B_CACHE if the op was a read and no
 3144  *      read error occured, or if the op was a write.  B_CACHE is never
 3145  *      set if the buffer is invalid or otherwise uncacheable.
 3146  *
 3147  *      biodone does not mess with B_INVAL, allowing the I/O routine or the
 3148  *      initiator to leave B_INVAL set to brelse the buffer out of existance
 3149  *      in the biodone routine.
 3150  */
 3151 void
 3152 bufdone(struct buf *bp)
 3153 {
 3154         struct bufobj *dropobj;
 3155         void    (*biodone)(struct buf *);
 3156 
 3157         CTR3(KTR_BUF, "bufdone(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
 3158         dropobj = NULL;
 3159 
 3160         KASSERT(BUF_REFCNT(bp) > 0, ("biodone: bp %p not busy %d", bp,
 3161             BUF_REFCNT(bp)));
 3162         KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp));
 3163 
 3164         runningbufwakeup(bp);
 3165         if (bp->b_iocmd == BIO_WRITE)
 3166                 dropobj = bp->b_bufobj;
 3167         /* call optional completion function if requested */
 3168         if (bp->b_iodone != NULL) {
 3169                 biodone = bp->b_iodone;
 3170                 bp->b_iodone = NULL;
 3171                 (*biodone) (bp);
 3172                 if (dropobj)
 3173                         bufobj_wdrop(dropobj);
 3174                 return;
 3175         }
 3176 
 3177         bufdone_finish(bp);
 3178 
 3179         if (dropobj)
 3180                 bufobj_wdrop(dropobj);
 3181 }
 3182 
 3183 void
 3184 bufdone_finish(struct buf *bp)
 3185 {
 3186         KASSERT(BUF_REFCNT(bp) > 0, ("biodone: bp %p not busy %d", bp,
 3187             BUF_REFCNT(bp)));
 3188 
 3189         if (!LIST_EMPTY(&bp->b_dep))
 3190                 buf_complete(bp);
 3191 
 3192         if (bp->b_flags & B_VMIO) {
 3193                 int i;
 3194                 vm_ooffset_t foff;
 3195                 vm_page_t m;
 3196                 vm_object_t obj;
 3197                 int iosize;
 3198                 struct vnode *vp = bp->b_vp;
 3199                 boolean_t are_queues_locked;
 3200 
 3201                 obj = bp->b_bufobj->bo_object;
 3202 
 3203 #if defined(VFS_BIO_DEBUG)
 3204                 mp_fixme("usecount and vflag accessed without locks.");
 3205                 if (vp->v_usecount == 0) {
 3206                         panic("biodone: zero vnode ref count");
 3207                 }
 3208 
 3209                 KASSERT(vp->v_object != NULL,
 3210                         ("biodone: vnode %p has no vm_object", vp));
 3211 #endif
 3212 
 3213                 foff = bp->b_offset;
 3214                 KASSERT(bp->b_offset != NOOFFSET,
 3215                     ("biodone: no buffer offset"));
 3216 
 3217                 VM_OBJECT_LOCK(obj);
 3218 #if defined(VFS_BIO_DEBUG)
 3219                 if (obj->paging_in_progress < bp->b_npages) {
 3220                         printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
 3221                             obj->paging_in_progress, bp->b_npages);
 3222                 }
 3223 #endif
 3224 
 3225                 /*
 3226                  * Set B_CACHE if the op was a normal read and no error
 3227                  * occured.  B_CACHE is set for writes in the b*write()
 3228                  * routines.
 3229                  */
 3230                 iosize = bp->b_bcount - bp->b_resid;
 3231                 if (bp->b_iocmd == BIO_READ &&
 3232                     !(bp->b_flags & (B_INVAL|B_NOCACHE)) &&
 3233                     !(bp->b_ioflags & BIO_ERROR)) {
 3234                         bp->b_flags |= B_CACHE;
 3235                 }
 3236                 if (bp->b_iocmd == BIO_READ) {
 3237                         vm_page_lock_queues();
 3238                         are_queues_locked = TRUE;
 3239                 } else
 3240                         are_queues_locked = FALSE;
 3241                 for (i = 0; i < bp->b_npages; i++) {
 3242                         int bogusflag = 0;
 3243                         int resid;
 3244 
 3245                         resid = ((foff + PAGE_SIZE) & ~(off_t)PAGE_MASK) - foff;
 3246                         if (resid > iosize)
 3247                                 resid = iosize;
 3248 
 3249                         /*
 3250                          * cleanup bogus pages, restoring the originals
 3251                          */
 3252                         m = bp->b_pages[i];
 3253                         if (m == bogus_page) {
 3254                                 bogusflag = 1;
 3255                                 m = vm_page_lookup(obj, OFF_TO_IDX(foff));
 3256                                 if (m == NULL)
 3257                                         panic("biodone: page disappeared!");
 3258                                 bp->b_pages[i] = m;
 3259                                 pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
 3260                                     bp->b_pages, bp->b_npages);
 3261                         }
 3262 #if defined(VFS_BIO_DEBUG)
 3263                         if (OFF_TO_IDX(foff) != m->pindex) {
 3264                                 printf(
 3265 "biodone: foff(%jd)/m->pindex(%ju) mismatch\n",
 3266                                     (intmax_t)foff, (uintmax_t)m->pindex);
 3267                         }
 3268 #endif
 3269 
 3270                         /*
 3271                          * In the write case, the valid and clean bits are
 3272                          * already changed correctly ( see bdwrite() ), so we 
 3273                          * only need to do this here in the read case.
 3274                          */
 3275                         if ((bp->b_iocmd == BIO_READ) && !bogusflag && resid > 0) {
 3276                                 vfs_page_set_valid(bp, foff, i, m);
 3277                         }
 3278 
 3279                         /*
 3280                          * when debugging new filesystems or buffer I/O methods, this
 3281                          * is the most common error that pops up.  if you see this, you
 3282                          * have not set the page busy flag correctly!!!
 3283                          */
 3284                         if (m->busy == 0) {
 3285                                 printf("biodone: page busy < 0, "
 3286                                     "pindex: %d, foff: 0x(%x,%x), "
 3287                                     "resid: %d, index: %d\n",
 3288                                     (int) m->pindex, (int)(foff >> 32),
 3289                                                 (int) foff & 0xffffffff, resid, i);
 3290                                 if (!vn_isdisk(vp, NULL))
 3291                                         printf(" iosize: %jd, lblkno: %jd, flags: 0x%x, npages: %d\n",
 3292                                             (intmax_t)bp->b_vp->v_mount->mnt_stat.f_iosize,
 3293                                             (intmax_t) bp->b_lblkno,
 3294                                             bp->b_flags, bp->b_npages);
 3295                                 else
 3296                                         printf(" VDEV, lblkno: %jd, flags: 0x%x, npages: %d\n",
 3297                                             (intmax_t) bp->b_lblkno,
 3298                                             bp->b_flags, bp->b_npages);
 3299                                 printf(" valid: 0x%lx, dirty: 0x%lx, wired: %d\n",
 3300                                     (u_long)m->valid, (u_long)m->dirty,
 3301                                     m->wire_count);
 3302                                 panic("biodone: page busy < 0\n");
 3303                         }
 3304                         vm_page_io_finish(m);
 3305                         vm_object_pip_subtract(obj, 1);
 3306                         foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
 3307                         iosize -= resid;
 3308                 }
 3309                 if (are_queues_locked)
 3310                         vm_page_unlock_queues();
 3311                 vm_object_pip_wakeupn(obj, 0);
 3312                 VM_OBJECT_UNLOCK(obj);
 3313         }
 3314 
 3315         /*
 3316          * For asynchronous completions, release the buffer now. The brelse
 3317          * will do a wakeup there if necessary - so no need to do a wakeup
 3318          * here in the async case. The sync case always needs to do a wakeup.
 3319          */
 3320 
 3321         if (bp->b_flags & B_ASYNC) {
 3322                 if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_RELBUF)) || (bp->b_ioflags & BIO_ERROR))
 3323                         brelse(bp);
 3324                 else
 3325                         bqrelse(bp);
 3326         } else
 3327                 bdone(bp);
 3328 }
 3329 
 3330 /*
 3331  * This routine is called in lieu of iodone in the case of
 3332  * incomplete I/O.  This keeps the busy status for pages
 3333  * consistant.
 3334  */
 3335 void
 3336 vfs_unbusy_pages(struct buf *bp)
 3337 {
 3338         int i;
 3339         vm_object_t obj;
 3340         vm_page_t m;
 3341 
 3342         runningbufwakeup(bp);
 3343         if (!(bp->b_flags & B_VMIO))
 3344                 return;
 3345 
 3346         obj = bp->b_bufobj->bo_object;
 3347         VM_OBJECT_LOCK(obj);
 3348         for (i = 0; i < bp->b_npages; i++) {
 3349                 m = bp->b_pages[i];
 3350                 if (m == bogus_page) {
 3351                         m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i);
 3352                         if (!m)
 3353                                 panic("vfs_unbusy_pages: page missing\n");
 3354                         bp->b_pages[i] = m;
 3355                         pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
 3356                             bp->b_pages, bp->b_npages);
 3357                 }
 3358                 vm_object_pip_subtract(obj, 1);
 3359                 vm_page_io_finish(m);
 3360         }
 3361         vm_object_pip_wakeupn(obj, 0);
 3362         VM_OBJECT_UNLOCK(obj);
 3363 }
 3364 
 3365 /*
 3366  * vfs_page_set_valid:
 3367  *
 3368  *      Set the valid bits in a page based on the supplied offset.   The
 3369  *      range is restricted to the buffer's size.
 3370  *
 3371  *      This routine is typically called after a read completes.
 3372  */
 3373 static void
 3374 vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m)
 3375 {
 3376         vm_ooffset_t soff, eoff;
 3377 
 3378         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 3379         /*
 3380          * Start and end offsets in buffer.  eoff - soff may not cross a
 3381          * page boundry or cross the end of the buffer.  The end of the
 3382          * buffer, in this case, is our file EOF, not the allocation size
 3383          * of the buffer.
 3384          */
 3385         soff = off;
 3386         eoff = (off + PAGE_SIZE) & ~(off_t)PAGE_MASK;
 3387         if (eoff > bp->b_offset + bp->b_bcount)
 3388                 eoff = bp->b_offset + bp->b_bcount;
 3389 
 3390         /*
 3391          * Set valid range.  This is typically the entire buffer and thus the
 3392          * entire page.
 3393          */
 3394         if (eoff > soff) {
 3395                 vm_page_set_validclean(
 3396                     m,
 3397                    (vm_offset_t) (soff & PAGE_MASK),
 3398                    (vm_offset_t) (eoff - soff)
 3399                 );
 3400         }
 3401 }
 3402 
 3403 /*
 3404  * This routine is called before a device strategy routine.
 3405  * It is used to tell the VM system that paging I/O is in
 3406  * progress, and treat the pages associated with the buffer
 3407  * almost as being VPO_BUSY.  Also the object paging_in_progress
 3408  * flag is handled to make sure that the object doesn't become
 3409  * inconsistant.
 3410  *
 3411  * Since I/O has not been initiated yet, certain buffer flags
 3412  * such as BIO_ERROR or B_INVAL may be in an inconsistant state
 3413  * and should be ignored.
 3414  */
 3415 void
 3416 vfs_busy_pages(struct buf *bp, int clear_modify)
 3417 {
 3418         int i, bogus;
 3419         vm_object_t obj;
 3420         vm_ooffset_t foff;
 3421         vm_page_t m;
 3422 
 3423         if (!(bp->b_flags & B_VMIO))
 3424                 return;
 3425 
 3426         obj = bp->b_bufobj->bo_object;
 3427         foff = bp->b_offset;
 3428         KASSERT(bp->b_offset != NOOFFSET,
 3429             ("vfs_busy_pages: no buffer offset"));
 3430         VM_OBJECT_LOCK(obj);
 3431         if (bp->b_bufsize != 0)
 3432                 vfs_setdirty_locked_object(bp);
 3433 retry:
 3434         for (i = 0; i < bp->b_npages; i++) {
 3435                 m = bp->b_pages[i];
 3436 
 3437                 if (vm_page_sleep_if_busy(m, FALSE, "vbpage"))
 3438                         goto retry;
 3439         }
 3440         bogus = 0;
 3441         vm_page_lock_queues();
 3442         for (i = 0; i < bp->b_npages; i++) {
 3443                 m = bp->b_pages[i];
 3444 
 3445                 if ((bp->b_flags & B_CLUSTER) == 0) {
 3446                         vm_object_pip_add(obj, 1);
 3447                         vm_page_io_start(m);
 3448                 }
 3449                 /*
 3450                  * When readying a buffer for a read ( i.e
 3451                  * clear_modify == 0 ), it is important to do
 3452                  * bogus_page replacement for valid pages in 
 3453                  * partially instantiated buffers.  Partially 
 3454                  * instantiated buffers can, in turn, occur when
 3455                  * reconstituting a buffer from its VM backing store
 3456                  * base.  We only have to do this if B_CACHE is
 3457                  * clear ( which causes the I/O to occur in the
 3458                  * first place ).  The replacement prevents the read
 3459                  * I/O from overwriting potentially dirty VM-backed
 3460                  * pages.  XXX bogus page replacement is, uh, bogus.
 3461                  * It may not work properly with small-block devices.
 3462                  * We need to find a better way.
 3463                  */
 3464                 pmap_remove_all(m);
 3465                 if (clear_modify)
 3466                         vfs_page_set_valid(bp, foff, i, m);
 3467                 else if (m->valid == VM_PAGE_BITS_ALL &&
 3468                     (bp->b_flags & B_CACHE) == 0) {
 3469                         bp->b_pages[i] = bogus_page;
 3470                         bogus++;
 3471                 }
 3472                 foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
 3473         }
 3474         vm_page_unlock_queues();
 3475         VM_OBJECT_UNLOCK(obj);
 3476         if (bogus)
 3477                 pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
 3478                     bp->b_pages, bp->b_npages);
 3479 }
 3480 
 3481 /*
 3482  * Tell the VM system that the pages associated with this buffer
 3483  * are clean.  This is used for delayed writes where the data is
 3484  * going to go to disk eventually without additional VM intevention.
 3485  *
 3486  * Note that while we only really need to clean through to b_bcount, we
 3487  * just go ahead and clean through to b_bufsize.
 3488  */
 3489 static void
 3490 vfs_clean_pages(struct buf *bp)
 3491 {
 3492         int i;
 3493         vm_ooffset_t foff, noff, eoff;
 3494         vm_page_t m;
 3495 
 3496         if (!(bp->b_flags & B_VMIO))
 3497                 return;
 3498 
 3499         foff = bp->b_offset;
 3500         KASSERT(bp->b_offset != NOOFFSET,
 3501             ("vfs_clean_pages: no buffer offset"));
 3502         VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
 3503         vm_page_lock_queues();
 3504         for (i = 0; i < bp->b_npages; i++) {
 3505                 m = bp->b_pages[i];
 3506                 noff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
 3507                 eoff = noff;
 3508 
 3509                 if (eoff > bp->b_offset + bp->b_bufsize)
 3510                         eoff = bp->b_offset + bp->b_bufsize;
 3511                 vfs_page_set_valid(bp, foff, i, m);
 3512                 /* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */
 3513                 foff = noff;
 3514         }
 3515         vm_page_unlock_queues();
 3516         VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
 3517 }
 3518 
 3519 /*
 3520  *      vfs_bio_set_validclean:
 3521  *
 3522  *      Set the range within the buffer to valid and clean.  The range is 
 3523  *      relative to the beginning of the buffer, b_offset.  Note that b_offset
 3524  *      itself may be offset from the beginning of the first page.
 3525  *
 3526  */
 3527 
 3528 void   
 3529 vfs_bio_set_validclean(struct buf *bp, int base, int size)
 3530 {
 3531         int i, n;
 3532         vm_page_t m;
 3533 
 3534         if (!(bp->b_flags & B_VMIO))
 3535                 return;
 3536         /*
 3537          * Fixup base to be relative to beginning of first page.
 3538          * Set initial n to be the maximum number of bytes in the
 3539          * first page that can be validated.
 3540          */
 3541 
 3542         base += (bp->b_offset & PAGE_MASK);
 3543         n = PAGE_SIZE - (base & PAGE_MASK);
 3544 
 3545         VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
 3546         vm_page_lock_queues();
 3547         for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
 3548                 m = bp->b_pages[i];
 3549                 if (n > size)
 3550                         n = size;
 3551                 vm_page_set_validclean(m, base & PAGE_MASK, n);
 3552                 base += n;
 3553                 size -= n;
 3554                 n = PAGE_SIZE;
 3555         }
 3556         vm_page_unlock_queues();
 3557         VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
 3558 }
 3559 
 3560 /*
 3561  *      vfs_bio_clrbuf:
 3562  *
 3563  *      clear a buffer.  This routine essentially fakes an I/O, so we need
 3564  *      to clear BIO_ERROR and B_INVAL.
 3565  *
 3566  *      Note that while we only theoretically need to clear through b_bcount,
 3567  *      we go ahead and clear through b_bufsize.
 3568  */
 3569 
 3570 void
 3571 vfs_bio_clrbuf(struct buf *bp) 
 3572 {
 3573         int i, j, mask = 0;
 3574         caddr_t sa, ea;
 3575 
 3576         if ((bp->b_flags & (B_VMIO | B_MALLOC)) != B_VMIO) {
 3577                 clrbuf(bp);
 3578                 return;
 3579         }
 3580 
 3581         bp->b_flags &= ~B_INVAL;
 3582         bp->b_ioflags &= ~BIO_ERROR;
 3583         VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
 3584         if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
 3585             (bp->b_offset & PAGE_MASK) == 0) {
 3586                 if (bp->b_pages[0] == bogus_page)
 3587                         goto unlock;
 3588                 mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
 3589                 VM_OBJECT_LOCK_ASSERT(bp->b_pages[0]->object, MA_OWNED);
 3590                 if ((bp->b_pages[0]->valid & mask) == mask)
 3591                         goto unlock;
 3592                 if (((bp->b_pages[0]->flags & PG_ZERO) == 0) &&
 3593                     ((bp->b_pages[0]->valid & mask) == 0)) {
 3594                         bzero(bp->b_data, bp->b_bufsize);
 3595                         bp->b_pages[0]->valid |= mask;
 3596                         goto unlock;
 3597                 }
 3598         }
 3599         ea = sa = bp->b_data;
 3600         for(i = 0; i < bp->b_npages; i++, sa = ea) {
 3601                 ea = (caddr_t)trunc_page((vm_offset_t)sa + PAGE_SIZE);
 3602                 ea = (caddr_t)(vm_offset_t)ulmin(
 3603                     (u_long)(vm_offset_t)ea,
 3604                     (u_long)(vm_offset_t)bp->b_data + bp->b_bufsize);
 3605                 if (bp->b_pages[i] == bogus_page)
 3606                         continue;
 3607                 j = ((vm_offset_t)sa & PAGE_MASK) / DEV_BSIZE;
 3608                 mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j;
 3609                 VM_OBJECT_LOCK_ASSERT(bp->b_pages[i]->object, MA_OWNED);
 3610                 if ((bp->b_pages[i]->valid & mask) == mask)
 3611                         continue;
 3612                 if ((bp->b_pages[i]->valid & mask) == 0) {
 3613                         if ((bp->b_pages[i]->flags & PG_ZERO) == 0)
 3614                                 bzero(sa, ea - sa);
 3615                 } else {
 3616                         for (; sa < ea; sa += DEV_BSIZE, j++) {
 3617                                 if (((bp->b_pages[i]->flags & PG_ZERO) == 0) &&
 3618                                     (bp->b_pages[i]->valid & (1 << j)) == 0)
 3619                                         bzero(sa, DEV_BSIZE);
 3620                         }
 3621                 }
 3622                 bp->b_pages[i]->valid |= mask;
 3623         }
 3624 unlock:
 3625         VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
 3626         bp->b_resid = 0;
 3627 }
 3628 
 3629 /*
 3630  * vm_hold_load_pages and vm_hold_free_pages get pages into
 3631  * a buffers address space.  The pages are anonymous and are
 3632  * not associated with a file object.
 3633  */
 3634 static void
 3635 vm_hold_load_pages(struct buf *bp, vm_offset_t from, vm_offset_t to)
 3636 {
 3637         vm_offset_t pg;
 3638         vm_page_t p;
 3639         int index;
 3640 
 3641         to = round_page(to);
 3642         from = round_page(from);
 3643         index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
 3644 
 3645         VM_OBJECT_LOCK(kernel_object);
 3646         for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
 3647 tryagain:
 3648                 /*
 3649                  * note: must allocate system pages since blocking here
 3650                  * could intefere with paging I/O, no matter which
 3651                  * process we are.
 3652                  */
 3653                 p = vm_page_alloc(kernel_object,
 3654                         ((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
 3655                     VM_ALLOC_NOBUSY | VM_ALLOC_SYSTEM | VM_ALLOC_WIRED);
 3656                 if (!p) {
 3657                         atomic_add_int(&vm_pageout_deficit,
 3658                             (to - pg) >> PAGE_SHIFT);
 3659                         VM_OBJECT_UNLOCK(kernel_object);
 3660                         VM_WAIT;
 3661                         VM_OBJECT_LOCK(kernel_object);
 3662                         goto tryagain;
 3663                 }
 3664                 p->valid = VM_PAGE_BITS_ALL;
 3665                 pmap_qenter(pg, &p, 1);
 3666                 bp->b_pages[index] = p;
 3667         }
 3668         VM_OBJECT_UNLOCK(kernel_object);
 3669         bp->b_npages = index;
 3670 }
 3671 
 3672 /* Return pages associated with this buf to the vm system */
 3673 static void
 3674 vm_hold_free_pages(struct buf *bp, vm_offset_t from, vm_offset_t to)
 3675 {
 3676         vm_offset_t pg;
 3677         vm_page_t p;
 3678         int index, newnpages;
 3679 
 3680         from = round_page(from);
 3681         to = round_page(to);
 3682         newnpages = index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
 3683 
 3684         VM_OBJECT_LOCK(kernel_object);
 3685         for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
 3686                 p = bp->b_pages[index];
 3687                 if (p && (index < bp->b_npages)) {
 3688                         if (p->busy) {
 3689                                 printf(
 3690                             "vm_hold_free_pages: blkno: %jd, lblkno: %jd\n",
 3691                                     (intmax_t)bp->b_blkno,
 3692                                     (intmax_t)bp->b_lblkno);
 3693                         }
 3694                         bp->b_pages[index] = NULL;
 3695                         pmap_qremove(pg, 1);
 3696                         vm_page_lock_queues();
 3697                         vm_page_unwire(p, 0);
 3698                         vm_page_free(p);
 3699                         vm_page_unlock_queues();
 3700                 }
 3701         }
 3702         VM_OBJECT_UNLOCK(kernel_object);
 3703         bp->b_npages = newnpages;
 3704 }
 3705 
 3706 /*
 3707  * Map an IO request into kernel virtual address space.
 3708  *
 3709  * All requests are (re)mapped into kernel VA space.
 3710  * Notice that we use b_bufsize for the size of the buffer
 3711  * to be mapped.  b_bcount might be modified by the driver.
 3712  *
 3713  * Note that even if the caller determines that the address space should
 3714  * be valid, a race or a smaller-file mapped into a larger space may
 3715  * actually cause vmapbuf() to fail, so all callers of vmapbuf() MUST
 3716  * check the return value.
 3717  */
 3718 int
 3719 vmapbuf(struct buf *bp)
 3720 {
 3721         caddr_t addr, kva;
 3722         vm_prot_t prot;
 3723         int pidx, i;
 3724         struct vm_page *m;
 3725         struct pmap *pmap = &curproc->p_vmspace->vm_pmap;
 3726 
 3727         if (bp->b_bufsize < 0)
 3728                 return (-1);
 3729         prot = VM_PROT_READ;
 3730         if (bp->b_iocmd == BIO_READ)
 3731                 prot |= VM_PROT_WRITE;  /* Less backwards than it looks */
 3732         for (addr = (caddr_t)trunc_page((vm_offset_t)bp->b_data), pidx = 0;
 3733              addr < bp->b_data + bp->b_bufsize;
 3734              addr += PAGE_SIZE, pidx++) {
 3735                 /*
 3736                  * Do the vm_fault if needed; do the copy-on-write thing
 3737                  * when reading stuff off device into memory.
 3738                  *
 3739                  * NOTE! Must use pmap_extract() because addr may be in
 3740                  * the userland address space, and kextract is only guarenteed
 3741                  * to work for the kernland address space (see: sparc64 port).
 3742                  */
 3743 retry:
 3744                 if (vm_fault_quick(addr >= bp->b_data ? addr : bp->b_data,
 3745                     prot) < 0) {
 3746                         vm_page_lock_queues();
 3747                         for (i = 0; i < pidx; ++i) {
 3748                                 vm_page_unhold(bp->b_pages[i]);
 3749                                 bp->b_pages[i] = NULL;
 3750                         }
 3751                         vm_page_unlock_queues();
 3752                         return(-1);
 3753                 }
 3754                 m = pmap_extract_and_hold(pmap, (vm_offset_t)addr, prot);
 3755                 if (m == NULL)
 3756                         goto retry;
 3757                 bp->b_pages[pidx] = m;
 3758         }
 3759         if (pidx > btoc(MAXPHYS))
 3760                 panic("vmapbuf: mapped more than MAXPHYS");
 3761         pmap_qenter((vm_offset_t)bp->b_saveaddr, bp->b_pages, pidx);
 3762         
 3763         kva = bp->b_saveaddr;
 3764         bp->b_npages = pidx;
 3765         bp->b_saveaddr = bp->b_data;
 3766         bp->b_data = kva + (((vm_offset_t) bp->b_data) & PAGE_MASK);
 3767         return(0);
 3768 }
 3769 
 3770 /*
 3771  * Free the io map PTEs associated with this IO operation.
 3772  * We also invalidate the TLB entries and restore the original b_addr.
 3773  */
 3774 void
 3775 vunmapbuf(struct buf *bp)
 3776 {
 3777         int pidx;
 3778         int npages;
 3779 
 3780         npages = bp->b_npages;
 3781         pmap_qremove(trunc_page((vm_offset_t)bp->b_data), npages);
 3782         vm_page_lock_queues();
 3783         for (pidx = 0; pidx < npages; pidx++)
 3784                 vm_page_unhold(bp->b_pages[pidx]);
 3785         vm_page_unlock_queues();
 3786 
 3787         bp->b_data = bp->b_saveaddr;
 3788 }
 3789 
 3790 void
 3791 bdone(struct buf *bp)
 3792 {
 3793 
 3794         mtx_lock(&bdonelock);
 3795         bp->b_flags |= B_DONE;
 3796         wakeup(bp);
 3797         mtx_unlock(&bdonelock);
 3798 }
 3799 
 3800 void
 3801 bwait(struct buf *bp, u_char pri, const char *wchan)
 3802 {
 3803 
 3804         mtx_lock(&bdonelock);
 3805         while ((bp->b_flags & B_DONE) == 0)
 3806                 msleep(bp, &bdonelock, pri, wchan, 0);
 3807         mtx_unlock(&bdonelock);
 3808 }
 3809 
 3810 int
 3811 bufsync(struct bufobj *bo, int waitfor, struct thread *td)
 3812 {
 3813 
 3814         return (VOP_FSYNC(bo->__bo_vnode, waitfor, td));
 3815 }
 3816 
 3817 void
 3818 bufstrategy(struct bufobj *bo, struct buf *bp)
 3819 {
 3820         int i = 0;
 3821         struct vnode *vp;
 3822 
 3823         vp = bp->b_vp;
 3824         KASSERT(vp == bo->bo_private, ("Inconsistent vnode bufstrategy"));
 3825         KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
 3826             ("Wrong vnode in bufstrategy(bp=%p, vp=%p)", bp, vp));
 3827         i = VOP_STRATEGY(vp, bp);
 3828         KASSERT(i == 0, ("VOP_STRATEGY failed bp=%p vp=%p", bp, bp->b_vp));
 3829 }
 3830 
 3831 void
 3832 bufobj_wrefl(struct bufobj *bo)
 3833 {
 3834 
 3835         KASSERT(bo != NULL, ("NULL bo in bufobj_wref"));
 3836         ASSERT_BO_LOCKED(bo);
 3837         bo->bo_numoutput++;
 3838 }
 3839 
 3840 void
 3841 bufobj_wref(struct bufobj *bo)
 3842 {
 3843 
 3844         KASSERT(bo != NULL, ("NULL bo in bufobj_wref"));
 3845         BO_LOCK(bo);
 3846         bo->bo_numoutput++;
 3847         BO_UNLOCK(bo);
 3848 }
 3849 
 3850 void
 3851 bufobj_wdrop(struct bufobj *bo)
 3852 {
 3853 
 3854         KASSERT(bo != NULL, ("NULL bo in bufobj_wdrop"));
 3855         BO_LOCK(bo);
 3856         KASSERT(bo->bo_numoutput > 0, ("bufobj_wdrop non-positive count"));
 3857         if ((--bo->bo_numoutput == 0) && (bo->bo_flag & BO_WWAIT)) {
 3858                 bo->bo_flag &= ~BO_WWAIT;
 3859                 wakeup(&bo->bo_numoutput);
 3860         }
 3861         BO_UNLOCK(bo);
 3862 }
 3863 
 3864 int
 3865 bufobj_wwait(struct bufobj *bo, int slpflag, int timeo)
 3866 {
 3867         int error;
 3868 
 3869         KASSERT(bo != NULL, ("NULL bo in bufobj_wwait"));
 3870         ASSERT_BO_LOCKED(bo);
 3871         error = 0;
 3872         while (bo->bo_numoutput) {
 3873                 bo->bo_flag |= BO_WWAIT;
 3874                 error = msleep(&bo->bo_numoutput, BO_MTX(bo),
 3875                     slpflag | (PRIBIO + 1), "bo_wwait", timeo);
 3876                 if (error)
 3877                         break;
 3878         }
 3879         return (error);
 3880 }
 3881 
 3882 void
 3883 bpin(struct buf *bp)
 3884 {
 3885         mtx_lock(&bpinlock);
 3886         bp->b_pin_count++;
 3887         mtx_unlock(&bpinlock);
 3888 }
 3889 
 3890 void
 3891 bunpin(struct buf *bp)
 3892 {
 3893         mtx_lock(&bpinlock);
 3894         if (--bp->b_pin_count == 0)
 3895                 wakeup(bp);
 3896         mtx_unlock(&bpinlock);
 3897 }
 3898 
 3899 void
 3900 bunpin_wait(struct buf *bp)
 3901 {
 3902         mtx_lock(&bpinlock);
 3903         while (bp->b_pin_count > 0)
 3904                 msleep(bp, &bpinlock, PRIBIO, "bwunpin", 0);
 3905         mtx_unlock(&bpinlock);
 3906 }
 3907 
 3908 #include "opt_ddb.h"
 3909 #ifdef DDB
 3910 #include <ddb/ddb.h>
 3911 
 3912 /* DDB command to show buffer data */
 3913 DB_SHOW_COMMAND(buffer, db_show_buffer)
 3914 {
 3915         /* get args */
 3916         struct buf *bp = (struct buf *)addr;
 3917 
 3918         if (!have_addr) {
 3919                 db_printf("usage: show buffer <addr>\n");
 3920                 return;
 3921         }
 3922 
 3923         db_printf("buf at %p\n", bp);
 3924         db_printf("b_flags = 0x%b\n", (u_int)bp->b_flags, PRINT_BUF_FLAGS);
 3925         db_printf(
 3926             "b_error = %d, b_bufsize = %ld, b_bcount = %ld, b_resid = %ld\n"
 3927             "b_bufobj = (%p), b_data = %p, b_blkno = %jd, b_dep = %p\n",
 3928             bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
 3929             bp->b_bufobj, bp->b_data, (intmax_t)bp->b_blkno,
 3930             bp->b_dep.lh_first);
 3931         if (bp->b_npages) {
 3932                 int i;
 3933                 db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
 3934                 for (i = 0; i < bp->b_npages; i++) {
 3935                         vm_page_t m;
 3936                         m = bp->b_pages[i];
 3937                         db_printf("(%p, 0x%lx, 0x%lx)", (void *)m->object,
 3938                             (u_long)m->pindex, (u_long)VM_PAGE_TO_PHYS(m));
 3939                         if ((i + 1) < bp->b_npages)
 3940                                 db_printf(",");
 3941                 }
 3942                 db_printf("\n");
 3943         }
 3944         lockmgr_printinfo(&bp->b_lock);
 3945 }
 3946 
 3947 DB_SHOW_COMMAND(lockedbufs, lockedbufs)
 3948 {
 3949         struct buf *bp;
 3950         int i;
 3951 
 3952         for (i = 0; i < nbuf; i++) {
 3953                 bp = &buf[i];
 3954                 if (lockcount(&bp->b_lock)) {
 3955                         db_show_buffer((uintptr_t)bp, 1, 0, NULL);
 3956                         db_printf("\n");
 3957                 }
 3958         }
 3959 }
 3960 
 3961 DB_SHOW_COMMAND(vnodebufs, db_show_vnodebufs)
 3962 {
 3963         struct vnode *vp;
 3964         struct buf *bp;
 3965 
 3966         if (!have_addr) {
 3967                 db_printf("usage: show vnodebufs <addr>\n");
 3968                 return;
 3969         }
 3970         vp = (struct vnode *)addr;
 3971         db_printf("Clean buffers:\n");
 3972         TAILQ_FOREACH(bp, &vp->v_bufobj.bo_clean.bv_hd, b_bobufs) {
 3973                 db_show_buffer((uintptr_t)bp, 1, 0, NULL);
 3974                 db_printf("\n");
 3975         }
 3976         db_printf("Dirty buffers:\n");
 3977         TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs) {
 3978                 db_show_buffer((uintptr_t)bp, 1, 0, NULL);
 3979                 db_printf("\n");
 3980         }
 3981 }
 3982 #endif /* DDB */

Cache object: c632b2dba9ddffc6d138791f0d815b55


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.