FreeBSD/Linux Kernel Cross Reference
sys/kern/vfs_bio.c


    1 /*-
    2  * Copyright (c) 2004 Poul-Henning Kamp
    3  * Copyright (c) 1994,1997 John S. Dyson
    4  * All rights reserved.
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions
    8  * are met:
    9  * 1. Redistributions of source code must retain the above copyright
   10  *    notice, this list of conditions and the following disclaimer.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  *
   15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   25  * SUCH DAMAGE.
   26  */
   27 
   28 /*
    29  * This file contains a new buffer I/O scheme implementing a coherent
   30  * VM object and buffer cache scheme.  Pains have been taken to make
   31  * sure that the performance degradation associated with schemes such
   32  * as this is not realized.
   33  *
   34  * Author:  John S. Dyson
   35  * Significant help during the development and debugging phases
    36  * was provided by David Greenman, also of the FreeBSD core team.
   37  *
   38  * see man buf(9) for more info.
   39  */
   40 
   41 #include <sys/cdefs.h>
   42 __FBSDID("$FreeBSD: releng/6.0/sys/kern/vfs_bio.c 151853 2005-10-29 07:00:45Z scottl $");
   43 
   44 #include <sys/param.h>
   45 #include <sys/systm.h>
   46 #include <sys/bio.h>
   47 #include <sys/conf.h>
   48 #include <sys/buf.h>
   49 #include <sys/devicestat.h>
   50 #include <sys/eventhandler.h>
   51 #include <sys/lock.h>
   52 #include <sys/malloc.h>
   53 #include <sys/mount.h>
   54 #include <sys/mutex.h>
   55 #include <sys/kernel.h>
   56 #include <sys/kthread.h>
   57 #include <sys/proc.h>
   58 #include <sys/resourcevar.h>
   59 #include <sys/sysctl.h>
   60 #include <sys/vmmeter.h>
   61 #include <sys/vnode.h>
   62 #include <geom/geom.h>
   63 #include <vm/vm.h>
   64 #include <vm/vm_param.h>
   65 #include <vm/vm_kern.h>
   66 #include <vm/vm_pageout.h>
   67 #include <vm/vm_page.h>
   68 #include <vm/vm_object.h>
   69 #include <vm/vm_extern.h>
   70 #include <vm/vm_map.h>
   71 #include "opt_directio.h"
   72 #include "opt_swap.h"
   73 
   74 static MALLOC_DEFINE(M_BIOBUF, "BIO buffer", "BIO buffer");
   75 
   76 struct  bio_ops bioops;         /* I/O operation notification */
   77 
   78 struct  buf_ops buf_ops_bio = {
   79         .bop_name       =       "buf_ops_bio",
   80         .bop_write      =       bufwrite,
   81         .bop_strategy   =       bufstrategy,
   82         .bop_sync       =       bufsync,
   83 };
   84 
   85 /*
    86  * XXX buf is global because kern_shutdown.c and ffs_checkoverlap have
   87  * carnal knowledge of buffers.  This knowledge should be moved to vfs_bio.c.
   88  */
   89 struct buf *buf;                /* buffer header pool */
   90 
   91 static struct proc *bufdaemonproc;
   92 
   93 static int inmem(struct vnode *vp, daddr_t blkno);
   94 static void vm_hold_free_pages(struct buf *bp, vm_offset_t from,
   95                 vm_offset_t to);
   96 static void vm_hold_load_pages(struct buf *bp, vm_offset_t from,
   97                 vm_offset_t to);
   98 static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off,
   99                                int pageno, vm_page_t m);
  100 static void vfs_clean_pages(struct buf *bp);
  101 static void vfs_setdirty(struct buf *bp);
  102 static void vfs_vmio_release(struct buf *bp);
  103 static int vfs_bio_clcheck(struct vnode *vp, int size,
  104                 daddr_t lblkno, daddr_t blkno);
  105 static int flushbufqueues(int flushdeps);
  106 static void buf_daemon(void);
  107 static void bremfreel(struct buf *bp);
  108 
  109 int vmiodirenable = TRUE;
  110 SYSCTL_INT(_vfs, OID_AUTO, vmiodirenable, CTLFLAG_RW, &vmiodirenable, 0,
  111     "Use the VM system for directory writes");
  112 int runningbufspace;
  113 SYSCTL_INT(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD, &runningbufspace, 0,
  114     "Amount of presently outstanding async buffer io");
  115 static int bufspace;
  116 SYSCTL_INT(_vfs, OID_AUTO, bufspace, CTLFLAG_RD, &bufspace, 0,
  117     "KVA memory used for bufs");
  118 static int maxbufspace;
  119 SYSCTL_INT(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RD, &maxbufspace, 0,
  120     "Maximum allowed value of bufspace (including buf_daemon)");
  121 static int bufmallocspace;
  122 SYSCTL_INT(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD, &bufmallocspace, 0,
  123     "Amount of malloced memory for buffers");
  124 static int maxbufmallocspace;
  125 SYSCTL_INT(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW, &maxbufmallocspace, 0,
  126     "Maximum amount of malloced memory for buffers");
  127 static int lobufspace;
  128 SYSCTL_INT(_vfs, OID_AUTO, lobufspace, CTLFLAG_RD, &lobufspace, 0,
  129     "Minimum amount of buffers we want to have");
  130 int hibufspace;
  131 SYSCTL_INT(_vfs, OID_AUTO, hibufspace, CTLFLAG_RD, &hibufspace, 0,
  132     "Maximum allowed value of bufspace (excluding buf_daemon)");
  133 static int bufreusecnt;
  134 SYSCTL_INT(_vfs, OID_AUTO, bufreusecnt, CTLFLAG_RW, &bufreusecnt, 0,
  135     "Number of times we have reused a buffer");
  136 static int buffreekvacnt;
  137 SYSCTL_INT(_vfs, OID_AUTO, buffreekvacnt, CTLFLAG_RW, &buffreekvacnt, 0,
  138     "Number of times we have freed the KVA space from some buffer");
  139 static int bufdefragcnt;
  140 SYSCTL_INT(_vfs, OID_AUTO, bufdefragcnt, CTLFLAG_RW, &bufdefragcnt, 0,
  141     "Number of times we have had to repeat buffer allocation to defragment");
  142 static int lorunningspace;
  143 SYSCTL_INT(_vfs, OID_AUTO, lorunningspace, CTLFLAG_RW, &lorunningspace, 0,
  144     "Minimum preferred space used for in-progress I/O");
  145 static int hirunningspace;
  146 SYSCTL_INT(_vfs, OID_AUTO, hirunningspace, CTLFLAG_RW, &hirunningspace, 0,
  147     "Maximum amount of space to use for in-progress I/O");
  148 static int dirtybufferflushes;
  149 SYSCTL_INT(_vfs, OID_AUTO, dirtybufferflushes, CTLFLAG_RW, &dirtybufferflushes,
  150     0, "Number of bdwrite to bawrite conversions to limit dirty buffers");
  151 static int altbufferflushes;
  152 SYSCTL_INT(_vfs, OID_AUTO, altbufferflushes, CTLFLAG_RW, &altbufferflushes,
  153     0, "Number of fsync flushes to limit dirty buffers");
  154 static int recursiveflushes;
  155 SYSCTL_INT(_vfs, OID_AUTO, recursiveflushes, CTLFLAG_RW, &recursiveflushes,
  156     0, "Number of flushes skipped due to being recursive");
  157 static int numdirtybuffers;
  158 SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD, &numdirtybuffers, 0,
  159     "Number of buffers that are dirty (has unwritten changes) at the moment");
  160 static int lodirtybuffers;
  161 SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW, &lodirtybuffers, 0,
  162     "How many buffers we want to have free before bufdaemon can sleep");
  163 static int hidirtybuffers;
  164 SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW, &hidirtybuffers, 0,
  165     "When the number of dirty buffers is considered severe");
  166 static int dirtybufthresh;
  167 SYSCTL_INT(_vfs, OID_AUTO, dirtybufthresh, CTLFLAG_RW, &dirtybufthresh,
  168     0, "Number of bdwrite to bawrite conversions to clear dirty buffers");
  169 static int numfreebuffers;
  170 SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD, &numfreebuffers, 0,
  171     "Number of free buffers");
  172 static int lofreebuffers;
  173 SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW, &lofreebuffers, 0,
  174    "XXX Unused");
  175 static int hifreebuffers;
  176 SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW, &hifreebuffers, 0,
  177    "XXX Complicatedly unused");
  178 static int getnewbufcalls;
  179 SYSCTL_INT(_vfs, OID_AUTO, getnewbufcalls, CTLFLAG_RW, &getnewbufcalls, 0,
  180    "Number of calls to getnewbuf");
  181 static int getnewbufrestarts;
  182 SYSCTL_INT(_vfs, OID_AUTO, getnewbufrestarts, CTLFLAG_RW, &getnewbufrestarts, 0,
  183     "Number of times getnewbuf has had to restart a buffer aquisition");
  184 
  185 /*
  186  * Wakeup point for bufdaemon, as well as indicator of whether it is already
  187  * active.  Set to 1 when the bufdaemon is already "on" the queue, 0 when it
  188  * is idling.
  189  */
  190 static int bd_request;
  191 
  192 /*
  193  * This lock synchronizes access to bd_request.
  194  */
  195 static struct mtx bdlock;
  196 
  197 /*
  198  * bogus page -- for I/O to/from partially complete buffers
  199  * this is a temporary solution to the problem, but it is not
  200  * really that bad.  it would be better to split the buffer
  201  * for input in the case of buffers partially already in memory,
  202  * but the code is intricate enough already.
  203  */
  204 vm_page_t bogus_page;
  205 
  206 /*
  207  * Synchronization (sleep/wakeup) variable for active buffer space requests.
  208  * Set when wait starts, cleared prior to wakeup().
  209  * Used in runningbufwakeup() and waitrunningbufspace().
  210  */
  211 static int runningbufreq;
  212 
  213 /*
  214  * This lock protects the runningbufreq and synchronizes runningbufwakeup and
  215  * waitrunningbufspace().
  216  */
  217 static struct mtx rbreqlock;
  218 
  219 /* 
  220  * Synchronization (sleep/wakeup) variable for buffer requests.
  221  * Can contain the VFS_BIO_NEED flags defined below; setting/clearing is done
  222  * by and/or.
  223  * Used in numdirtywakeup(), bufspacewakeup(), bufcountwakeup(), bwillwrite(),
  224  * getnewbuf(), and getblk().
  225  */
  226 static int needsbuffer;
  227 
  228 /*
  229  * Lock that protects needsbuffer and the sleeps/wakeups surrounding it.
  230  */
  231 static struct mtx nblock;
  232 
  233 /*
  234  * Lock that protects against bwait()/bdone()/B_DONE races.
  235  */
  236 
  237 static struct mtx bdonelock;
  238 
  239 /*
  240  * Definitions for the buffer free lists.
  241  */
  242 #define BUFFER_QUEUES   5       /* number of free buffer queues */
  243 
  244 #define QUEUE_NONE      0       /* on no queue */
  245 #define QUEUE_CLEAN     1       /* non-B_DELWRI buffers */
  246 #define QUEUE_DIRTY     2       /* B_DELWRI buffers */
  247 #define QUEUE_EMPTYKVA  3       /* empty buffer headers w/KVA assignment */
  248 #define QUEUE_EMPTY     4       /* empty buffer headers */
  249 
  250 /* Queues for free buffers with various properties */
  251 static TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES] = { { 0 } };
  252 
  253 /* Lock for the bufqueues */
  254 static struct mtx bqlock;
  255 
  256 /*
  257  * Single global constant for BUF_WMESG, to avoid getting multiple references.
  258  * buf_wmesg is referred from macros.
  259  */
  260 const char *buf_wmesg = BUF_WMESG;
  261 
  262 #define VFS_BIO_NEED_ANY        0x01    /* any freeable buffer */
  263 #define VFS_BIO_NEED_DIRTYFLUSH 0x02    /* waiting for dirty buffer flush */
  264 #define VFS_BIO_NEED_FREE       0x04    /* wait for free bufs, hi hysteresis */
  265 #define VFS_BIO_NEED_BUFSPACE   0x08    /* wait for buf space, lo hysteresis */
  266 
  267 #ifdef DIRECTIO
  268 extern void ffs_rawread_setup(void);
  269 #endif /* DIRECTIO */
  270 /*
  271  *      numdirtywakeup:
  272  *
  273  *      If someone is blocked due to there being too many dirty buffers,
  274  *      and numdirtybuffers is now reasonable, wake them up.
  275  */
  276 
  277 static __inline void
  278 numdirtywakeup(int level)
  279 {
  280 
  281         if (numdirtybuffers <= level) {
  282                 mtx_lock(&nblock);
  283                 if (needsbuffer & VFS_BIO_NEED_DIRTYFLUSH) {
  284                         needsbuffer &= ~VFS_BIO_NEED_DIRTYFLUSH;
  285                         wakeup(&needsbuffer);
  286                 }
  287                 mtx_unlock(&nblock);
  288         }
  289 }
  290 
  291 /*
  292  *      bufspacewakeup:
  293  *
  294  *      Called when buffer space is potentially available for recovery.
  295  *      getnewbuf() will block on this flag when it is unable to free 
  296  *      sufficient buffer space.  Buffer space becomes recoverable when 
  297  *      bp's get placed back in the queues.
  298  */
  299 
  300 static __inline void
  301 bufspacewakeup(void)
  302 {
  303 
  304         /*
  305          * If someone is waiting for BUF space, wake them up.  Even
  306          * though we haven't freed the kva space yet, the waiting
  307          * process will be able to now.
  308          */
  309         mtx_lock(&nblock);
  310         if (needsbuffer & VFS_BIO_NEED_BUFSPACE) {
  311                 needsbuffer &= ~VFS_BIO_NEED_BUFSPACE;
  312                 wakeup(&needsbuffer);
  313         }
  314         mtx_unlock(&nblock);
  315 }
  316 
  317 /*
  318  * runningbufwakeup() - in-progress I/O accounting.
  319  *
  320  */
  321 void
  322 runningbufwakeup(struct buf *bp)
  323 {
  324 
  325         if (bp->b_runningbufspace) {
  326                 atomic_subtract_int(&runningbufspace, bp->b_runningbufspace);
  327                 bp->b_runningbufspace = 0;
  328                 mtx_lock(&rbreqlock);
  329                 if (runningbufreq && runningbufspace <= lorunningspace) {
  330                         runningbufreq = 0;
  331                         wakeup(&runningbufreq);
  332                 }
  333                 mtx_unlock(&rbreqlock);
  334         }
  335 }
  336 
  337 /*
  338  *      bufcountwakeup:
  339  *
  340  *      Called when a buffer has been added to one of the free queues to
  341  *      account for the buffer and to wakeup anyone waiting for free buffers.
  342  *      This typically occurs when large amounts of metadata are being handled
  343  *      by the buffer cache ( else buffer space runs out first, usually ).
  344  */
  345 
  346 static __inline void
  347 bufcountwakeup(void) 
  348 {
  349 
  350         atomic_add_int(&numfreebuffers, 1);
  351         mtx_lock(&nblock);
  352         if (needsbuffer) {
  353                 needsbuffer &= ~VFS_BIO_NEED_ANY;
  354                 if (numfreebuffers >= hifreebuffers)
  355                         needsbuffer &= ~VFS_BIO_NEED_FREE;
  356                 wakeup(&needsbuffer);
  357         }
  358         mtx_unlock(&nblock);
  359 }
  360 
  361 /*
  362  *      waitrunningbufspace()
  363  *
  364  *      runningbufspace is a measure of the amount of I/O currently
  365  *      running.  This routine is used in async-write situations to
  366  *      prevent creating huge backups of pending writes to a device.
  367  *      Only asynchronous writes are governed by this function.
  368  *
  369  *      Reads will adjust runningbufspace, but will not block based on it.
  370  *      The read load has a side effect of reducing the allowed write load.
  371  *
  372  *      This does NOT turn an async write into a sync write.  It waits  
  373  *      for earlier writes to complete and generally returns before the
  374  *      caller's write has reached the device.
  375  */
  376 void
  377 waitrunningbufspace(void)
  378 {
  379 
  380         mtx_lock(&rbreqlock);
  381         while (runningbufspace > hirunningspace) {
  382                 ++runningbufreq;
  383                 msleep(&runningbufreq, &rbreqlock, PVM, "wdrain", 0);
  384         }
  385         mtx_unlock(&rbreqlock);
  386 }
  387 
  388 
  389 /*
  390  *      vfs_buf_test_cache:
  391  *
  392  *      Called when a buffer is extended.  This function clears the B_CACHE
  393  *      bit if the newly extended portion of the buffer does not contain
  394  *      valid data.
  395  */
  396 static __inline
  397 void
  398 vfs_buf_test_cache(struct buf *bp,
  399                   vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
  400                   vm_page_t m)
  401 {
  402 
  403         VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
  404         if (bp->b_flags & B_CACHE) {
  405                 int base = (foff + off) & PAGE_MASK;
  406                 if (vm_page_is_valid(m, base, size) == 0)
  407                         bp->b_flags &= ~B_CACHE;
  408         }
  409 }
  410 
   411 /* Wake up the buffer daemon if necessary */
  412 static __inline
  413 void
  414 bd_wakeup(int dirtybuflevel)
  415 {
  416 
  417         mtx_lock(&bdlock);
  418         if (bd_request == 0 && numdirtybuffers >= dirtybuflevel) {
  419                 bd_request = 1;
  420                 wakeup(&bd_request);
  421         }
  422         mtx_unlock(&bdlock);
  423 }
  424 
  425 /*
  426  * bd_speedup - speedup the buffer cache flushing code
  427  */
  428 
  429 static __inline
  430 void
  431 bd_speedup(void)
  432 {
  433 
  434         bd_wakeup(1);
  435 }
  436 
  437 /*
   438  * Calculate buffer cache scaling values and reserve space for buffer
  439  * headers.  This is called during low level kernel initialization and
   440  * may be called more than once.  We CANNOT write to the memory area
  441  * being reserved at this time.
  442  */
  443 caddr_t
  444 kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est)
  445 {
  446 
  447         /*
  448          * physmem_est is in pages.  Convert it to kilobytes (assumes
  449          * PAGE_SIZE is >= 1K)
  450          */
  451         physmem_est = physmem_est * (PAGE_SIZE / 1024);
  452 
  453         /*
  454          * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
  455          * For the first 64MB of ram nominally allocate sufficient buffers to
  456          * cover 1/4 of our ram.  Beyond the first 64MB allocate additional
  457          * buffers to cover 1/20 of our ram over 64MB.  When auto-sizing
  458          * the buffer cache we limit the eventual kva reservation to
  459          * maxbcache bytes.
  460          *
  461          * factor represents the 1/4 x ram conversion.
  462          */
  463         if (nbuf == 0) {
  464                 int factor = 4 * BKVASIZE / 1024;
  465 
  466                 nbuf = 50;
  467                 if (physmem_est > 4096)
  468                         nbuf += min((physmem_est - 4096) / factor,
  469                             65536 / factor);
  470                 if (physmem_est > 65536)
  471                         nbuf += (physmem_est - 65536) * 2 / (factor * 5);
  472 
  473                 if (maxbcache && nbuf > maxbcache / BKVASIZE)
  474                         nbuf = maxbcache / BKVASIZE;
  475         }
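        /*
         * Illustrative arithmetic only (a sketch, not compiled code): assuming
         * BKVASIZE is 16 KB, factor works out to 4 * 16384 / 1024 = 64.  On a
         * machine where physmem_est comes to 512 MB (524288 KB), the
         * autosizing above would give roughly:
         *
         *      nbuf = 50
         *           + min((524288 - 4096) / 64, 65536 / 64)   (= 1024)
         *           + (524288 - 65536) * 2 / (64 * 5)         (= 2867)
         *           = 3941 buffers, about 62 MB of buffer KVA.
         */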
  476 
  477 #if 0
  478         /*
   479          * Do not allow the buffer_map to be more than 1/2 the size of the
  480          * kernel_map.
  481          */
  482         if (nbuf > (kernel_map->max_offset - kernel_map->min_offset) / 
  483             (BKVASIZE * 2)) {
  484                 nbuf = (kernel_map->max_offset - kernel_map->min_offset) / 
  485                     (BKVASIZE * 2);
  486                 printf("Warning: nbufs capped at %d\n", nbuf);
  487         }
  488 #endif
  489 
  490         /*
  491          * swbufs are used as temporary holders for I/O, such as paging I/O.
   492          * We have no fewer than 16 and no more than 256.
  493          */
  494         nswbuf = max(min(nbuf/4, 256), 16);
  495 #ifdef NSWBUF_MIN
  496         if (nswbuf < NSWBUF_MIN)
  497                 nswbuf = NSWBUF_MIN;
  498 #endif
  499 #ifdef DIRECTIO
  500         ffs_rawread_setup();
  501 #endif
  502 
  503         /*
  504          * Reserve space for the buffer cache buffers
  505          */
  506         swbuf = (void *)v;
  507         v = (caddr_t)(swbuf + nswbuf);
  508         buf = (void *)v;
  509         v = (caddr_t)(buf + nbuf);
  510 
  511         return(v);
  512 }
  513 
  514 /* Initialize the buffer subsystem.  Called before use of any buffers. */
  515 void
  516 bufinit(void)
  517 {
  518         struct buf *bp;
  519         int i;
  520 
  521         mtx_init(&bqlock, "buf queue lock", NULL, MTX_DEF);
  522         mtx_init(&rbreqlock, "runningbufspace lock", NULL, MTX_DEF);
  523         mtx_init(&nblock, "needsbuffer lock", NULL, MTX_DEF);
  524         mtx_init(&bdlock, "buffer daemon lock", NULL, MTX_DEF);
  525         mtx_init(&bdonelock, "bdone lock", NULL, MTX_DEF);
  526 
  527         /* next, make a null set of free lists */
  528         for (i = 0; i < BUFFER_QUEUES; i++)
  529                 TAILQ_INIT(&bufqueues[i]);
  530 
  531         /* finally, initialize each buffer header and stick on empty q */
  532         for (i = 0; i < nbuf; i++) {
  533                 bp = &buf[i];
  534                 bzero(bp, sizeof *bp);
  535                 bp->b_flags = B_INVAL;  /* we're just an empty header */
  536                 bp->b_rcred = NOCRED;
  537                 bp->b_wcred = NOCRED;
  538                 bp->b_qindex = QUEUE_EMPTY;
  539                 bp->b_vflags = 0;
  540                 bp->b_xflags = 0;
  541                 LIST_INIT(&bp->b_dep);
  542                 BUF_LOCKINIT(bp);
  543                 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
  544         }
  545 
  546         /*
  547          * maxbufspace is the absolute maximum amount of buffer space we are 
  548          * allowed to reserve in KVM and in real terms.  The absolute maximum
  549          * is nominally used by buf_daemon.  hibufspace is the nominal maximum
  550          * used by most other processes.  The differential is required to 
  551          * ensure that buf_daemon is able to run when other processes might 
  552          * be blocked waiting for buffer space.
  553          *
   554          * maxbufspace is based on BKVASIZE.  Allocating buffers larger than
  555          * this may result in KVM fragmentation which is not handled optimally
  556          * by the system.
  557          */
  558         maxbufspace = nbuf * BKVASIZE;
  559         hibufspace = imax(3 * maxbufspace / 4, maxbufspace - MAXBSIZE * 10);
  560         lobufspace = hibufspace - MAXBSIZE;
  561 
  562         lorunningspace = 512 * 1024;
  563         hirunningspace = 1024 * 1024;
  564 
  565 /*
  566  * Limit the amount of malloc memory since it is wired permanently into
  567  * the kernel space.  Even though this is accounted for in the buffer
  568  * allocation, we don't want the malloced region to grow uncontrolled.
   569  * The malloc scheme improves memory utilization significantly for average
  570  * (small) directories.
  571  */
  572         maxbufmallocspace = hibufspace / 20;
  573 
  574 /*
   575  * Reduce the chance of a deadlock occurring by limiting the number
  576  * of delayed-write dirty buffers we allow to stack up.
  577  */
  578         hidirtybuffers = nbuf / 4 + 20;
  579         dirtybufthresh = hidirtybuffers * 9 / 10;
  580         numdirtybuffers = 0;
  581 /*
  582  * To support extreme low-memory systems, make sure hidirtybuffers cannot
  583  * eat up all available buffer space.  This occurs when our minimum cannot
  584  * be met.  We try to size hidirtybuffers to 3/4 our buffer space assuming
  585  * BKVASIZE'd (8K) buffers.
  586  */
  587         while (hidirtybuffers * BKVASIZE > 3 * hibufspace / 4) {
  588                 hidirtybuffers >>= 1;
  589         }
  590         lodirtybuffers = hidirtybuffers / 2;
  591 
  592 /*
  593  * Try to keep the number of free buffers in the specified range,
   594  * and give special processes (e.g., buf_daemon) access to an
  595  * emergency reserve.
  596  */
  597         lofreebuffers = nbuf / 18 + 5;
  598         hifreebuffers = 2 * lofreebuffers;
  599         numfreebuffers = nbuf;
  600 
  601 /*
  602  * Maximum number of async ops initiated per buf_daemon loop.  This is
   603  * somewhat of a hack at the moment; we really need to limit ourselves
  604  * based on the number of bytes of I/O in-transit that were initiated
  605  * from buf_daemon.
  606  */
  607 
  608         bogus_page = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ |
  609             VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
  610 }
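/*
 * Continuing the illustrative 512 MB example above (a sketch, assuming
 * BKVASIZE is 16 KB and MAXBSIZE is 64 KB), bufinit() would arrive at
 * approximately:
 *
 *      maxbufspace    = 3941 * 16384              ~= 62 MB
 *      hibufspace     = maxbufspace - 10 * 65536  ~= 61 MB
 *      lobufspace     = hibufspace - 65536
 *      hidirtybuffers = 3941 / 4 + 20             =  1005
 *      lodirtybuffers = 1005 / 2                  =   502
 */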
  611 
  612 /*
  613  * bfreekva() - free the kva allocation for a buffer.
  614  *
  615  *      Since this call frees up buffer space, we call bufspacewakeup().
  616  */
  617 static void
  618 bfreekva(struct buf *bp)
  619 {
  620 
  621         if (bp->b_kvasize) {
  622                 atomic_add_int(&buffreekvacnt, 1);
  623                 atomic_subtract_int(&bufspace, bp->b_kvasize);
  624                 vm_map_lock(buffer_map);
  625                 vm_map_delete(buffer_map,
  626                     (vm_offset_t) bp->b_kvabase,
  627                     (vm_offset_t) bp->b_kvabase + bp->b_kvasize
  628                 );
  629                 vm_map_unlock(buffer_map);
  630                 bp->b_kvasize = 0;
  631                 bufspacewakeup();
  632         }
  633 }
  634 
  635 /*
  636  *      bremfree:
  637  *
  638  *      Mark the buffer for removal from the appropriate free list in brelse.
  639  *      
  640  */
  641 void
  642 bremfree(struct buf *bp)
  643 {
  644 
  645         CTR3(KTR_BUF, "bremfree(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
  646         KASSERT(BUF_REFCNT(bp), ("bremfree: buf must be locked."));
  647         KASSERT((bp->b_flags & B_REMFREE) == 0,
  648             ("bremfree: buffer %p already marked for delayed removal.", bp));
  649         KASSERT(bp->b_qindex != QUEUE_NONE,
  650             ("bremfree: buffer %p not on a queue.", bp));
  651 
  652         bp->b_flags |= B_REMFREE;
  653         /* Fixup numfreebuffers count.  */
  654         if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0)
  655                 atomic_subtract_int(&numfreebuffers, 1);
  656 }
  657 
  658 /*
  659  *      bremfreef:
  660  *
   661  *      Force an immediate removal from a free list.  Used only in NFS when
  662  *      it abuses the b_freelist pointer.
  663  */
  664 void
  665 bremfreef(struct buf *bp)
  666 {
  667         mtx_lock(&bqlock);
  668         bremfreel(bp);
  669         mtx_unlock(&bqlock);
  670 }
  671 
  672 /*
  673  *      bremfreel:
  674  *
  675  *      Removes a buffer from the free list, must be called with the
  676  *      bqlock held.
  677  */
  678 static void
  679 bremfreel(struct buf *bp)
  680 {
  681         CTR3(KTR_BUF, "bremfreel(%p) vp %p flags %X",
  682             bp, bp->b_vp, bp->b_flags);
  683         KASSERT(BUF_REFCNT(bp), ("bremfreel: buffer %p not locked.", bp));
  684         KASSERT(bp->b_qindex != QUEUE_NONE,
  685             ("bremfreel: buffer %p not on a queue.", bp));
  686         mtx_assert(&bqlock, MA_OWNED);
  687 
  688         TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
  689         bp->b_qindex = QUEUE_NONE;
  690         /*
   691          * If this was a delayed bremfree(), we only need to remove the
   692          * buffer from the queue and return; the stats are already done.
  693          */
  694         if (bp->b_flags & B_REMFREE) {
  695                 bp->b_flags &= ~B_REMFREE;
  696                 return;
  697         }
  698         /*
  699          * Fixup numfreebuffers count.  If the buffer is invalid or not
  700          * delayed-write, the buffer was free and we must decrement
  701          * numfreebuffers.
  702          */
  703         if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0)
  704                 atomic_subtract_int(&numfreebuffers, 1);
  705 }
  706 
  707 
  708 /*
  709  * Get a buffer with the specified data.  Look in the cache first.  We
  710  * must clear BIO_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE
  711  * is set, the buffer is valid and we do not have to do anything ( see
  712  * getblk() ).  This is really just a special case of breadn().
  713  */
  714 int
  715 bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
  716     struct buf **bpp)
  717 {
  718 
  719         return (breadn(vp, blkno, size, 0, 0, 0, cred, bpp));
  720 }
  721 
  722 /*
  723  * Operates like bread, but also starts asynchronous I/O on
  724  * read-ahead blocks.  We must clear BIO_ERROR and B_INVAL prior
   725  * to initiating I/O.  If B_CACHE is set, the buffer is valid
  726  * and we do not have to do anything.
  727  */
  728 int
  729 breadn(struct vnode * vp, daddr_t blkno, int size,
  730     daddr_t * rablkno, int *rabsize,
  731     int cnt, struct ucred * cred, struct buf **bpp)
  732 {
  733         struct buf *bp, *rabp;
  734         int i;
  735         int rv = 0, readwait = 0;
  736 
  737         CTR3(KTR_BUF, "breadn(%p, %jd, %d)", vp, blkno, size);
  738         *bpp = bp = getblk(vp, blkno, size, 0, 0, 0);
  739 
  740         /* if not found in cache, do some I/O */
  741         if ((bp->b_flags & B_CACHE) == 0) {
  742                 if (curthread != PCPU_GET(idlethread))
  743                         curthread->td_proc->p_stats->p_ru.ru_inblock++;
  744                 bp->b_iocmd = BIO_READ;
  745                 bp->b_flags &= ~B_INVAL;
  746                 bp->b_ioflags &= ~BIO_ERROR;
  747                 if (bp->b_rcred == NOCRED && cred != NOCRED)
  748                         bp->b_rcred = crhold(cred);
  749                 vfs_busy_pages(bp, 0);
  750                 bp->b_iooffset = dbtob(bp->b_blkno);
  751                 bstrategy(bp);
  752                 ++readwait;
  753         }
  754 
  755         for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
  756                 if (inmem(vp, *rablkno))
  757                         continue;
  758                 rabp = getblk(vp, *rablkno, *rabsize, 0, 0, 0);
  759 
  760                 if ((rabp->b_flags & B_CACHE) == 0) {
  761                         if (curthread != PCPU_GET(idlethread))
  762                                 curthread->td_proc->p_stats->p_ru.ru_inblock++;
  763                         rabp->b_flags |= B_ASYNC;
  764                         rabp->b_flags &= ~B_INVAL;
  765                         rabp->b_ioflags &= ~BIO_ERROR;
  766                         rabp->b_iocmd = BIO_READ;
  767                         if (rabp->b_rcred == NOCRED && cred != NOCRED)
  768                                 rabp->b_rcred = crhold(cred);
  769                         vfs_busy_pages(rabp, 0);
  770                         BUF_KERNPROC(rabp);
  771                         rabp->b_iooffset = dbtob(rabp->b_blkno);
  772                         bstrategy(rabp);
  773                 } else {
  774                         brelse(rabp);
  775                 }
  776         }
  777 
  778         if (readwait) {
  779                 rv = bufwait(bp);
  780         }
  781         return (rv);
  782 }
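/*
 * Usage sketch (illustrative only; vp, lblkno, bsize and dst stand in for
 * values taken from the caller's context): a filesystem typically pulls a
 * block in through bread(), consumes b_data, and then releases the buffer,
 * using bqrelse() when the data is likely to be needed again soon and
 * brelse() otherwise:
 *
 *      struct buf *bp;
 *      int error;
 *
 *      error = bread(vp, lblkno, bsize, NOCRED, &bp);
 *      if (error) {
 *              brelse(bp);
 *              return (error);
 *      }
 *      bcopy(bp->b_data, dst, bsize);
 *      bqrelse(bp);
 */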
  783 
  784 /*
  785  * Write, release buffer on completion.  (Done by iodone
  786  * if async).  Do not bother writing anything if the buffer
  787  * is invalid.
  788  *
  789  * Note that we set B_CACHE here, indicating that buffer is
  790  * fully valid and thus cacheable.  This is true even of NFS
  791  * now so we set it generally.  This could be set either here 
  792  * or in biodone() since the I/O is synchronous.  We put it
  793  * here.
  794  */
  795 int
  796 bufwrite(struct buf *bp)
  797 {
  798         int oldflags;
  799 
  800         CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
  801         if (bp->b_flags & B_INVAL) {
  802                 brelse(bp);
  803                 return (0);
  804         }
  805 
  806         oldflags = bp->b_flags;
  807 
  808         if (BUF_REFCNT(bp) == 0)
  809                 panic("bufwrite: buffer is not busy???");
  810         KASSERT(!(bp->b_vflags & BV_BKGRDINPROG),
  811             ("FFS background buffer should not get here %p", bp));
  812 
  813         /* Mark the buffer clean */
  814         bundirty(bp);
  815 
  816         bp->b_flags &= ~B_DONE;
  817         bp->b_ioflags &= ~BIO_ERROR;
  818         bp->b_flags |= B_CACHE;
  819         bp->b_iocmd = BIO_WRITE;
  820 
  821         bufobj_wref(bp->b_bufobj);
  822         vfs_busy_pages(bp, 1);
  823 
  824         /*
  825          * Normal bwrites pipeline writes
  826          */
  827         bp->b_runningbufspace = bp->b_bufsize;
  828         atomic_add_int(&runningbufspace, bp->b_runningbufspace);
  829 
  830         if (curthread != PCPU_GET(idlethread))
  831                 curthread->td_proc->p_stats->p_ru.ru_oublock++;
  832         if (oldflags & B_ASYNC)
  833                 BUF_KERNPROC(bp);
  834         bp->b_iooffset = dbtob(bp->b_blkno);
  835         bstrategy(bp);
  836 
  837         if ((oldflags & B_ASYNC) == 0) {
  838                 int rtval = bufwait(bp);
  839                 brelse(bp);
  840                 return (rtval);
  841         } else {
  842                 /*
  843                  * don't allow the async write to saturate the I/O
  844                  * system.  We will not deadlock here because
  845                  * we are blocking waiting for I/O that is already in-progress
  846                  * to complete. We do not block here if it is the update
  847                  * or syncer daemon trying to clean up as that can lead
  848                  * to deadlock.
  849                  */
  850                 if ((curthread->td_pflags & TDP_NORUNNINGBUF) == 0)
  851                         waitrunningbufspace();
  852         }
  853 
  854         return (0);
  855 }
  856 
  857 /*
  858  * Delayed write. (Buffer is marked dirty).  Do not bother writing
  859  * anything if the buffer is marked invalid.
  860  *
  861  * Note that since the buffer must be completely valid, we can safely
   862  * set B_CACHE.  In fact, we have to set B_CACHE here rather than in
  863  * biodone() in order to prevent getblk from writing the buffer
  864  * out synchronously.
  865  */
  866 void
  867 bdwrite(struct buf *bp)
  868 {
  869         struct thread *td = curthread;
  870         struct vnode *vp;
  871         struct buf *nbp;
  872         struct bufobj *bo;
  873 
  874         CTR3(KTR_BUF, "bdwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
  875         KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
  876         KASSERT(BUF_REFCNT(bp) != 0, ("bdwrite: buffer is not busy"));
  877 
  878         if (bp->b_flags & B_INVAL) {
  879                 brelse(bp);
  880                 return;
  881         }
  882 
  883         /*
  884          * If we have too many dirty buffers, don't create any more.
  885          * If we are wildly over our limit, then force a complete
  886          * cleanup. Otherwise, just keep the situation from getting
  887          * out of control. Note that we have to avoid a recursive
  888          * disaster and not try to clean up after our own cleanup!
  889          */
  890         vp = bp->b_vp;
  891         bo = bp->b_bufobj;
  892         if ((td->td_pflags & TDP_COWINPROGRESS) == 0) {
  893                 BO_LOCK(bo);
  894                 if (bo->bo_dirty.bv_cnt > dirtybufthresh + 10) {
  895                         BO_UNLOCK(bo);
  896                         (void) VOP_FSYNC(vp, MNT_NOWAIT, td);
  897                         altbufferflushes++;
  898                 } else if (bo->bo_dirty.bv_cnt > dirtybufthresh) {
  899                         /*
  900                          * Try to find a buffer to flush.
  901                          */
  902                         TAILQ_FOREACH(nbp, &bo->bo_dirty.bv_hd, b_bobufs) {
  903                                 if ((nbp->b_vflags & BV_BKGRDINPROG) ||
  904                                     BUF_LOCK(nbp,
  905                                     LK_EXCLUSIVE | LK_NOWAIT, NULL))
  906                                         continue;
  907                                 if (bp == nbp)
  908                                         panic("bdwrite: found ourselves");
  909                                 BO_UNLOCK(bo);
  910                                 /* Don't countdeps with the bo lock held. */
  911                                 if (buf_countdeps(nbp, 0)) {
  912                                         BO_LOCK(bo);
  913                                         BUF_UNLOCK(nbp);
  914                                         continue;
  915                                 }
  916                                 if (nbp->b_flags & B_CLUSTEROK) {
  917                                         vfs_bio_awrite(nbp);
  918                                 } else {
  919                                         bremfree(nbp);
  920                                         bawrite(nbp);
  921                                 }
  922                                 dirtybufferflushes++;
  923                                 break;
  924                         }
  925                         if (nbp == NULL)
  926                                 BO_UNLOCK(bo);
  927                 } else
  928                         BO_UNLOCK(bo);
  929         } else
  930                 recursiveflushes++;
  931 
  932         bdirty(bp);
  933         /*
  934          * Set B_CACHE, indicating that the buffer is fully valid.  This is
  935          * true even of NFS now.
  936          */
  937         bp->b_flags |= B_CACHE;
  938 
  939         /*
   940          * This bmap keeps the system from needing to do the bmap later,
   941          * perhaps when the system is attempting to do a sync.  Since it
   942          * is likely that the indirect block -- or whatever other data
   943          * structure the filesystem needs -- is still in memory now, it is
   944          * a good thing to do this.  Note also that if the pageout daemon
   945          * is requesting a sync, there might not be enough memory to do
   946          * the bmap then...  So, this is important to do.
  947          */
  948         if (vp->v_type != VCHR && bp->b_lblkno == bp->b_blkno) {
  949                 VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
  950         }
  951 
  952         /*
  953          * Set the *dirty* buffer range based upon the VM system dirty pages.
  954          */
  955         vfs_setdirty(bp);
  956 
  957         /*
  958          * We need to do this here to satisfy the vnode_pager and the
  959          * pageout daemon, so that it thinks that the pages have been
  960          * "cleaned".  Note that since the pages are in a delayed write
  961          * buffer -- the VFS layer "will" see that the pages get written
  962          * out on the next sync, or perhaps the cluster will be completed.
  963          */
  964         vfs_clean_pages(bp);
  965         bqrelse(bp);
  966 
  967         /*
  968          * Wakeup the buffer flushing daemon if we have a lot of dirty
  969          * buffers (midpoint between our recovery point and our stall
  970          * point).
  971          */
  972         bd_wakeup((lodirtybuffers + hidirtybuffers) / 2);
  973 
  974         /*
  975          * note: we cannot initiate I/O from a bdwrite even if we wanted to,
  976          * due to the softdep code.
  977          */
  978 }
  979 
  980 /*
  981  *      bdirty:
  982  *
  983  *      Turn buffer into delayed write request.  We must clear BIO_READ and
  984  *      B_RELBUF, and we must set B_DELWRI.  We reassign the buffer to 
  985  *      itself to properly update it in the dirty/clean lists.  We mark it
  986  *      B_DONE to ensure that any asynchronization of the buffer properly
  987  *      clears B_DONE ( else a panic will occur later ).  
  988  *
  989  *      bdirty() is kinda like bdwrite() - we have to clear B_INVAL which
  990  *      might have been set pre-getblk().  Unlike bwrite/bdwrite, bdirty()
  991  *      should only be called if the buffer is known-good.
  992  *
  993  *      Since the buffer is not on a queue, we do not update the numfreebuffers
  994  *      count.
  995  *
  996  *      The buffer must be on QUEUE_NONE.
  997  */
  998 void
  999 bdirty(struct buf *bp)
 1000 {
 1001 
 1002         CTR3(KTR_BUF, "bdirty(%p) vp %p flags %X",
 1003             bp, bp->b_vp, bp->b_flags);
 1004         KASSERT(BUF_REFCNT(bp) == 1, ("bdirty: bp %p not locked",bp));
 1005         KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
 1006         KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
 1007             ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
 1008         bp->b_flags &= ~(B_RELBUF);
 1009         bp->b_iocmd = BIO_WRITE;
 1010 
 1011         if ((bp->b_flags & B_DELWRI) == 0) {
 1012                 bp->b_flags |= /* XXX B_DONE | */ B_DELWRI;
 1013                 reassignbuf(bp);
 1014                 atomic_add_int(&numdirtybuffers, 1);
 1015                 bd_wakeup((lodirtybuffers + hidirtybuffers) / 2);
 1016         }
 1017 }
 1018 
 1019 /*
 1020  *      bundirty:
 1021  *
 1022  *      Clear B_DELWRI for buffer.
 1023  *
 1024  *      Since the buffer is not on a queue, we do not update the numfreebuffers
 1025  *      count.
 1026  *      
 1027  *      The buffer must be on QUEUE_NONE.
 1028  */
 1029 
 1030 void
 1031 bundirty(struct buf *bp)
 1032 {
 1033 
 1034         CTR3(KTR_BUF, "bundirty(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
 1035         KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
 1036         KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
 1037             ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex));
 1038         KASSERT(BUF_REFCNT(bp) == 1, ("bundirty: bp %p not locked",bp));
 1039 
 1040         if (bp->b_flags & B_DELWRI) {
 1041                 bp->b_flags &= ~B_DELWRI;
 1042                 reassignbuf(bp);
 1043                 atomic_subtract_int(&numdirtybuffers, 1);
 1044                 numdirtywakeup(lodirtybuffers);
 1045         }
 1046         /*
 1047          * Since it is now being written, we can clear its deferred write flag.
 1048          */
 1049         bp->b_flags &= ~B_DEFERRED;
 1050 }
 1051 
 1052 /*
 1053  *      bawrite:
 1054  *
 1055  *      Asynchronous write.  Start output on a buffer, but do not wait for
 1056  *      it to complete.  The buffer is released when the output completes.
 1057  *
 1058  *      bwrite() ( or the VOP routine anyway ) is responsible for handling 
 1059  *      B_INVAL buffers.  Not us.
 1060  */
 1061 void
 1062 bawrite(struct buf *bp)
 1063 {
 1064 
 1065         bp->b_flags |= B_ASYNC;
 1066         (void) bwrite(bp);
 1067 }
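/*
 * Usage sketch (illustrative only; the condition names are hypothetical):
 * after modifying the data in a locked buffer obtained from bread() or
 * getblk(), a caller picks one of the three write paths depending on how
 * urgently the data must reach the disk.  All three hand the buffer off;
 * the caller must not touch bp afterwards.
 *
 *      if (need_it_on_disk_now)
 *              error = bwrite(bp);     synchronous, waits for completion
 *      else if (want_io_started_now)
 *              bawrite(bp);            asynchronous, returns immediately
 *      else
 *              bdwrite(bp);            delayed write, only marked dirty
 */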
 1068 
 1069 /*
 1070  *      bwillwrite:
 1071  *
 1072  *      Called prior to the locking of any vnodes when we are expecting to
 1073  *      write.  We do not want to starve the buffer cache with too many
 1074  *      dirty buffers so we block here.  By blocking prior to the locking
 1075  *      of any vnodes we attempt to avoid the situation where a locked vnode
 1076  *      prevents the various system daemons from flushing related buffers.
 1077  */
 1078 
 1079 void
 1080 bwillwrite(void)
 1081 {
 1082 
 1083         if (numdirtybuffers >= hidirtybuffers) {
 1084                 mtx_lock(&nblock);
 1085                 while (numdirtybuffers >= hidirtybuffers) {
 1086                         bd_wakeup(1);
 1087                         needsbuffer |= VFS_BIO_NEED_DIRTYFLUSH;
 1088                         msleep(&needsbuffer, &nblock,
 1089                             (PRIBIO + 4), "flswai", 0);
 1090                 }
 1091                 mtx_unlock(&nblock);
 1092         }
 1093 }
 1094 
 1095 /*
 1096  * Return true if we have too many dirty buffers.
 1097  */
 1098 int
 1099 buf_dirty_count_severe(void)
 1100 {
 1101 
 1102         return(numdirtybuffers >= hidirtybuffers);
 1103 }
 1104 
 1105 /*
 1106  *      brelse:
 1107  *
 1108  *      Release a busy buffer and, if requested, free its resources.  The
 1109  *      buffer will be stashed in the appropriate bufqueue[] allowing it
 1110  *      to be accessed later as a cache entity or reused for other purposes.
 1111  */
 1112 void
 1113 brelse(struct buf *bp)
 1114 {
 1115         CTR3(KTR_BUF, "brelse(%p) vp %p flags %X",
 1116             bp, bp->b_vp, bp->b_flags);
 1117         KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
 1118             ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
 1119 
 1120         if (bp->b_iocmd == BIO_WRITE &&
 1121             (bp->b_ioflags & BIO_ERROR) &&
 1122             !(bp->b_flags & B_INVAL)) {
 1123                 /*
 1124                  * Failed write, redirty.  Must clear BIO_ERROR to prevent
 1125                  * pages from being scrapped.  If B_INVAL is set then
 1126                  * this case is not run and the next case is run to 
 1127                  * destroy the buffer.  B_INVAL can occur if the buffer
 1128                  * is outside the range supported by the underlying device.
 1129                  */
 1130                 bp->b_ioflags &= ~BIO_ERROR;
 1131                 bdirty(bp);
 1132         } else if ((bp->b_flags & (B_NOCACHE | B_INVAL)) ||
 1133             (bp->b_ioflags & BIO_ERROR) || (bp->b_bufsize <= 0)) {
 1134                 /*
 1135                  * Either a failed I/O or we were asked to free or not
 1136                  * cache the buffer.
 1137                  */
 1138                 bp->b_flags |= B_INVAL;
 1139                 if (LIST_FIRST(&bp->b_dep) != NULL)
 1140                         buf_deallocate(bp);
 1141                 if (bp->b_flags & B_DELWRI) {
 1142                         atomic_subtract_int(&numdirtybuffers, 1);
 1143                         numdirtywakeup(lodirtybuffers);
 1144                 }
 1145                 bp->b_flags &= ~(B_DELWRI | B_CACHE);
 1146                 if ((bp->b_flags & B_VMIO) == 0) {
 1147                         if (bp->b_bufsize)
 1148                                 allocbuf(bp, 0);
 1149                         if (bp->b_vp)
 1150                                 brelvp(bp);
 1151                 }
 1152         }
 1153 
 1154         /*
 1155          * We must clear B_RELBUF if B_DELWRI is set.  If vfs_vmio_release() 
 1156          * is called with B_DELWRI set, the underlying pages may wind up
 1157          * getting freed causing a previous write (bdwrite()) to get 'lost'
 1158          * because pages associated with a B_DELWRI bp are marked clean.
 1159          * 
 1160          * We still allow the B_INVAL case to call vfs_vmio_release(), even
 1161          * if B_DELWRI is set.
 1162          *
 1163          * If B_DELWRI is not set we may have to set B_RELBUF if we are low
 1164          * on pages to return pages to the VM page queues.
 1165          */
 1166         if (bp->b_flags & B_DELWRI)
 1167                 bp->b_flags &= ~B_RELBUF;
 1168         else if (vm_page_count_severe()) {
 1169                 /*
 1170                  * XXX This lock may not be necessary since BKGRDINPROG
 1171                  * cannot be set while we hold the buf lock, it can only be
 1172                  * cleared if it is already pending.
 1173                  */
 1174                 if (bp->b_vp) {
 1175                         BO_LOCK(bp->b_bufobj);
 1176                         if (!(bp->b_vflags & BV_BKGRDINPROG))
 1177                                 bp->b_flags |= B_RELBUF;
 1178                         BO_UNLOCK(bp->b_bufobj);
 1179                 } else
 1180                         bp->b_flags |= B_RELBUF;
 1181         }
 1182 
 1183         /*
 1184          * VMIO buffer rundown.  It is not very necessary to keep a VMIO buffer
  1185  * constituted, not even NFS buffers now.  Two flags affect this.  If
 1186          * B_INVAL, the struct buf is invalidated but the VM object is kept
 1187          * around ( i.e. so it is trivial to reconstitute the buffer later ).
 1188          *
 1189          * If BIO_ERROR or B_NOCACHE is set, pages in the VM object will be
 1190          * invalidated.  BIO_ERROR cannot be set for a failed write unless the
 1191          * buffer is also B_INVAL because it hits the re-dirtying code above.
 1192          *
 1193          * Normally we can do this whether a buffer is B_DELWRI or not.  If
 1194          * the buffer is an NFS buffer, it is tracking piecemeal writes or
 1195          * the commit state and we cannot afford to lose the buffer. If the
 1196          * buffer has a background write in progress, we need to keep it
 1197          * around to prevent it from being reconstituted and starting a second
 1198          * background write.
 1199          */
 1200         if ((bp->b_flags & B_VMIO)
 1201             && !(bp->b_vp->v_mount != NULL &&
 1202                  (bp->b_vp->v_mount->mnt_vfc->vfc_flags & VFCF_NETWORK) != 0 &&
 1203                  !vn_isdisk(bp->b_vp, NULL) &&
 1204                  (bp->b_flags & B_DELWRI))
 1205             ) {
 1206 
 1207                 int i, j, resid;
 1208                 vm_page_t m;
 1209                 off_t foff;
 1210                 vm_pindex_t poff;
 1211                 vm_object_t obj;
 1212 
 1213                 obj = bp->b_bufobj->bo_object;
 1214 
 1215                 /*
 1216                  * Get the base offset and length of the buffer.  Note that 
 1217                  * in the VMIO case if the buffer block size is not
  1218                  * page-aligned then the b_data pointer may not be page-aligned.
 1219                  * But our b_pages[] array *IS* page aligned.
 1220                  *
  1221                  * block sizes less than DEV_BSIZE (usually 512) are not
 1222                  * supported due to the page granularity bits (m->valid,
 1223                  * m->dirty, etc...). 
 1224                  *
 1225                  * See man buf(9) for more information
 1226                  */
 1227                 resid = bp->b_bufsize;
 1228                 foff = bp->b_offset;
 1229                 VM_OBJECT_LOCK(obj);
 1230                 for (i = 0; i < bp->b_npages; i++) {
 1231                         int had_bogus = 0;
 1232 
 1233                         m = bp->b_pages[i];
 1234 
 1235                         /*
 1236                          * If we hit a bogus page, fixup *all* the bogus pages
 1237                          * now.
 1238                          */
 1239                         if (m == bogus_page) {
 1240                                 poff = OFF_TO_IDX(bp->b_offset);
 1241                                 had_bogus = 1;
 1242 
 1243                                 for (j = i; j < bp->b_npages; j++) {
 1244                                         vm_page_t mtmp;
 1245                                         mtmp = bp->b_pages[j];
 1246                                         if (mtmp == bogus_page) {
 1247                                                 mtmp = vm_page_lookup(obj, poff + j);
 1248                                                 if (!mtmp) {
 1249                                                         panic("brelse: page missing\n");
 1250                                                 }
 1251                                                 bp->b_pages[j] = mtmp;
 1252                                         }
 1253                                 }
 1254 
 1255                                 if ((bp->b_flags & B_INVAL) == 0) {
 1256                                         pmap_qenter(
 1257                                             trunc_page((vm_offset_t)bp->b_data),
 1258                                             bp->b_pages, bp->b_npages);
 1259                                 }
 1260                                 m = bp->b_pages[i];
 1261                         }
 1262                         if ((bp->b_flags & B_NOCACHE) ||
 1263                             (bp->b_ioflags & BIO_ERROR)) {
 1264                                 int poffset = foff & PAGE_MASK;
 1265                                 int presid = resid > (PAGE_SIZE - poffset) ?
 1266                                         (PAGE_SIZE - poffset) : resid;
 1267 
 1268                                 KASSERT(presid >= 0, ("brelse: extra page"));
 1269                                 vm_page_lock_queues();
 1270                                 vm_page_set_invalid(m, poffset, presid);
 1271                                 vm_page_unlock_queues();
 1272                                 if (had_bogus)
 1273                                         printf("avoided corruption bug in bogus_page/brelse code\n");
 1274                         }
 1275                         resid -= PAGE_SIZE - (foff & PAGE_MASK);
 1276                         foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
 1277                 }
 1278                 VM_OBJECT_UNLOCK(obj);
 1279                 if (bp->b_flags & (B_INVAL | B_RELBUF))
 1280                         vfs_vmio_release(bp);
 1281 
 1282         } else if (bp->b_flags & B_VMIO) {
 1283 
 1284                 if (bp->b_flags & (B_INVAL | B_RELBUF)) {
 1285                         vfs_vmio_release(bp);
 1286                 }
 1287 
 1288         }
 1289                         
 1290         if (BUF_REFCNT(bp) > 1) {
 1291                 /* do not release to free list */
 1292                 BUF_UNLOCK(bp);
 1293                 return;
 1294         }
 1295 
 1296         /* enqueue */
 1297         mtx_lock(&bqlock);
 1298         /* Handle delayed bremfree() processing. */
 1299         if (bp->b_flags & B_REMFREE)
 1300                 bremfreel(bp);
 1301         if (bp->b_qindex != QUEUE_NONE)
 1302                 panic("brelse: free buffer onto another queue???");
 1303 
 1304         /* buffers with no memory */
 1305         if (bp->b_bufsize == 0) {
 1306                 bp->b_flags |= B_INVAL;
 1307                 bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA);
 1308                 if (bp->b_vflags & BV_BKGRDINPROG)
 1309                         panic("losing buffer 1");
 1310                 if (bp->b_kvasize) {
 1311                         bp->b_qindex = QUEUE_EMPTYKVA;
 1312                 } else {
 1313                         bp->b_qindex = QUEUE_EMPTY;
 1314                 }
 1315                 TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist);
 1316         /* buffers with junk contents */
 1317         } else if (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF) ||
 1318             (bp->b_ioflags & BIO_ERROR)) {
 1319                 bp->b_flags |= B_INVAL;
 1320                 bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA);
 1321                 if (bp->b_vflags & BV_BKGRDINPROG)
 1322                         panic("losing buffer 2");
 1323                 bp->b_qindex = QUEUE_CLEAN;
 1324                 TAILQ_INSERT_HEAD(&bufqueues[QUEUE_CLEAN], bp, b_freelist);
 1325         /* remaining buffers */
 1326         } else {
 1327                 if (bp->b_flags & B_DELWRI)
 1328                         bp->b_qindex = QUEUE_DIRTY;
 1329                 else
 1330                         bp->b_qindex = QUEUE_CLEAN;
 1331                 if (bp->b_flags & B_AGE)
 1332                         TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist);
 1333                 else
 1334                         TAILQ_INSERT_TAIL(&bufqueues[bp->b_qindex], bp, b_freelist);
 1335         }
 1336         mtx_unlock(&bqlock);
 1337 
 1338         /*
 1339          * If B_INVAL and B_DELWRI are set, clear B_DELWRI.  We have already
 1340          * placed the buffer on the correct queue.  We must also disassociate
 1341          * the device and vnode for a B_INVAL buffer so gbincore() doesn't
 1342          * find it.
 1343          */
 1344         if (bp->b_flags & B_INVAL) {
 1345                 if (bp->b_flags & B_DELWRI)
 1346                         bundirty(bp);
 1347                 if (bp->b_vp)
 1348                         brelvp(bp);
 1349         }
 1350 
 1351         /*
 1352          * Fixup numfreebuffers count.  The bp is on an appropriate queue
 1353          * unless locked.  We then bump numfreebuffers if it is not B_DELWRI.
 1354          * We've already handled the B_INVAL case ( B_DELWRI will be clear
 1355          * if B_INVAL is set ).
 1356          */
 1357 
 1358         if (!(bp->b_flags & B_DELWRI))
 1359                 bufcountwakeup();
 1360 
 1361         /*
 1362          * Something we can maybe free or reuse
 1363          */
 1364         if (bp->b_bufsize || bp->b_kvasize)
 1365                 bufspacewakeup();
 1366 
 1367         bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF | B_DIRECT);
 1368         if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
 1369                 panic("brelse: not dirty");
 1370         /* unlock */
 1371         BUF_UNLOCK(bp);
 1372 }
 1373 
 1374 /*
 1375  * Release a buffer back to the appropriate queue but do not try to free
 1376  * it.  The buffer is expected to be used again soon.
 1377  *
 1378  * bqrelse() is used by bdwrite() to requeue a delayed write, and used by
 1379  * biodone() to requeue an async I/O on completion.  It is also used when
 1380  * known good buffers need to be requeued but we think we may need the data
 1381  * again soon.
 1382  *
 1383  * XXX we should be able to leave the B_RELBUF hint set on completion.
 1384  */
 1385 void
 1386 bqrelse(struct buf *bp)
 1387 {
 1388         CTR3(KTR_BUF, "bqrelse(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
 1389         KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
 1390             ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
 1391 
 1392         if (BUF_REFCNT(bp) > 1) {
 1393                 /* do not release to free list */
 1394                 BUF_UNLOCK(bp);
 1395                 return;
 1396         }
 1397         mtx_lock(&bqlock);
 1398         /* Handle delayed bremfree() processing. */
 1399         if (bp->b_flags & B_REMFREE)
 1400                 bremfreel(bp);
 1401         if (bp->b_qindex != QUEUE_NONE)
 1402                 panic("bqrelse: free buffer onto another queue???");
 1403         /* buffers with stale but valid contents */
 1404         if (bp->b_flags & B_DELWRI) {
 1405                 bp->b_qindex = QUEUE_DIRTY;
 1406                 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_DIRTY], bp, b_freelist);
 1407         } else {
 1408                 /*
 1409                  * XXX This lock may not be necessary since BKGRDINPROG
 1410                  * cannot be set while we hold the buf lock; it can only be
 1411                  * cleared if it is already pending.
 1412                  */
 1413                 BO_LOCK(bp->b_bufobj);
 1414                 if (!vm_page_count_severe() || bp->b_vflags & BV_BKGRDINPROG) {
 1415                         BO_UNLOCK(bp->b_bufobj);
 1416                         bp->b_qindex = QUEUE_CLEAN;
 1417                         TAILQ_INSERT_TAIL(&bufqueues[QUEUE_CLEAN], bp,
 1418                             b_freelist);
 1419                 } else {
 1420                         /*
 1421                          * We are too low on memory, we have to try to free
 1422                          * the buffer (most importantly: the wired pages
 1423                          * making up its backing store) *now*.
 1424                          */
 1425                         BO_UNLOCK(bp->b_bufobj);
 1426                         mtx_unlock(&bqlock);
 1427                         brelse(bp);
 1428                         return;
 1429                 }
 1430         }
 1431         mtx_unlock(&bqlock);
 1432 
 1433         if ((bp->b_flags & B_INVAL) || !(bp->b_flags & B_DELWRI))
 1434                 bufcountwakeup();
 1435 
 1436         /*
 1437          * Something we can maybe free or reuse.
 1438          */
 1439         if (bp->b_bufsize && !(bp->b_flags & B_DELWRI))
 1440                 bufspacewakeup();
 1441 
 1442         bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
 1443         if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
 1444                 panic("bqrelse: not dirty");
 1445         /* unlock */
 1446         BUF_UNLOCK(bp);
 1447 }
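/*
 * Illustrative sketch, not part of the original source: how a consumer
 * of the buf(9) interface typically picks between brelse() and
 * bqrelse().  The vnode "vp", block number "lblkno", size "bsize" and
 * the "reuse_soon" predicate are placeholders for this example only.
 */
#if 0
        struct buf *bp;
        int error;

        error = bread(vp, lblkno, bsize, NOCRED, &bp);
        if (error != 0) {
                brelse(bp);             /* error: throw the buffer away */
                return (error);
        }
        /* ... consume bp->b_data ... */
        if (reuse_soon)
                bqrelse(bp);            /* expect to touch the data again soon */
        else
                brelse(bp);             /* normal release; pages may be reclaimed */
#endif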
 1448 
 1449 /* Give pages used by the bp back to the VM system (where possible) */
 1450 static void
 1451 vfs_vmio_release(struct buf *bp)
 1452 {
 1453         int i;
 1454         vm_page_t m;
 1455 
 1456         VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
 1457         vm_page_lock_queues();
 1458         for (i = 0; i < bp->b_npages; i++) {
 1459                 m = bp->b_pages[i];
 1460                 bp->b_pages[i] = NULL;
 1461                 /*
 1462                  * In order to keep page LRU ordering consistent, put
 1463                  * everything on the inactive queue.
 1464                  */
 1465                 vm_page_unwire(m, 0);
 1466                 /*
 1467                  * We don't mess with busy pages, it is
 1468                  * the responsibility of the process that
 1469                  * busied the pages to deal with them.
 1470                  */
 1471                 if ((m->flags & PG_BUSY) || (m->busy != 0))
 1472                         continue;
 1473                         
 1474                 if (m->wire_count == 0) {
 1475                         /*
 1476                          * Might as well free the page if we can and it has
 1477                          * no valid data.  We also free the page if the
 1478                          * buffer was used for direct I/O
 1479                          */
 1480                         if ((bp->b_flags & B_ASYNC) == 0 && !m->valid &&
 1481                             m->hold_count == 0) {
 1482                                 pmap_remove_all(m);
 1483                                 vm_page_free(m);
 1484                         } else if (bp->b_flags & B_DIRECT) {
 1485                                 vm_page_try_to_free(m);
 1486                         } else if (vm_page_count_severe()) {
 1487                                 vm_page_try_to_cache(m);
 1488                         }
 1489                 }
 1490         }
 1491         vm_page_unlock_queues();
 1492         VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
 1493         pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
 1494         
 1495         if (bp->b_bufsize) {
 1496                 bufspacewakeup();
 1497                 bp->b_bufsize = 0;
 1498         }
 1499         bp->b_npages = 0;
 1500         bp->b_flags &= ~B_VMIO;
 1501         if (bp->b_vp)
 1502                 brelvp(bp);
 1503 }
 1504 
 1505 /*
 1506  * Check to see if a block at a particular lbn is available for a clustered
 1507  * write.
 1508  */
 1509 static int
 1510 vfs_bio_clcheck(struct vnode *vp, int size, daddr_t lblkno, daddr_t blkno)
 1511 {
 1512         struct buf *bpa;
 1513         int match;
 1514 
 1515         match = 0;
 1516 
 1517         /* If the buf isn't in core skip it */
 1518         if ((bpa = gbincore(&vp->v_bufobj, lblkno)) == NULL)
 1519                 return (0);
 1520 
 1521         /* If the buf is busy we don't want to wait for it */
 1522         if (BUF_LOCK(bpa, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
 1523                 return (0);
 1524 
 1525         /* Only cluster with valid clusterable delayed write buffers */
 1526         if ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) !=
 1527             (B_DELWRI | B_CLUSTEROK))
 1528                 goto done;
 1529 
 1530         if (bpa->b_bufsize != size)
 1531                 goto done;
 1532 
 1533         /*
 1534          * Check to see if it is in the expected place on disk and that the
 1535          * block has been mapped.
 1536          */
 1537         if ((bpa->b_blkno != bpa->b_lblkno) && (bpa->b_blkno == blkno))
 1538                 match = 1;
 1539 done:
 1540         BUF_UNLOCK(bpa);
 1541         return (match);
 1542 }
 1543 
 1544 /*
 1545  *      vfs_bio_awrite:
 1546  *
 1547  *      Implement clustered async writes for clearing out B_DELWRI buffers.
 1548  *      This is much better than the old way of writing only one buffer at
 1549  *      a time.  Note that we may not be presented with the buffers in the 
 1550  *      correct order, so we search for the cluster in both directions.
 1551  */
 1552 int
 1553 vfs_bio_awrite(struct buf *bp)
 1554 {
 1555         int i;
 1556         int j;
 1557         daddr_t lblkno = bp->b_lblkno;
 1558         struct vnode *vp = bp->b_vp;
 1559         int ncl;
 1560         int nwritten;
 1561         int size;
 1562         int maxcl;
 1563 
 1564         /*
 1565          * right now we support clustered writing only to regular files.  If
 1566          * we find a clusterable block we could be in the middle of a cluster
 1567          * rather then at the beginning.
 1568          * rather than at the beginning.
 1569         if ((vp->v_type == VREG) && 
 1570             (vp->v_mount != 0) && /* Only on nodes that have the size info */
 1571             (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
 1572 
 1573                 size = vp->v_mount->mnt_stat.f_iosize;
 1574                 maxcl = MAXPHYS / size;
 1575 
 1576                 VI_LOCK(vp);
 1577                 for (i = 1; i < maxcl; i++)
 1578                         if (vfs_bio_clcheck(vp, size, lblkno + i,
 1579                             bp->b_blkno + ((i * size) >> DEV_BSHIFT)) == 0)
 1580                                 break;
 1581 
 1582                 for (j = 1; i + j <= maxcl && j <= lblkno; j++) 
 1583                         if (vfs_bio_clcheck(vp, size, lblkno - j,
 1584                             bp->b_blkno - ((j * size) >> DEV_BSHIFT)) == 0)
 1585                                 break;
 1586 
 1587                 VI_UNLOCK(vp);
 1588                 --j;
 1589                 ncl = i + j;
 1590                 /*
 1591                  * this is a possible cluster write
 1592                  */
 1593                 if (ncl != 1) {
 1594                         BUF_UNLOCK(bp);
 1595                         nwritten = cluster_wbuild(vp, size, lblkno - j, ncl);
 1596                         return nwritten;
 1597                 }
 1598         }
 1599         bremfree(bp);
 1600         bp->b_flags |= B_ASYNC;
 1601         /*
 1602          * default (old) behavior, writing out only one block
 1603          *
 1604          * XXX returns b_bufsize instead of b_bcount for nwritten?
 1605          */
 1606         nwritten = bp->b_bufsize;
 1607         (void) bwrite(bp);
 1608 
 1609         return nwritten;
 1610 }
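/*
 * Worked example of the cluster probing above, for illustration only:
 * with an f_iosize of 16384 and DEV_BSIZE of 512 (DEV_BSHIFT == 9),
 * each logical block covers 16384 >> 9 == 32 device blocks, so the
 * forward probe tests lblkno + i against b_blkno + 32 * i and the
 * backward probe tests lblkno - j against b_blkno - 32 * j.  With i
 * counting bp itself plus the blocks found forward, and j the blocks
 * found backward, the candidate cluster is the ncl = i + j contiguous
 * blocks starting at lblkno - j.
 */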
 1611 
 1612 /*
 1613  *      getnewbuf:
 1614  *
 1615  *      Find and initialize a new buffer header, freeing up existing buffers 
 1616  *      in the bufqueues as necessary.  The new buffer is returned locked.
 1617  *
 1618  *      Important:  B_INVAL is not set.  If the caller wishes to throw the
 1619  *      buffer away, the caller must set B_INVAL prior to calling brelse().
 1620  *
 1621  *      We block if:
 1622  *              We have insufficient buffer headers
 1623  *              We have insufficient buffer space
 1624  *              buffer_map is too fragmented ( space reservation fails )
 1625  *              If we have to flush dirty buffers ( but we try to avoid this )
 1626  *
 1627  *      To avoid VFS layer recursion we do not flush dirty buffers ourselves.
 1628  *      Instead we ask the buf daemon to do it for us.  We attempt to
 1629  *      avoid piecemeal wakeups of the pageout daemon.
 1630  */
 1631 
 1632 static struct buf *
 1633 getnewbuf(int slpflag, int slptimeo, int size, int maxsize)
 1634 {
 1635         struct buf *bp;
 1636         struct buf *nbp;
 1637         int defrag = 0;
 1638         int nqindex;
 1639         static int flushingbufs;
 1640 
 1641         /*
 1642          * We can't afford to block since we might be holding a vnode lock,
 1643          * which may prevent system daemons from running.  We deal with
 1644          * low-memory situations by proactively returning memory and running
 1645          * async I/O rather than sync I/O.
 1646          */
 1647 
 1648         atomic_add_int(&getnewbufcalls, 1);
 1649         atomic_subtract_int(&getnewbufrestarts, 1);
 1650 restart:
 1651         atomic_add_int(&getnewbufrestarts, 1);
 1652 
 1653         /*
 1654          * Setup for scan.  If we do not have enough free buffers,
 1655          * we setup a degenerate case that immediately fails.  Note
 1656          * we set up a degenerate case that immediately fails.  Note
 1657          * that if we are a specially marked process, we are allowed to
 1658          *
 1659          * The scanning sequence is nominally:  EMPTY->EMPTYKVA->CLEAN
 1660          *
 1661          * We start with EMPTYKVA.  If the list is empty we back up to EMPTY.
 1662          * However, there are a number of cases (defragging, reusing, ...)
 1663          * where we cannot back up.
 1664          */
 1665         mtx_lock(&bqlock);
 1666         nqindex = QUEUE_EMPTYKVA;
 1667         nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTYKVA]);
 1668 
 1669         if (nbp == NULL) {
 1670                 /*
 1671                  * If no EMPTYKVA buffers and we are either
 1672                  * defragging or reusing, locate a CLEAN buffer
 1673                  * to free or reuse.  If bufspace usage is low,
 1674                  * skip this step so we can allocate a new buffer.
 1675                  */
 1676                 if (defrag || bufspace >= lobufspace) {
 1677                         nqindex = QUEUE_CLEAN;
 1678                         nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN]);
 1679                 }
 1680 
 1681                 /*
 1682                  * If we could not find or were not allowed to reuse a
 1683                  * CLEAN buffer, check to see if it is ok to use an EMPTY
 1684                  * buffer.  We can only use an EMPTY buffer if allocating
 1685                  * its KVA would not otherwise run us out of buffer space.
 1686                  */
 1687                 if (nbp == NULL && defrag == 0 &&
 1688                     bufspace + maxsize < hibufspace) {
 1689                         nqindex = QUEUE_EMPTY;
 1690                         nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]);
 1691                 }
 1692         }
 1693 
 1694         /*
 1695          * Run scan, possibly freeing data and/or kva mappings on the fly
 1696          * depending.
 1697          */
 1698 
 1699         while ((bp = nbp) != NULL) {
 1700                 int qindex = nqindex;
 1701 
 1702                 /*
 1703                  * Calculate next bp ( we can only use it if we do not block
 1704                  * or do other fancy things ).
 1705                  */
 1706                 if ((nbp = TAILQ_NEXT(bp, b_freelist)) == NULL) {
 1707                         switch(qindex) {
 1708                         case QUEUE_EMPTY:
 1709                                 nqindex = QUEUE_EMPTYKVA;
 1710                                 if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTYKVA])))
 1711                                         break;
 1712                                 /* FALLTHROUGH */
 1713                         case QUEUE_EMPTYKVA:
 1714                                 nqindex = QUEUE_CLEAN;
 1715                                 if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN])))
 1716                                         break;
 1717                                 /* FALLTHROUGH */
 1718                         case QUEUE_CLEAN:
 1719                                 /*
 1720                                  * nbp is NULL. 
 1721                                  */
 1722                                 break;
 1723                         }
 1724                 }
 1725                 /*
 1726                  * If we are defragging then we need a buffer with 
 1727                  * b_kvasize != 0.  XXX this situation should no longer
 1728                  * occur; if defrag is non-zero, the buffer's b_kvasize
 1729                  * should also be non-zero at this point.  XXX
 1730                  */
 1731                 if (defrag && bp->b_kvasize == 0) {
 1732                         printf("Warning: defrag empty buffer %p\n", bp);
 1733                         continue;
 1734                 }
 1735 
 1736                 /*
 1737                  * Start freeing the bp.  This is somewhat involved.  nbp
 1738                  * remains valid only for QUEUE_EMPTY[KVA] bp's.
 1739                  */
 1740                 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
 1741                         continue;
 1742                 if (bp->b_vp) {
 1743                         BO_LOCK(bp->b_bufobj);
 1744                         if (bp->b_vflags & BV_BKGRDINPROG) {
 1745                                 BO_UNLOCK(bp->b_bufobj);
 1746                                 BUF_UNLOCK(bp);
 1747                                 continue;
 1748                         }
 1749                         BO_UNLOCK(bp->b_bufobj);
 1750                 }
 1751                 CTR6(KTR_BUF,
 1752                     "getnewbuf(%p) vp %p flags %X kvasize %d bufsize %d "
 1753                     "queue %d (recycling)", bp, bp->b_vp, bp->b_flags,
 1754                     bp->b_kvasize, bp->b_bufsize, qindex);
 1755 
 1756                 /*
 1757                  * Sanity Checks
 1758                  */
 1759                 KASSERT(bp->b_qindex == qindex, ("getnewbuf: inconsistent queue %d bp %p", qindex, bp));
 1760 
 1761                 /*
 1762                  * Note: we no longer distinguish between VMIO and non-VMIO
 1763                  * buffers.
 1764                  */
 1765 
 1766                 KASSERT((bp->b_flags & B_DELWRI) == 0, ("delwri buffer %p found in queue %d", bp, qindex));
 1767 
 1768                 bremfreel(bp);
 1769                 mtx_unlock(&bqlock);
 1770 
 1771                 if (qindex == QUEUE_CLEAN) {
 1772                         if (bp->b_flags & B_VMIO) {
 1773                                 bp->b_flags &= ~B_ASYNC;
 1774                                 vfs_vmio_release(bp);
 1775                         }
 1776                         if (bp->b_vp)
 1777                                 brelvp(bp);
 1778                 }
 1779 
 1780                 /*
 1781                  * NOTE:  nbp is now entirely invalid.  We can only restart
 1782                  * the scan from this point on.
 1783                  *
 1784                  * Get the rest of the buffer freed up.  b_kva* is still
 1785                  * valid after this operation.
 1786                  */
 1787 
 1788                 if (bp->b_rcred != NOCRED) {
 1789                         crfree(bp->b_rcred);
 1790                         bp->b_rcred = NOCRED;
 1791                 }
 1792                 if (bp->b_wcred != NOCRED) {
 1793                         crfree(bp->b_wcred);
 1794                         bp->b_wcred = NOCRED;
 1795                 }
 1796                 if (LIST_FIRST(&bp->b_dep) != NULL)
 1797                         buf_deallocate(bp);
 1798                 if (bp->b_vflags & BV_BKGRDINPROG)
 1799                         panic("losing buffer 3");
 1800                 KASSERT(bp->b_vp == NULL,
 1801                     ("bp: %p still has vnode %p.  qindex: %d",
 1802                     bp, bp->b_vp, qindex));
 1803                 KASSERT((bp->b_xflags & (BX_VNCLEAN|BX_VNDIRTY)) == 0,
 1804                    ("bp: %p still on a buffer list. xflags %X",
 1805                     bp, bp->b_xflags));
 1806 
 1807                 if (bp->b_bufsize)
 1808                         allocbuf(bp, 0);
 1809 
 1810                 bp->b_flags = 0;
 1811                 bp->b_ioflags = 0;
 1812                 bp->b_xflags = 0;
 1813                 bp->b_vflags = 0;
 1814                 bp->b_vp = NULL;
 1815                 bp->b_blkno = bp->b_lblkno = 0;
 1816                 bp->b_offset = NOOFFSET;
 1817                 bp->b_iodone = 0;
 1818                 bp->b_error = 0;
 1819                 bp->b_resid = 0;
 1820                 bp->b_bcount = 0;
 1821                 bp->b_npages = 0;
 1822                 bp->b_dirtyoff = bp->b_dirtyend = 0;
 1823                 bp->b_bufobj = NULL;
 1824 
 1825                 LIST_INIT(&bp->b_dep);
 1826 
 1827                 /*
 1828                  * If we are defragging then free the buffer.
 1829                  */
 1830                 if (defrag) {
 1831                         bp->b_flags |= B_INVAL;
 1832                         bfreekva(bp);
 1833                         brelse(bp);
 1834                         defrag = 0;
 1835                         goto restart;
 1836                 }
 1837 
 1838                 /*
 1839          * If we are overcommitted then recover the buffer and its
 1840                  * KVM space.  This occurs in rare situations when multiple
 1841                  * processes are blocked in getnewbuf() or allocbuf().
 1842                  */
 1843                 if (bufspace >= hibufspace)
 1844                         flushingbufs = 1;
 1845                 if (flushingbufs && bp->b_kvasize != 0) {
 1846                         bp->b_flags |= B_INVAL;
 1847                         bfreekva(bp);
 1848                         brelse(bp);
 1849                         goto restart;
 1850                 }
 1851                 if (bufspace < lobufspace)
 1852                         flushingbufs = 0;
 1853                 break;
 1854         }
 1855 
 1856         /*
 1857          * If we exhausted our list, sleep as appropriate.  We may have to
 1858          * wakeup various daemons and write out some dirty buffers.
 1859          *
 1860          * Generally we are sleeping due to insufficient buffer space.
 1861          */
 1862 
 1863         if (bp == NULL) {
 1864                 int flags;
 1865                 char *waitmsg;
 1866 
 1867                 mtx_unlock(&bqlock);
 1868                 if (defrag) {
 1869                         flags = VFS_BIO_NEED_BUFSPACE;
 1870                         waitmsg = "nbufkv";
 1871                 } else if (bufspace >= hibufspace) {
 1872                         waitmsg = "nbufbs";
 1873                         flags = VFS_BIO_NEED_BUFSPACE;
 1874                 } else {
 1875                         waitmsg = "newbuf";
 1876                         flags = VFS_BIO_NEED_ANY;
 1877                 }
 1878 
 1879                 bd_speedup();   /* heeeelp */
 1880 
 1881                 mtx_lock(&nblock);
 1882                 needsbuffer |= flags;
 1883                 while (needsbuffer & flags) {
 1884                         if (msleep(&needsbuffer, &nblock,
 1885                             (PRIBIO + 4) | slpflag, waitmsg, slptimeo)) {
 1886                                 mtx_unlock(&nblock);
 1887                                 return (NULL);
 1888                         }
 1889                 }
 1890                 mtx_unlock(&nblock);
 1891         } else {
 1892                 /*
 1893                  * We finally have a valid bp.  We aren't quite out of the
 1894                  * woods, we still have to reserve kva space.  In order
 1895                  * to keep fragmentation sane we only allocate kva in
 1896                  * BKVASIZE chunks.
 1897                  */
 1898                 maxsize = (maxsize + BKVAMASK) & ~BKVAMASK;
 1899 
 1900                 if (maxsize != bp->b_kvasize) {
 1901                         vm_offset_t addr = 0;
 1902 
 1903                         bfreekva(bp);
 1904 
 1905                         vm_map_lock(buffer_map);
 1906                         if (vm_map_findspace(buffer_map,
 1907                                 vm_map_min(buffer_map), maxsize, &addr)) {
 1908                                 /*
 1909                                  * Uh oh.  Buffer map is too fragmented.  We
 1910                                  * must defragment the map.
 1911                                  */
 1912                                 atomic_add_int(&bufdefragcnt, 1);
 1913                                 vm_map_unlock(buffer_map);
 1914                                 defrag = 1;
 1915                                 bp->b_flags |= B_INVAL;
 1916                                 brelse(bp);
 1917                                 goto restart;
 1918                         }
 1919                         if (addr) {
 1920                                 vm_map_insert(buffer_map, NULL, 0,
 1921                                         addr, addr + maxsize,
 1922                                         VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
 1923 
 1924                                 bp->b_kvabase = (caddr_t) addr;
 1925                                 bp->b_kvasize = maxsize;
 1926                                 atomic_add_int(&bufspace, bp->b_kvasize);
 1927                                 atomic_add_int(&bufreusecnt, 1);
 1928                         }
 1929                         vm_map_unlock(buffer_map);
 1930                 }
 1931                 bp->b_saveaddr = bp->b_kvabase;
 1932                 bp->b_data = bp->b_saveaddr;
 1933         }
 1934         return(bp);
 1935 }
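/*
 * Illustrative note on the KVA reservation above: maxsize is rounded up
 * to a multiple of BKVASIZE (BKVAMASK == BKVASIZE - 1), so with the
 * usual BKVASIZE of 16384 a request for, say, 9000 bytes of mapping
 * reserves a single 16384-byte chunk of buffer_map.  Allocating KVA
 * only in these fixed-size chunks is what keeps fragmentation sane.
 */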
 1936 
 1937 /*
 1938  *      buf_daemon:
 1939  *
 1940  *      buffer flushing daemon.  Buffers are normally flushed by the
 1941  *      update daemon but if it cannot keep up this process starts to
 1942  *      take the load in an attempt to prevent getnewbuf() from blocking.
 1943  */
 1944 
 1945 static struct kproc_desc buf_kp = {
 1946         "bufdaemon",
 1947         buf_daemon,
 1948         &bufdaemonproc
 1949 };
 1950 SYSINIT(bufdaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, kproc_start, &buf_kp)
 1951 
 1952 static void
 1953 buf_daemon()
 1954 {
 1955         mtx_lock(&Giant);
 1956 
 1957         /*
 1958          * This process needs to be suspended prior to shutdown sync.
 1959          */
 1960         EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, bufdaemonproc,
 1961             SHUTDOWN_PRI_LAST);
 1962 
 1963         /*
 1964          * This process is allowed to take the buffer cache to the limit
 1965          */
 1966         curthread->td_pflags |= TDP_NORUNNINGBUF;
 1967         mtx_lock(&bdlock);
 1968         for (;;) {
 1969                 bd_request = 0;
 1970                 mtx_unlock(&bdlock);
 1971 
 1972                 kthread_suspend_check(bufdaemonproc);
 1973 
 1974                 /*
 1975                  * Do the flush.  Limit the amount of in-transit I/O we
 1976                  * allow to build up, otherwise we would completely saturate
 1977                  * the I/O system.  Wakeup any waiting processes before we
 1978                  * normally would so they can run in parallel with our drain.
 1979                  */
 1980                 while (numdirtybuffers > lodirtybuffers) {
 1981                         if (flushbufqueues(0) == 0) {
 1982                                 /*
 1983                                  * Could not find any buffers without rollback
 1984                                  * dependencies, so just write the first one
 1985                                  * in the hopes of eventually making progress.
 1986                                  */
 1987                                 flushbufqueues(1);
 1988                                 break;
 1989                         }
 1990                         uio_yield();
 1991                 }
 1992 
 1993                 /*
 1994                  * Only clear bd_request if we have reached our low water
 1995                  * mark.  The buf_daemon normally waits 1 second and
 1996                  * then incrementally flushes any dirty buffers that have
 1997                  * built up, within reason.
 1998                  *
 1999                  * If we were unable to hit our low water mark and couldn't
 2000                  * find any flushable buffers, we sleep half a second.
 2001                  * Otherwise we loop immediately.
 2002                  */
 2003                 mtx_lock(&bdlock);
 2004                 if (numdirtybuffers <= lodirtybuffers) {
 2005                         /*
 2006                          * We reached our low water mark, reset the
 2007                          * request and sleep until we are needed again.
 2008                          * The sleep is just so the suspend code works.
 2009                          */
 2010                         bd_request = 0;
 2011                         msleep(&bd_request, &bdlock, PVM, "psleep", hz);
 2012                 } else {
 2013                         /*
 2014                          * We couldn't find any flushable dirty buffers but
 2015                          * still have too many dirty buffers, we
 2016                          * have to sleep and try again.  (rare)
 2017                          */
 2018                         msleep(&bd_request, &bdlock, PVM, "qsleep", hz / 10);
 2019                 }
 2020         }
 2021 }
 2022 
 2023 /*
 2024  *      flushbufqueues:
 2025  *
 2026  *      Try to flush a buffer in the dirty queue.  We must be careful to
 2027  *      free up B_INVAL buffers instead of writing them, which NFS is
 2028  *      particularly sensitive to.
 2029  */
 2030 static int flushwithdeps = 0;
 2031 SYSCTL_INT(_vfs, OID_AUTO, flushwithdeps, CTLFLAG_RW, &flushwithdeps,
 2032     0, "Number of buffers flushed with dependencies that require rollbacks");
 2033 
 2034 static int
 2035 flushbufqueues(int flushdeps)
 2036 {
 2037         struct thread *td = curthread;
 2038         struct buf sentinel;
 2039         struct vnode *vp;
 2040         struct mount *mp;
 2041         struct buf *bp;
 2042         int hasdeps;
 2043         int flushed;
 2044         int target;
 2045 
 2046         target = numdirtybuffers - lodirtybuffers;
 2047         if (flushdeps && target > 2)
 2048                 target /= 2;
 2049         flushed = 0;
 2050         bp = NULL;
 2051         mtx_lock(&bqlock);
 2052         TAILQ_INSERT_TAIL(&bufqueues[QUEUE_DIRTY], &sentinel, b_freelist);
 2053         while (flushed != target) {
 2054                 bp = TAILQ_FIRST(&bufqueues[QUEUE_DIRTY]);
 2055                 if (bp == &sentinel)
 2056                         break;
 2057                 TAILQ_REMOVE(&bufqueues[QUEUE_DIRTY], bp, b_freelist);
 2058                 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_DIRTY], bp, b_freelist);
 2059 
 2060                 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
 2061                         continue;
 2062                 BO_LOCK(bp->b_bufobj);
 2063                 if ((bp->b_vflags & BV_BKGRDINPROG) != 0 ||
 2064                     (bp->b_flags & B_DELWRI) == 0) {
 2065                         BO_UNLOCK(bp->b_bufobj);
 2066                         BUF_UNLOCK(bp);
 2067                         continue;
 2068                 }
 2069                 BO_UNLOCK(bp->b_bufobj);
 2070                 if (bp->b_flags & B_INVAL) {
 2071                         bremfreel(bp);
 2072                         mtx_unlock(&bqlock);
 2073                         brelse(bp);
 2074                         flushed++;
 2075                         numdirtywakeup((lodirtybuffers + hidirtybuffers) / 2);
 2076                         mtx_lock(&bqlock);
 2077                         continue;
 2078                 }
 2079 
 2080                 if (LIST_FIRST(&bp->b_dep) != NULL && buf_countdeps(bp, 0)) {
 2081                         if (flushdeps == 0) {
 2082                                 BUF_UNLOCK(bp);
 2083                                 continue;
 2084                         }
 2085                         hasdeps = 1;
 2086                 } else
 2087                         hasdeps = 0;
 2088                 /*
 2089                  * We must hold the lock on a vnode before writing
 2090                  * one of its buffers.  Otherwise we may cause confusion or,
 2091                  * in the case of a snapshot vnode, deadlock the
 2092                  * system.
 2093                  *
 2094                  * The lock order here is the reverse of the normal order
 2095                  * of vnode lock followed by buf lock.  This is ok because
 2096                  * the NOWAIT will prevent deadlock.
 2097                  */
 2098                 vp = bp->b_vp;
 2099                 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
 2100                         BUF_UNLOCK(bp);
 2101                         continue;
 2102                 }
 2103                 if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT, td) == 0) {
 2104                         mtx_unlock(&bqlock);
 2105                         CTR3(KTR_BUF, "flushbufqueue(%p) vp %p flags %X",
 2106                             bp, bp->b_vp, bp->b_flags);
 2107                         vfs_bio_awrite(bp);
 2108                         vn_finished_write(mp);
 2109                         VOP_UNLOCK(vp, 0, td);
 2110                         flushwithdeps += hasdeps;
 2111                         flushed++;
 2112                         waitrunningbufspace();
 2113                         numdirtywakeup((lodirtybuffers + hidirtybuffers) / 2);
 2114                         mtx_lock(&bqlock);
 2115                         continue;
 2116                 }
 2117                 vn_finished_write(mp);
 2118                 BUF_UNLOCK(bp);
 2119         }
 2120         TAILQ_REMOVE(&bufqueues[QUEUE_DIRTY], &sentinel, b_freelist);
 2121         mtx_unlock(&bqlock);
 2122         return (flushed);
 2123 }
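/*
 * Illustrative note on the loop above: the on-stack sentinel marks the
 * end of one full pass over QUEUE_DIRTY.  Each candidate is rotated
 * from the head of the queue to the tail before it is examined, so
 * buffers that cannot be flushed right now (lock contention, no
 * B_DELWRI, rollback dependencies) are simply skipped, and the scan
 * ends when the sentinel comes back around to the head.
 */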
 2124 
 2125 /*
 2126  * Check to see if a block is currently memory resident.
 2127  */
 2128 struct buf *
 2129 incore(struct bufobj *bo, daddr_t blkno)
 2130 {
 2131         struct buf *bp;
 2132 
 2133         BO_LOCK(bo);
 2134         bp = gbincore(bo, blkno);
 2135         BO_UNLOCK(bo);
 2136         return (bp);
 2137 }
 2138 
 2139 /*
 2140  * Returns true if no I/O is needed to access the
 2141  * associated VM object.  This is like incore except
 2142  * it also hunts around in the VM system for the data.
 2143  */
 2144 
 2145 static int
 2146 inmem(struct vnode * vp, daddr_t blkno)
 2147 {
 2148         vm_object_t obj;
 2149         vm_offset_t toff, tinc, size;
 2150         vm_page_t m;
 2151         vm_ooffset_t off;
 2152 
 2153         ASSERT_VOP_LOCKED(vp, "inmem");
 2154 
 2155         if (incore(&vp->v_bufobj, blkno))
 2156                 return 1;
 2157         if (vp->v_mount == NULL)
 2158                 return 0;
 2159         obj = vp->v_object;
 2160         if (obj == NULL)
 2161                 return (0);
 2162 
 2163         size = PAGE_SIZE;
 2164         if (size > vp->v_mount->mnt_stat.f_iosize)
 2165                 size = vp->v_mount->mnt_stat.f_iosize;
 2166         off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;
 2167 
 2168         VM_OBJECT_LOCK(obj);
 2169         for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
 2170                 m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
 2171                 if (!m)
 2172                         goto notinmem;
 2173                 tinc = size;
 2174                 if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK))
 2175                         tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK);
 2176                 if (vm_page_is_valid(m,
 2177                     (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
 2178                         goto notinmem;
 2179         }
 2180         VM_OBJECT_UNLOCK(obj);
 2181         return 1;
 2182 
 2183 notinmem:
 2184         VM_OBJECT_UNLOCK(obj);
 2185         return (0);
 2186 }
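/*
 * Illustrative sketch, not part of the original source: a read-ahead
 * style caller can use incore() to avoid scheduling I/O for blocks that
 * are already resident; inmem() above is the file-local variant that
 * also consults the backing VM pages.  "vp", "nextblkno" and "bsize"
 * are placeholders for this example.
 */
#if 0
        struct buf *bp;
        int error;

        if (incore(&vp->v_bufobj, nextblkno) == NULL) {
                /* No buffer header is resident; bring the block in. */
                error = bread(vp, nextblkno, bsize, NOCRED, &bp);
                if (error == 0)
                        bqrelse(bp);    /* keep it around for the expected use */
                else
                        brelse(bp);
        }
#endif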
 2187 
 2188 /*
 2189  *      vfs_setdirty:
 2190  *
 2191  *      Sets the dirty range for a buffer based on the status of the dirty
 2192  *      bits in the pages comprising the buffer.
 2193  *
 2194  *      The range is limited to the size of the buffer.
 2195  *
 2196  *      This routine is primarily used by NFS, but is generalized for the
 2197  *      B_VMIO case.
 2198  */
 2199 static void
 2200 vfs_setdirty(struct buf *bp) 
 2201 {
 2202         int i;
 2203         vm_object_t object;
 2204 
 2205         /*
 2206          * Degenerate case - empty buffer
 2207          */
 2208 
 2209         if (bp->b_bufsize == 0)
 2210                 return;
 2211 
 2212         /*
 2213          * We qualify the scan for modified pages on whether the
 2214          * object has been flushed yet.  The OBJ_WRITEABLE flag
 2215          * is not cleared simply by protecting pages off.
 2216          */
 2217 
 2218         if ((bp->b_flags & B_VMIO) == 0)
 2219                 return;
 2220 
 2221         object = bp->b_pages[0]->object;
 2222         VM_OBJECT_LOCK(object);
 2223         if ((object->flags & OBJ_WRITEABLE) && !(object->flags & OBJ_MIGHTBEDIRTY))
 2224                 printf("Warning: object %p writeable but not mightbedirty\n", object);
 2225         if (!(object->flags & OBJ_WRITEABLE) && (object->flags & OBJ_MIGHTBEDIRTY))
 2226                 printf("Warning: object %p mightbedirty but not writeable\n", object);
 2227 
 2228         if (object->flags & (OBJ_MIGHTBEDIRTY|OBJ_CLEANING)) {
 2229                 vm_offset_t boffset;
 2230                 vm_offset_t eoffset;
 2231 
 2232                 vm_page_lock_queues();
 2233                 /*
 2234                  * test the pages to see if they have been modified directly
 2235                  * by users through the VM system.
 2236                  */
 2237                 for (i = 0; i < bp->b_npages; i++)
 2238                         vm_page_test_dirty(bp->b_pages[i]);
 2239 
 2240                 /*
 2241                  * Calculate the encompassing dirty range, boffset and eoffset,
 2242                  * (eoffset - boffset) bytes.
 2243                  */
 2244 
 2245                 for (i = 0; i < bp->b_npages; i++) {
 2246                         if (bp->b_pages[i]->dirty)
 2247                                 break;
 2248                 }
 2249                 boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
 2250 
 2251                 for (i = bp->b_npages - 1; i >= 0; --i) {
 2252                         if (bp->b_pages[i]->dirty) {
 2253                                 break;
 2254                         }
 2255                 }
 2256                 eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
 2257 
 2258                 vm_page_unlock_queues();
 2259                 /*
 2260                  * Fit it to the buffer.
 2261                  */
 2262 
 2263                 if (eoffset > bp->b_bcount)
 2264                         eoffset = bp->b_bcount;
 2265 
 2266                 /*
 2267                  * If we have a good dirty range, merge with the existing
 2268                  * dirty range.
 2269                  */
 2270 
 2271                 if (boffset < eoffset) {
 2272                         if (bp->b_dirtyoff > boffset)
 2273                                 bp->b_dirtyoff = boffset;
 2274                         if (bp->b_dirtyend < eoffset)
 2275                                 bp->b_dirtyend = eoffset;
 2276                 }
 2277         }
 2278         VM_OBJECT_UNLOCK(object);
 2279 }
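/*
 * Worked example of the dirty-range computation above, for illustration
 * only: with PAGE_SIZE 4096 and a buffer that starts 512 bytes into its
 * first page (bp->b_offset & PAGE_MASK == 512), dirty pages 1 and 2
 * yield boffset = (1 << PAGE_SHIFT) - 512 = 3584 and
 * eoffset = (3 << PAGE_SHIFT) - 512 = 11776; eoffset is then clipped to
 * b_bcount before being merged into b_dirtyoff/b_dirtyend.
 */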
 2280 
 2281 /*
 2282  *      getblk:
 2283  *
 2284  *      Get a block given a specified block and offset into a file/device.
 2285  *      The buffer's B_DONE bit will be cleared on return, making it almost
 2286  *      ready for an I/O initiation.  B_INVAL may or may not be set on 
 2287  *      return.  The caller should clear B_INVAL prior to initiating a
 2288  *      READ.
 2289  *
 2290  *      For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for
 2291  *      an existing buffer.
 2292  *
 2293  *      For a VMIO buffer, B_CACHE is modified according to the backing VM.
 2294  *      If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set
 2295  *      and then cleared based on the backing VM.  If the previous buffer is
 2296  *      non-0-sized but invalid, B_CACHE will be cleared.
 2297  *
 2298  *      If getblk() must create a new buffer, the new buffer is returned with
 2299  *      both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which
 2300  *      case it is returned with B_INVAL clear and B_CACHE set based on the
 2301  *      backing VM.
 2302  *
 2303  *      getblk() also forces a bwrite() for any B_DELWRI buffer whose
 2304  *      B_CACHE bit is clear.
 2305  *      
 2306  *      What this means, basically, is that the caller should use B_CACHE to
 2307  *      determine whether the buffer is fully valid or not and should clear
 2308  *      B_INVAL prior to issuing a read.  If the caller intends to validate
 2309  *      the buffer by loading its data area with something, the caller needs
 2310  *      to clear B_INVAL.  If the caller does this without issuing an I/O, 
 2311  *      the caller should set B_CACHE ( as an optimization ), else the caller
 2312  *      should issue the I/O and biodone() will set B_CACHE if the I/O was
 2313  *      a write attempt or if it was a successful read.  If the caller
 2314  *      intends to issue a READ, the caller must clear B_INVAL and BIO_ERROR
 2315  *      prior to issuing the READ.  biodone() will *not* clear B_INVAL.
 2316  */
 2317 struct buf *
 2318 getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo,
 2319     int flags)
 2320 {
 2321         struct buf *bp;
 2322         struct bufobj *bo;
 2323         int error;
 2324 
 2325         CTR3(KTR_BUF, "getblk(%p, %ld, %d)", vp, (long)blkno, size);
 2326         ASSERT_VOP_LOCKED(vp, "getblk");
 2327         if (size > MAXBSIZE)
 2328                 panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE);
 2329 
 2330         bo = &vp->v_bufobj;
 2331 loop:
 2332         /*
 2333          * Block if we are low on buffers.   Certain processes are allowed
 2334          * to completely exhaust the buffer cache.
 2335          *
 2336          * If this check ever becomes a bottleneck it may be better to
 2337          * move it into the else, when gbincore() fails.  At the moment
 2338          * it isn't a problem.
 2339          *
 2340          * XXX remove if 0 sections (clean this up after it's proven)
 2341          */
 2342         if (numfreebuffers == 0) {
 2343                 if (curthread == PCPU_GET(idlethread))
 2344                         return NULL;
 2345                 mtx_lock(&nblock);
 2346                 needsbuffer |= VFS_BIO_NEED_ANY;
 2347                 mtx_unlock(&nblock);
 2348         }
 2349 
 2350         VI_LOCK(vp);
 2351         bp = gbincore(bo, blkno);
 2352         if (bp != NULL) {
 2353                 int lockflags;
 2354                 /*
 2355                  * Buffer is in-core.  If the buffer is not busy, it must
 2356                  * be on a queue.
 2357                  */
 2358                 lockflags = LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK;
 2359 
 2360                 if (flags & GB_LOCK_NOWAIT)
 2361                         lockflags |= LK_NOWAIT;
 2362 
 2363                 error = BUF_TIMELOCK(bp, lockflags,
 2364                     VI_MTX(vp), "getblk", slpflag, slptimeo);
 2365 
 2366                 /*
 2367                  * If we slept and got the lock we have to restart in case
 2368                  * the buffer changed identities.
 2369                  */
 2370                 if (error == ENOLCK)
 2371                         goto loop;
 2372                 /* We timed out or were interrupted. */
 2373                 else if (error)
 2374                         return (NULL);
 2375 
 2376                 /*
 2377                  * The buffer is locked.  B_CACHE is cleared if the buffer is 
 2378                  * invalid.  Otherwise, for a non-VMIO buffer, B_CACHE is set
 2379                  * and for a VMIO buffer B_CACHE is adjusted according to the
 2380                  * backing VM cache.
 2381                  */
 2382                 if (bp->b_flags & B_INVAL)
 2383                         bp->b_flags &= ~B_CACHE;
 2384                 else if ((bp->b_flags & (B_VMIO | B_INVAL)) == 0)
 2385                         bp->b_flags |= B_CACHE;
 2386                 bremfree(bp);
 2387 
 2388                 /*
 2389                  * check for size inconsistencies for the non-VMIO case.
 2390                  */
 2391 
 2392                 if (bp->b_bcount != size) {
 2393                         if ((bp->b_flags & B_VMIO) == 0 ||
 2394                             (size > bp->b_kvasize)) {
 2395                                 if (bp->b_flags & B_DELWRI) {
 2396                                         bp->b_flags |= B_NOCACHE;
 2397                                         bwrite(bp);
 2398                                 } else {
 2399                                         if (LIST_FIRST(&bp->b_dep) == NULL) {
 2400                                                 bp->b_flags |= B_RELBUF;
 2401                                                 brelse(bp);
 2402                                         } else {
 2403                                                 bp->b_flags |= B_NOCACHE;
 2404                                                 bwrite(bp);
 2405                                         }
 2406                                 }
 2407                                 goto loop;
 2408                         }
 2409                 }
 2410 
 2411                 /*
 2412                  * If the size is inconsistent in the VMIO case, we can resize
 2413                  * the buffer.  This might lead to B_CACHE getting set or
 2414                  * cleared.  If the size has not changed, B_CACHE remains
 2415                  * unchanged from its previous state.
 2416                  */
 2417 
 2418                 if (bp->b_bcount != size)
 2419                         allocbuf(bp, size);
 2420 
 2421                 KASSERT(bp->b_offset != NOOFFSET, 
 2422                     ("getblk: no buffer offset"));
 2423 
 2424                 /*
 2425                  * A buffer with B_DELWRI set and B_CACHE clear must
 2426                  * be committed before we can return the buffer in
 2427                  * order to prevent the caller from issuing a read
 2428                  * ( due to B_CACHE not being set ) and overwriting
 2429                  * it.
 2430                  *
 2431                  * Most callers, including NFS and FFS, need this to
 2432                  * operate properly either because they assume they
 2433                  * can issue a read if B_CACHE is not set, or because
 2434                  * ( for example ) an uncached B_DELWRI might loop due 
 2435                  * to softupdates re-dirtying the buffer.  In the latter
 2436                  * case, B_CACHE is set after the first write completes,
 2437                  * preventing further loops.
 2438                  * NOTE!  b*write() sets B_CACHE.  If we cleared B_CACHE
 2439                  * above while extending the buffer, we cannot allow the
 2440                  * buffer to remain with B_CACHE set after the write
 2441                  * completes or it will represent a corrupt state.  To
 2442                  * deal with this we set B_NOCACHE to scrap the buffer
 2443                  * after the write.
 2444                  *
 2445                  * We might be able to do something fancy, like setting
 2446                  * B_CACHE in bwrite() except if B_DELWRI is already set,
 2447                  * so the below call doesn't set B_CACHE, but that gets real
 2448                  * confusing.  This is much easier.
 2449                  */
 2450 
 2451                 if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) {
 2452                         bp->b_flags |= B_NOCACHE;
 2453                         bwrite(bp);
 2454                         goto loop;
 2455                 }
 2456                 bp->b_flags &= ~B_DONE;
 2457         } else {
 2458                 int bsize, maxsize, vmio;
 2459                 off_t offset;
 2460 
 2461                 /*
 2462                  * Buffer is not in-core, create new buffer.  The buffer
 2463                  * returned by getnewbuf() is locked.  Note that the returned
 2464                  * buffer is also considered valid (not marked B_INVAL).
 2465                  */
 2466                 VI_UNLOCK(vp);
 2467                 /*
 2468                  * If the user does not want us to create the buffer, bail out
 2469                  * here.
 2470                  */
 2471                 if (flags & GB_NOCREAT)
 2472                         return NULL;
 2473                 bsize = bo->bo_bsize;
 2474                 offset = blkno * bsize;
 2475                 vmio = vp->v_object != NULL;
 2476                 maxsize = vmio ? size + (offset & PAGE_MASK) : size;
 2477                 maxsize = imax(maxsize, bsize);
 2478 
 2479                 bp = getnewbuf(slpflag, slptimeo, size, maxsize);
 2480                 if (bp == NULL) {
 2481                         if (slpflag || slptimeo)
 2482                                 return NULL;
 2483                         goto loop;
 2484                 }
 2485 
 2486                 /*
 2487                  * This code is used to make sure that a buffer is not
 2488                  * created while the getnewbuf routine is blocked.
 2489                  * This can be a problem whether the vnode is locked or not.
 2490                  * If the buffer is created out from under us, we have to
 2491                  * throw away the one we just created.
 2492                  *
 2493                  * Note: this must occur before we associate the buffer
 2494                  * with the vp especially considering limitations in
 2495                  * the splay tree implementation when dealing with duplicate
 2496                  * lblkno's.
 2497                  */
 2498                 BO_LOCK(bo);
 2499                 if (gbincore(bo, blkno)) {
 2500                         BO_UNLOCK(bo);
 2501                         bp->b_flags |= B_INVAL;
 2502                         brelse(bp);
 2503                         goto loop;
 2504                 }
 2505 
 2506                 /*
 2507                  * Insert the buffer into the hash, so that it can
 2508                  * be found by incore.
 2509                  */
 2510                 bp->b_blkno = bp->b_lblkno = blkno;
 2511                 bp->b_offset = offset;
 2512 
 2513                 bgetvp(vp, bp);
 2514                 BO_UNLOCK(bo);
 2515 
 2516                 /*
 2517                  * set B_VMIO bit.  allocbuf() the buffer bigger.  Since the
 2518                  * buffer size starts out as 0, B_CACHE will be set by
 2519                  * allocbuf() for the VMIO case prior to it testing the
 2520                  * backing store for validity.
 2521                  */
 2522 
 2523                 if (vmio) {
 2524                         bp->b_flags |= B_VMIO;
 2525 #if defined(VFS_BIO_DEBUG)
 2526                         if (vn_canvmio(vp) != TRUE)
 2527                                 printf("getblk: VMIO on vnode type %d\n",
 2528                                         vp->v_type);
 2529 #endif
 2530                         KASSERT(vp->v_object == bp->b_bufobj->bo_object,
 2531                             ("ARGH! different b_bufobj->bo_object %p %p %p\n",
 2532                             bp, vp->v_object, bp->b_bufobj->bo_object));
 2533                 } else {
 2534                         bp->b_flags &= ~B_VMIO;
 2535                         KASSERT(bp->b_bufobj->bo_object == NULL,
 2536                             ("ARGH! has b_bufobj->bo_object %p %p\n",
 2537                             bp, bp->b_bufobj->bo_object));
 2538                 }
 2539 
 2540                 allocbuf(bp, size);
 2541                 bp->b_flags &= ~B_DONE;
 2542         }
 2543         CTR4(KTR_BUF, "getblk(%p, %ld, %d) = %p", vp, (long)blkno, size, bp);
 2544         KASSERT(BUF_REFCNT(bp) == 1, ("getblk: bp %p not locked",bp));
 2545         KASSERT(bp->b_bufobj == bo,
 2546             ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
 2547         return (bp);
 2548 }
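/*
 * Illustrative sketch, not part of the original source: the canonical
 * B_CACHE protocol described in the comment above getblk().  The real
 * packaging of this logic is bread(); the body below is only an outline
 * and elides how the read is actually issued and waited for.
 */
#if 0
        struct buf *bp;

        bp = getblk(vp, blkno, bsize, 0, 0, 0);
        if ((bp->b_flags & B_CACHE) == 0) {
                /* Not fully valid: clear B_INVAL and BIO_ERROR, issue a read. */
                bp->b_flags &= ~B_INVAL;
                bp->b_ioflags &= ~BIO_ERROR;
                bp->b_iocmd = BIO_READ;
                /* ... hand bp to the strategy layer and wait for completion ... */
        }
        /* ... use bp->b_data, then brelse()/bqrelse() as appropriate ... */
#endif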
 2549 
 2550 /*
 2551  * Get an empty, disassociated buffer of given size.  The buffer is initially
 2552  * set to B_INVAL.
 2553  */
 2554 struct buf *
 2555 geteblk(int size)
 2556 {
 2557         struct buf *bp;
 2558         int maxsize;
 2559 
 2560         maxsize = (size + BKVAMASK) & ~BKVAMASK;
 2561         while ((bp = getnewbuf(0, 0, size, maxsize)) == 0)
 2562                 continue;
 2563         allocbuf(bp, size);
 2564         bp->b_flags |= B_INVAL; /* b_dep cleared by getnewbuf() */
 2565         KASSERT(BUF_REFCNT(bp) == 1, ("geteblk: bp %p not locked",bp));
 2566         return (bp);
 2567 }
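/*
 * Illustrative sketch, not part of the original source: geteblk() hands
 * back an anonymous, vnode-less buffer.  When the caller is finished it
 * releases the still-B_INVAL buffer with brelse(), which returns it to
 * the free lists.  "len" is a placeholder size for this example.
 */
#if 0
        struct buf *bp;

        bp = geteblk(len);
        /* ... stage data through bp->b_data ... */
        brelse(bp);
#endif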
 2568 
 2569 
 2570 /*
 2571  * This code constitutes the buffer memory from either anonymous system
 2572  * memory (in the case of non-VMIO operations) or from an associated
 2573  * VM object (in the case of VMIO operations).  This code is able to
 2574  * resize a buffer up or down.
 2575  *
 2576  * Note that this code is tricky, and has many complications to resolve
 2577  * deadlock or inconsistent data situations.  Tread lightly!!!
 2578  * There are B_CACHE and B_DELWRI interactions that must be dealt with by 
 2579  * the caller.  Calling this code willy nilly can result in the loss of data.
 2580  *
 2581  * allocbuf() only adjusts B_CACHE for VMIO buffers.  getblk() deals with
 2582  * B_CACHE for the non-VMIO case.
 2583  */
 2584 
 2585 int
 2586 allocbuf(struct buf *bp, int size)
 2587 {
 2588         int newbsize, mbsize;
 2589         int i;
 2590 
 2591         if (BUF_REFCNT(bp) == 0)
 2592                 panic("allocbuf: buffer not busy");
 2593 
 2594         if (bp->b_kvasize < size)
 2595                 panic("allocbuf: buffer too small");
 2596 
 2597         if ((bp->b_flags & B_VMIO) == 0) {
 2598                 caddr_t origbuf;
 2599                 int origbufsize;
 2600                 /*
 2601                  * Just get anonymous memory from the kernel.  Don't
 2602                  * mess with B_CACHE.
 2603                  */
 2604                 mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
 2605                 if (bp->b_flags & B_MALLOC)
 2606                         newbsize = mbsize;
 2607                 else
 2608                         newbsize = round_page(size);
 2609 
 2610                 if (newbsize < bp->b_bufsize) {
 2611                         /*
 2612                          * malloced buffers are not shrunk
 2613                          */
 2614                         if (bp->b_flags & B_MALLOC) {
 2615                                 if (newbsize) {
 2616                                         bp->b_bcount = size;
 2617                                 } else {
 2618                                         free(bp->b_data, M_BIOBUF);
 2619                                         if (bp->b_bufsize) {
 2620                                                 atomic_subtract_int(
 2621                                                     &bufmallocspace,
 2622                                                     bp->b_bufsize);
 2623                                                 bufspacewakeup();
 2624                                                 bp->b_bufsize = 0;
 2625                                         }
 2626                                         bp->b_saveaddr = bp->b_kvabase;
 2627                                         bp->b_data = bp->b_saveaddr;
 2628                                         bp->b_bcount = 0;
 2629                                         bp->b_flags &= ~B_MALLOC;
 2630                                 }
 2631                                 return 1;
 2632                         }               
 2633                         vm_hold_free_pages(
 2634                             bp,
 2635                             (vm_offset_t) bp->b_data + newbsize,
 2636                             (vm_offset_t) bp->b_data + bp->b_bufsize);
 2637                 } else if (newbsize > bp->b_bufsize) {
 2638                         /*
 2639                          * We only use malloced memory on the first allocation,
 2640                          * and revert to page-allocated memory when the buffer
 2641                          * grows.
 2642                          */
 2643                         /*
 2644                          * There is a potential SMP race here that could lead
 2645                          * to bufmallocspace slightly exceeding the max.  It
 2646                          * is probably extremely rare and not worth worrying
 2647                          * over.
 2648                          */
 2649                         if ( (bufmallocspace < maxbufmallocspace) &&
 2650                                 (bp->b_bufsize == 0) &&
 2651                                 (mbsize <= PAGE_SIZE/2)) {
 2652 
 2653                                 bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
 2654                                 bp->b_bufsize = mbsize;
 2655                                 bp->b_bcount = size;
 2656                                 bp->b_flags |= B_MALLOC;
 2657                                 atomic_add_int(&bufmallocspace, mbsize);
 2658                                 return 1;
 2659                         }
 2660                         origbuf = NULL;
 2661                         origbufsize = 0;
 2662                         /*
 2663                          * If the buffer is growing on an allocation other than its first,
 2664                          * then we revert to the page-allocation scheme.
 2665                          */
 2666                         if (bp->b_flags & B_MALLOC) {
 2667                                 origbuf = bp->b_data;
 2668                                 origbufsize = bp->b_bufsize;
 2669                                 bp->b_data = bp->b_kvabase;
 2670                                 if (bp->b_bufsize) {
 2671                                         atomic_subtract_int(&bufmallocspace,
 2672                                             bp->b_bufsize);
 2673                                         bufspacewakeup();
 2674                                         bp->b_bufsize = 0;
 2675                                 }
 2676                                 bp->b_flags &= ~B_MALLOC;
 2677                                 newbsize = round_page(newbsize);
 2678                         }
 2679                         vm_hold_load_pages(
 2680                             bp,
 2681                             (vm_offset_t) bp->b_data + bp->b_bufsize,
 2682                             (vm_offset_t) bp->b_data + newbsize);
 2683                         if (origbuf) {
 2684                                 bcopy(origbuf, bp->b_data, origbufsize);
 2685                                 free(origbuf, M_BIOBUF);
 2686                         }
 2687                 }
 2688         } else {
 2689                 int desiredpages;
 2690 
 2691                 newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
 2692                 desiredpages = (size == 0) ? 0 :
 2693                         num_pages((bp->b_offset & PAGE_MASK) + newbsize);
 2694 
 2695                 if (bp->b_flags & B_MALLOC)
 2696                         panic("allocbuf: VMIO buffer can't be malloced");
 2697                 /*
 2698                  * Set B_CACHE initially if buffer is 0 length or will become
 2699                  * 0-length.
 2700                  */
 2701                 if (size == 0 || bp->b_bufsize == 0)
 2702                         bp->b_flags |= B_CACHE;
 2703 
 2704                 if (newbsize < bp->b_bufsize) {
 2705                         /*
 2706                          * DEV_BSIZE aligned new buffer size is less than the
 2707                          * DEV_BSIZE aligned existing buffer size.  Figure out
 2708                          * if we have to remove any pages.
 2709                          */
 2710                         if (desiredpages < bp->b_npages) {
 2711                                 vm_page_t m;
 2712 
 2713                                 VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
 2714                                 vm_page_lock_queues();
 2715                                 for (i = desiredpages; i < bp->b_npages; i++) {
 2716                                         /*
 2717                                          * the page is not freed here -- it
 2718                                          * is the responsibility of 
 2719                                          * vnode_pager_setsize
 2720                                          */
 2721                                         m = bp->b_pages[i];
 2722                                         KASSERT(m != bogus_page,
 2723                                             ("allocbuf: bogus page found"));
 2724                                         while (vm_page_sleep_if_busy(m, TRUE, "biodep"))
 2725                                                 vm_page_lock_queues();
 2726 
 2727                                         bp->b_pages[i] = NULL;
 2728                                         vm_page_unwire(m, 0);
 2729                                 }
 2730                                 vm_page_unlock_queues();
 2731                                 VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
 2732                                 pmap_qremove((vm_offset_t) trunc_page((vm_offset_t)bp->b_data) +
 2733                                     (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
 2734                                 bp->b_npages = desiredpages;
 2735                         }
 2736                 } else if (size > bp->b_bcount) {
 2737                         /*
 2738                          * We are growing the buffer, possibly in a 
 2739                          * byte-granular fashion.
 2740                          */
 2741                         struct vnode *vp;
 2742                         vm_object_t obj;
 2743                         vm_offset_t toff;
 2744                         vm_offset_t tinc;
 2745 
 2746                         /*
 2747                          * Step 1, bring in the VM pages from the object, 
 2748                          * allocating them if necessary.  We must clear
 2749                          * B_CACHE if these pages are not valid for the 
 2750                          * range covered by the buffer.
 2751                          */
 2752 
 2753                         vp = bp->b_vp;
 2754                         obj = bp->b_bufobj->bo_object;
 2755 
 2756                         VM_OBJECT_LOCK(obj);
 2757                         while (bp->b_npages < desiredpages) {
 2758                                 vm_page_t m;
 2759                                 vm_pindex_t pi;
 2760 
 2761                                 pi = OFF_TO_IDX(bp->b_offset) + bp->b_npages;
 2762                                 if ((m = vm_page_lookup(obj, pi)) == NULL) {
 2763                                         /*
 2764                                          * note: must allocate system pages
 2765                                          * since blocking here could interfere
 2766                                          * with paging I/O, no matter which
 2767                                          * process we are.
 2768                                          */
 2769                                         m = vm_page_alloc(obj, pi,
 2770                                             VM_ALLOC_NOBUSY | VM_ALLOC_SYSTEM |
 2771                                             VM_ALLOC_WIRED);
 2772                                         if (m == NULL) {
 2773                                                 atomic_add_int(&vm_pageout_deficit,
 2774                                                     desiredpages - bp->b_npages);
 2775                                                 VM_OBJECT_UNLOCK(obj);
 2776                                                 VM_WAIT;
 2777                                                 VM_OBJECT_LOCK(obj);
 2778                                         } else {
 2779                                                 bp->b_flags &= ~B_CACHE;
 2780                                                 bp->b_pages[bp->b_npages] = m;
 2781                                                 ++bp->b_npages;
 2782                                         }
 2783                                         continue;
 2784                                 }
 2785 
 2786                                 /*
 2787                                  * We found a page.  If we have to sleep on it,
 2788                                  * retry because it might have gotten freed out
 2789                                  * from under us.
 2790                                  *
 2791                                  * We can only test PG_BUSY here.  Blocking on
 2792                                  * m->busy might lead to a deadlock:
 2793                                  *
 2794                                  *  vm_fault->getpages->cluster_read->allocbuf
 2795                                  *
 2796                                  */
 2797                                 vm_page_lock_queues();
 2798                                 if (vm_page_sleep_if_busy(m, FALSE, "pgtblk"))
 2799                                         continue;
 2800 
 2801                                 /*
 2802                                  * We have a good page.  Should we wakeup the
 2803                                  * page daemon?
 2804                                  */
 2805                                 if ((curproc != pageproc) &&
 2806                                     ((m->queue - m->pc) == PQ_CACHE) &&
 2807                                     ((cnt.v_free_count + cnt.v_cache_count) <
 2808                                         (cnt.v_free_min + cnt.v_cache_min))) {
 2809                                         pagedaemon_wakeup();
 2810                                 }
 2811                                 vm_page_wire(m);
 2812                                 vm_page_unlock_queues();
 2813                                 bp->b_pages[bp->b_npages] = m;
 2814                                 ++bp->b_npages;
 2815                         }
 2816 
 2817                         /*
 2818                          * Step 2.  We've loaded the pages into the buffer,
 2819                          * we have to figure out if we can still have B_CACHE
 2820                          * set.  Note that B_CACHE is set according to the
 2821                          * byte-granular range ( bcount and size ), not the
 2822                          * aligned range ( newbsize ).
 2823                          *
 2824                          * The VM test is against m->valid, which is DEV_BSIZE
 2825                          * aligned.  Needless to say, the validity of the data
 2826                          * needs to also be DEV_BSIZE aligned.  Note that this
 2827                          * fails with NFS if the server or some other client
 2828                          * extends the file's EOF.  If our buffer is resized, 
 2829                          * B_CACHE may remain set! XXX
 2830                          */
 2831 
 2832                         toff = bp->b_bcount;
 2833                         tinc = PAGE_SIZE - ((bp->b_offset + toff) & PAGE_MASK);
 2834 
 2835                         while ((bp->b_flags & B_CACHE) && toff < size) {
 2836                                 vm_pindex_t pi;
 2837 
 2838                                 if (tinc > (size - toff))
 2839                                         tinc = size - toff;
 2840 
 2841                                 pi = ((bp->b_offset & PAGE_MASK) + toff) >> 
 2842                                     PAGE_SHIFT;
 2843 
 2844                                 vfs_buf_test_cache(
 2845                                     bp, 
 2846                                     bp->b_offset,
 2847                                     toff, 
 2848                                     tinc, 
 2849                                     bp->b_pages[pi]
 2850                                 );
 2851                                 toff += tinc;
 2852                                 tinc = PAGE_SIZE;
 2853                         }
 2854                         VM_OBJECT_UNLOCK(obj);
 2855 
 2856                         /*
 2857                          * Step 3, fixup the KVM pmap.  Remember that
 2858                          * bp->b_data is relative to bp->b_offset, but 
 2859                          * bp->b_offset may be offset into the first page.
 2860                          */
 2861 
 2862                         bp->b_data = (caddr_t)
 2863                             trunc_page((vm_offset_t)bp->b_data);
 2864                         pmap_qenter(
 2865                             (vm_offset_t)bp->b_data,
 2866                             bp->b_pages, 
 2867                             bp->b_npages
 2868                         );
 2869                         
 2870                         bp->b_data = (caddr_t)((vm_offset_t)bp->b_data | 
 2871                             (vm_offset_t)(bp->b_offset & PAGE_MASK));
 2872                 }
 2873         }
 2874         if (newbsize < bp->b_bufsize)
 2875                 bufspacewakeup();
 2876         bp->b_bufsize = newbsize;       /* actual buffer allocation     */
 2877         bp->b_bcount = size;            /* requested buffer size        */
 2878         return 1;
 2879 }
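
      /*
       * Usage sketch (not part of the original file): a minimal example of
       * how a caller might resize a locked buffer.  "example_resize_buf" is a
       * hypothetical helper; getblk() is the usual entry point and returns
       * the buffer locked with enough KVA for the initial size.
       */
      #if 0
      static void
      example_resize_buf(struct vnode *vp, daddr_t blkno)
      {
              struct buf *bp;

              bp = getblk(vp, blkno, 8192, 0, 0, 0);  /* locked, 8K of KVA */
              allocbuf(bp, 4096);     /* shrink: excess pages are released */
              allocbuf(bp, 8192);     /* grow back, still within b_kvasize */
              brelse(bp);             /* unlock and release */
      }
      #endif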
 2880 
 2881 void
 2882 biodone(struct bio *bp)
 2883 {
 2884         void (*done)(struct bio *);
 2885 
 2886         mtx_lock(&bdonelock);
 2887         bp->bio_flags |= BIO_DONE;
 2888         done = bp->bio_done;
 2889         if (done == NULL)
 2890                 wakeup(bp);
 2891         mtx_unlock(&bdonelock);
 2892         if (done != NULL)
 2893                 done(bp);
 2894 }
 2895 
 2896 /*
 2897  * Wait for a BIO to finish.
 2898  *
 2899  * XXX: resort to a timeout for now.  The optimal locking (if any) for this
 2900  * case is not yet clear.
 2901  */
 2902 int
 2903 biowait(struct bio *bp, const char *wchan)
 2904 {
 2905 
 2906         mtx_lock(&bdonelock);
 2907         while ((bp->bio_flags & BIO_DONE) == 0)
 2908                 msleep(bp, &bdonelock, PRIBIO, wchan, hz / 10);
 2909         mtx_unlock(&bdonelock);
 2910         if (bp->bio_error != 0)
 2911                 return (bp->bio_error);
 2912         if (!(bp->bio_flags & BIO_ERROR))
 2913                 return (0);
 2914         return (EIO);
 2915 }
 2916 
 2917 void
 2918 biofinish(struct bio *bp, struct devstat *stat, int error)
 2919 {
 2920         
 2921         if (error) {
 2922                 bp->bio_error = error;
 2923                 bp->bio_flags |= BIO_ERROR;
 2924         }
 2925         if (stat != NULL)
 2926                 devstat_end_transaction_bio(stat, bp);
 2927         biodone(bp);
 2928 }
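
      /*
       * Completion-protocol sketch (not part of the original file): a
       * hypothetical consumer builds a bio, hands it to a driver, and waits
       * in biowait().  Leaving bio_done NULL makes biodone() issue the
       * wakeup() that biowait() sleeps on; the driver ends the request with
       * biodone() or biofinish().  "csw", "dev" and "data" are assumed inputs.
       */
      #if 0
      static int
      example_sync_bio(struct cdevsw *csw, struct cdev *dev, void *data)
      {
              struct bio *bip;
              int error;

              bip = g_new_bio();
              if (bip == NULL)
                      return (ENOMEM);        /* see the retry loop in dev_strategy() */
              bip->bio_cmd = BIO_READ;
              bip->bio_offset = 0;
              bip->bio_length = DEV_BSIZE;
              bip->bio_data = data;
              bip->bio_done = NULL;           /* NULL: biodone() will wakeup() */
              bip->bio_dev = dev;
              (*csw->d_strategy)(bip);        /* driver later calls biodone() */
              error = biowait(bip, "exbio");
              g_destroy_bio(bip);
              return (error);
      }
      #endif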
 2929 
 2930 /*
 2931  *      bufwait:
 2932  *
 2933  *      Wait for buffer I/O completion, returning error status.  The buffer
 2934  *      is left locked and B_DONE on return.  B_EINTR is converted into an EINTR
 2935  *      error and cleared.
 2936  */
 2937 int
 2938 bufwait(struct buf *bp)
 2939 {
 2940         if (bp->b_iocmd == BIO_READ)
 2941                 bwait(bp, PRIBIO, "biord");
 2942         else
 2943                 bwait(bp, PRIBIO, "biowr");
 2944         if (bp->b_flags & B_EINTR) {
 2945                 bp->b_flags &= ~B_EINTR;
 2946                 return (EINTR);
 2947         }
 2948         if (bp->b_ioflags & BIO_ERROR) {
 2949                 return (bp->b_error ? bp->b_error : EIO);
 2950         } else {
 2951                 return (0);
 2952         }
 2953 }
 2954 
 2955  /*
 2956   * Callback function from struct bio back up to struct buf.
 2957   */
 2958 static void
 2959 bufdonebio(struct bio *bip)
 2960 {
 2961         struct buf *bp;
 2962 
 2963         bp = bip->bio_caller2;
 2964         bp->b_resid = bp->b_bcount - bip->bio_completed;
 2965         bp->b_resid = bip->bio_resid;   /* XXX: remove */
 2966         bp->b_ioflags = bip->bio_flags;
 2967         bp->b_error = bip->bio_error;
 2968         if (bp->b_error)
 2969                 bp->b_ioflags |= BIO_ERROR;
 2970         bufdone(bp);
 2971         g_destroy_bio(bip);
 2972 }
 2973 
 2974 void
 2975 dev_strategy(struct cdev *dev, struct buf *bp)
 2976 {
 2977         struct cdevsw *csw;
 2978         struct bio *bip;
 2979 
 2980         if ((!bp->b_iocmd) || (bp->b_iocmd & (bp->b_iocmd - 1)))
 2981                 panic("b_iocmd botch");
 2982         for (;;) {
 2983                 bip = g_new_bio();
 2984                 if (bip != NULL)
 2985                         break;
 2986                 /* Try again later */
 2987                 tsleep(&bp, PRIBIO, "dev_strat", hz/10);
 2988         }
 2989         bip->bio_cmd = bp->b_iocmd;
 2990         bip->bio_offset = bp->b_iooffset;
 2991         bip->bio_length = bp->b_bcount;
 2992         bip->bio_bcount = bp->b_bcount; /* XXX: remove */
 2993         bip->bio_data = bp->b_data;
 2994         bip->bio_done = bufdonebio;
 2995         bip->bio_caller2 = bp;
 2996         bip->bio_dev = dev;
 2997         KASSERT(dev->si_refcount > 0,
 2998             ("dev_strategy on un-referenced struct cdev *(%s)",
 2999             devtoname(dev)));
 3000         csw = dev_refthread(dev);
 3001         if (csw == NULL) {
 3002                 bp->b_error = ENXIO;
 3003                 bp->b_ioflags = BIO_ERROR;
 3004                 bufdone(bp);
 3005                 return;
 3006         }
 3007         (*csw->d_strategy)(bip);
 3008         dev_relthread(dev);
 3009 }
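
      /*
       * Dispatch sketch (not part of the original file): a hypothetical
       * caller handing a locked, busy buffer straight to a character device.
       * dev_strategy() translates the buf into a bio, and bufdonebio() routes
       * the completion back through bufdone(); "dev" must already be
       * referenced (si_refcount > 0), as the KASSERT above requires.
       */
      #if 0
      static int
      example_raw_read(struct cdev *dev, struct buf *bp)
      {

              bp->b_iocmd = BIO_READ;
              bp->b_iooffset = dbtob(bp->b_blkno);    /* byte offset on the device */
              dev_strategy(dev, bp);          /* builds the bio, calls d_strategy */
              return (bufwait(bp));           /* bufdonebio() -> bufdone() wakes us */
      }
      #endif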
 3010 
 3011 /*
 3012  *      bufdone:
 3013  *
 3014  *      Finish I/O on a buffer, optionally calling a completion function.
 3015  *      This is usually called from an interrupt so process blocking is
 3016  *      not allowed.
 3017  *
 3018  *      biodone is also responsible for setting B_CACHE in a B_VMIO bp.
 3019  *      In a non-VMIO bp, B_CACHE will be set on the next getblk() 
 3020  *      assuming B_INVAL is clear.
 3021  *
 3022  *      For the VMIO case, we set B_CACHE if the op was a read and no
 3023  *      read error occurred, or if the op was a write.  B_CACHE is never
 3024  *      set if the buffer is invalid or otherwise uncacheable.
 3025  *
 3026  *      biodone does not mess with B_INVAL, allowing the I/O routine or the
 3027  *      initiator to leave B_INVAL set to brelse the buffer out of existence
 3028  *      in the biodone routine.
 3029  */
 3030 void
 3031 bufdone(struct buf *bp)
 3032 {
 3033         struct bufobj *dropobj;
 3034         void    (*biodone)(struct buf *);
 3035 
 3036 
 3037         CTR3(KTR_BUF, "bufdone(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
 3038         dropobj = NULL;
 3039 
 3040         KASSERT(BUF_REFCNT(bp) > 0, ("biodone: bp %p not busy %d", bp, BUF_REFCNT(bp)));
 3041         KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp));
 3042 
 3043         runningbufwakeup(bp);
 3044         if (bp->b_iocmd == BIO_WRITE)
 3045                 dropobj = bp->b_bufobj;
 3046         /* call optional completion function if requested */
 3047         if (bp->b_iodone != NULL) {
 3048                 biodone = bp->b_iodone;
 3049                 bp->b_iodone = NULL;
 3050                 (*biodone) (bp);
 3051                 if (dropobj)
 3052                         bufobj_wdrop(dropobj);
 3053                 return;
 3054         }
 3055         if (LIST_FIRST(&bp->b_dep) != NULL)
 3056                 buf_complete(bp);
 3057 
 3058         if (bp->b_flags & B_VMIO) {
 3059                 int i;
 3060                 vm_ooffset_t foff;
 3061                 vm_page_t m;
 3062                 vm_object_t obj;
 3063                 int iosize;
 3064                 struct vnode *vp = bp->b_vp;
 3065 
 3066                 obj = bp->b_bufobj->bo_object;
 3067 
 3068 #if defined(VFS_BIO_DEBUG)
 3069                 mp_fixme("usecount and vflag accessed without locks.");
 3070                 if (vp->v_usecount == 0) {
 3071                         panic("biodone: zero vnode ref count");
 3072                 }
 3073 
 3074                 KASSERT(vp->v_object != NULL,
 3075                         ("biodone: vnode %p has no vm_object", vp));
 3076 #endif
 3077 
 3078                 foff = bp->b_offset;
 3079                 KASSERT(bp->b_offset != NOOFFSET,
 3080                     ("biodone: no buffer offset"));
 3081 
 3082                 VM_OBJECT_LOCK(obj);
 3083 #if defined(VFS_BIO_DEBUG)
 3084                 if (obj->paging_in_progress < bp->b_npages) {
 3085                         printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
 3086                             obj->paging_in_progress, bp->b_npages);
 3087                 }
 3088 #endif
 3089 
 3090                 /*
 3091                  * Set B_CACHE if the op was a normal read and no error
 3092                  * occurred.  B_CACHE is set for writes in the b*write()
 3093                  * routines.
 3094                  */
 3095                 iosize = bp->b_bcount - bp->b_resid;
 3096                 if (bp->b_iocmd == BIO_READ &&
 3097                     !(bp->b_flags & (B_INVAL|B_NOCACHE)) &&
 3098                     !(bp->b_ioflags & BIO_ERROR)) {
 3099                         bp->b_flags |= B_CACHE;
 3100                 }
 3101                 vm_page_lock_queues();
 3102                 for (i = 0; i < bp->b_npages; i++) {
 3103                         int bogusflag = 0;
 3104                         int resid;
 3105 
 3106                         resid = ((foff + PAGE_SIZE) & ~(off_t)PAGE_MASK) - foff;
 3107                         if (resid > iosize)
 3108                                 resid = iosize;
 3109 
 3110                         /*
 3111                          * cleanup bogus pages, restoring the originals
 3112                          */
 3113                         m = bp->b_pages[i];
 3114                         if (m == bogus_page) {
 3115                                 bogusflag = 1;
 3116                                 m = vm_page_lookup(obj, OFF_TO_IDX(foff));
 3117                                 if (m == NULL)
 3118                                         panic("biodone: page disappeared!");
 3119                                 bp->b_pages[i] = m;
 3120                                 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
 3121                         }
 3122 #if defined(VFS_BIO_DEBUG)
 3123                         if (OFF_TO_IDX(foff) != m->pindex) {
 3124                                 printf(
 3125 "biodone: foff(%jd)/m->pindex(%ju) mismatch\n",
 3126                                     (intmax_t)foff, (uintmax_t)m->pindex);
 3127                         }
 3128 #endif
 3129 
 3130                         /*
 3131                          * In the write case, the valid and clean bits are
 3132                          * already changed correctly ( see bdwrite() ), so we 
 3133                          * only need to do this here in the read case.
 3134                          */
 3135                         if ((bp->b_iocmd == BIO_READ) && !bogusflag && resid > 0) {
 3136                                 vfs_page_set_valid(bp, foff, i, m);
 3137                         }
 3138 
 3139                         /*
 3140                          * When debugging new filesystems or buffer I/O methods, this
 3141                          * is the most common error that pops up.  If you see this, you
 3142                          * have not set the page busy flag correctly!!!
 3143                          */
 3144                         if (m->busy == 0) {
 3145                                 printf("biodone: page busy < 0, "
 3146                                     "pindex: %d, foff: 0x(%x,%x), "
 3147                                     "resid: %d, index: %d\n",
 3148                                     (int) m->pindex, (int)(foff >> 32),
 3149                                                 (int) foff & 0xffffffff, resid, i);
 3150                                 if (!vn_isdisk(vp, NULL))
 3151                                         printf(" iosize: %jd, lblkno: %jd, flags: 0x%x, npages: %d\n",
 3152                                             (intmax_t)bp->b_vp->v_mount->mnt_stat.f_iosize,
 3153                                             (intmax_t) bp->b_lblkno,
 3154                                             bp->b_flags, bp->b_npages);
 3155                                 else
 3156                                         printf(" VDEV, lblkno: %jd, flags: 0x%x, npages: %d\n",
 3157                                             (intmax_t) bp->b_lblkno,
 3158                                             bp->b_flags, bp->b_npages);
 3159                                 printf(" valid: 0x%lx, dirty: 0x%lx, wired: %d\n",
 3160                                     (u_long)m->valid, (u_long)m->dirty,
 3161                                     m->wire_count);
 3162                                 panic("biodone: page busy < 0\n");
 3163                         }
 3164                         vm_page_io_finish(m);
 3165                         vm_object_pip_subtract(obj, 1);
 3166                         foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
 3167                         iosize -= resid;
 3168                 }
 3169                 vm_page_unlock_queues();
 3170                 vm_object_pip_wakeupn(obj, 0);
 3171                 VM_OBJECT_UNLOCK(obj);
 3172         }
 3173 
 3174         /*
 3175          * For asynchronous completions, release the buffer now. The brelse
 3176          * will do a wakeup there if necessary - so no need to do a wakeup
 3177          * here in the async case. The sync case always needs to do a wakeup.
 3178          */
 3179 
 3180         if (bp->b_flags & B_ASYNC) {
 3181                 if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_RELBUF)) || (bp->b_ioflags & BIO_ERROR))
 3182                         brelse(bp);
 3183                 else
 3184                         bqrelse(bp);
 3185         } else
 3186                 bdone(bp);
 3187         if (dropobj)
 3188                 bufobj_wdrop(dropobj);
 3189 }
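
      /*
       * Callback sketch (not part of the original file): if a caller installs
       * a b_iodone handler before starting the I/O, bufdone() invokes it and
       * returns immediately, so the handler becomes responsible for releasing
       * the buffer.  "example_iodone" and "example_start_async" are
       * hypothetical names.
       */
      #if 0
      static void
      example_iodone(struct buf *bp)
      {

              if (bp->b_ioflags & BIO_ERROR)
                      bp->b_flags |= B_INVAL;         /* throw the data away */
              brelse(bp);                             /* handler owns the buffer */
      }

      static void
      example_start_async(struct buf *bp)
      {

              bp->b_iodone = example_iodone;          /* consumed by bufdone() */
              bstrategy(bp);          /* completion later calls example_iodone() */
      }
      #endif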
 3190 
 3191 /*
 3192  * This routine is called in lieu of iodone in the case of
 3193  * incomplete I/O.  This keeps the busy status for pages
 3194  * consistent.
 3195  */
 3196 void
 3197 vfs_unbusy_pages(struct buf *bp)
 3198 {
 3199         int i;
 3200         vm_object_t obj;
 3201         vm_page_t m;
 3202 
 3203         runningbufwakeup(bp);
 3204         if (!(bp->b_flags & B_VMIO))
 3205                 return;
 3206 
 3207         obj = bp->b_bufobj->bo_object;
 3208         VM_OBJECT_LOCK(obj);
 3209         vm_page_lock_queues();
 3210         for (i = 0; i < bp->b_npages; i++) {
 3211                 m = bp->b_pages[i];
 3212                 if (m == bogus_page) {
 3213                         m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i);
 3214                         if (!m)
 3215                                 panic("vfs_unbusy_pages: page missing\n");
 3216                         bp->b_pages[i] = m;
 3217                         pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
 3218                             bp->b_pages, bp->b_npages);
 3219                 }
 3220                 vm_object_pip_subtract(obj, 1);
 3221                 vm_page_io_finish(m);
 3222         }
 3223         vm_page_unlock_queues();
 3224         vm_object_pip_wakeupn(obj, 0);
 3225         VM_OBJECT_UNLOCK(obj);
 3226 }
 3227 
 3228 /*
 3229  * vfs_page_set_valid:
 3230  *
 3231  *      Set the valid bits in a page based on the supplied offset.   The
 3232  *      range is restricted to the buffer's size.
 3233  *
 3234  *      This routine is typically called after a read completes.
 3235  */
 3236 static void
 3237 vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m)
 3238 {
 3239         vm_ooffset_t soff, eoff;
 3240 
 3241         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 3242         /*
 3243          * Start and end offsets in buffer.  eoff - soff may not cross a
 3244  *      page boundary or cross the end of the buffer.  The end of the
 3245          * buffer, in this case, is our file EOF, not the allocation size
 3246          * of the buffer.
 3247          */
 3248         soff = off;
 3249         eoff = (off + PAGE_SIZE) & ~(off_t)PAGE_MASK;
 3250         if (eoff > bp->b_offset + bp->b_bcount)
 3251                 eoff = bp->b_offset + bp->b_bcount;
 3252 
 3253         /*
 3254          * Set valid range.  This is typically the entire buffer and thus the
 3255          * entire page.
 3256          */
 3257         if (eoff > soff) {
 3258                 vm_page_set_validclean(
 3259                     m,
 3260                    (vm_offset_t) (soff & PAGE_MASK),
 3261                    (vm_offset_t) (eoff - soff)
 3262                 );
 3263         }
 3264 }
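
      /*
       * Worked example (not part of the original file), assuming PAGE_SIZE is
       * 4096: for a buffer with b_offset 1024 and b_bcount 4096 the valid
       * range spans [1024, 5120).  A call for the first page with off = 1024
       * gives soff = 1024 and eoff = 4096, validating bytes 1024-4095 of that
       * page; a call for the second page with off = 4096 gives eoff = 8192,
       * which is clamped to 5120, validating bytes 0-1023 of the next page.
       */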
 3265 
 3266 /*
 3267  * This routine is called before a device strategy routine.
 3268  * It is used to tell the VM system that paging I/O is in
 3269  * progress, and treat the pages associated with the buffer
 3270  * almost as being PG_BUSY.  Also the object paging_in_progress
 3271  * flag is handled to make sure that the object doesn't become
 3272  * inconsistant.
 3273  *
 3274  * Since I/O has not been initiated yet, certain buffer flags
 3275  * such as BIO_ERROR or B_INVAL may be in an inconsistent state
 3276  * and should be ignored.
 3277  */
 3278 void
 3279 vfs_busy_pages(struct buf *bp, int clear_modify)
 3280 {
 3281         int i, bogus;
 3282         vm_object_t obj;
 3283         vm_ooffset_t foff;
 3284         vm_page_t m;
 3285 
 3286         if (!(bp->b_flags & B_VMIO))
 3287                 return;
 3288 
 3289         obj = bp->b_bufobj->bo_object;
 3290         foff = bp->b_offset;
 3291         KASSERT(bp->b_offset != NOOFFSET,
 3292             ("vfs_busy_pages: no buffer offset"));
 3293         vfs_setdirty(bp);
 3294         VM_OBJECT_LOCK(obj);
 3295 retry:
 3296         vm_page_lock_queues();
 3297         for (i = 0; i < bp->b_npages; i++) {
 3298                 m = bp->b_pages[i];
 3299 
 3300                 if (vm_page_sleep_if_busy(m, FALSE, "vbpage"))
 3301                         goto retry;
 3302         }
 3303         bogus = 0;
 3304         for (i = 0; i < bp->b_npages; i++) {
 3305                 m = bp->b_pages[i];
 3306 
 3307                 if ((bp->b_flags & B_CLUSTER) == 0) {
 3308                         vm_object_pip_add(obj, 1);
 3309                         vm_page_io_start(m);
 3310                 }
 3311                 /*
 3312                  * When readying a buffer for a read ( i.e.
 3313                  * clear_modify == 0 ), it is important to do
 3314                  * bogus_page replacement for valid pages in 
 3315                  * partially instantiated buffers.  Partially 
 3316                  * instantiated buffers can, in turn, occur when
 3317                  * reconstituting a buffer from its VM backing store
 3318                  * base.  We only have to do this if B_CACHE is
 3319                  * clear ( which causes the I/O to occur in the
 3320                  * first place ).  The replacement prevents the read
 3321                  * I/O from overwriting potentially dirty VM-backed
 3322                  * pages.  XXX bogus page replacement is, uh, bogus.
 3323                  * It may not work properly with small-block devices.
 3324                  * We need to find a better way.
 3325                  */
 3326                 pmap_remove_all(m);
 3327                 if (clear_modify)
 3328                         vfs_page_set_valid(bp, foff, i, m);
 3329                 else if (m->valid == VM_PAGE_BITS_ALL &&
 3330                     (bp->b_flags & B_CACHE) == 0) {
 3331                         bp->b_pages[i] = bogus_page;
 3332                         bogus++;
 3333                 }
 3334                 foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
 3335         }
 3336         vm_page_unlock_queues();
 3337         VM_OBJECT_UNLOCK(obj);
 3338         if (bogus)
 3339                 pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
 3340                     bp->b_pages, bp->b_npages);
 3341 }
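
      /*
       * Pairing sketch (not part of the original file): a hypothetical
       * strategy path busies the VM pages before handing the buffer to the
       * driver.  On normal completion bufdone() finishes the pages; if the
       * I/O never starts, vfs_unbusy_pages() is used in lieu of bufdone(),
       * as described above.  "device_gone" is a made-up condition.
       */
      #if 0
      static void
      example_start_read(struct buf *bp, int device_gone)
      {

              bp->b_iocmd = BIO_READ;
              vfs_busy_pages(bp, 0);          /* 0 == read: may install bogus_page */
              if (device_gone) {
                      vfs_unbusy_pages(bp);   /* back out; page state stays consistent */
                      bp->b_flags |= B_INVAL;
                      brelse(bp);
              } else
                      bstrategy(bp);          /* bufdone() finishes the pages */
      }
      #endif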
 3342 
 3343 /*
 3344  * Tell the VM system that the pages associated with this buffer
 3345  * are clean.  This is used for delayed writes where the data is
 3346  * going to go to disk eventually without additional VM intervention.
 3347  *
 3348  * Note that while we only really need to clean through to b_bcount, we
 3349  * just go ahead and clean through to b_bufsize.
 3350  */
 3351 static void
 3352 vfs_clean_pages(struct buf *bp)
 3353 {
 3354         int i;
 3355         vm_ooffset_t foff, noff, eoff;
 3356         vm_page_t m;
 3357 
 3358         if (!(bp->b_flags & B_VMIO))
 3359                 return;
 3360 
 3361         foff = bp->b_offset;
 3362         KASSERT(bp->b_offset != NOOFFSET,
 3363             ("vfs_clean_pages: no buffer offset"));
 3364         VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
 3365         vm_page_lock_queues();
 3366         for (i = 0; i < bp->b_npages; i++) {
 3367                 m = bp->b_pages[i];
 3368                 noff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
 3369                 eoff = noff;
 3370 
 3371                 if (eoff > bp->b_offset + bp->b_bufsize)
 3372                         eoff = bp->b_offset + bp->b_bufsize;
 3373                 vfs_page_set_valid(bp, foff, i, m);
 3374                 /* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */
 3375                 foff = noff;
 3376         }
 3377         vm_page_unlock_queues();
 3378         VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
 3379 }
 3380 
 3381 /*
 3382  *      vfs_bio_set_validclean:
 3383  *
 3384  *      Set the range within the buffer to valid and clean.  The range is 
 3385  *      relative to the beginning of the buffer, b_offset.  Note that b_offset
 3386  *      itself may be offset from the beginning of the first page.
 3387  *
 3388  */
 3389 
 3390 void   
 3391 vfs_bio_set_validclean(struct buf *bp, int base, int size)
 3392 {
 3393         int i, n;
 3394         vm_page_t m;
 3395 
 3396         if (!(bp->b_flags & B_VMIO))
 3397                 return;
 3398         /*
 3399          * Fixup base to be relative to beginning of first page.
 3400          * Set initial n to be the maximum number of bytes in the
 3401          * first page that can be validated.
 3402          */
 3403 
 3404         base += (bp->b_offset & PAGE_MASK);
 3405         n = PAGE_SIZE - (base & PAGE_MASK);
 3406 
 3407         VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
 3408         vm_page_lock_queues();
 3409         for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
 3410                 m = bp->b_pages[i];
 3411                 if (n > size)
 3412                         n = size;
 3413                 vm_page_set_validclean(m, base & PAGE_MASK, n);
 3414                 base += n;
 3415                 size -= n;
 3416                 n = PAGE_SIZE;
 3417         }
 3418         vm_page_unlock_queues();
 3419         VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
 3420 }
 3421 
 3422 /*
 3423  *      vfs_bio_clrbuf:
 3424  *
 3425  *      clear a buffer.  This routine essentially fakes an I/O, so we need
 3426  *      to clear BIO_ERROR and B_INVAL.
 3427  *
 3428  *      Note that while we only theoretically need to clear through b_bcount,
 3429  *      we go ahead and clear through b_bufsize.
 3430  */
 3431 
 3432 void
 3433 vfs_bio_clrbuf(struct buf *bp) 
 3434 {
 3435         int i, j, mask = 0;
 3436         caddr_t sa, ea;
 3437 
 3438         if ((bp->b_flags & (B_VMIO | B_MALLOC)) != B_VMIO) {
 3439                 clrbuf(bp);
 3440                 return;
 3441         }
 3442 
 3443         bp->b_flags &= ~B_INVAL;
 3444         bp->b_ioflags &= ~BIO_ERROR;
 3445         VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
 3446         if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
 3447             (bp->b_offset & PAGE_MASK) == 0) {
 3448                 if (bp->b_pages[0] == bogus_page)
 3449                         goto unlock;
 3450                 mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
 3451                 VM_OBJECT_LOCK_ASSERT(bp->b_pages[0]->object, MA_OWNED);
 3452                 if ((bp->b_pages[0]->valid & mask) == mask)
 3453                         goto unlock;
 3454                 if (((bp->b_pages[0]->flags & PG_ZERO) == 0) &&
 3455                     ((bp->b_pages[0]->valid & mask) == 0)) {
 3456                         bzero(bp->b_data, bp->b_bufsize);
 3457                         bp->b_pages[0]->valid |= mask;
 3458                         goto unlock;
 3459                 }
 3460         }
 3461         ea = sa = bp->b_data;
 3462         for(i = 0; i < bp->b_npages; i++, sa = ea) {
 3463                 ea = (caddr_t)trunc_page((vm_offset_t)sa + PAGE_SIZE);
 3464                 ea = (caddr_t)(vm_offset_t)ulmin(
 3465                     (u_long)(vm_offset_t)ea,
 3466                     (u_long)(vm_offset_t)bp->b_data + bp->b_bufsize);
 3467                 if (bp->b_pages[i] == bogus_page)
 3468                         continue;
 3469                 j = ((vm_offset_t)sa & PAGE_MASK) / DEV_BSIZE;
 3470                 mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j;
 3471                 VM_OBJECT_LOCK_ASSERT(bp->b_pages[i]->object, MA_OWNED);
 3472                 if ((bp->b_pages[i]->valid & mask) == mask)
 3473                         continue;
 3474                 if ((bp->b_pages[i]->valid & mask) == 0) {
 3475                         if ((bp->b_pages[i]->flags & PG_ZERO) == 0)
 3476                                 bzero(sa, ea - sa);
 3477                 } else {
 3478                         for (; sa < ea; sa += DEV_BSIZE, j++) {
 3479                                 if (((bp->b_pages[i]->flags & PG_ZERO) == 0) &&
 3480                                     (bp->b_pages[i]->valid & (1 << j)) == 0)
 3481                                         bzero(sa, DEV_BSIZE);
 3482                         }
 3483                 }
 3484                 bp->b_pages[i]->valid |= mask;
 3485         }
 3486 unlock:
 3487         VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
 3488         bp->b_resid = 0;
 3489 }
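
      /*
       * Usage sketch (not part of the original file): a hypothetical
       * filesystem zeroing a freshly allocated block instead of reading it
       * from disk; vfs_bio_clrbuf() only clears the DEV_BSIZE chunks whose
       * pages are not already valid.  "lbn" and "bsize" are assumed inputs.
       */
      #if 0
      static void
      example_new_block(struct vnode *vp, daddr_t lbn, int bsize)
      {
              struct buf *bp;

              bp = getblk(vp, lbn, bsize, 0, 0, 0);   /* no bread(): block is new */
              vfs_bio_clrbuf(bp);                     /* fake a read of zeroes */
              bdwrite(bp);                            /* delayed write of the zeroes */
      }
      #endif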
 3490 
 3491 /*
 3492  * vm_hold_load_pages and vm_hold_free_pages get pages into
 3493  * a buffer's address space.  The pages are anonymous and are
 3494  * not associated with a file object.
 3495  */
 3496 static void
 3497 vm_hold_load_pages(struct buf *bp, vm_offset_t from, vm_offset_t to)
 3498 {
 3499         vm_offset_t pg;
 3500         vm_page_t p;
 3501         int index;
 3502 
 3503         to = round_page(to);
 3504         from = round_page(from);
 3505         index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
 3506 
 3507         VM_OBJECT_LOCK(kernel_object);
 3508         for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
 3509 tryagain:
 3510                 /*
 3511                  * note: must allocate system pages since blocking here
 3512                  * could interfere with paging I/O, no matter which
 3513                  * process we are.
 3514                  */
 3515                 p = vm_page_alloc(kernel_object,
 3516                         ((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
 3517                     VM_ALLOC_NOBUSY | VM_ALLOC_SYSTEM | VM_ALLOC_WIRED);
 3518                 if (!p) {
 3519                         atomic_add_int(&vm_pageout_deficit,
 3520                             (to - pg) >> PAGE_SHIFT);
 3521                         VM_OBJECT_UNLOCK(kernel_object);
 3522                         VM_WAIT;
 3523                         VM_OBJECT_LOCK(kernel_object);
 3524                         goto tryagain;
 3525                 }
 3526                 p->valid = VM_PAGE_BITS_ALL;
 3527                 pmap_qenter(pg, &p, 1);
 3528                 bp->b_pages[index] = p;
 3529         }
 3530         VM_OBJECT_UNLOCK(kernel_object);
 3531         bp->b_npages = index;
 3532 }
 3533 
 3534 /* Return pages associated with this buf to the vm system */
 3535 static void
 3536 vm_hold_free_pages(struct buf *bp, vm_offset_t from, vm_offset_t to)
 3537 {
 3538         vm_offset_t pg;
 3539         vm_page_t p;
 3540         int index, newnpages;
 3541 
 3542         from = round_page(from);
 3543         to = round_page(to);
 3544         newnpages = index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
 3545 
 3546         VM_OBJECT_LOCK(kernel_object);
 3547         for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
 3548                 p = bp->b_pages[index];
 3549                 if (p && (index < bp->b_npages)) {
 3550                         if (p->busy) {
 3551                                 printf(
 3552                             "vm_hold_free_pages: blkno: %jd, lblkno: %jd\n",
 3553                                     (intmax_t)bp->b_blkno,
 3554                                     (intmax_t)bp->b_lblkno);
 3555                         }
 3556                         bp->b_pages[index] = NULL;
 3557                         pmap_qremove(pg, 1);
 3558                         vm_page_lock_queues();
 3559                         vm_page_unwire(p, 0);
 3560                         vm_page_free(p);
 3561                         vm_page_unlock_queues();
 3562                 }
 3563         }
 3564         VM_OBJECT_UNLOCK(kernel_object);
 3565         bp->b_npages = newnpages;
 3566 }
 3567 
 3568 /*
 3569  * Map an IO request into kernel virtual address space.
 3570  *
 3571  * All requests are (re)mapped into kernel VA space.
 3572  * Notice that we use b_bufsize for the size of the buffer
 3573  * to be mapped.  b_bcount might be modified by the driver.
 3574  *
 3575  * Note that even if the caller determines that the address space should
 3576  * be valid, a race or a smaller file mapped into a larger space may
 3577  * actually cause vmapbuf() to fail, so all callers of vmapbuf() MUST
 3578  * check the return value.
 3579  */
 3580 int
 3581 vmapbuf(struct buf *bp)
 3582 {
 3583         caddr_t addr, kva;
 3584         vm_prot_t prot;
 3585         int pidx, i;
 3586         struct vm_page *m;
 3587         struct pmap *pmap = &curproc->p_vmspace->vm_pmap;
 3588 
 3589         if (bp->b_bufsize < 0)
 3590                 return (-1);
 3591         prot = VM_PROT_READ;
 3592         if (bp->b_iocmd == BIO_READ)
 3593                 prot |= VM_PROT_WRITE;  /* Less backwards than it looks */
 3594         for (addr = (caddr_t)trunc_page((vm_offset_t)bp->b_data), pidx = 0;
 3595              addr < bp->b_data + bp->b_bufsize;
 3596              addr += PAGE_SIZE, pidx++) {
 3597                 /*
 3598                  * Do the vm_fault if needed; do the copy-on-write thing
 3599                  * when reading stuff off device into memory.
 3600                  *
 3601                  * NOTE! Must use pmap_extract() because addr may be in
 3602                  * the userland address space, and kextract is only guaranteed
 3603                  * to work for the kernel address space (see: sparc64 port).
 3604                  */
 3605 retry:
 3606                 if (vm_fault_quick(addr >= bp->b_data ? addr : bp->b_data,
 3607                     prot) < 0) {
 3608                         vm_page_lock_queues();
 3609                         for (i = 0; i < pidx; ++i) {
 3610                                 vm_page_unhold(bp->b_pages[i]);
 3611                                 bp->b_pages[i] = NULL;
 3612                         }
 3613                         vm_page_unlock_queues();
 3614                         return(-1);
 3615                 }
 3616                 m = pmap_extract_and_hold(pmap, (vm_offset_t)addr, prot);
 3617                 if (m == NULL)
 3618                         goto retry;
 3619                 bp->b_pages[pidx] = m;
 3620         }
 3621         if (pidx > btoc(MAXPHYS))
 3622                 panic("vmapbuf: mapped more than MAXPHYS");
 3623         pmap_qenter((vm_offset_t)bp->b_saveaddr, bp->b_pages, pidx);
 3624         
 3625         kva = bp->b_saveaddr;
 3626         bp->b_npages = pidx;
 3627         bp->b_saveaddr = bp->b_data;
 3628         bp->b_data = kva + (((vm_offset_t) bp->b_data) & PAGE_MASK);
 3629         return(0);
 3630 }
 3631 
 3632 /*
 3633  * Free the io map PTEs associated with this IO operation.
 3634  * We also invalidate the TLB entries and restore the original b_addr.
 3635  */
 3636 void
 3637 vunmapbuf(struct buf *bp)
 3638 {
 3639         int pidx;
 3640         int npages;
 3641 
 3642         npages = bp->b_npages;
 3643         pmap_qremove(trunc_page((vm_offset_t)bp->b_data), npages);
 3644         vm_page_lock_queues();
 3645         for (pidx = 0; pidx < npages; pidx++)
 3646                 vm_page_unhold(bp->b_pages[pidx]);
 3647         vm_page_unlock_queues();
 3648 
 3649         bp->b_data = bp->b_saveaddr;
 3650 }
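
      /*
       * Mapping sketch (not part of the original file): the physio-style
       * pattern of wiring a user buffer into kernel VA around a raw transfer.
       * The return value of vmapbuf() must be checked, as noted above.
       * "udata", "len", "offset" and "dev" are assumed inputs; b_saveaddr
       * must hold the buffer's kernel VA before the call.
       */
      #if 0
      static int
      example_raw_transfer(struct cdev *dev, struct buf *bp, caddr_t udata,
          int len, off_t offset)
      {
              int error;

              bp->b_iocmd = BIO_READ;
              bp->b_saveaddr = bp->b_data;    /* remember the kernel VA */
              bp->b_data = udata;             /* user address to be wired */
              bp->b_bufsize = len;
              bp->b_bcount = len;
              if (vmapbuf(bp) < 0)
                      return (EFAULT);        /* user pages could not be wired */
              bp->b_iooffset = offset;
              dev_strategy(dev, bp);
              error = bufwait(bp);
              vunmapbuf(bp);                  /* unwire pages, restore b_data */
              return (error);
      }
      #endif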
 3651 
 3652 void
 3653 bdone(struct buf *bp)
 3654 {
 3655 
 3656         mtx_lock(&bdonelock);
 3657         bp->b_flags |= B_DONE;
 3658         wakeup(bp);
 3659         mtx_unlock(&bdonelock);
 3660 }
 3661 
 3662 void
 3663 bwait(struct buf *bp, u_char pri, const char *wchan)
 3664 {
 3665 
 3666         mtx_lock(&bdonelock);
 3667         while ((bp->b_flags & B_DONE) == 0)
 3668                 msleep(bp, &bdonelock, pri, wchan, 0);
 3669         mtx_unlock(&bdonelock);
 3670 }
 3671 
 3672 int
 3673 bufsync(struct bufobj *bo, int waitfor, struct thread *td)
 3674 {
 3675 
 3676         return (VOP_FSYNC(bo->__bo_vnode, waitfor, td));
 3677 }
 3678 
 3679 void
 3680 bufstrategy(struct bufobj *bo, struct buf *bp)
 3681 {
 3682         int i = 0;
 3683         struct vnode *vp;
 3684 
 3685         vp = bp->b_vp;
 3686         KASSERT(vp == bo->bo_private, ("Inconsistent vnode bufstrategy"));
 3687         KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
 3688             ("Wrong vnode in bufstrategy(bp=%p, vp=%p)", bp, vp));
 3689         i = VOP_STRATEGY(vp, bp);
 3690         KASSERT(i == 0, ("VOP_STRATEGY failed bp=%p vp=%p", bp, bp->b_vp));
 3691 }
 3692 
 3693 void
 3694 bufobj_wrefl(struct bufobj *bo)
 3695 {
 3696 
 3697         KASSERT(bo != NULL, ("NULL bo in bufobj_wref"));
 3698         ASSERT_BO_LOCKED(bo);
 3699         bo->bo_numoutput++;
 3700 }
 3701 
 3702 void
 3703 bufobj_wref(struct bufobj *bo)
 3704 {
 3705 
 3706         KASSERT(bo != NULL, ("NULL bo in bufobj_wref"));
 3707         BO_LOCK(bo);
 3708         bo->bo_numoutput++;
 3709         BO_UNLOCK(bo);
 3710 }
 3711 
 3712 void
 3713 bufobj_wdrop(struct bufobj *bo)
 3714 {
 3715 
 3716         KASSERT(bo != NULL, ("NULL bo in bufobj_wdrop"));
 3717         BO_LOCK(bo);
 3718         KASSERT(bo->bo_numoutput > 0, ("bufobj_wdrop non-positive count"));
 3719         if ((--bo->bo_numoutput == 0) && (bo->bo_flag & BO_WWAIT)) {
 3720                 bo->bo_flag &= ~BO_WWAIT;
 3721                 wakeup(&bo->bo_numoutput);
 3722         }
 3723         BO_UNLOCK(bo);
 3724 }
 3725 
 3726 int
 3727 bufobj_wwait(struct bufobj *bo, int slpflag, int timeo)
 3728 {
 3729         int error;
 3730 
 3731         KASSERT(bo != NULL, ("NULL bo in bufobj_wwait"));
 3732         ASSERT_BO_LOCKED(bo);
 3733         error = 0;
 3734         while (bo->bo_numoutput) {
 3735                 bo->bo_flag |= BO_WWAIT;
 3736                 error = msleep(&bo->bo_numoutput, BO_MTX(bo),
 3737                     slpflag | (PRIBIO + 1), "bo_wwait", timeo);
 3738                 if (error)
 3739                         break;
 3740         }
 3741         return (error);
 3742 }
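
      /*
       * Accounting sketch (not part of the original file): the bufobj output
       * count as used by fsync-style code.  Writers bump the count with
       * bufobj_wref() when a write is started (see bufdone() above, which
       * drops it again via bufobj_wdrop()); a waiter drains it under the
       * bufobj lock.
       */
      #if 0
      static int
      example_wait_output(struct bufobj *bo)
      {
              int error;

              BO_LOCK(bo);
              error = bufobj_wwait(bo, 0, 0); /* sleep until bo_numoutput == 0 */
              BO_UNLOCK(bo);
              return (error);
      }
      #endif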
 3743 
 3744 #include "opt_ddb.h"
 3745 #ifdef DDB
 3746 #include <ddb/ddb.h>
 3747 
 3748 /* DDB command to show buffer data */
 3749 DB_SHOW_COMMAND(buffer, db_show_buffer)
 3750 {
 3751         /* get args */
 3752         struct buf *bp = (struct buf *)addr;
 3753 
 3754         if (!have_addr) {
 3755                 db_printf("usage: show buffer <addr>\n");
 3756                 return;
 3757         }
 3758 
 3759         db_printf("buf at %p\n", bp);
 3760         db_printf("b_flags = 0x%b\n", (u_int)bp->b_flags, PRINT_BUF_FLAGS);
 3761         db_printf(
 3762             "b_error = %d, b_bufsize = %ld, b_bcount = %ld, b_resid = %ld\n"
 3763             "b_bufobj = (%p), b_data = %p, b_blkno = %jd\n",
 3764             bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
 3765             bp->b_bufobj, bp->b_data, (intmax_t)bp->b_blkno);
 3766         db_printf("lockstatus = %d, excl count = %d, excl owner %p\n",
 3767             lockstatus(&bp->b_lock, NULL), bp->b_lock.lk_exclusivecount,
 3768             bp->b_lock.lk_lockholder);
 3769         if (bp->b_npages) {
 3770                 int i;
 3771                 db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
 3772                 for (i = 0; i < bp->b_npages; i++) {
 3773                         vm_page_t m;
 3774                         m = bp->b_pages[i];
 3775                         db_printf("(%p, 0x%lx, 0x%lx)", (void *)m->object,
 3776                             (u_long)m->pindex, (u_long)VM_PAGE_TO_PHYS(m));
 3777                         if ((i + 1) < bp->b_npages)
 3778                                 db_printf(",");
 3779                 }
 3780                 db_printf("\n");
 3781         }
 3782 }
 3783 
 3784 DB_SHOW_COMMAND(lockedbufs, lockedbufs)
 3785 {
 3786         struct buf *bp;
 3787         int i;
 3788 
 3789         for (i = 0; i < nbuf; i++) {
 3790                 bp = &buf[i];
 3791                 if (lockcount(&bp->b_lock)) {
 3792                         db_show_buffer((uintptr_t)bp, 1, 0, NULL);
 3793                         db_printf("\n");
 3794                 }
 3795         }
 3796 }
 3797 #endif /* DDB */
