FreeBSD/Linux Kernel Cross Reference
sys/kern/vfs_bio.c


    1 /*
    2  * Copyright (c) 1994,1997 John S. Dyson
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice immediately at the beginning of the file, without modification,
   10  *    this list of conditions, and the following disclaimer.
   11  * 2. Absolutely no warranty of function or purpose is made by the author
   12  *              John S. Dyson.
   13  *
   14  * $FreeBSD: releng/5.1/sys/kern/vfs_bio.c 114147 2003-04-28 05:40:45Z alc $
   15  */
   16 
   17 /*
    18  * This file implements a buffer I/O scheme that keeps the VM object
    19  * and the buffer cache coherent.  Pains have been taken to make
    20  * sure that the performance degradation associated with schemes such
    21  * as this is not realized.
   22  *
   23  * Author:  John S. Dyson
   24  * Significant help during the development and debugging phases
    25  * was provided by David Greenman, also of the FreeBSD core team.
   26  *
   27  * see man buf(9) for more info.
   28  */
   29 
   30 #include <sys/param.h>
   31 #include <sys/systm.h>
   32 #include <sys/bio.h>
   33 #include <sys/buf.h>
   34 #include <sys/devicestat.h>
   35 #include <sys/eventhandler.h>
   36 #include <sys/lock.h>
   37 #include <sys/malloc.h>
   38 #include <sys/mount.h>
   39 #include <sys/mutex.h>
   40 #include <sys/kernel.h>
   41 #include <sys/kthread.h>
   42 #include <sys/proc.h>
   43 #include <sys/resourcevar.h>
   44 #include <sys/sysctl.h>
   45 #include <sys/vmmeter.h>
   46 #include <sys/vnode.h>
   47 #include <vm/vm.h>
   48 #include <vm/vm_param.h>
   49 #include <vm/vm_kern.h>
   50 #include <vm/vm_pageout.h>
   51 #include <vm/vm_page.h>
   52 #include <vm/vm_object.h>
   53 #include <vm/vm_extern.h>
   54 #include <vm/vm_map.h>
   55 #include "opt_directio.h"
   56 #include "opt_swap.h"
   57 
   58 static MALLOC_DEFINE(M_BIOBUF, "BIO buffer", "BIO buffer");
   59 
   60 struct  bio_ops bioops;         /* I/O operation notification */
   61 
   62 struct  buf_ops buf_ops_bio = {
   63         "buf_ops_bio",
   64         bwrite
   65 };
   66 
   67 /*
    68  * XXX buf is global because kern_shutdown.c and ffs_checkoverlap have
   69  * carnal knowledge of buffers.  This knowledge should be moved to vfs_bio.c.
   70  */
   71 struct buf *buf;                /* buffer header pool */
   72 
   73 static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
   74                 vm_offset_t to);
   75 static void vm_hold_load_pages(struct buf * bp, vm_offset_t from,
   76                 vm_offset_t to);
   77 static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off,
   78                                int pageno, vm_page_t m);
   79 static void vfs_clean_pages(struct buf * bp);
   80 static void vfs_setdirty(struct buf *bp);
   81 static void vfs_vmio_release(struct buf *bp);
   82 static void vfs_backgroundwritedone(struct buf *bp);
   83 static int vfs_bio_clcheck(struct vnode *vp, int size,
   84                 daddr_t lblkno, daddr_t blkno);
   85 static int flushbufqueues(int flushdeps);
   86 static void buf_daemon(void);
   87 void bremfreel(struct buf * bp);
   88 
   89 int vmiodirenable = TRUE;
   90 SYSCTL_INT(_vfs, OID_AUTO, vmiodirenable, CTLFLAG_RW, &vmiodirenable, 0,
   91     "Use the VM system for directory writes");
   92 int runningbufspace;
   93 SYSCTL_INT(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD, &runningbufspace, 0,
   94     "Amount of presently outstanding async buffer io");
   95 static int bufspace;
   96 SYSCTL_INT(_vfs, OID_AUTO, bufspace, CTLFLAG_RD, &bufspace, 0,
   97     "KVA memory used for bufs");
   98 static int maxbufspace;
   99 SYSCTL_INT(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RD, &maxbufspace, 0,
  100     "Maximum allowed value of bufspace (including buf_daemon)");
  101 static int bufmallocspace;
  102 SYSCTL_INT(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD, &bufmallocspace, 0,
  103     "Amount of malloced memory for buffers");
  104 static int maxbufmallocspace;
  105 SYSCTL_INT(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW, &maxbufmallocspace, 0,
  106     "Maximum amount of malloced memory for buffers");
  107 static int lobufspace;
  108 SYSCTL_INT(_vfs, OID_AUTO, lobufspace, CTLFLAG_RD, &lobufspace, 0,
  109     "Minimum amount of buffers we want to have");
  110 static int hibufspace;
  111 SYSCTL_INT(_vfs, OID_AUTO, hibufspace, CTLFLAG_RD, &hibufspace, 0,
  112     "Maximum allowed value of bufspace (excluding buf_daemon)");
  113 static int bufreusecnt;
  114 SYSCTL_INT(_vfs, OID_AUTO, bufreusecnt, CTLFLAG_RW, &bufreusecnt, 0,
  115     "Number of times we have reused a buffer");
  116 static int buffreekvacnt;
  117 SYSCTL_INT(_vfs, OID_AUTO, buffreekvacnt, CTLFLAG_RW, &buffreekvacnt, 0,
  118     "Number of times we have freed the KVA space from some buffer");
  119 static int bufdefragcnt;
  120 SYSCTL_INT(_vfs, OID_AUTO, bufdefragcnt, CTLFLAG_RW, &bufdefragcnt, 0,
  121     "Number of times we have had to repeat buffer allocation to defragment");
  122 static int lorunningspace;
  123 SYSCTL_INT(_vfs, OID_AUTO, lorunningspace, CTLFLAG_RW, &lorunningspace, 0,
  124     "Minimum preferred space used for in-progress I/O");
  125 static int hirunningspace;
  126 SYSCTL_INT(_vfs, OID_AUTO, hirunningspace, CTLFLAG_RW, &hirunningspace, 0,
  127     "Maximum amount of space to use for in-progress I/O");
  128 static int dirtybufferflushes;
  129 SYSCTL_INT(_vfs, OID_AUTO, dirtybufferflushes, CTLFLAG_RW, &dirtybufferflushes,
  130     0, "Number of bdwrite to bawrite conversions to limit dirty buffers");
  131 static int altbufferflushes;
  132 SYSCTL_INT(_vfs, OID_AUTO, altbufferflushes, CTLFLAG_RW, &altbufferflushes,
  133     0, "Number of fsync flushes to limit dirty buffers");
  134 static int recursiveflushes;
  135 SYSCTL_INT(_vfs, OID_AUTO, recursiveflushes, CTLFLAG_RW, &recursiveflushes,
  136     0, "Number of flushes skipped due to being recursive");
  137 static int numdirtybuffers;
  138 SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD, &numdirtybuffers, 0,
  139     "Number of buffers that are dirty (has unwritten changes) at the moment");
  140 static int lodirtybuffers;
  141 SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW, &lodirtybuffers, 0,
  142     "How many buffers we want to have free before bufdaemon can sleep");
  143 static int hidirtybuffers;
  144 SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW, &hidirtybuffers, 0,
  145     "When the number of dirty buffers is considered severe");
  146 static int dirtybufthresh;
  147 SYSCTL_INT(_vfs, OID_AUTO, dirtybufthresh, CTLFLAG_RW, &dirtybufthresh,
  148     0, "Number of bdwrite to bawrite conversions to clear dirty buffers");
  149 static int numfreebuffers;
  150 SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD, &numfreebuffers, 0,
  151     "Number of free buffers");
  152 static int lofreebuffers;
  153 SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW, &lofreebuffers, 0,
  154    "XXX Unused");
  155 static int hifreebuffers;
  156 SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW, &hifreebuffers, 0,
  157    "XXX Complicatedly unused");
  158 static int getnewbufcalls;
  159 SYSCTL_INT(_vfs, OID_AUTO, getnewbufcalls, CTLFLAG_RW, &getnewbufcalls, 0,
  160    "Number of calls to getnewbuf");
  161 static int getnewbufrestarts;
  162 SYSCTL_INT(_vfs, OID_AUTO, getnewbufrestarts, CTLFLAG_RW, &getnewbufrestarts, 0,
  163     "Number of times getnewbuf has had to restart a buffer aquisition");
  164 static int dobkgrdwrite = 1;
  165 SYSCTL_INT(_debug, OID_AUTO, dobkgrdwrite, CTLFLAG_RW, &dobkgrdwrite, 0,
  166     "Do background writes (honoring the BX_BKGRDWRITE flag)?");
  167 
  168 /*
  169  * Wakeup point for bufdaemon, as well as indicator of whether it is already
  170  * active.  Set to 1 when the bufdaemon is already "on" the queue, 0 when it
  171  * is idling.
  172  */
  173 static int bd_request;
  174 
  175 /*
  176  * This lock synchronizes access to bd_request.
  177  */
  178 static struct mtx bdlock;
  179 
  180 /*
  181  * bogus page -- for I/O to/from partially complete buffers
  182  * this is a temporary solution to the problem, but it is not
  183  * really that bad.  it would be better to split the buffer
  184  * for input in the case of buffers partially already in memory,
  185  * but the code is intricate enough already.
  186  */
  187 vm_page_t bogus_page;
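       /*
        * In rough terms, bogus_page is substituted into a buffer's b_pages[]
        * array in place of pages that are already fully valid, so that device
        * I/O cannot clobber them; the fixup loop in brelse() below then looks
        * the real pages up again and reinstalls them in b_pages[].
        */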
  188 
  189 /*
  190  * Synchronization (sleep/wakeup) variable for active buffer space requests.
  191  * Set when wait starts, cleared prior to wakeup().
  192  * Used in runningbufwakeup() and waitrunningbufspace().
  193  */
  194 static int runningbufreq;
  195 
  196 /*
  197  * This lock protects the runningbufreq and synchronizes runningbufwakeup and
  198  * waitrunningbufspace().
  199  */
  200 static struct mtx rbreqlock;
  201 
  202 /* 
  203  * Synchronization (sleep/wakeup) variable for buffer requests.
  204  * Can contain the VFS_BIO_NEED flags defined below; setting/clearing is done
  205  * by and/or.
  206  * Used in numdirtywakeup(), bufspacewakeup(), bufcountwakeup(), bwillwrite(),
  207  * getnewbuf(), and getblk().
  208  */
  209 static int needsbuffer;
  210 
  211 /*
  212  * Lock that protects needsbuffer and the sleeps/wakeups surrounding it.
  213  */
  214 static struct mtx nblock;
  215 
  216 /*
  217  * Lock that protects against bwait()/bdone()/B_DONE races.
  218  */
  219 
  220 static struct mtx bdonelock;
  221 
  222 /*
  223  * Definitions for the buffer free lists.
  224  */
  225 #define BUFFER_QUEUES   6       /* number of free buffer queues */
  226 
  227 #define QUEUE_NONE      0       /* on no queue */
  228 #define QUEUE_LOCKED    1       /* locked buffers */
  229 #define QUEUE_CLEAN     2       /* non-B_DELWRI buffers */
  230 #define QUEUE_DIRTY     3       /* B_DELWRI buffers */
  231 #define QUEUE_EMPTYKVA  4       /* empty buffer headers w/KVA assignment */
  232 #define QUEUE_EMPTY     5       /* empty buffer headers */
  233 
  234 /* Queues for free buffers with various properties */
  235 static TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES] = { { 0 } };
  236 
  237 /* Lock for the bufqueues */
  238 static struct mtx bqlock;
  239 
  240 /*
  241  * Single global constant for BUF_WMESG, to avoid getting multiple references.
   242  * buf_wmesg is referenced from macros.
  243  */
  244 const char *buf_wmesg = BUF_WMESG;
  245 
  246 #define VFS_BIO_NEED_ANY        0x01    /* any freeable buffer */
  247 #define VFS_BIO_NEED_DIRTYFLUSH 0x02    /* waiting for dirty buffer flush */
  248 #define VFS_BIO_NEED_FREE       0x04    /* wait for free bufs, hi hysteresis */
  249 #define VFS_BIO_NEED_BUFSPACE   0x08    /* wait for buf space, lo hysteresis */
  250 
  251 #ifdef DIRECTIO
  252 extern void ffs_rawread_setup(void);
  253 #endif /* DIRECTIO */
  254 /*
  255  *      numdirtywakeup:
  256  *
  257  *      If someone is blocked due to there being too many dirty buffers,
  258  *      and numdirtybuffers is now reasonable, wake them up.
  259  */
  260 
  261 static __inline void
  262 numdirtywakeup(int level)
  263 {
  264         if (numdirtybuffers <= level) {
  265                 mtx_lock(&nblock);
  266                 if (needsbuffer & VFS_BIO_NEED_DIRTYFLUSH) {
  267                         needsbuffer &= ~VFS_BIO_NEED_DIRTYFLUSH;
  268                         wakeup(&needsbuffer);
  269                 }
  270                 mtx_unlock(&nblock);
  271         }
  272 }
  273 
  274 /*
  275  *      bufspacewakeup:
  276  *
  277  *      Called when buffer space is potentially available for recovery.
  278  *      getnewbuf() will block on this flag when it is unable to free 
  279  *      sufficient buffer space.  Buffer space becomes recoverable when 
  280  *      bp's get placed back in the queues.
  281  */
  282 
  283 static __inline void
  284 bufspacewakeup(void)
  285 {
  286         /*
  287          * If someone is waiting for BUF space, wake them up.  Even
  288          * though we haven't freed the kva space yet, the waiting
  289          * process will be able to now.
  290          */
  291         mtx_lock(&nblock);
  292         if (needsbuffer & VFS_BIO_NEED_BUFSPACE) {
  293                 needsbuffer &= ~VFS_BIO_NEED_BUFSPACE;
  294                 wakeup(&needsbuffer);
  295         }
  296         mtx_unlock(&nblock);
  297 }
  298 
  299 /*
  300  * runningbufwakeup() - in-progress I/O accounting.
  301  *
  302  */
  303 static __inline void
  304 runningbufwakeup(struct buf *bp)
  305 {
  306         if (bp->b_runningbufspace) {
  307                 atomic_subtract_int(&runningbufspace, bp->b_runningbufspace);
  308                 bp->b_runningbufspace = 0;
  309                 mtx_lock(&rbreqlock);
  310                 if (runningbufreq && runningbufspace <= lorunningspace) {
  311                         runningbufreq = 0;
  312                         wakeup(&runningbufreq);
  313                 }
  314                 mtx_unlock(&rbreqlock);
  315         }
  316 }
  317 
  318 /*
  319  *      bufcountwakeup:
  320  *
  321  *      Called when a buffer has been added to one of the free queues to
  322  *      account for the buffer and to wakeup anyone waiting for free buffers.
  323  *      This typically occurs when large amounts of metadata are being handled
  324  *      by the buffer cache ( else buffer space runs out first, usually ).
  325  */
  326 
  327 static __inline void
  328 bufcountwakeup(void) 
  329 {
  330         atomic_add_int(&numfreebuffers, 1);
  331         mtx_lock(&nblock);
  332         if (needsbuffer) {
  333                 needsbuffer &= ~VFS_BIO_NEED_ANY;
  334                 if (numfreebuffers >= hifreebuffers)
  335                         needsbuffer &= ~VFS_BIO_NEED_FREE;
  336                 wakeup(&needsbuffer);
  337         }
  338         mtx_unlock(&nblock);
  339 }
  340 
  341 /*
  342  *      waitrunningbufspace()
  343  *
  344  *      runningbufspace is a measure of the amount of I/O currently
  345  *      running.  This routine is used in async-write situations to
  346  *      prevent creating huge backups of pending writes to a device.
  347  *      Only asynchronous writes are governed by this function.
  348  *
  349  *      Reads will adjust runningbufspace, but will not block based on it.
  350  *      The read load has a side effect of reducing the allowed write load.
  351  *
  352  *      This does NOT turn an async write into a sync write.  It waits  
  353  *      for earlier writes to complete and generally returns before the
  354  *      caller's write has reached the device.
  355  */
  356 static __inline void
  357 waitrunningbufspace(void)
  358 {
  359         mtx_lock(&rbreqlock);
  360         while (runningbufspace > hirunningspace) {
  361                 ++runningbufreq;
  362                 msleep(&runningbufreq, &rbreqlock, PVM, "wdrain", 0);
  363         }
  364         mtx_unlock(&rbreqlock);
  365 }
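       /*
        * Note the hysteresis between this routine and runningbufwakeup()
        * above: a writer sleeps here once runningbufspace exceeds
        * hirunningspace, and the wakeup is only issued after the total has
        * dropped back to lorunningspace or below, so a backlog of async
        * writes drains substantially before new ones are admitted.
        */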
  366 
  367 
  368 /*
  369  *      vfs_buf_test_cache:
  370  *
  371  *      Called when a buffer is extended.  This function clears the B_CACHE
  372  *      bit if the newly extended portion of the buffer does not contain
  373  *      valid data.
  374  */
  375 static __inline__
  376 void
  377 vfs_buf_test_cache(struct buf *bp,
  378                   vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
  379                   vm_page_t m)
  380 {
  381         GIANT_REQUIRED;
  382 
  383         if (bp->b_flags & B_CACHE) {
  384                 int base = (foff + off) & PAGE_MASK;
  385                 if (vm_page_is_valid(m, base, size) == 0)
  386                         bp->b_flags &= ~B_CACHE;
  387         }
  388 }
  389 
   390 /* Wake up the buffer daemon if necessary */
  391 static __inline__
  392 void
  393 bd_wakeup(int dirtybuflevel)
  394 {
  395         mtx_lock(&bdlock);
  396         if (bd_request == 0 && numdirtybuffers >= dirtybuflevel) {
  397                 bd_request = 1;
  398                 wakeup(&bd_request);
  399         }
  400         mtx_unlock(&bdlock);
  401 }
  402 
  403 /*
  404  * bd_speedup - speedup the buffer cache flushing code
  405  */
  406 
  407 static __inline__
  408 void
  409 bd_speedup(void)
  410 {
  411         bd_wakeup(1);
  412 }
  413 
  414 /*
   415  * Calculate buffer cache scaling values and reserve space for buffer
   416  * headers.  This is called during low-level kernel initialization and
   417  * may be called more than once.  We CANNOT write to the memory area
  418  * being reserved at this time.
  419  */
  420 caddr_t
  421 kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est)
  422 {
  423         /*
  424          * physmem_est is in pages.  Convert it to kilobytes (assumes
  425          * PAGE_SIZE is >= 1K)
  426          */
  427         physmem_est = physmem_est * (PAGE_SIZE / 1024);
  428 
  429         /*
  430          * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
  431          * For the first 64MB of ram nominally allocate sufficient buffers to
  432          * cover 1/4 of our ram.  Beyond the first 64MB allocate additional
  433          * buffers to cover 1/20 of our ram over 64MB.  When auto-sizing
  434          * the buffer cache we limit the eventual kva reservation to
  435          * maxbcache bytes.
  436          *
  437          * factor represents the 1/4 x ram conversion.
  438          */
  439         if (nbuf == 0) {
  440                 int factor = 4 * BKVASIZE / 1024;
  441 
  442                 nbuf = 50;
  443                 if (physmem_est > 4096)
  444                         nbuf += min((physmem_est - 4096) / factor,
  445                             65536 / factor);
  446                 if (physmem_est > 65536)
  447                         nbuf += (physmem_est - 65536) * 2 / (factor * 5);
  448 
  449                 if (maxbcache && nbuf > maxbcache / BKVASIZE)
  450                         nbuf = maxbcache / BKVASIZE;
  451         }
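               /*
                * Worked example (assuming BKVASIZE is 16K, so factor == 64):
                * with 128MB of RAM, physmem_est is 131072KB and the untuned
                * nbuf comes to 50 + min((131072 - 4096) / 64, 65536 / 64) +
                * (131072 - 65536) * 2 / (64 * 5) = 50 + 1024 + 409 = 1483
                * buffer headers, which the maxbcache test above may reduce.
                */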
  452 
  453 #if 0
  454         /*
   455          * Do not allow the buffer_map to be more than 1/2 the size of the
  456          * kernel_map.
  457          */
  458         if (nbuf > (kernel_map->max_offset - kernel_map->min_offset) / 
  459             (BKVASIZE * 2)) {
  460                 nbuf = (kernel_map->max_offset - kernel_map->min_offset) / 
  461                     (BKVASIZE * 2);
  462                 printf("Warning: nbufs capped at %d\n", nbuf);
  463         }
  464 #endif
  465 
  466         /*
  467          * swbufs are used as temporary holders for I/O, such as paging I/O.
   468  * We have no fewer than 16 and no more than 256.
  469          */
  470         nswbuf = max(min(nbuf/4, 256), 16);
  471 #ifdef NSWBUF_MIN
  472         if (nswbuf < NSWBUF_MIN)
  473                 nswbuf = NSWBUF_MIN;
  474 #endif
  475 #ifdef DIRECTIO
  476         ffs_rawread_setup();
  477 #endif
  478 
  479         /*
  480          * Reserve space for the buffer cache buffers
  481          */
  482         swbuf = (void *)v;
  483         v = (caddr_t)(swbuf + nswbuf);
  484         buf = (void *)v;
  485         v = (caddr_t)(buf + nbuf);
  486 
  487         return(v);
  488 }
  489 
  490 /* Initialize the buffer subsystem.  Called before use of any buffers. */
  491 void
  492 bufinit(void)
  493 {
  494         struct buf *bp;
  495         vm_offset_t bogus_offset;
  496         int i;
  497 
  498         GIANT_REQUIRED;
  499 
  500         mtx_init(&bqlock, "buf queue lock", NULL, MTX_DEF);
  501         mtx_init(&rbreqlock, "runningbufspace lock", NULL, MTX_DEF);
  502         mtx_init(&nblock, "needsbuffer lock", NULL, MTX_DEF);
  503         mtx_init(&bdlock, "buffer daemon lock", NULL, MTX_DEF);
  504         mtx_init(&bdonelock, "bdone lock", NULL, MTX_DEF);
  505 
  506         /* next, make a null set of free lists */
  507         for (i = 0; i < BUFFER_QUEUES; i++)
  508                 TAILQ_INIT(&bufqueues[i]);
  509 
  510         /* finally, initialize each buffer header and stick on empty q */
  511         for (i = 0; i < nbuf; i++) {
  512                 bp = &buf[i];
  513                 bzero(bp, sizeof *bp);
  514                 bp->b_flags = B_INVAL;  /* we're just an empty header */
  515                 bp->b_dev = NODEV;
  516                 bp->b_rcred = NOCRED;
  517                 bp->b_wcred = NOCRED;
  518                 bp->b_qindex = QUEUE_EMPTY;
  519                 bp->b_vflags = 0;
  520                 bp->b_xflags = 0;
  521                 LIST_INIT(&bp->b_dep);
  522                 BUF_LOCKINIT(bp);
  523                 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
  524         }
  525 
  526         /*
  527          * maxbufspace is the absolute maximum amount of buffer space we are 
  528          * allowed to reserve in KVM and in real terms.  The absolute maximum
  529          * is nominally used by buf_daemon.  hibufspace is the nominal maximum
  530          * used by most other processes.  The differential is required to 
  531          * ensure that buf_daemon is able to run when other processes might 
  532          * be blocked waiting for buffer space.
  533          *
   534          * maxbufspace is based on BKVASIZE.  Allocating buffers larger than
  535          * this may result in KVM fragmentation which is not handled optimally
  536          * by the system.
  537          */
  538         maxbufspace = nbuf * BKVASIZE;
  539         hibufspace = imax(3 * maxbufspace / 4, maxbufspace - MAXBSIZE * 10);
  540         lobufspace = hibufspace - MAXBSIZE;
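               /*
                * Continuing the example above (nbuf = 1483, BKVASIZE = 16K,
                * MAXBSIZE = 64K): maxbufspace is about 23.2MB, hibufspace is
                * the larger of 3/4 of that and maxbufspace - 640K, i.e. about
                * 22.5MB, and lobufspace sits one MAXBSIZE below hibufspace,
                * leaving roughly ten maximum-sized buffers of headroom for
                * buf_daemon.
                */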
  541 
  542         lorunningspace = 512 * 1024;
  543         hirunningspace = 1024 * 1024;
  544 
  545 /*
  546  * Limit the amount of malloc memory since it is wired permanently into
  547  * the kernel space.  Even though this is accounted for in the buffer
  548  * allocation, we don't want the malloced region to grow uncontrolled.
  549  * The malloc scheme improves memory utilization significantly on average
  550  * (small) directories.
  551  */
  552         maxbufmallocspace = hibufspace / 20;
  553 
  554 /*
   555  * Reduce the chance of a deadlock occurring by limiting the number
  556  * of delayed-write dirty buffers we allow to stack up.
  557  */
  558         hidirtybuffers = nbuf / 4 + 20;
  559         dirtybufthresh = hidirtybuffers * 9 / 10;
  560         numdirtybuffers = 0;
  561 /*
  562  * To support extreme low-memory systems, make sure hidirtybuffers cannot
  563  * eat up all available buffer space.  This occurs when our minimum cannot
  564  * be met.  We try to size hidirtybuffers to 3/4 our buffer space assuming
  565  * BKVASIZE'd (8K) buffers.
  566  */
  567         while (hidirtybuffers * BKVASIZE > 3 * hibufspace / 4) {
  568                 hidirtybuffers >>= 1;
  569         }
  570         lodirtybuffers = hidirtybuffers / 2;
  571 
  572 /*
  573  * Try to keep the number of free buffers in the specified range,
   574  * and give special processes (e.g. buf_daemon) access to an
  575  * emergency reserve.
  576  */
  577         lofreebuffers = nbuf / 18 + 5;
  578         hifreebuffers = 2 * lofreebuffers;
  579         numfreebuffers = nbuf;
  580 
  581 /*
  582  * Maximum number of async ops initiated per buf_daemon loop.  This is
   583  * somewhat of a hack at the moment; we really need to limit ourselves
  584  * based on the number of bytes of I/O in-transit that were initiated
  585  * from buf_daemon.
  586  */
  587 
  588         bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
  589         VM_OBJECT_LOCK(kernel_object);
  590         bogus_page = vm_page_alloc(kernel_object,
  591                         ((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
  592             VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
  593         VM_OBJECT_UNLOCK(kernel_object);
  594 }
  595 
  596 /*
  597  * bfreekva() - free the kva allocation for a buffer.
  598  *
  599  *      Must be called at splbio() or higher as this is the only locking for
  600  *      buffer_map.
  601  *
  602  *      Since this call frees up buffer space, we call bufspacewakeup().
  603  */
  604 static void
  605 bfreekva(struct buf * bp)
  606 {
  607         GIANT_REQUIRED;
  608 
  609         if (bp->b_kvasize) {
  610                 atomic_add_int(&buffreekvacnt, 1);
  611                 atomic_subtract_int(&bufspace, bp->b_kvasize);
  612                 vm_map_delete(buffer_map,
  613                     (vm_offset_t) bp->b_kvabase,
  614                     (vm_offset_t) bp->b_kvabase + bp->b_kvasize
  615                 );
  616                 bp->b_kvasize = 0;
  617                 bufspacewakeup();
  618         }
  619 }
  620 
  621 /*
  622  *      bremfree:
  623  *
  624  *      Remove the buffer from the appropriate free list.
  625  */
  626 void
  627 bremfree(struct buf * bp)
  628 {
  629         mtx_lock(&bqlock);
  630         bremfreel(bp);
  631         mtx_unlock(&bqlock);
  632 }
  633 
  634 void
  635 bremfreel(struct buf * bp)
  636 {
  637         int s = splbio();
  638         int old_qindex = bp->b_qindex;
  639 
  640         GIANT_REQUIRED;
  641 
  642         if (bp->b_qindex != QUEUE_NONE) {
  643                 KASSERT(BUF_REFCNT(bp) == 1, ("bremfree: bp %p not locked",bp));
  644                 TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
  645                 bp->b_qindex = QUEUE_NONE;
  646         } else {
  647                 if (BUF_REFCNT(bp) <= 1)
  648                         panic("bremfree: removing a buffer not on a queue");
  649         }
  650 
  651         /*
   652          * Fix up the numfreebuffers count.  If the buffer is invalid or not
   653          * delayed-write, and it was on one of the free queues in the switch
   654          * below, the buffer was counted free and we must decrement numfreebuffers.
  655          */
  656         if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0) {
  657                 switch(old_qindex) {
  658                 case QUEUE_DIRTY:
  659                 case QUEUE_CLEAN:
  660                 case QUEUE_EMPTY:
  661                 case QUEUE_EMPTYKVA:
  662                         atomic_subtract_int(&numfreebuffers, 1);
  663                         break;
  664                 default:
  665                         break;
  666                 }
  667         }
  668         splx(s);
  669 }
  670 
  671 
  672 /*
  673  * Get a buffer with the specified data.  Look in the cache first.  We
  674  * must clear BIO_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE
  675  * is set, the buffer is valid and we do not have to do anything ( see
  676  * getblk() ).  This is really just a special case of breadn().
  677  */
  678 int
  679 bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
  680     struct buf ** bpp)
  681 {
  682 
  683         return (breadn(vp, blkno, size, 0, 0, 0, cred, bpp));
  684 }
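       /*
        * Typical usage, as a sketch (the identifiers below are illustrative,
        * not taken from this file): a filesystem read path does roughly
        *
        *      error = bread(vp, lbn, bsize, NOCRED, &bp);
        *      if (error) {
        *              brelse(bp);
        *              return (error);
        *      }
        *      ...use bp->b_data...
        *      bqrelse(bp);
        *
        * The buffer returned in *bpp is locked and must always be handed back
        * with brelse() or bqrelse().
        */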
  685 
  686 /*
  687  * Operates like bread, but also starts asynchronous I/O on
  688  * read-ahead blocks.  We must clear BIO_ERROR and B_INVAL prior
   689  * to initiating I/O.  If B_CACHE is set, the buffer is valid
  690  * and we do not have to do anything.
  691  */
  692 int
  693 breadn(struct vnode * vp, daddr_t blkno, int size,
  694     daddr_t * rablkno, int *rabsize,
  695     int cnt, struct ucred * cred, struct buf ** bpp)
  696 {
  697         struct buf *bp, *rabp;
  698         int i;
  699         int rv = 0, readwait = 0;
  700 
  701         *bpp = bp = getblk(vp, blkno, size, 0, 0, 0);
  702 
  703         /* if not found in cache, do some I/O */
  704         if ((bp->b_flags & B_CACHE) == 0) {
  705                 if (curthread != PCPU_GET(idlethread))
  706                         curthread->td_proc->p_stats->p_ru.ru_inblock++;
  707                 bp->b_iocmd = BIO_READ;
  708                 bp->b_flags &= ~B_INVAL;
  709                 bp->b_ioflags &= ~BIO_ERROR;
  710                 if (bp->b_rcred == NOCRED && cred != NOCRED)
  711                         bp->b_rcred = crhold(cred);
  712                 vfs_busy_pages(bp, 0);
  713                 if (vp->v_type == VCHR)
  714                         VOP_SPECSTRATEGY(vp, bp);
  715                 else
  716                         VOP_STRATEGY(vp, bp);
  717                 ++readwait;
  718         }
  719 
  720         for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
  721                 if (inmem(vp, *rablkno))
  722                         continue;
  723                 rabp = getblk(vp, *rablkno, *rabsize, 0, 0, 0);
  724 
  725                 if ((rabp->b_flags & B_CACHE) == 0) {
  726                         if (curthread != PCPU_GET(idlethread))
  727                                 curthread->td_proc->p_stats->p_ru.ru_inblock++;
  728                         rabp->b_flags |= B_ASYNC;
  729                         rabp->b_flags &= ~B_INVAL;
  730                         rabp->b_ioflags &= ~BIO_ERROR;
  731                         rabp->b_iocmd = BIO_READ;
  732                         if (rabp->b_rcred == NOCRED && cred != NOCRED)
  733                                 rabp->b_rcred = crhold(cred);
  734                         vfs_busy_pages(rabp, 0);
  735                         BUF_KERNPROC(rabp);
  736                         if (vp->v_type == VCHR)
  737                                 VOP_SPECSTRATEGY(vp, rabp);
  738                         else
  739                                 VOP_STRATEGY(vp, rabp);
  740                 } else {
  741                         brelse(rabp);
  742                 }
  743         }
  744 
  745         if (readwait) {
  746                 rv = bufwait(bp);
  747         }
  748         return (rv);
  749 }
  750 
  751 /*
  752  * Write, release buffer on completion.  (Done by iodone
  753  * if async).  Do not bother writing anything if the buffer
  754  * is invalid.
  755  *
   756  * Note that we set B_CACHE here, indicating that the buffer is
  757  * fully valid and thus cacheable.  This is true even of NFS
  758  * now so we set it generally.  This could be set either here 
  759  * or in biodone() since the I/O is synchronous.  We put it
  760  * here.
  761  */
  762 
  763 int
  764 bwrite(struct buf * bp)
  765 {
  766         int oldflags, s;
  767         struct buf *newbp;
  768 
  769         if (bp->b_flags & B_INVAL) {
  770                 brelse(bp);
  771                 return (0);
  772         }
  773 
  774         oldflags = bp->b_flags;
  775 
  776         if (BUF_REFCNT(bp) == 0)
  777                 panic("bwrite: buffer is not busy???");
  778         s = splbio();
  779         /*
  780          * If a background write is already in progress, delay
  781          * writing this block if it is asynchronous. Otherwise
  782          * wait for the background write to complete.
  783          */
  784         if (bp->b_xflags & BX_BKGRDINPROG) {
  785                 if (bp->b_flags & B_ASYNC) {
  786                         splx(s);
  787                         bdwrite(bp);
  788                         return (0);
  789                 }
  790                 bp->b_xflags |= BX_BKGRDWAIT;
  791                 tsleep(&bp->b_xflags, PRIBIO, "bwrbg", 0);
  792                 if (bp->b_xflags & BX_BKGRDINPROG)
  793                         panic("bwrite: still writing");
  794         }
  795 
  796         /* Mark the buffer clean */
  797         bundirty(bp);
  798 
  799         /*
  800          * If this buffer is marked for background writing and we
  801          * do not have to wait for it, make a copy and write the
  802          * copy so as to leave this buffer ready for further use.
  803          *
  804          * This optimization eats a lot of memory.  If we have a page
  805          * or buffer shortfall we can't do it.
  806          */
  807         if (dobkgrdwrite && (bp->b_xflags & BX_BKGRDWRITE) && 
  808             (bp->b_flags & B_ASYNC) &&
  809             !vm_page_count_severe() &&
  810             !buf_dirty_count_severe()) {
  811                 if (bp->b_iodone != NULL) {
  812                         printf("bp->b_iodone = %p\n", bp->b_iodone);
  813                         panic("bwrite: need chained iodone");
  814                 }
  815 
  816                 /* get a new block */
  817                 newbp = geteblk(bp->b_bufsize);
  818 
  819                 /*
  820                  * set it to be identical to the old block.  We have to
  821                  * set b_lblkno and BKGRDMARKER before calling bgetvp()
  822                  * to avoid confusing the splay tree and gbincore().
  823                  */
  824                 memcpy(newbp->b_data, bp->b_data, bp->b_bufsize);
  825                 newbp->b_lblkno = bp->b_lblkno;
  826                 newbp->b_xflags |= BX_BKGRDMARKER;
  827                 /* XXX The BX_ flags need to be protected as well */
  828                 VI_LOCK(bp->b_vp);
  829                 bgetvp(bp->b_vp, newbp);
  830                 VI_UNLOCK(bp->b_vp);
  831                 newbp->b_blkno = bp->b_blkno;
  832                 newbp->b_offset = bp->b_offset;
  833                 newbp->b_iodone = vfs_backgroundwritedone;
  834                 newbp->b_flags |= B_ASYNC;
  835                 newbp->b_flags &= ~B_INVAL;
  836 
  837                 /* move over the dependencies */
  838                 if (LIST_FIRST(&bp->b_dep) != NULL)
  839                         buf_movedeps(bp, newbp);
  840 
  841                 /*
  842                  * Initiate write on the copy, release the original to
  843                  * the B_LOCKED queue so that it cannot go away until
  844                  * the background write completes. If not locked it could go
  845                  * away and then be reconstituted while it was being written.
  846                  * If the reconstituted buffer were written, we could end up
  847                  * with two background copies being written at the same time.
  848                  */
  849                 bp->b_xflags |= BX_BKGRDINPROG;
  850                 bp->b_flags |= B_LOCKED;
  851                 bqrelse(bp);
  852                 bp = newbp;
  853         }
  854 
  855         bp->b_flags &= ~B_DONE;
  856         bp->b_ioflags &= ~BIO_ERROR;
  857         bp->b_flags |= B_WRITEINPROG | B_CACHE;
  858         bp->b_iocmd = BIO_WRITE;
  859 
  860         VI_LOCK(bp->b_vp);
  861         bp->b_vp->v_numoutput++;
  862         VI_UNLOCK(bp->b_vp);
  863         vfs_busy_pages(bp, 1);
  864 
  865         /*
  866          * Normal bwrites pipeline writes
  867          */
  868         bp->b_runningbufspace = bp->b_bufsize;
  869         atomic_add_int(&runningbufspace, bp->b_runningbufspace);
  870 
  871         if (curthread != PCPU_GET(idlethread))
  872                 curthread->td_proc->p_stats->p_ru.ru_oublock++;
  873         splx(s);
  874         if (oldflags & B_ASYNC)
  875                 BUF_KERNPROC(bp);
  876         if (bp->b_vp->v_type == VCHR)
  877                 VOP_SPECSTRATEGY(bp->b_vp, bp);
  878         else
  879                 VOP_STRATEGY(bp->b_vp, bp);
  880 
  881         if ((oldflags & B_ASYNC) == 0) {
  882                 int rtval = bufwait(bp);
  883                 brelse(bp);
  884                 return (rtval);
  885         } else if ((oldflags & B_NOWDRAIN) == 0) {
  886                 /*
  887                  * don't allow the async write to saturate the I/O
  888                  * system.  Deadlocks can occur only if a device strategy
  889                  * routine (like in MD) turns around and issues another
  890                  * high-level write, in which case B_NOWDRAIN is expected
  891                  * to be set.  Otherwise we will not deadlock here because
  892                  * we are blocking waiting for I/O that is already in-progress
  893                  * to complete.
  894                  */
  895                 waitrunningbufspace();
  896         }
  897 
  898         return (0);
  899 }
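       /*
        * The b_runningbufspace charge taken just before the strategy call
        * above is what runningbufwakeup() subtracts again when the write
        * finishes, which in turn is what releases sleepers in
        * waitrunningbufspace().
        */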
  900 
  901 /*
  902  * Complete a background write started from bwrite.
  903  */
  904 static void
  905 vfs_backgroundwritedone(bp)
  906         struct buf *bp;
  907 {
  908         struct buf *origbp;
  909 
  910         /*
  911          * Find the original buffer that we are writing.
  912          */
  913         VI_LOCK(bp->b_vp);
  914         if ((origbp = gbincore(bp->b_vp, bp->b_lblkno)) == NULL)
  915                 panic("backgroundwritedone: lost buffer");
  916         VI_UNLOCK(bp->b_vp);
  917         /*
  918          * Process dependencies then return any unfinished ones.
  919          */
  920         if (LIST_FIRST(&bp->b_dep) != NULL)
  921                 buf_complete(bp);
  922         if (LIST_FIRST(&bp->b_dep) != NULL)
  923                 buf_movedeps(bp, origbp);
  924 
  925         /* XXX Find out if origbp can disappear or get inconsistent */
  926         /*
  927          * Clear the BX_BKGRDINPROG flag in the original buffer
  928          * and awaken it if it is waiting for the write to complete.
  929          * If BX_BKGRDINPROG is not set in the original buffer it must
  930          * have been released and re-instantiated - which is not legal.
  931          */
  932         KASSERT((origbp->b_xflags & BX_BKGRDINPROG),
  933             ("backgroundwritedone: lost buffer2"));
  934         origbp->b_xflags &= ~BX_BKGRDINPROG;
  935         if (origbp->b_xflags & BX_BKGRDWAIT) {
  936                 origbp->b_xflags &= ~BX_BKGRDWAIT;
  937                 wakeup(&origbp->b_xflags);
  938         }
  939         /*
  940          * Clear the B_LOCKED flag and remove it from the locked
  941          * queue if it currently resides there.
  942          */
  943         origbp->b_flags &= ~B_LOCKED;
  944         if (BUF_LOCK(origbp, LK_EXCLUSIVE | LK_NOWAIT, NULL) == 0) {
  945                 bremfree(origbp);
  946                 bqrelse(origbp);
  947         }
  948         /*
  949          * This buffer is marked B_NOCACHE, so when it is released
  950          * by biodone, it will be tossed. We mark it with BIO_READ
  951          * to avoid biodone doing a second vwakeup.
  952          */
  953         bp->b_flags |= B_NOCACHE;
  954         bp->b_iocmd = BIO_READ;
  955         bp->b_flags &= ~(B_CACHE | B_DONE);
  956         bp->b_iodone = 0;
  957         bufdone(bp);
  958 }
  959 
  960 /*
  961  * Delayed write. (Buffer is marked dirty).  Do not bother writing
  962  * anything if the buffer is marked invalid.
  963  *
  964  * Note that since the buffer must be completely valid, we can safely
   965  * set B_CACHE.  In fact, we have to set B_CACHE here rather than in
  966  * biodone() in order to prevent getblk from writing the buffer
  967  * out synchronously.
  968  */
  969 void
  970 bdwrite(struct buf * bp)
  971 {
  972         struct thread *td = curthread;
  973         struct vnode *vp;
  974         struct buf *nbp;
  975 
  976         GIANT_REQUIRED;
  977 
  978         if (BUF_REFCNT(bp) == 0)
  979                 panic("bdwrite: buffer is not busy");
  980 
  981         if (bp->b_flags & B_INVAL) {
  982                 brelse(bp);
  983                 return;
  984         }
  985 
  986         /*
  987          * If we have too many dirty buffers, don't create any more.
  988          * If we are wildly over our limit, then force a complete
  989          * cleanup. Otherwise, just keep the situation from getting
  990          * out of control. Note that we have to avoid a recursive
  991          * disaster and not try to clean up after our own cleanup!
  992          */
  993         vp = bp->b_vp;
  994         VI_LOCK(vp);
  995         if (td->td_proc->p_flag & P_COWINPROGRESS) {
  996                 recursiveflushes++;
  997         } else if (vp != NULL && vp->v_dirtybufcnt > dirtybufthresh + 10) {
  998                 VI_UNLOCK(vp);
  999                 (void) VOP_FSYNC(vp, td->td_ucred, MNT_NOWAIT, td);
 1000                 VI_LOCK(vp);
 1001                 altbufferflushes++;
 1002         } else if (vp != NULL && vp->v_dirtybufcnt > dirtybufthresh) {
 1003                 /*
 1004                  * Try to find a buffer to flush.
 1005                  */
 1006                 TAILQ_FOREACH(nbp, &vp->v_dirtyblkhd, b_vnbufs) {
 1007                         if ((nbp->b_xflags & BX_BKGRDINPROG) ||
 1008                             buf_countdeps(nbp, 0) ||
 1009                             BUF_LOCK(nbp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
 1010                                 continue;
 1011                         if (bp == nbp)
 1012                                 panic("bdwrite: found ourselves");
 1013                         VI_UNLOCK(vp);
 1014                         if (nbp->b_flags & B_CLUSTEROK) {
 1015                                 vfs_bio_awrite(nbp);
 1016                         } else {
 1017                                 bremfree(nbp);
 1018                                 bawrite(nbp);
 1019                         }
 1020                         VI_LOCK(vp);
 1021                         dirtybufferflushes++;
 1022                         break;
 1023                 }
 1024         }
 1025         VI_UNLOCK(vp);
 1026 
 1027         bdirty(bp);
 1028         /*
 1029          * Set B_CACHE, indicating that the buffer is fully valid.  This is
 1030          * true even of NFS now.
 1031          */
 1032         bp->b_flags |= B_CACHE;
 1033 
 1034         /*
 1035          * This bmap keeps the system from needing to do the bmap later,
 1036          * perhaps when the system is attempting to do a sync.  Since it
  1037          * is likely that the indirect block -- or whatever other data
  1038          * structure the filesystem needs -- is still in memory now, it is
  1039          * a good thing to do this.  Note also that if the pageout daemon
  1040          * is requesting a sync, there might not be enough memory to do
  1041          * the bmap then...  So, this is important to do.
 1042          */
 1043         if (vp->v_type != VCHR && bp->b_lblkno == bp->b_blkno) {
 1044                 VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
 1045         }
 1046 
 1047         /*
 1048          * Set the *dirty* buffer range based upon the VM system dirty pages.
 1049          */
 1050         vfs_setdirty(bp);
 1051 
 1052         /*
 1053          * We need to do this here to satisfy the vnode_pager and the
 1054          * pageout daemon, so that it thinks that the pages have been
 1055          * "cleaned".  Note that since the pages are in a delayed write
 1056          * buffer -- the VFS layer "will" see that the pages get written
 1057          * out on the next sync, or perhaps the cluster will be completed.
 1058          */
 1059         vfs_clean_pages(bp);
 1060         bqrelse(bp);
 1061 
 1062         /*
 1063          * Wakeup the buffer flushing daemon if we have a lot of dirty
 1064          * buffers (midpoint between our recovery point and our stall
 1065          * point).
 1066          */
 1067         bd_wakeup((lodirtybuffers + hidirtybuffers) / 2);
 1068 
 1069         /*
 1070          * note: we cannot initiate I/O from a bdwrite even if we wanted to,
 1071          * due to the softdep code.
 1072          */
 1073 }
 1074 
 1075 /*
 1076  *      bdirty:
 1077  *
 1078  *      Turn buffer into delayed write request.  We must clear BIO_READ and
 1079  *      B_RELBUF, and we must set B_DELWRI.  We reassign the buffer to 
 1080  *      itself to properly update it in the dirty/clean lists.  We mark it
 1081  *      B_DONE to ensure that any asynchronization of the buffer properly
 1082  *      clears B_DONE ( else a panic will occur later ).  
 1083  *
 1084  *      bdirty() is kinda like bdwrite() - we have to clear B_INVAL which
 1085  *      might have been set pre-getblk().  Unlike bwrite/bdwrite, bdirty()
 1086  *      should only be called if the buffer is known-good.
 1087  *
 1088  *      Since the buffer is not on a queue, we do not update the numfreebuffers
 1089  *      count.
 1090  *
 1091  *      Must be called at splbio().
 1092  *      The buffer must be on QUEUE_NONE.
 1093  */
 1094 void
 1095 bdirty(bp)
 1096         struct buf *bp;
 1097 {
 1098         KASSERT(bp->b_qindex == QUEUE_NONE,
 1099             ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
 1100         bp->b_flags &= ~(B_RELBUF);
 1101         bp->b_iocmd = BIO_WRITE;
 1102 
 1103         if ((bp->b_flags & B_DELWRI) == 0) {
 1104                 bp->b_flags |= B_DONE | B_DELWRI;
 1105                 reassignbuf(bp, bp->b_vp);
 1106                 atomic_add_int(&numdirtybuffers, 1);
 1107                 bd_wakeup((lodirtybuffers + hidirtybuffers) / 2);
 1108         }
 1109 }
 1110 
 1111 /*
 1112  *      bundirty:
 1113  *
 1114  *      Clear B_DELWRI for buffer.
 1115  *
 1116  *      Since the buffer is not on a queue, we do not update the numfreebuffers
 1117  *      count.
 1118  *      
 1119  *      Must be called at splbio().
 1120  *      The buffer must be on QUEUE_NONE.
 1121  */
 1122 
 1123 void
 1124 bundirty(bp)
 1125         struct buf *bp;
 1126 {
 1127         KASSERT(bp->b_qindex == QUEUE_NONE,
 1128             ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex));
 1129 
 1130         if (bp->b_flags & B_DELWRI) {
 1131                 bp->b_flags &= ~B_DELWRI;
 1132                 reassignbuf(bp, bp->b_vp);
 1133                 atomic_subtract_int(&numdirtybuffers, 1);
 1134                 numdirtywakeup(lodirtybuffers);
 1135         }
 1136         /*
 1137          * Since it is now being written, we can clear its deferred write flag.
 1138          */
 1139         bp->b_flags &= ~B_DEFERRED;
 1140 }
 1141 
 1142 /*
 1143  *      bawrite:
 1144  *
 1145  *      Asynchronous write.  Start output on a buffer, but do not wait for
 1146  *      it to complete.  The buffer is released when the output completes.
 1147  *
 1148  *      bwrite() ( or the VOP routine anyway ) is responsible for handling 
 1149  *      B_INVAL buffers.  Not us.
 1150  */
 1151 void
 1152 bawrite(struct buf * bp)
 1153 {
 1154         bp->b_flags |= B_ASYNC;
 1155         (void) BUF_WRITE(bp);
 1156 }
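       /*
        * In short: bwrite() issues the write immediately and, unless B_ASYNC
        * is set, waits for it; bawrite() is the same output started
        * asynchronously; and bdwrite() only marks the buffer dirty, deferring
        * the actual write to a later flush.
        */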
 1157 
 1158 /*
 1159  *      bwillwrite:
 1160  *
 1161  *      Called prior to the locking of any vnodes when we are expecting to
 1162  *      write.  We do not want to starve the buffer cache with too many
 1163  *      dirty buffers so we block here.  By blocking prior to the locking
 1164  *      of any vnodes we attempt to avoid the situation where a locked vnode
 1165  *      prevents the various system daemons from flushing related buffers.
 1166  */
 1167 
 1168 void
 1169 bwillwrite(void)
 1170 {
 1171         if (numdirtybuffers >= hidirtybuffers) {
 1172                 int s;
 1173 
 1174                 mtx_lock(&Giant);
 1175                 s = splbio();
 1176                 mtx_lock(&nblock);
 1177                 while (numdirtybuffers >= hidirtybuffers) {
 1178                         bd_wakeup(1);
 1179                         needsbuffer |= VFS_BIO_NEED_DIRTYFLUSH;
 1180                         msleep(&needsbuffer, &nblock,
 1181                             (PRIBIO + 4), "flswai", 0);
 1182                 }
 1183                 splx(s);
 1184                 mtx_unlock(&nblock);
 1185                 mtx_unlock(&Giant);
 1186         }
 1187 }
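       /*
        * Write paths (the generic write(2) path, for instance) call this
        * before acquiring any vnode locks, so that a process about to create
        * more dirty buffers throttles here, while it holds no locks that the
        * flushing daemons might need.
        */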
 1188 
 1189 /*
 1190  * Return true if we have too many dirty buffers.
 1191  */
 1192 int
 1193 buf_dirty_count_severe(void)
 1194 {
 1195         return(numdirtybuffers >= hidirtybuffers);
 1196 }
 1197 
 1198 /*
 1199  *      brelse:
 1200  *
 1201  *      Release a busy buffer and, if requested, free its resources.  The
 1202  *      buffer will be stashed in the appropriate bufqueue[] allowing it
 1203  *      to be accessed later as a cache entity or reused for other purposes.
 1204  */
 1205 void
 1206 brelse(struct buf * bp)
 1207 {
 1208         int s;
 1209 
 1210         GIANT_REQUIRED;
 1211 
 1212         KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
 1213             ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
 1214 
 1215         s = splbio();
 1216 
 1217         if (bp->b_flags & B_LOCKED)
 1218                 bp->b_ioflags &= ~BIO_ERROR;
 1219 
 1220         if (bp->b_iocmd == BIO_WRITE &&
 1221             (bp->b_ioflags & BIO_ERROR) &&
 1222             !(bp->b_flags & B_INVAL)) {
 1223                 /*
 1224                  * Failed write, redirty.  Must clear BIO_ERROR to prevent
 1225                  * pages from being scrapped.  If B_INVAL is set then
 1226                  * this case is not run and the next case is run to 
 1227                  * destroy the buffer.  B_INVAL can occur if the buffer
 1228                  * is outside the range supported by the underlying device.
 1229                  */
 1230                 bp->b_ioflags &= ~BIO_ERROR;
 1231                 bdirty(bp);
 1232         } else if ((bp->b_flags & (B_NOCACHE | B_INVAL)) ||
 1233             (bp->b_ioflags & BIO_ERROR) ||
 1234             bp->b_iocmd == BIO_DELETE || (bp->b_bufsize <= 0)) {
 1235                 /*
 1236                  * Either a failed I/O or we were asked to free or not
 1237                  * cache the buffer.
 1238                  */
 1239                 bp->b_flags |= B_INVAL;
 1240                 if (LIST_FIRST(&bp->b_dep) != NULL)
 1241                         buf_deallocate(bp);
 1242                 if (bp->b_flags & B_DELWRI) {
 1243                         atomic_subtract_int(&numdirtybuffers, 1);
 1244                         numdirtywakeup(lodirtybuffers);
 1245                 }
 1246                 bp->b_flags &= ~(B_DELWRI | B_CACHE);
 1247                 if ((bp->b_flags & B_VMIO) == 0) {
 1248                         if (bp->b_bufsize)
 1249                                 allocbuf(bp, 0);
 1250                         if (bp->b_vp)
 1251                                 brelvp(bp);
 1252                 }
 1253         }
 1254 
 1255         /*
 1256          * We must clear B_RELBUF if B_DELWRI is set.  If vfs_vmio_release() 
 1257          * is called with B_DELWRI set, the underlying pages may wind up
 1258          * getting freed causing a previous write (bdwrite()) to get 'lost'
 1259          * because pages associated with a B_DELWRI bp are marked clean.
 1260          * 
 1261          * We still allow the B_INVAL case to call vfs_vmio_release(), even
 1262          * if B_DELWRI is set.
 1263          *
 1264          * If B_DELWRI is not set we may have to set B_RELBUF if we are low
 1265          * on pages to return pages to the VM page queues.
 1266          */
 1267         if (bp->b_flags & B_DELWRI)
 1268                 bp->b_flags &= ~B_RELBUF;
 1269         else if (vm_page_count_severe() && !(bp->b_xflags & BX_BKGRDINPROG))
 1270                 bp->b_flags |= B_RELBUF;
 1271 
 1272         /*
 1273          * VMIO buffer rundown.  It is not very necessary to keep a VMIO buffer
  1274  * constituted, not even NFS buffers now.  Two flags affect this.  If
 1275          * B_INVAL, the struct buf is invalidated but the VM object is kept
 1276          * around ( i.e. so it is trivial to reconstitute the buffer later ).
 1277          *
 1278          * If BIO_ERROR or B_NOCACHE is set, pages in the VM object will be
 1279          * invalidated.  BIO_ERROR cannot be set for a failed write unless the
 1280          * buffer is also B_INVAL because it hits the re-dirtying code above.
 1281          *
 1282          * Normally we can do this whether a buffer is B_DELWRI or not.  If
 1283          * the buffer is an NFS buffer, it is tracking piecemeal writes or
 1284          * the commit state and we cannot afford to lose the buffer. If the
 1285          * buffer has a background write in progress, we need to keep it
 1286          * around to prevent it from being reconstituted and starting a second
 1287          * background write.
 1288          */
 1289         if ((bp->b_flags & B_VMIO)
 1290             && !(bp->b_vp->v_mount != NULL &&
 1291                  (bp->b_vp->v_mount->mnt_vfc->vfc_flags & VFCF_NETWORK) != 0 &&
 1292                  !vn_isdisk(bp->b_vp, NULL) &&
 1293                  (bp->b_flags & B_DELWRI))
 1294             ) {
 1295 
 1296                 int i, j, resid;
 1297                 vm_page_t m;
 1298                 off_t foff;
 1299                 vm_pindex_t poff;
 1300                 vm_object_t obj;
 1301                 struct vnode *vp;
 1302 
 1303                 vp = bp->b_vp;
 1304                 obj = bp->b_object;
 1305 
 1306                 /*
 1307                  * Get the base offset and length of the buffer.  Note that 
 1308                  * in the VMIO case if the buffer block size is not
  1309                  * page-aligned then the b_data pointer may not be page-aligned.
 1310                  * But our b_pages[] array *IS* page aligned.
 1311                  *
  1312                  * block sizes less than DEV_BSIZE (usually 512) are not
 1313                  * supported due to the page granularity bits (m->valid,
 1314                  * m->dirty, etc...). 
 1315                  *
 1316                  * See man buf(9) for more information
 1317                  */
 1318                 resid = bp->b_bufsize;
 1319                 foff = bp->b_offset;
 1320                 if (obj != NULL)
 1321                         VM_OBJECT_LOCK(obj);
 1322                 for (i = 0; i < bp->b_npages; i++) {
 1323                         int had_bogus = 0;
 1324 
 1325                         m = bp->b_pages[i];
 1326                         vm_page_lock_queues();
 1327                         vm_page_flag_clear(m, PG_ZERO);
 1328                         vm_page_unlock_queues();
 1329 
 1330                         /*
 1331                          * If we hit a bogus page, fixup *all* the bogus pages
 1332                          * now.
 1333                          */
 1334                         if (m == bogus_page) {
 1335                                 poff = OFF_TO_IDX(bp->b_offset);
 1336                                 had_bogus = 1;
 1337 
 1338                                 for (j = i; j < bp->b_npages; j++) {
 1339                                         vm_page_t mtmp;
 1340                                         mtmp = bp->b_pages[j];
 1341                                         if (mtmp == bogus_page) {
 1342                                                 mtmp = vm_page_lookup(obj, poff + j);
 1343                                                 if (!mtmp) {
 1344                                                         panic("brelse: page missing\n");
 1345                                                 }
 1346                                                 bp->b_pages[j] = mtmp;
 1347                                         }
 1348                                 }
 1349 
 1350                                 if ((bp->b_flags & B_INVAL) == 0) {
 1351                                         pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
 1352                                 }
 1353                                 m = bp->b_pages[i];
 1354                         }
 1355                         if ((bp->b_flags & B_NOCACHE) || (bp->b_ioflags & BIO_ERROR)) {
 1356                                 int poffset = foff & PAGE_MASK;
 1357                                 int presid = resid > (PAGE_SIZE - poffset) ?
 1358                                         (PAGE_SIZE - poffset) : resid;
 1359 
 1360                                 KASSERT(presid >= 0, ("brelse: extra page"));
 1361                                 vm_page_set_invalid(m, poffset, presid);
 1362                                 if (had_bogus)
 1363                                         printf("avoided corruption bug in bogus_page/brelse code\n");
 1364                         }
 1365                         resid -= PAGE_SIZE - (foff & PAGE_MASK);
 1366                         foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
 1367                 }
 1368                 if (obj != NULL)
 1369                         VM_OBJECT_UNLOCK(obj);
 1370                 if (bp->b_flags & (B_INVAL | B_RELBUF))
 1371                         vfs_vmio_release(bp);
 1372 
 1373         } else if (bp->b_flags & B_VMIO) {
 1374 
 1375                 if (bp->b_flags & (B_INVAL | B_RELBUF)) {
 1376                         vfs_vmio_release(bp);
 1377                 }
 1378 
 1379         }
 1380                         
 1381         if (bp->b_qindex != QUEUE_NONE)
 1382                 panic("brelse: free buffer onto another queue???");
 1383         if (BUF_REFCNT(bp) > 1) {
 1384                 /* do not release to free list */
 1385                 BUF_UNLOCK(bp);
 1386                 splx(s);
 1387                 return;
 1388         }
 1389 
 1390         /* enqueue */
 1391         mtx_lock(&bqlock);
 1392 
 1393         /* buffers with no memory */
 1394         if (bp->b_bufsize == 0) {
 1395                 bp->b_flags |= B_INVAL;
 1396                 bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA);
 1397                 if (bp->b_xflags & BX_BKGRDINPROG)
 1398                         panic("losing buffer 1");
 1399                 if (bp->b_kvasize) {
 1400                         bp->b_qindex = QUEUE_EMPTYKVA;
 1401                 } else {
 1402                         bp->b_qindex = QUEUE_EMPTY;
 1403                 }
 1404                 TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist);
 1405                 bp->b_dev = NODEV;
 1406         /* buffers with junk contents */
 1407         } else if (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF) ||
 1408             (bp->b_ioflags & BIO_ERROR)) {
 1409                 bp->b_flags |= B_INVAL;
 1410                 bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA);
 1411                 if (bp->b_xflags & BX_BKGRDINPROG)
 1412                         panic("losing buffer 2");
 1413                 bp->b_qindex = QUEUE_CLEAN;
 1414                 TAILQ_INSERT_HEAD(&bufqueues[QUEUE_CLEAN], bp, b_freelist);
 1415                 bp->b_dev = NODEV;
 1416 
 1417         /* buffers that are locked */
 1418         } else if (bp->b_flags & B_LOCKED) {
 1419                 bp->b_qindex = QUEUE_LOCKED;
 1420                 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
 1421 
 1422         /* remaining buffers */
 1423         } else {
 1424                 if (bp->b_flags & B_DELWRI)
 1425                         bp->b_qindex = QUEUE_DIRTY;
 1426                 else
 1427                         bp->b_qindex = QUEUE_CLEAN;
 1428                 if (bp->b_flags & B_AGE)
 1429                         TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist);
 1430                 else
 1431                         TAILQ_INSERT_TAIL(&bufqueues[bp->b_qindex], bp, b_freelist);
 1432         }
 1433         mtx_unlock(&bqlock);
 1434 
 1435         /*
 1436          * If B_INVAL and B_DELWRI are set, clear B_DELWRI.  We have already
 1437          * placed the buffer on the correct queue.  We must also disassociate
 1438          * the device and vnode for a B_INVAL buffer so gbincore() doesn't
 1439          * find it.
 1440          */
 1441         if (bp->b_flags & B_INVAL) {
 1442                 if (bp->b_flags & B_DELWRI)
 1443                         bundirty(bp);
 1444                 if (bp->b_vp)
 1445                         brelvp(bp);
 1446         }
 1447 
 1448         /*
 1449          * Fixup numfreebuffers count.  The bp is on an appropriate queue
 1450          * unless locked.  We then bump numfreebuffers if it is not B_DELWRI.
 1451          * We've already handled the B_INVAL case ( B_DELWRI will be clear
 1452          * if B_INVAL is set ).
 1453          */
 1454 
 1455         if ((bp->b_flags & B_LOCKED) == 0 && !(bp->b_flags & B_DELWRI))
 1456                 bufcountwakeup();
 1457 
 1458         /*
 1459          * Something we can maybe free or reuse
 1460          */
 1461         if (bp->b_bufsize || bp->b_kvasize)
 1462                 bufspacewakeup();
 1463 
 1464         bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF | 
 1465                         B_DIRECT | B_NOWDRAIN);
 1466         if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
 1467                 panic("brelse: not dirty");
 1468         /* unlock */
 1469         BUF_UNLOCK(bp);
 1470         splx(s);
 1471 }
 1472 
 1473 /*
 1474  * Release a buffer back to the appropriate queue but do not try to free
 1475  * it.  The buffer is expected to be used again soon.
 1476  *
 1477  * bqrelse() is used by bdwrite() to requeue a delayed write, and used by
 1478  * biodone() to requeue an async I/O on completion.  It is also used when
 1479  * known good buffers need to be requeued but we think we may need the data
 1480  * again soon.
 1481  *
 1482  * XXX we should be able to leave the B_RELBUF hint set on completion.
 1483  */
 1484 void
 1485 bqrelse(struct buf * bp)
 1486 {
 1487         int s;
 1488 
 1489         s = splbio();
 1490 
 1491         KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)), ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
 1492 
 1493         if (bp->b_qindex != QUEUE_NONE)
 1494                 panic("bqrelse: free buffer onto another queue???");
 1495         if (BUF_REFCNT(bp) > 1) {
 1496                 /* do not release to free list */
 1497                 BUF_UNLOCK(bp);
 1498                 splx(s);
 1499                 return;
 1500         }
 1501         mtx_lock(&bqlock);
 1502         if (bp->b_flags & B_LOCKED) {
 1503                 bp->b_ioflags &= ~BIO_ERROR;
 1504                 bp->b_qindex = QUEUE_LOCKED;
 1505                 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
 1506                 /* buffers with stale but valid contents */
 1507         } else if (bp->b_flags & B_DELWRI) {
 1508                 bp->b_qindex = QUEUE_DIRTY;
 1509                 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_DIRTY], bp, b_freelist);
 1510         } else if (vm_page_count_severe()) {
 1511                 /*
 1512                  * We are too low on memory, we have to try to free the
 1513                  * buffer (most importantly: the wired pages making up its
 1514                  * backing store) *now*.
 1515                  */
 1516                 mtx_unlock(&bqlock);
 1517                 splx(s);
 1518                 brelse(bp);
 1519                 return;
 1520         } else {
 1521                 bp->b_qindex = QUEUE_CLEAN;
 1522                 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_CLEAN], bp, b_freelist);
 1523         }
 1524         mtx_unlock(&bqlock);
 1525 
 1526         if ((bp->b_flags & B_LOCKED) == 0 &&
 1527             ((bp->b_flags & B_INVAL) || !(bp->b_flags & B_DELWRI))) {
 1528                 bufcountwakeup();
 1529         }
 1530 
 1531         /*
 1532          * Something we can maybe free or reuse.
 1533          */
 1534         if (bp->b_bufsize && !(bp->b_flags & B_DELWRI))
 1535                 bufspacewakeup();
 1536 
 1537         bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
 1538         if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
 1539                 panic("bqrelse: not dirty");
 1540         /* unlock */
 1541         BUF_UNLOCK(bp);
 1542         splx(s);
 1543 }
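
/*
 * Illustrative sketch, not part of vfs_bio.c: how a filesystem-level caller
 * typically chooses between brelse() and bqrelse() above.  It assumes only
 * the bread(9)/brelse(9)/bqrelse(9) interfaces documented in buf(9); the
 * vnode, block number, and size are hypothetical.
 */
#if 0
static int
example_read_and_release(struct vnode *vp, daddr_t lbn, int size,
    struct ucred *cred, int expect_reuse)
{
        struct buf *bp;
        int error;

        error = bread(vp, lbn, size, cred, &bp);
        if (error) {
                brelse(bp);             /* discard the failed buffer */
                return (error);
        }
        /* ... examine or copy out bp->b_data here ... */
        if (expect_reuse)
                bqrelse(bp);            /* keep it queued; likely needed again soon */
        else
                brelse(bp);             /* normal release; may be freed or reused */
        return (0);
}
#endif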
 1544 
 1545 /* Give pages used by the bp back to the VM system (where possible) */
 1546 static void
 1547 vfs_vmio_release(bp)
 1548         struct buf *bp;
 1549 {
 1550         int i;
 1551         vm_page_t m;
 1552 
 1553         GIANT_REQUIRED;
 1554         vm_page_lock_queues();
 1555         for (i = 0; i < bp->b_npages; i++) {
 1556                 m = bp->b_pages[i];
 1557                 bp->b_pages[i] = NULL;
 1558                 /*
 1559                  * In order to keep page LRU ordering consistent, put
 1560                  * everything on the inactive queue.
 1561                  */
 1562                 vm_page_unwire(m, 0);
 1563                 /*
 1564                  * We don't mess with busy pages, it is
 1565                  * the responsibility of the process that
 1566                  * busied the pages to deal with them.
 1567                  */
 1568                 if ((m->flags & PG_BUSY) || (m->busy != 0))
 1569                         continue;
 1570                         
 1571                 if (m->wire_count == 0) {
 1572                         vm_page_flag_clear(m, PG_ZERO);
 1573                         /*
 1574                          * Might as well free the page if we can and it has
 1575                          * no valid data.  We also free the page if the
 1576                          * buffer was used for direct I/O
 1577                          */
 1578                         if ((bp->b_flags & B_ASYNC) == 0 && !m->valid &&
 1579                             m->hold_count == 0) {
 1580                                 vm_page_busy(m);
 1581                                 pmap_remove_all(m);
 1582                                 vm_page_free(m);
 1583                         } else if (bp->b_flags & B_DIRECT) {
 1584                                 vm_page_try_to_free(m);
 1585                         } else if (vm_page_count_severe()) {
 1586                                 vm_page_try_to_cache(m);
 1587                         }
 1588                 }
 1589         }
 1590         vm_page_unlock_queues();
 1591         pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
 1592         
 1593         if (bp->b_bufsize) {
 1594                 bufspacewakeup();
 1595                 bp->b_bufsize = 0;
 1596         }
 1597         bp->b_npages = 0;
 1598         bp->b_flags &= ~B_VMIO;
 1599         if (bp->b_vp)
 1600                 brelvp(bp);
 1601 }
 1602 
 1603 /*
 1604  * Check to see if a block at a particular lbn is available for a clustered
 1605  * write.
 1606  */
 1607 static int
 1608 vfs_bio_clcheck(struct vnode *vp, int size, daddr_t lblkno, daddr_t blkno)
 1609 {
 1610         struct buf *bpa;
 1611         int match;
 1612 
 1613         match = 0;
 1614 
 1615         /* If the buf isn't in core skip it */
 1616         if ((bpa = gbincore(vp, lblkno)) == NULL)
 1617                 return (0);
 1618 
 1619         /* If the buf is busy we don't want to wait for it */
 1620         if (BUF_LOCK(bpa, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
 1621                 return (0);
 1622 
 1623         /* Only cluster with valid clusterable delayed write buffers */
 1624         if ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) !=
 1625             (B_DELWRI | B_CLUSTEROK))
 1626                 goto done;
 1627 
 1628         if (bpa->b_bufsize != size)
 1629                 goto done;
 1630 
 1631         /*
 1632          * Check to see if it is in the expected place on disk and that the
 1633          * block has been mapped.
 1634          */
 1635         if ((bpa->b_blkno != bpa->b_lblkno) && (bpa->b_blkno == blkno))
 1636                 match = 1;
 1637 done:
 1638         BUF_UNLOCK(bpa);
 1639         return (match);
 1640 }
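
/*
 * Illustrative sketch, not part of vfs_bio.c: the mapped/contiguous test at
 * the end of vfs_bio_clcheck() above.  A buffer whose b_blkno still equals
 * its b_lblkno has not been mapped to a physical block yet and so cannot be
 * clustered; otherwise it clusters only when its physical block number is
 * exactly where the caller expects the next piece of the cluster to land.
 */
#if 0
static int
example_is_cluster_candidate(struct buf *bpa, daddr_t expected_blkno)
{
        return (bpa->b_blkno != bpa->b_lblkno &&
            bpa->b_blkno == expected_blkno);
}
#endif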
 1641 
 1642 /*
 1643  *      vfs_bio_awrite:
 1644  *
 1645  *      Implement clustered async writes for clearing out B_DELWRI buffers.
 1646  *      This is much better than the old way of writing only one buffer at
 1647  *      a time.  Note that we may not be presented with the buffers in the 
 1648  *      correct order, so we search for the cluster in both directions.
 1649  */
 1650 int
 1651 vfs_bio_awrite(struct buf * bp)
 1652 {
 1653         int i;
 1654         int j;
 1655         daddr_t lblkno = bp->b_lblkno;
 1656         struct vnode *vp = bp->b_vp;
 1657         int s;
 1658         int ncl;
 1659         int nwritten;
 1660         int size;
 1661         int maxcl;
 1662 
 1663         s = splbio();
 1664         /*
 1665          * right now we support clustered writing only to regular files.  If
 1666          * we find a clusterable block we could be in the middle of a cluster
 1667          * rather than at the beginning.
 1668          */
 1669         if ((vp->v_type == VREG) && 
 1670             (vp->v_mount != 0) && /* Only on nodes that have the size info */
 1671             (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
 1672 
 1673                 size = vp->v_mount->mnt_stat.f_iosize;
 1674                 maxcl = MAXPHYS / size;
 1675 
 1676                 VI_LOCK(vp);
 1677                 for (i = 1; i < maxcl; i++)
 1678                         if (vfs_bio_clcheck(vp, size, lblkno + i,
 1679                             bp->b_blkno + ((i * size) >> DEV_BSHIFT)) == 0)
 1680                                 break;
 1681 
 1682                 for (j = 1; i + j <= maxcl && j <= lblkno; j++) 
 1683                         if (vfs_bio_clcheck(vp, size, lblkno - j,
 1684                             bp->b_blkno - ((j * size) >> DEV_BSHIFT)) == 0)
 1685                                 break;
 1686 
 1687                 VI_UNLOCK(vp);
 1688                 --j;
 1689                 ncl = i + j;
 1690                 /*
 1691                  * this is a possible cluster write
 1692                  */
 1693                 if (ncl != 1) {
 1694                         BUF_UNLOCK(bp);
 1695                         nwritten = cluster_wbuild(vp, size, lblkno - j, ncl);
 1696                         splx(s);
 1697                         return nwritten;
 1698                 }
 1699         }
 1700 
 1701         bremfree(bp);
 1702         bp->b_flags |= B_ASYNC;
 1703 
 1704         splx(s);
 1705         /*
 1706          * default (old) behavior, writing out only one block
 1707          *
 1708          * XXX returns b_bufsize instead of b_bcount for nwritten?
 1709          */
 1710         nwritten = bp->b_bufsize;
 1711         (void) BUF_WRITE(bp);
 1712 
 1713         return nwritten;
 1714 }
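
/*
 * Illustrative sketch, not part of vfs_bio.c: the cluster sizing done by
 * vfs_bio_awrite() above.  Assuming MAXPHYS is 128 kB and a 16 kB filesystem
 * block size, maxcl is 8 blocks; if the forward scan found i = 3 clusterable
 * blocks and the backward scan found j = 2, the write covers ncl = 5 blocks
 * starting at lblkno - 2.  The numbers are hypothetical; only the arithmetic
 * mirrors the code above.
 */
#if 0
static int
example_cluster_extent(daddr_t lblkno, int size, int i, int j,
    daddr_t *startp)
{
        int maxcl = MAXPHYS / size;     /* e.g. 131072 / 16384 = 8 */
        int ncl = i + j;                /* forward hits plus backward hits */

        /* The scan loops above already keep i + j within maxcl. */
        if (ncl > maxcl)
                ncl = maxcl;
        *startp = lblkno - j;           /* cluster begins j blocks back */
        return (ncl);                   /* block count given to cluster_wbuild() */
}
#endif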
 1715 
 1716 /*
 1717  *      getnewbuf:
 1718  *
 1719  *      Find and initialize a new buffer header, freeing up existing buffers 
 1720  *      in the bufqueues as necessary.  The new buffer is returned locked.
 1721  *
 1722  *      Important:  B_INVAL is not set.  If the caller wishes to throw the
 1723  *      buffer away, the caller must set B_INVAL prior to calling brelse().
 1724  *
 1725  *      We block if:
 1726  *              We have insufficient buffer headers
 1727  *              We have insufficient buffer space
 1728  *              buffer_map is too fragmented ( space reservation fails )
 1729  *              If we have to flush dirty buffers ( but we try to avoid this )
 1730  *
 1731  *      To avoid VFS layer recursion we do not flush dirty buffers ourselves.
 1732  *      Instead we ask the buf daemon to do it for us.  We attempt to
 1733  *      avoid piecemeal wakeups of the pageout daemon.
 1734  */
 1735 
 1736 static struct buf *
 1737 getnewbuf(int slpflag, int slptimeo, int size, int maxsize)
 1738 {
 1739         struct buf *bp;
 1740         struct buf *nbp;
 1741         int defrag = 0;
 1742         int nqindex;
 1743         static int flushingbufs;
 1744 
 1745         GIANT_REQUIRED;
 1746 
 1747         /*
 1748          * We can't afford to block since we might be holding a vnode lock,
 1749          * which may prevent system daemons from running.  We deal with
 1750          * low-memory situations by proactively returning memory and running
 1751          * async I/O rather than sync I/O.
 1752          */
 1753 
 1754         atomic_add_int(&getnewbufcalls, 1);
 1755         atomic_subtract_int(&getnewbufrestarts, 1);
 1756 restart:
 1757         atomic_add_int(&getnewbufrestarts, 1);
 1758 
 1759         /*
 1760          * Setup for scan.  If we do not have enough free buffers,
 1761          * we set up a degenerate case that immediately fails.  Note
 1762          * that if we are a specially marked process, we are allowed to
 1763          * dip into our reserves.
 1764          *
 1765          * The scanning sequence is nominally:  EMPTY->EMPTYKVA->CLEAN
 1766          *
 1767          * We start with EMPTYKVA.  If the list is empty we backup to EMPTY.
 1768          * However, there are a number of cases (defragging, reusing, ...)
 1769          * where we cannot back up.
 1770          */
 1771         mtx_lock(&bqlock);
 1772         nqindex = QUEUE_EMPTYKVA;
 1773         nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTYKVA]);
 1774 
 1775         if (nbp == NULL) {
 1776                 /*
 1777                  * If no EMPTYKVA buffers and we are either
 1778                  * defragging or reusing, locate a CLEAN buffer
 1779                  * to free or reuse.  If bufspace usage is low
 1780                  * skip this step so we can allocate a new buffer.
 1781                  */
 1782                 if (defrag || bufspace >= lobufspace) {
 1783                         nqindex = QUEUE_CLEAN;
 1784                         nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN]);
 1785                 }
 1786 
 1787                 /*
 1788                  * If we could not find or were not allowed to reuse a
 1789                  * CLEAN buffer, check to see if it is ok to use an EMPTY
 1790                  * buffer.  We can only use an EMPTY buffer if allocating
 1791                  * its KVA would not otherwise run us out of buffer space.
 1792                  */
 1793                 if (nbp == NULL && defrag == 0 &&
 1794                     bufspace + maxsize < hibufspace) {
 1795                         nqindex = QUEUE_EMPTY;
 1796                         nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]);
 1797                 }
 1798         }
 1799 
 1800         /*
 1801          * Run scan, possibly freeing data and/or kva mappings on the fly
 1802          * depending.
 1803          */
 1804 
 1805         while ((bp = nbp) != NULL) {
 1806                 int qindex = nqindex;
 1807 
 1808                 /*
 1809                  * Calculate next bp ( we can only use it if we do not block
 1810                  * or do other fancy things ).
 1811                  */
 1812                 if ((nbp = TAILQ_NEXT(bp, b_freelist)) == NULL) {
 1813                         switch(qindex) {
 1814                         case QUEUE_EMPTY:
 1815                                 nqindex = QUEUE_EMPTYKVA;
 1816                                 if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTYKVA])))
 1817                                         break;
 1818                                 /* FALLTHROUGH */
 1819                         case QUEUE_EMPTYKVA:
 1820                                 nqindex = QUEUE_CLEAN;
 1821                                 if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN])))
 1822                                         break;
 1823                                 /* FALLTHROUGH */
 1824                         case QUEUE_CLEAN:
 1825                                 /*
 1826                                  * nbp is NULL. 
 1827                                  */
 1828                                 break;
 1829                         }
 1830                 }
 1831 
 1832                 /*
 1833                  * Sanity Checks
 1834                  */
 1835                 KASSERT(bp->b_qindex == qindex, ("getnewbuf: inconsistent queue %d bp %p", qindex, bp));
 1836 
 1837                 /*
 1838                  * Note: we no longer distinguish between VMIO and non-VMIO
 1839                  * buffers.
 1840                  */
 1841 
 1842                 KASSERT((bp->b_flags & B_DELWRI) == 0, ("delwri buffer %p found in queue %d", bp, qindex));
 1843 
 1844                 /*
 1845                  * If we are defragging then we need a buffer with 
 1846                  * b_kvasize != 0.  XXX this situation should no longer
 1847                  * occur; if defrag is non-zero the buffer's b_kvasize
 1848                  * should also be non-zero at this point.  XXX
 1849                  */
 1850                 if (defrag && bp->b_kvasize == 0) {
 1851                         printf("Warning: defrag empty buffer %p\n", bp);
 1852                         continue;
 1853                 }
 1854 
 1855                 /*
 1856                  * Start freeing the bp.  This is somewhat involved.  nbp
 1857                  * remains valid only for QUEUE_EMPTY[KVA] bp's.
 1858                  */
 1859 
 1860                 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
 1861                         panic("getnewbuf: locked buf");
 1862                 bremfreel(bp);
 1863                 mtx_unlock(&bqlock);
 1864 
 1865                 if (qindex == QUEUE_CLEAN) {
 1866                         if (bp->b_flags & B_VMIO) {
 1867                                 bp->b_flags &= ~B_ASYNC;
 1868                                 vfs_vmio_release(bp);
 1869                         }
 1870                         if (bp->b_vp)
 1871                                 brelvp(bp);
 1872                 }
 1873 
 1874                 /*
 1875                  * NOTE:  nbp is now entirely invalid.  We can only restart
 1876                  * the scan from this point on.
 1877                  *
 1878                  * Get the rest of the buffer freed up.  b_kva* is still
 1879                  * valid after this operation.
 1880                  */
 1881 
 1882                 if (bp->b_rcred != NOCRED) {
 1883                         crfree(bp->b_rcred);
 1884                         bp->b_rcred = NOCRED;
 1885                 }
 1886                 if (bp->b_wcred != NOCRED) {
 1887                         crfree(bp->b_wcred);
 1888                         bp->b_wcred = NOCRED;
 1889                 }
 1890                 if (LIST_FIRST(&bp->b_dep) != NULL)
 1891                         buf_deallocate(bp);
 1892                 if (bp->b_xflags & BX_BKGRDINPROG)
 1893                         panic("losing buffer 3");
 1894 
 1895                 if (bp->b_bufsize)
 1896                         allocbuf(bp, 0);
 1897 
 1898                 bp->b_flags = 0;
 1899                 bp->b_ioflags = 0;
 1900                 bp->b_xflags = 0;
 1901                 bp->b_vflags = 0;
 1902                 bp->b_dev = NODEV;
 1903                 bp->b_vp = NULL;
 1904                 bp->b_blkno = bp->b_lblkno = 0;
 1905                 bp->b_offset = NOOFFSET;
 1906                 bp->b_iodone = 0;
 1907                 bp->b_error = 0;
 1908                 bp->b_resid = 0;
 1909                 bp->b_bcount = 0;
 1910                 bp->b_npages = 0;
 1911                 bp->b_dirtyoff = bp->b_dirtyend = 0;
 1912                 bp->b_magic = B_MAGIC_BIO;
 1913                 bp->b_op = &buf_ops_bio;
 1914                 bp->b_object = NULL;
 1915 
 1916                 LIST_INIT(&bp->b_dep);
 1917 
 1918                 /*
 1919                  * If we are defragging then free the buffer.
 1920                  */
 1921                 if (defrag) {
 1922                         bp->b_flags |= B_INVAL;
 1923                         bfreekva(bp);
 1924                         brelse(bp);
 1925                         defrag = 0;
 1926                         goto restart;
 1927                 }
 1928 
 1929                 /*
 1930                  * If we are overcommitted then recover the buffer and its
 1931                  * KVM space.  This occurs in rare situations when multiple
 1932                  * processes are blocked in getnewbuf() or allocbuf().
 1933                  */
 1934                 if (bufspace >= hibufspace)
 1935                         flushingbufs = 1;
 1936                 if (flushingbufs && bp->b_kvasize != 0) {
 1937                         bp->b_flags |= B_INVAL;
 1938                         bfreekva(bp);
 1939                         brelse(bp);
 1940                         goto restart;
 1941                 }
 1942                 if (bufspace < lobufspace)
 1943                         flushingbufs = 0;
 1944                 break;
 1945         }
 1946 
 1947         /*
 1948          * If we exhausted our list, sleep as appropriate.  We may have to
 1949          * wakeup various daemons and write out some dirty buffers.
 1950          *
 1951          * Generally we are sleeping due to insufficient buffer space.
 1952          */
 1953 
 1954         if (bp == NULL) {
 1955                 int flags;
 1956                 char *waitmsg;
 1957 
 1958                 mtx_unlock(&bqlock);
 1959                 if (defrag) {
 1960                         flags = VFS_BIO_NEED_BUFSPACE;
 1961                         waitmsg = "nbufkv";
 1962                 } else if (bufspace >= hibufspace) {
 1963                         waitmsg = "nbufbs";
 1964                         flags = VFS_BIO_NEED_BUFSPACE;
 1965                 } else {
 1966                         waitmsg = "newbuf";
 1967                         flags = VFS_BIO_NEED_ANY;
 1968                 }
 1969 
 1970                 bd_speedup();   /* heeeelp */
 1971 
 1972                 mtx_lock(&nblock);
 1973                 needsbuffer |= flags;
 1974                 while (needsbuffer & flags) {
 1975                         if (msleep(&needsbuffer, &nblock,
 1976                             (PRIBIO + 4) | slpflag, waitmsg, slptimeo)) {
 1977                                 mtx_unlock(&nblock);
 1978                                 return (NULL);
 1979                         }
 1980                 }
 1981                 mtx_unlock(&nblock);
 1982         } else {
 1983                 /*
 1984                  * We finally have a valid bp.  We aren't quite out of the
 1985                  * woods; we still have to reserve kva space.  In order
 1986                  * to keep fragmentation sane we only allocate kva in
 1987                  * BKVASIZE chunks.
 1988                  */
 1989                 maxsize = (maxsize + BKVAMASK) & ~BKVAMASK;
 1990 
 1991                 if (maxsize != bp->b_kvasize) {
 1992                         vm_offset_t addr = 0;
 1993 
 1994                         bfreekva(bp);
 1995 
 1996                         if (vm_map_findspace(buffer_map,
 1997                                 vm_map_min(buffer_map), maxsize, &addr)) {
 1998                                 /*
 1999                                  * Uh oh.  Buffer map is too fragmented.  We
 2000                                  * must defragment the map.
 2001                                  */
 2002                                 atomic_add_int(&bufdefragcnt, 1);
 2003                                 defrag = 1;
 2004                                 bp->b_flags |= B_INVAL;
 2005                                 brelse(bp);
 2006                                 goto restart;
 2007                         }
 2008                         if (addr) {
 2009                                 vm_map_insert(buffer_map, NULL, 0,
 2010                                         addr, addr + maxsize,
 2011                                         VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
 2012 
 2013                                 bp->b_kvabase = (caddr_t) addr;
 2014                                 bp->b_kvasize = maxsize;
 2015                                 atomic_add_int(&bufspace, bp->b_kvasize);
 2016                                 atomic_add_int(&bufreusecnt, 1);
 2017                         }
 2018                 }
 2019                 bp->b_data = bp->b_kvabase;
 2020         }
 2021         return(bp);
 2022 }
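
/*
 * Minimal sketch, not part of vfs_bio.c: the KVA rounding getnewbuf()
 * applies before reserving buffer_map space.  Assuming the default BKVASIZE
 * of 16 kB, a 6000-byte request still reserves one full 16 kB chunk, which
 * keeps buffer_map fragmentation manageable.
 */
#if 0
static int
example_round_to_bkvasize(int maxsize)
{
        return ((maxsize + BKVAMASK) & ~BKVAMASK);
}
#endif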
 2023 
 2024 /*
 2025  *      buf_daemon:
 2026  *
 2027  *      buffer flushing daemon.  Buffers are normally flushed by the
 2028  *      update daemon but if it cannot keep up this process starts to
 2029  *      take the load in an attempt to prevent getnewbuf() from blocking.
 2030  */
 2031 
 2032 static struct proc *bufdaemonproc;
 2033 
 2034 static struct kproc_desc buf_kp = {
 2035         "bufdaemon",
 2036         buf_daemon,
 2037         &bufdaemonproc
 2038 };
 2039 SYSINIT(bufdaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, kproc_start, &buf_kp)
 2040 
 2041 static void
 2042 buf_daemon()
 2043 {
 2044         int s;
 2045 
 2046         mtx_lock(&Giant);
 2047 
 2048         /*
 2049          * This process needs to be suspended prior to shutdown sync.
 2050          */
 2051         EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, bufdaemonproc,
 2052             SHUTDOWN_PRI_LAST);
 2053 
 2054         /*
 2055          * This process is allowed to take the buffer cache to the limit
 2056          */
 2057         s = splbio();
 2058         mtx_lock(&bdlock);
 2059 
 2060         for (;;) {
 2061                 bd_request = 0;
 2062                 mtx_unlock(&bdlock);
 2063 
 2064                 kthread_suspend_check(bufdaemonproc);
 2065 
 2066                 /*
 2067                  * Do the flush.  Limit the amount of in-transit I/O we
 2068                  * allow to build up, otherwise we would completely saturate
 2069                  * the I/O system.  Wakeup any waiting processes before we
 2070                  * normally would so they can run in parallel with our drain.
 2071                  */
 2072                 while (numdirtybuffers > lodirtybuffers) {
 2073                         if (flushbufqueues(0) == 0) {
 2074                                 /*
 2075                                  * Could not find any buffers without rollback
 2076                                  * dependencies, so just write the first one
 2077                                  * in the hopes of eventually making progress.
 2078                                  */
 2079                                 flushbufqueues(1);
 2080                                 break;
 2081                         }
 2082                         waitrunningbufspace();
 2083                         numdirtywakeup((lodirtybuffers + hidirtybuffers) / 2);
 2084                 }
 2085 
 2086                 /*
 2087                  * Only clear bd_request if we have reached our low water
 2088                  * mark.  The buf_daemon normally waits 1 second and
 2089                  * then incrementally flushes any dirty buffers that have
 2090                  * built up, within reason.
 2091                  *
 2092                  * If we were unable to hit our low water mark and couldn't
 2093          * find any flushable buffers, we sleep briefly (hz / 10).
 2094                  * Otherwise we loop immediately.
 2095                  */
 2096                 mtx_lock(&bdlock);
 2097                 if (numdirtybuffers <= lodirtybuffers) {
 2098                         /*
 2099                          * We reached our low water mark, reset the
 2100                          * request and sleep until we are needed again.
 2101                          * The sleep is just so the suspend code works.
 2102                          */
 2103                         bd_request = 0;
 2104                         msleep(&bd_request, &bdlock, PVM, "psleep", hz);
 2105                 } else {
 2106                         /*
 2107                          * We couldn't find any flushable dirty buffers but
 2108                          * still have too many dirty buffers, so we
 2109                          * have to sleep and try again.  (rare)
 2110                          */
 2111                         msleep(&bd_request, &bdlock, PVM, "qsleep", hz / 10);
 2112                 }
 2113         }
 2114 }
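
/*
 * Illustrative sketch, not part of vfs_bio.c: the dirty-buffer hysteresis
 * buf_daemon() uses above.  With hypothetical watermarks lodirtybuffers =
 * 128 and hidirtybuffers = 256, processes throttled for dirtying too many
 * buffers (e.g. in bwillwrite()) are woken once the count drops to 192,
 * midway between the marks, rather than waiting for the low mark itself.
 */
#if 0
static int
example_dirty_wakeup_threshold(int lo, int hi)
{
        return ((lo + hi) / 2);         /* matches the numdirtywakeup() call above */
}
#endif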
 2115 
 2116 /*
 2117  *      flushbufqueues:
 2118  *
 2119  *      Try to flush a buffer in the dirty queue.  We must be careful to
 2120  *      free up B_INVAL buffers instead of writing them, which NFS is 
 2121  *      particularly sensitive to.
 2122  */
 2123 int flushwithdeps = 0;
 2124 SYSCTL_INT(_vfs, OID_AUTO, flushwithdeps, CTLFLAG_RW, &flushwithdeps,
 2125     0, "Number of buffers flushed with dependencies that require rollbacks");
 2126 static int
 2127 flushbufqueues(int flushdeps)
 2128 {
 2129         struct thread *td = curthread;
 2130         struct vnode *vp;
 2131         struct buf *bp;
 2132         int hasdeps;
 2133 
 2134         mtx_lock(&bqlock);
 2135         TAILQ_FOREACH(bp, &bufqueues[QUEUE_DIRTY], b_freelist) {
 2136                 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
 2137                         continue;
 2138                 KASSERT((bp->b_flags & B_DELWRI),
 2139                     ("unexpected clean buffer %p", bp));
 2140                 if ((bp->b_xflags & BX_BKGRDINPROG) != 0) {
 2141                         BUF_UNLOCK(bp);
 2142                         continue;
 2143                 }
 2144                 if (bp->b_flags & B_INVAL) {
 2145                         bremfreel(bp);
 2146                         mtx_unlock(&bqlock);
 2147                         brelse(bp);
 2148                         return (1);
 2149                 }
 2150 
 2151                 if (LIST_FIRST(&bp->b_dep) != NULL && buf_countdeps(bp, 0)) {
 2152                         if (flushdeps == 0) {
 2153                                 BUF_UNLOCK(bp);
 2154                                 continue;
 2155                         }
 2156                         hasdeps = 1;
 2157                 } else
 2158                         hasdeps = 0;
 2159                 /*
 2160                  * We must hold the lock on a vnode before writing
 2161                  * one of its buffers.  Otherwise we may cause confusion or,
 2162                  * in the case of a snapshot vnode, deadlock the
 2163                  * system.
 2164                  *
 2165                  * The lock order here is the reverse of the normal order
 2166                  * of vnode lock followed by buf lock.  This is ok because
 2167                  * the NOWAIT will prevent deadlock.
 2168                  */
 2169                 if ((vp = bp->b_vp) == NULL ||
 2170                     vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT, td) == 0) {
 2171                         mtx_unlock(&bqlock);
 2172                         vfs_bio_awrite(bp);
 2173                         if (vp != NULL)
 2174                                 VOP_UNLOCK(vp, 0, td);
 2175                         flushwithdeps += hasdeps;
 2176                         return (1);
 2177                 }
 2178                 BUF_UNLOCK(bp);
 2179         }
 2180         mtx_unlock(&bqlock);
 2181         return (0);
 2182 }
 2183 
 2184 /*
 2185  * Check to see if a block is currently memory resident.
 2186  */
 2187 struct buf *
 2188 incore(struct vnode * vp, daddr_t blkno)
 2189 {
 2190         struct buf *bp;
 2191 
 2192         int s = splbio();
 2193         VI_LOCK(vp);
 2194         bp = gbincore(vp, blkno);
 2195         VI_UNLOCK(vp);
 2196         splx(s);
 2197         return (bp);
 2198 }
 2199 
 2200 /*
 2201  * Returns true if no I/O is needed to access the
 2202  * associated VM object.  This is like incore except
 2203  * it also hunts around in the VM system for the data.
 2204  */
 2205 
 2206 int
 2207 inmem(struct vnode * vp, daddr_t blkno)
 2208 {
 2209         vm_object_t obj;
 2210         vm_offset_t toff, tinc, size;
 2211         vm_page_t m;
 2212         vm_ooffset_t off;
 2213 
 2214         GIANT_REQUIRED;
 2215         ASSERT_VOP_LOCKED(vp, "inmem");
 2216 
 2217         if (incore(vp, blkno))
 2218                 return 1;
 2219         if (vp->v_mount == NULL)
 2220                 return 0;
 2221         if (VOP_GETVOBJECT(vp, &obj) != 0 || (vp->v_vflag & VV_OBJBUF) == 0)
 2222                 return 0;
 2223 
 2224         size = PAGE_SIZE;
 2225         if (size > vp->v_mount->mnt_stat.f_iosize)
 2226                 size = vp->v_mount->mnt_stat.f_iosize;
 2227         off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;
 2228 
 2229         for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
 2230                 VM_OBJECT_LOCK(obj);
 2231                 m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
 2232                 VM_OBJECT_UNLOCK(obj);
 2233                 if (!m)
 2234                         goto notinmem;
 2235                 tinc = size;
 2236                 if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK))
 2237                         tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK);
 2238                 if (vm_page_is_valid(m,
 2239                     (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
 2240                         goto notinmem;
 2241         }
 2242         return 1;
 2243 
 2244 notinmem:
 2245         return (0);
 2246 }
 2247 
 2248 /*
 2249  *      vfs_setdirty:
 2250  *
 2251  *      Sets the dirty range for a buffer based on the status of the dirty
 2252  *      bits in the pages comprising the buffer.
 2253  *
 2254  *      The range is limited to the size of the buffer.
 2255  *
 2256  *      This routine is primarily used by NFS, but is generalized for the
 2257  *      B_VMIO case.
 2258  */
 2259 static void
 2260 vfs_setdirty(struct buf *bp) 
 2261 {
 2262         int i;
 2263         vm_object_t object;
 2264 
 2265         GIANT_REQUIRED;
 2266         /*
 2267          * Degenerate case - empty buffer
 2268          */
 2269 
 2270         if (bp->b_bufsize == 0)
 2271                 return;
 2272 
 2273         /*
 2274          * We qualify the scan for modified pages based on whether the
 2275          * object has been flushed yet.  The OBJ_WRITEABLE flag
 2276          * is not cleared simply by protecting pages off.
 2277          */
 2278 
 2279         if ((bp->b_flags & B_VMIO) == 0)
 2280                 return;
 2281 
 2282         object = bp->b_pages[0]->object;
 2283         VM_OBJECT_LOCK(object);
 2284         if ((object->flags & OBJ_WRITEABLE) && !(object->flags & OBJ_MIGHTBEDIRTY))
 2285                 printf("Warning: object %p writeable but not mightbedirty\n", object);
 2286         if (!(object->flags & OBJ_WRITEABLE) && (object->flags & OBJ_MIGHTBEDIRTY))
 2287                 printf("Warning: object %p mightbedirty but not writeable\n", object);
 2288 
 2289         if (object->flags & (OBJ_MIGHTBEDIRTY|OBJ_CLEANING)) {
 2290                 vm_offset_t boffset;
 2291                 vm_offset_t eoffset;
 2292 
 2293                 vm_page_lock_queues();
 2294                 /*
 2295                  * test the pages to see if they have been modified directly
 2296                  * by users through the VM system.
 2297                  */
 2298                 for (i = 0; i < bp->b_npages; i++) {
 2299                         vm_page_flag_clear(bp->b_pages[i], PG_ZERO);
 2300                         vm_page_test_dirty(bp->b_pages[i]);
 2301                 }
 2302 
 2303                 /*
 2304                  * Calculate the encompassing dirty range, boffset and eoffset,
 2305                  * (eoffset - boffset) bytes.
 2306                  */
 2307 
 2308                 for (i = 0; i < bp->b_npages; i++) {
 2309                         if (bp->b_pages[i]->dirty)
 2310                                 break;
 2311                 }
 2312                 boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
 2313 
 2314                 for (i = bp->b_npages - 1; i >= 0; --i) {
 2315                         if (bp->b_pages[i]->dirty) {
 2316                                 break;
 2317                         }
 2318                 }
 2319                 eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
 2320 
 2321                 vm_page_unlock_queues();
 2322                 /*
 2323                  * Fit it to the buffer.
 2324                  */
 2325 
 2326                 if (eoffset > bp->b_bcount)
 2327                         eoffset = bp->b_bcount;
 2328 
 2329                 /*
 2330                  * If we have a good dirty range, merge with the existing
 2331                  * dirty range.
 2332                  */
 2333 
 2334                 if (boffset < eoffset) {
 2335                         if (bp->b_dirtyoff > boffset)
 2336                                 bp->b_dirtyoff = boffset;
 2337                         if (bp->b_dirtyend < eoffset)
 2338                                 bp->b_dirtyend = eoffset;
 2339                 }
 2340         }
 2341         VM_OBJECT_UNLOCK(object);
 2342 }
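
/*
 * Worked example, not part of vfs_bio.c: the dirty-range arithmetic in
 * vfs_setdirty() above, with hypothetical values.  For a buffer whose
 * b_offset is 0x1400 (so b_offset & PAGE_MASK == 0x400) on a 4 kB page
 * size, if the first and last dirty pages are indices 1 and 2 then
 * boffset = (1 << PAGE_SHIFT) - 0x400 = 3072 and
 * eoffset = ((2 + 1) << PAGE_SHIFT) - 0x400 = 11264, clipped to b_bcount.
 */
#if 0
static void
example_dirty_range(int first, int last, vm_offset_t pageoff, int bcount,
    vm_offset_t *boffset, vm_offset_t *eoffset)
{
        *boffset = (first << PAGE_SHIFT) - pageoff;
        *eoffset = ((last + 1) << PAGE_SHIFT) - pageoff;
        if (*eoffset > (vm_offset_t)bcount)
                *eoffset = bcount;
}
#endif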
 2343 
 2344 /*
 2345  *      getblk:
 2346  *
 2347  *      Get a block given a specified block and offset into a file/device.
 2348  *      The buffer's B_DONE bit will be cleared on return, making it almost
 2349  *      ready for an I/O initiation.  B_INVAL may or may not be set on 
 2350  *      return.  The caller should clear B_INVAL prior to initiating a
 2351  *      READ.
 2352  *
 2353  *      For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for
 2354  *      an existing buffer.
 2355  *
 2356  *      For a VMIO buffer, B_CACHE is modified according to the backing VM.
 2357  *      If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set
 2358  *      and then cleared based on the backing VM.  If the previous buffer is
 2359  *      non-0-sized but invalid, B_CACHE will be cleared.
 2360  *
 2361  *      If getblk() must create a new buffer, the new buffer is returned with
 2362  *      both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which
 2363  *      case it is returned with B_INVAL clear and B_CACHE set based on the
 2364  *      backing VM.
 2365  *
 2366  *      getblk() also forces a BUF_WRITE() for any B_DELWRI buffer whose
 2367  *      B_CACHE bit is clear.
 2368  *      
 2369  *      What this means, basically, is that the caller should use B_CACHE to
 2370  *      determine whether the buffer is fully valid or not and should clear
 2371  *      B_INVAL prior to issuing a read.  If the caller intends to validate
 2372  *      the buffer by loading its data area with something, the caller needs
 2373  *      to clear B_INVAL.  If the caller does this without issuing an I/O, 
 2374  *      the caller should set B_CACHE ( as an optimization ), else the caller
 2375  *      should issue the I/O and biodone() will set B_CACHE if the I/O was
 2376  *      a write attempt or if it was a successful read.  If the caller 
 2377  *      intends to issue a READ, the caller must clear B_INVAL and BIO_ERROR
 2378  *      prior to issuing the READ.  biodone() will *not* clear B_INVAL.
 2379  */
 2380 struct buf *
 2381 getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo,
 2382     int flags)
 2383 {
 2384         struct buf *bp;
 2385         int s;
 2386         int error;
 2387         ASSERT_VOP_LOCKED(vp, "getblk");
 2388 
 2389         if (size > MAXBSIZE)
 2390                 panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE);
 2391 
 2392         s = splbio();
 2393 loop:
 2394         /*
 2395          * Block if we are low on buffers.   Certain processes are allowed
 2396          * to completely exhaust the buffer cache.
 2397          *
 2398          * If this check ever becomes a bottleneck it may be better to
 2399          * move it into the else, when gbincore() fails.  At the moment
 2400          * it isn't a problem.
 2401          *
 2402          * XXX remove if 0 sections (clean this up after it's proven)
 2403          */
 2404         if (numfreebuffers == 0) {
 2405                 if (curthread == PCPU_GET(idlethread))
 2406                         return NULL;
 2407                 mtx_lock(&nblock);
 2408                 needsbuffer |= VFS_BIO_NEED_ANY;
 2409                 mtx_unlock(&nblock);
 2410         }
 2411 
 2412         VI_LOCK(vp);
 2413         if ((bp = gbincore(vp, blkno))) {
 2414                 int lockflags;
 2415                 /*
 2416                  * Buffer is in-core.  If the buffer is not busy, it must
 2417                  * be on a queue.
 2418                  */
 2419                 lockflags = LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK;
 2420 
 2421                 if (flags & GB_LOCK_NOWAIT)
 2422                         lockflags |= LK_NOWAIT;
 2423 
 2424                 error = BUF_TIMELOCK(bp, lockflags,
 2425                     VI_MTX(vp), "getblk", slpflag, slptimeo);
 2426 
 2427                 /*
 2428                  * If we slept and got the lock we have to restart in case
 2429                  * the buffer changed identities.
 2430                  */
 2431                 if (error == ENOLCK)
 2432                         goto loop;
 2433                 /* We timed out or were interrupted. */
 2434                 else if (error)
 2435                         return (NULL);
 2436 
 2437                 /*
 2438                  * The buffer is locked.  B_CACHE is cleared if the buffer is 
 2439                  * invalid.  Otherwise, for a non-VMIO buffer, B_CACHE is set
 2440                  * and for a VMIO buffer B_CACHE is adjusted according to the
 2441                  * backing VM cache.
 2442                  */
 2443                 if (bp->b_flags & B_INVAL)
 2444                         bp->b_flags &= ~B_CACHE;
 2445                 else if ((bp->b_flags & (B_VMIO | B_INVAL)) == 0)
 2446                         bp->b_flags |= B_CACHE;
 2447                 bremfree(bp);
 2448 
 2449                 /*
 2450                  * Check for size inconsistencies in the non-VMIO case.
 2451                  */
 2452 
 2453                 if (bp->b_bcount != size) {
 2454                         if ((bp->b_flags & B_VMIO) == 0 ||
 2455                             (size > bp->b_kvasize)) {
 2456                                 if (bp->b_flags & B_DELWRI) {
 2457                                         bp->b_flags |= B_NOCACHE;
 2458                                         BUF_WRITE(bp);
 2459                                 } else {
 2460                                         if ((bp->b_flags & B_VMIO) &&
 2461                                            (LIST_FIRST(&bp->b_dep) == NULL)) {
 2462                                                 bp->b_flags |= B_RELBUF;
 2463                                                 brelse(bp);
 2464                                         } else {
 2465                                                 bp->b_flags |= B_NOCACHE;
 2466                                                 BUF_WRITE(bp);
 2467                                         }
 2468                                 }
 2469                                 goto loop;
 2470                         }
 2471                 }
 2472 
 2473                 /*
 2474                  * If the size is inconsistent in the VMIO case, we can resize
 2475                  * the buffer.  This might lead to B_CACHE getting set or
 2476                  * cleared.  If the size has not changed, B_CACHE remains
 2477                  * unchanged from its previous state.
 2478                  */
 2479 
 2480                 if (bp->b_bcount != size)
 2481                         allocbuf(bp, size);
 2482 
 2483                 KASSERT(bp->b_offset != NOOFFSET, 
 2484                     ("getblk: no buffer offset"));
 2485 
 2486                 /*
 2487                  * A buffer with B_DELWRI set and B_CACHE clear must
 2488                  * be committed before we can return the buffer in
 2489                  * order to prevent the caller from issuing a read
 2490                  * ( due to B_CACHE not being set ) and overwriting
 2491                  * it.
 2492                  *
 2493                  * Most callers, including NFS and FFS, need this to
 2494                  * operate properly either because they assume they
 2495                  * can issue a read if B_CACHE is not set, or because
 2496                  * ( for example ) an uncached B_DELWRI might loop due 
 2497                  * to softupdates re-dirtying the buffer.  In the latter
 2498                  * case, B_CACHE is set after the first write completes,
 2499                  * preventing further loops.
 2500                  * NOTE!  b*write() sets B_CACHE.  If we cleared B_CACHE
 2501                  * above while extending the buffer, we cannot allow the
 2502                  * buffer to remain with B_CACHE set after the write
 2503                  * completes or it will represent a corrupt state.  To
 2504                  * deal with this we set B_NOCACHE to scrap the buffer
 2505                  * after the write.
 2506                  *
 2507                  * We might be able to do something fancy, like setting
 2508                  * B_CACHE in bwrite() except if B_DELWRI is already set,
 2509                  * so the below call doesn't set B_CACHE, but that gets real
 2510                  * confusing.  This is much easier.
 2511                  */
 2512 
 2513                 if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) {
 2514                         bp->b_flags |= B_NOCACHE;
 2515                         BUF_WRITE(bp);
 2516                         goto loop;
 2517                 }
 2518 
 2519                 splx(s);
 2520                 bp->b_flags &= ~B_DONE;
 2521         } else {
 2522                 int bsize, maxsize, vmio;
 2523                 off_t offset;
 2524 
 2525                 /*
 2526                  * Buffer is not in-core, create new buffer.  The buffer
 2527                  * returned by getnewbuf() is locked.  Note that the returned
 2528                  * buffer is also considered valid (not marked B_INVAL).
 2529                  */
 2530                 VI_UNLOCK(vp);
 2531                 if (vn_isdisk(vp, NULL))
 2532                         bsize = DEV_BSIZE;
 2533                 else if (vp->v_mountedhere)
 2534                         bsize = vp->v_mountedhere->mnt_stat.f_iosize;
 2535                 else if (vp->v_mount)
 2536                         bsize = vp->v_mount->mnt_stat.f_iosize;
 2537                 else
 2538                         bsize = size;
 2539 
 2540                 offset = blkno * bsize;
 2541                 vmio = (VOP_GETVOBJECT(vp, NULL) == 0) &&
 2542                     (vp->v_vflag & VV_OBJBUF);
 2543                 maxsize = vmio ? size + (offset & PAGE_MASK) : size;
 2544                 maxsize = imax(maxsize, bsize);
 2545 
 2546                 if ((bp = getnewbuf(slpflag, slptimeo, size, maxsize)) == NULL) {
 2547                         if (slpflag || slptimeo) {
 2548                                 splx(s);
 2549                                 return NULL;
 2550                         }
 2551                         goto loop;
 2552                 }
 2553 
 2554                 /*
 2555                  * This code is used to make sure that a buffer is not
 2556                  * created while the getnewbuf routine is blocked.
 2557                  * This can be a problem whether the vnode is locked or not.
 2558                  * If the buffer is created out from under us, we have to
 2559                  * throw away the one we just created.  There is no window
 2560                  * race because we are safely running at splbio() from the
 2561                  * point of the duplicate buffer creation through to here,
 2562                  * and we've locked the buffer.
 2563                  *
 2564                  * Note: this must occur before we associate the buffer
 2565                  * with the vp especially considering limitations in
 2566                  * the splay tree implementation when dealing with duplicate
 2567                  * lblkno's.
 2568                  */
 2569                 VI_LOCK(vp);
 2570                 if (gbincore(vp, blkno)) {
 2571                         VI_UNLOCK(vp);
 2572                         bp->b_flags |= B_INVAL;
 2573                         brelse(bp);
 2574                         goto loop;
 2575                 }
 2576 
 2577                 /*
 2578                  * Insert the buffer into the hash, so that it can
 2579                  * be found by incore.
 2580                  */
 2581                 bp->b_blkno = bp->b_lblkno = blkno;
 2582                 bp->b_offset = offset;
 2583 
 2584                 bgetvp(vp, bp);
 2585                 VI_UNLOCK(vp);
 2586 
 2587                 /*
 2588                  * set B_VMIO bit.  allocbuf() the buffer bigger.  Since the
 2589                  * buffer size starts out as 0, B_CACHE will be set by
 2590                  * allocbuf() for the VMIO case prior to it testing the
 2591                  * backing store for validity.
 2592                  */
 2593 
 2594                 if (vmio) {
 2595                         bp->b_flags |= B_VMIO;
 2596 #if defined(VFS_BIO_DEBUG)
 2597                         if (vp->v_type != VREG)
 2598                                 printf("getblk: vmioing file type %d???\n", vp->v_type);
 2599 #endif
 2600                         VOP_GETVOBJECT(vp, &bp->b_object);
 2601                 } else {
 2602                         bp->b_flags &= ~B_VMIO;
 2603                         bp->b_object = NULL;
 2604                 }
 2605 
 2606                 allocbuf(bp, size);
 2607 
 2608                 splx(s);
 2609                 bp->b_flags &= ~B_DONE;
 2610         }
 2611         KASSERT(BUF_REFCNT(bp) == 1, ("getblk: bp %p not locked",bp));
 2612         return (bp);
 2613 }
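
/*
 * Minimal sketch, not part of vfs_bio.c: the B_CACHE contract described in
 * the comment above getblk().  The caller clears B_INVAL and BIO_ERROR and
 * issues the READ only when B_CACHE is clear, which is essentially what
 * bread() does for a regular file (device vnodes go through
 * VOP_SPECSTRATEGY in this kernel).  vp, blkno, and size are hypothetical.
 */
#if 0
static int
example_getblk_read(struct vnode *vp, daddr_t blkno, int size,
    struct buf **bpp)
{
        struct buf *bp;

        bp = getblk(vp, blkno, size, 0, 0, 0);  /* sleeps until a buffer is free */
        if ((bp->b_flags & B_CACHE) == 0) {
                /* Not fully valid: set up and issue the read. */
                bp->b_iocmd = BIO_READ;
                bp->b_flags &= ~B_INVAL;
                bp->b_ioflags &= ~BIO_ERROR;
                vfs_busy_pages(bp, 0);
                VOP_STRATEGY(vp, bp);
                *bpp = bp;
                return (bufwait(bp));
        }
        *bpp = bp;
        return (0);
}
#endif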
 2614 
 2615 /*
 2616  * Get an empty, disassociated buffer of given size.  The buffer is initially
 2617  * set to B_INVAL.
 2618  */
 2619 struct buf *
 2620 geteblk(int size)
 2621 {
 2622         struct buf *bp;
 2623         int s;
 2624         int maxsize;
 2625 
 2626         maxsize = (size + BKVAMASK) & ~BKVAMASK;
 2627 
 2628         s = splbio();
 2629         while ((bp = getnewbuf(0, 0, size, maxsize)) == 0)
 2630                 continue;
 2631         splx(s);
 2632         allocbuf(bp, size);
 2633         bp->b_flags |= B_INVAL; /* b_dep cleared by getnewbuf() */
 2634         KASSERT(BUF_REFCNT(bp) == 1, ("geteblk: bp %p not locked",bp));
 2635         return (bp);
 2636 }
 2637 
 2638 
 2639 /*
 2640  * This code constitutes the buffer memory from either anonymous system
 2641  * memory (in the case of non-VMIO operations) or from an associated
 2642  * VM object (in the case of VMIO operations).  This code is able to
 2643  * resize a buffer up or down.
 2644  *
 2645  * Note that this code is tricky, and has many complications to resolve
 2646  * deadlock or inconsistent data situations.  Tread lightly!!! 
 2647  * There are B_CACHE and B_DELWRI interactions that must be dealt with by 
 2648  * the caller.  Calling this code willy nilly can result in the loss of data.
 2649  *
 2650  * allocbuf() only adjusts B_CACHE for VMIO buffers.  getblk() deals with
 2651  * B_CACHE for the non-VMIO case.
 2652  */
 2653 
 2654 int
 2655 allocbuf(struct buf *bp, int size)
 2656 {
 2657         int newbsize, mbsize;
 2658         int i;
 2659 
 2660         GIANT_REQUIRED;
 2661 
 2662         if (BUF_REFCNT(bp) == 0)
 2663                 panic("allocbuf: buffer not busy");
 2664 
 2665         if (bp->b_kvasize < size)
 2666                 panic("allocbuf: buffer too small");
 2667 
 2668         if ((bp->b_flags & B_VMIO) == 0) {
 2669                 caddr_t origbuf;
 2670                 int origbufsize;
 2671                 /*
 2672                  * Just get anonymous memory from the kernel.  Don't
 2673                  * mess with B_CACHE.
 2674                  */
 2675                 mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
 2676                 if (bp->b_flags & B_MALLOC)
 2677                         newbsize = mbsize;
 2678                 else
 2679                         newbsize = round_page(size);
 2680 
 2681                 if (newbsize < bp->b_bufsize) {
 2682                         /*
 2683                          * malloced buffers are not shrunk
 2684                          */
 2685                         if (bp->b_flags & B_MALLOC) {
 2686                                 if (newbsize) {
 2687                                         bp->b_bcount = size;
 2688                                 } else {
 2689                                         free(bp->b_data, M_BIOBUF);
 2690                                         if (bp->b_bufsize) {
 2691                                                 atomic_subtract_int(
 2692                                                     &bufmallocspace,
 2693                                                     bp->b_bufsize);
 2694                                                 bufspacewakeup();
 2695                                                 bp->b_bufsize = 0;
 2696                                         }
 2697                                         bp->b_data = bp->b_kvabase;
 2698                                         bp->b_bcount = 0;
 2699                                         bp->b_flags &= ~B_MALLOC;
 2700                                 }
 2701                                 return 1;
 2702                         }               
 2703                         vm_hold_free_pages(
 2704                             bp,
 2705                             (vm_offset_t) bp->b_data + newbsize,
 2706                             (vm_offset_t) bp->b_data + bp->b_bufsize);
 2707                 } else if (newbsize > bp->b_bufsize) {
 2708                         /*
 2709                          * We only use malloced memory on the first allocation,
 2710                          * and revert to page-allocated memory when the buffer
 2711                          * grows.
 2712                          */
 2713                         /*
 2714                          * There is a potential SMP race here that could lead
 2715                          * to bufmallocspace slightly passing the max.  It
 2716                          * is probably extremely rare and not worth worrying
 2717                          * over.
 2718                          */
 2719                         if ( (bufmallocspace < maxbufmallocspace) &&
 2720                                 (bp->b_bufsize == 0) &&
 2721                                 (mbsize <= PAGE_SIZE/2)) {
 2722 
 2723                                 bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
 2724                                 bp->b_bufsize = mbsize;
 2725                                 bp->b_bcount = size;
 2726                                 bp->b_flags |= B_MALLOC;
 2727                                 atomic_add_int(&bufmallocspace, mbsize);
 2728                                 return 1;
 2729                         }
 2730                         origbuf = NULL;
 2731                         origbufsize = 0;
 2732                         /*
 2733                          * If the buffer is growing on an allocation other than its first,
 2734                          * then we revert to the page-allocation scheme.
 2735                          */
 2736                         if (bp->b_flags & B_MALLOC) {
 2737                                 origbuf = bp->b_data;
 2738                                 origbufsize = bp->b_bufsize;
 2739                                 bp->b_data = bp->b_kvabase;
 2740                                 if (bp->b_bufsize) {
 2741                                         atomic_subtract_int(&bufmallocspace,
 2742                                             bp->b_bufsize);
 2743                                         bufspacewakeup();
 2744                                         bp->b_bufsize = 0;
 2745                                 }
 2746                                 bp->b_flags &= ~B_MALLOC;
 2747                                 newbsize = round_page(newbsize);
 2748                         }
 2749                         vm_hold_load_pages(
 2750                             bp,
 2751                             (vm_offset_t) bp->b_data + bp->b_bufsize,
 2752                             (vm_offset_t) bp->b_data + newbsize);
 2753                         if (origbuf) {
 2754                                 bcopy(origbuf, bp->b_data, origbufsize);
 2755                                 free(origbuf, M_BIOBUF);
 2756                         }
 2757                 }
 2758         } else {
 2759                 int desiredpages;
 2760 
 2761                 newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
 2762                 desiredpages = (size == 0) ? 0 :
 2763                         num_pages((bp->b_offset & PAGE_MASK) + newbsize);
 2764 
 2765                 if (bp->b_flags & B_MALLOC)
 2766                         panic("allocbuf: VMIO buffer can't be malloced");
 2767                 /*
 2768                  * Set B_CACHE initially if buffer is 0 length or will become
 2769                  * 0-length.
 2770                  */
 2771                 if (size == 0 || bp->b_bufsize == 0)
 2772                         bp->b_flags |= B_CACHE;
 2773 
 2774                 if (newbsize < bp->b_bufsize) {
 2775                         /*
 2776                          * DEV_BSIZE aligned new buffer size is less than the
 2777                          * DEV_BSIZE aligned existing buffer size.  Figure out
 2778                          * if we have to remove any pages.
 2779                          */
 2780                         if (desiredpages < bp->b_npages) {
 2781                                 vm_page_t m;
 2782 
 2783                                 vm_page_lock_queues();
 2784                                 for (i = desiredpages; i < bp->b_npages; i++) {
 2785                                         /*
 2786                                          * the page is not freed here -- it
 2787                                          * is the responsibility of 
 2788                                          * vnode_pager_setsize
 2789                                          */
 2790                                         m = bp->b_pages[i];
 2791                                         KASSERT(m != bogus_page,
 2792                                             ("allocbuf: bogus page found"));
 2793                                         while (vm_page_sleep_if_busy(m, TRUE, "biodep"))
 2794                                                 vm_page_lock_queues();
 2795 
 2796                                         bp->b_pages[i] = NULL;
 2797                                         vm_page_unwire(m, 0);
 2798                                 }
 2799                                 vm_page_unlock_queues();
 2800                                 pmap_qremove((vm_offset_t) trunc_page((vm_offset_t)bp->b_data) +
 2801                                     (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
 2802                                 bp->b_npages = desiredpages;
 2803                         }
 2804                 } else if (size > bp->b_bcount) {
 2805                         /*
 2806                          * We are growing the buffer, possibly in a 
 2807                          * byte-granular fashion.
 2808                          */
 2809                         struct vnode *vp;
 2810                         vm_object_t obj;
 2811                         vm_offset_t toff;
 2812                         vm_offset_t tinc;
 2813 
 2814                         /*
 2815                          * Step 1, bring in the VM pages from the object, 
 2816                          * allocating them if necessary.  We must clear
 2817                          * B_CACHE if these pages are not valid for the 
 2818                          * range covered by the buffer.
 2819                          */
 2820 
 2821                         vp = bp->b_vp;
 2822                         obj = bp->b_object;
 2823 
 2824                         VM_OBJECT_LOCK(obj);
 2825                         while (bp->b_npages < desiredpages) {
 2826                                 vm_page_t m;
 2827                                 vm_pindex_t pi;
 2828 
 2829                                 pi = OFF_TO_IDX(bp->b_offset) + bp->b_npages;
 2830                                 if ((m = vm_page_lookup(obj, pi)) == NULL) {
 2831                                         /*
 2832                                          * note: must allocate system pages
 2833                                          * since blocking here could interfere
 2834                                          * with paging I/O, no matter which
 2835                                          * process we are.
 2836                                          */
 2837                                         m = vm_page_alloc(obj, pi,
 2838                                             VM_ALLOC_SYSTEM | VM_ALLOC_WIRED);
 2839                                         if (m == NULL) {
 2840                                                 atomic_add_int(&vm_pageout_deficit,
 2841                                                     desiredpages - bp->b_npages);
 2842                                                 VM_OBJECT_UNLOCK(obj);
 2843                                                 VM_WAIT;
 2844                                                 VM_OBJECT_LOCK(obj);
 2845                                         } else {
 2846                                                 vm_page_lock_queues();
 2847                                                 vm_page_wakeup(m);
 2848                                                 vm_page_unlock_queues();
 2849                                                 bp->b_flags &= ~B_CACHE;
 2850                                                 bp->b_pages[bp->b_npages] = m;
 2851                                                 ++bp->b_npages;
 2852                                         }
 2853                                         continue;
 2854                                 }
 2855 
 2856                                 /*
 2857                                  * We found a page.  If we have to sleep on it,
 2858                                  * retry because it might have gotten freed out
 2859                                  * from under us.
 2860                                  *
 2861                                  * We can only test PG_BUSY here.  Blocking on
 2862                                  * m->busy might lead to a deadlock:
 2863                                  *
 2864                                  *  vm_fault->getpages->cluster_read->allocbuf
 2865                                  *
 2866                                  */
 2867                                 vm_page_lock_queues();
 2868                                 if (vm_page_sleep_if_busy(m, FALSE, "pgtblk"))
 2869                                         continue;
 2870 
 2871                                 /*
 2872                                  * We have a good page.  Should we wakeup the
 2873                                  * page daemon?
 2874                                  */
 2875                                 if ((curproc != pageproc) &&
 2876                                     ((m->queue - m->pc) == PQ_CACHE) &&
 2877                                     ((cnt.v_free_count + cnt.v_cache_count) <
 2878                                         (cnt.v_free_min + cnt.v_cache_min))) {
 2879                                         pagedaemon_wakeup();
 2880                                 }
 2881                                 vm_page_flag_clear(m, PG_ZERO);
 2882                                 vm_page_wire(m);
 2883                                 vm_page_unlock_queues();
 2884                                 bp->b_pages[bp->b_npages] = m;
 2885                                 ++bp->b_npages;
 2886                         }
 2887                         VM_OBJECT_UNLOCK(obj);
 2888 
 2889                         /*
 2890                          * Step 2.  We've loaded the pages into the buffer,
 2891                          * we have to figure out if we can still have B_CACHE
 2892                          * set.  Note that B_CACHE is set according to the
 2893                          * byte-granular range ( bcount and size ), not the
 2894                          * aligned range ( newbsize ).
 2895                          *
 2896                          * The VM test is against m->valid, which is DEV_BSIZE
 2897                          * aligned.  Needless to say, the validity of the data
 2898                          * needs to also be DEV_BSIZE aligned.  Note that this
 2899                          * fails with NFS if the server or some other client
 2900                          * extends the file's EOF.  If our buffer is resized, 
 2901                          * B_CACHE may remain set! XXX
 2902                          */
 2903 
 2904                         toff = bp->b_bcount;
 2905                         tinc = PAGE_SIZE - ((bp->b_offset + toff) & PAGE_MASK);
 2906 
 2907                         while ((bp->b_flags & B_CACHE) && toff < size) {
 2908                                 vm_pindex_t pi;
 2909 
 2910                                 if (tinc > (size - toff))
 2911                                         tinc = size - toff;
 2912 
 2913                                 pi = ((bp->b_offset & PAGE_MASK) + toff) >> 
 2914                                     PAGE_SHIFT;
 2915 
 2916                                 vfs_buf_test_cache(
 2917                                     bp, 
 2918                                     bp->b_offset,
 2919                                     toff, 
 2920                                     tinc, 
 2921                                     bp->b_pages[pi]
 2922                                 );
 2923                                 toff += tinc;
 2924                                 tinc = PAGE_SIZE;
 2925                         }
 2926 
 2927                         /*
 2928                          * Step 3, fixup the KVM pmap.  Remember that
 2929                          * bp->b_data is relative to bp->b_offset, but 
 2930                          * bp->b_offset may be offset into the first page.
 2931                          */
 2932 
 2933                         bp->b_data = (caddr_t)
 2934                             trunc_page((vm_offset_t)bp->b_data);
 2935                         pmap_qenter(
 2936                             (vm_offset_t)bp->b_data,
 2937                             bp->b_pages, 
 2938                             bp->b_npages
 2939                         );
 2940                         
 2941                         bp->b_data = (caddr_t)((vm_offset_t)bp->b_data | 
 2942                             (vm_offset_t)(bp->b_offset & PAGE_MASK));
 2943                 }
 2944         }
 2945         if (newbsize < bp->b_bufsize)
 2946                 bufspacewakeup();
 2947         bp->b_bufsize = newbsize;       /* actual buffer allocation     */
 2948         bp->b_bcount = size;            /* requested buffer size        */
 2949         return 1;
 2950 }
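
/*
 * Example (sketch): resizing a buffer the caller already holds locked.
 * getblk() is the normal consumer of allocbuf(); a direct call is only
 * legal with the buffer lock held, as the assertions above insist.  The
 * helper name is hypothetical.
 */
#if 0
static void
example_resize(struct buf *bp, int newsize)
{
        KASSERT(BUF_REFCNT(bp) > 0, ("example_resize: bp not locked"));
        /*
         * Afterwards b_bufsize is the actual allocation and b_bcount the
         * requested size; for a VMIO buffer B_CACHE may have been cleared
         * if newly instantiated pages were not fully valid.
         */
        allocbuf(bp, newsize);
}
#endif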
 2951 
 2952 void
 2953 biodone(struct bio *bp)
 2954 {
 2955         mtx_lock(&bdonelock);
 2956         bp->bio_flags |= BIO_DONE;
 2957         if (bp->bio_done == NULL)
 2958                 wakeup(bp);
 2959         mtx_unlock(&bdonelock);
 2960         if (bp->bio_done != NULL)
 2961                 bp->bio_done(bp);
 2962 }
 2963 
 2964 /*
 2965  * Wait for a BIO to finish.
 2966  *
 2967  * XXX: resort to a timeout for now.  The optimal locking (if any) for this
 2968  * case is not yet clear.
 2969  */
 2970 int
 2971 biowait(struct bio *bp, const char *wchan)
 2972 {
 2973 
 2974         mtx_lock(&bdonelock);
 2975         while ((bp->bio_flags & BIO_DONE) == 0)
 2976                 msleep(bp, &bdonelock, PRIBIO, wchan, hz / 10);
 2977         mtx_unlock(&bdonelock);
 2978         if (bp->bio_error != 0)
 2979                 return (bp->bio_error);
 2980         if (!(bp->bio_flags & BIO_ERROR))
 2981                 return (0);
 2982         return (EIO);
 2983 }
 2984 
 2985 void
 2986 biofinish(struct bio *bp, struct devstat *stat, int error)
 2987 {
 2988         
 2989         if (error) {
 2990                 bp->bio_error = error;
 2991                 bp->bio_flags |= BIO_ERROR;
 2992         }
 2993         if (stat != NULL)
 2994                 devstat_end_transaction_bio(stat, bp);
 2995         biodone(bp);
 2996 }
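
/*
 * Example (sketch): how a device driver and a synchronous consumer meet
 * through the routines above.  The driver ends each request with
 * biofinish(); a consumer that left bio_done NULL sleeps in biowait()
 * until biodone() wakes it.  struct xx_softc, xx_done() and xx_sync_io()
 * are hypothetical names.
 */
#if 0
struct xx_softc {
        struct devstat  xx_stats;       /* per-device statistics */
};

static void
xx_done(struct xx_softc *sc, struct bio *bp, int error)
{
        /* Record the error (if any), update devstat and complete the bio. */
        biofinish(bp, &sc->xx_stats, error);
}

static int
xx_sync_io(struct bio *bp)
{
        /*
         * The caller built bp and left bp->bio_done == NULL, so biodone()
         * will wakeup(bp) and the msleep() loop in biowait() returns once
         * the driver has finished the request.
         */
        return (biowait(bp, "xxwait"));
}
#endif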
 2997 
 2998 /*
 2999  *      bufwait:
 3000  *
 3001  *      Wait for buffer I/O completion, returning error status.  The buffer
 3002  *      is left locked and B_DONE on return.  B_EINTR is converted into an EINTR
 3003  *      error and cleared.
 3004  */
 3005 int
 3006 bufwait(register struct buf * bp)
 3007 {
 3008         int s;
 3009 
 3010         s = splbio();
 3011         if (bp->b_iocmd == BIO_READ)
 3012                 bwait(bp, PRIBIO, "biord");
 3013         else
 3014                 bwait(bp, PRIBIO, "biowr");
 3015         splx(s);
 3016         if (bp->b_flags & B_EINTR) {
 3017                 bp->b_flags &= ~B_EINTR;
 3018                 return (EINTR);
 3019         }
 3020         if (bp->b_ioflags & BIO_ERROR) {
 3021                 return (bp->b_error ? bp->b_error : EIO);
 3022         } else {
 3023                 return (0);
 3024         }
 3025 }
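
/*
 * Example (sketch): the synchronous read pattern used by bread()/breadn(),
 * condensed.  A buffer-cache miss is turned into a BIO_READ, handed to the
 * vnode's strategy routine and then waited on with bufwait().  The helper
 * name is hypothetical and credential/statistics handling is omitted.
 */
#if 0
static int
example_read(struct vnode *vp, daddr_t blkno, int size, struct buf **bpp)
{
        struct buf *bp;

        *bpp = bp = getblk(vp, blkno, size, 0, 0);
        if (bp->b_flags & B_CACHE)
                return (0);             /* cache hit, data already valid */
        bp->b_iocmd = BIO_READ;
        bp->b_flags &= ~B_INVAL;
        bp->b_ioflags &= ~BIO_ERROR;
        vfs_busy_pages(bp, 0);          /* see vfs_busy_pages() below */
        VOP_STRATEGY(vp, bp);
        return (bufwait(bp));           /* b_error or EIO on failure */
}
#endif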
 3026 
 3027  /*
 3028   * Call back function from struct bio back up to struct buf.
 3029   * The corresponding initialization lives in sys/conf.h:DEV_STRATEGY().
 3030   */
 3031 void
 3032 bufdonebio(struct bio *bp)
 3033 {
 3034         bufdone(bp->bio_caller2);
 3035 }
 3036 
 3037 /*
 3038  *      bufdone:
 3039  *
 3040  *      Finish I/O on a buffer, optionally calling a completion function.
 3041  *      This is usually called from an interrupt so process blocking is
 3042  *      not allowed.
 3043  *
 3044  *      biodone is also responsible for setting B_CACHE in a B_VMIO bp.
 3045  *      In a non-VMIO bp, B_CACHE will be set on the next getblk() 
 3046  *      assuming B_INVAL is clear.
 3047  *
 3048  *      For the VMIO case, we set B_CACHE if the op was a read and no
 3049  *      read error occurred, or if the op was a write.  B_CACHE is never
 3050  *      set if the buffer is invalid or otherwise uncacheable.
 3051  *
 3052  *      biodone does not mess with B_INVAL, allowing the I/O routine or the
 3053  *      initiator to leave B_INVAL set to brelse the buffer out of existence
 3054  *      in the biodone routine.
 3055  */
 3056 void
 3057 bufdone(struct buf *bp)
 3058 {
 3059         int s;
 3060         void    (*biodone)(struct buf *);
 3061 
 3062         GIANT_REQUIRED;
 3063 
 3064         s = splbio();
 3065 
 3066         KASSERT(BUF_REFCNT(bp) > 0, ("biodone: bp %p not busy %d", bp, BUF_REFCNT(bp)));
 3067         KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp));
 3068 
 3069         bp->b_flags |= B_DONE;
 3070         runningbufwakeup(bp);
 3071 
 3072         if (bp->b_iocmd == BIO_DELETE) {
 3073                 brelse(bp);
 3074                 splx(s);
 3075                 return;
 3076         }
 3077 
 3078         if (bp->b_iocmd == BIO_WRITE) {
 3079                 vwakeup(bp);
 3080         }
 3081 
 3082         /* call optional completion function if requested */
 3083         if (bp->b_iodone != NULL) {
 3084                 biodone = bp->b_iodone;
 3085                 bp->b_iodone = NULL;
 3086                 (*biodone) (bp);
 3087                 splx(s);
 3088                 return;
 3089         }
 3090         if (LIST_FIRST(&bp->b_dep) != NULL)
 3091                 buf_complete(bp);
 3092 
 3093         if (bp->b_flags & B_VMIO) {
 3094                 int i;
 3095                 vm_ooffset_t foff;
 3096                 vm_page_t m;
 3097                 vm_object_t obj;
 3098                 int iosize;
 3099                 struct vnode *vp = bp->b_vp;
 3100 
 3101                 obj = bp->b_object;
 3102 
 3103 #if defined(VFS_BIO_DEBUG)
 3104                 mp_fixme("usecount and vflag accessed without locks.");
 3105                 if (vp->v_usecount == 0) {
 3106                         panic("biodone: zero vnode ref count");
 3107                 }
 3108 
 3109                 if ((vp->v_vflag & VV_OBJBUF) == 0) {
 3110                         panic("biodone: vnode is not setup for merged cache");
 3111                 }
 3112 #endif
 3113 
 3114                 foff = bp->b_offset;
 3115                 KASSERT(bp->b_offset != NOOFFSET,
 3116                     ("biodone: no buffer offset"));
 3117 
 3118                 if (obj != NULL)
 3119                         VM_OBJECT_LOCK(obj);
 3120 #if defined(VFS_BIO_DEBUG)
 3121                 if (obj->paging_in_progress < bp->b_npages) {
 3122                         printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
 3123                             obj->paging_in_progress, bp->b_npages);
 3124                 }
 3125 #endif
 3126 
 3127                 /*
 3128                  * Set B_CACHE if the op was a normal read and no error
 3129                  * occurred.  B_CACHE is set for writes in the b*write()
 3130                  * routines.
 3131                  */
 3132                 iosize = bp->b_bcount - bp->b_resid;
 3133                 if (bp->b_iocmd == BIO_READ &&
 3134                     !(bp->b_flags & (B_INVAL|B_NOCACHE)) &&
 3135                     !(bp->b_ioflags & BIO_ERROR)) {
 3136                         bp->b_flags |= B_CACHE;
 3137                 }
 3138                 vm_page_lock_queues();
 3139                 for (i = 0; i < bp->b_npages; i++) {
 3140                         int bogusflag = 0;
 3141                         int resid;
 3142 
 3143                         resid = ((foff + PAGE_SIZE) & ~(off_t)PAGE_MASK) - foff;
 3144                         if (resid > iosize)
 3145                                 resid = iosize;
 3146 
 3147                         /*
 3148                          * cleanup bogus pages, restoring the originals
 3149                          */
 3150                         m = bp->b_pages[i];
 3151                         if (m == bogus_page) {
 3152                                 bogusflag = 1;
 3153                                 m = vm_page_lookup(obj, OFF_TO_IDX(foff));
 3154                                 if (m == NULL)
 3155                                         panic("biodone: page disappeared!");
 3156                                 bp->b_pages[i] = m;
 3157                                 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
 3158                         }
 3159 #if defined(VFS_BIO_DEBUG)
 3160                         if (OFF_TO_IDX(foff) != m->pindex) {
 3161                                 printf(
 3162 "biodone: foff(%jd)/m->pindex(%ju) mismatch\n",
 3163                                     (intmax_t)foff, (uintmax_t)m->pindex);
 3164                         }
 3165 #endif
 3166 
 3167                         /*
 3168                          * In the write case, the valid and clean bits are
 3169                          * already changed correctly ( see bdwrite() ), so we 
 3170                          * only need to do this here in the read case.
 3171                          */
 3172                         if ((bp->b_iocmd == BIO_READ) && !bogusflag && resid > 0) {
 3173                                 vfs_page_set_valid(bp, foff, i, m);
 3174                         }
 3175                         vm_page_flag_clear(m, PG_ZERO);
 3176 
 3177                         /*
 3178                          * when debugging new filesystems or buffer I/O methods, this
 3179                          * is the most common error that pops up.  if you see this, you
 3180                          * have not set the page busy flag correctly!!!
 3181                          */
 3182                         if (m->busy == 0) {
 3183                                 printf("biodone: page busy < 0, "
 3184                                     "pindex: %d, foff: 0x(%x,%x), "
 3185                                     "resid: %d, index: %d\n",
 3186                                     (int) m->pindex, (int)(foff >> 32),
 3187                                                 (int) foff & 0xffffffff, resid, i);
 3188                                 if (!vn_isdisk(vp, NULL))
 3189                                         printf(" iosize: %ld, lblkno: %jd, flags: 0x%x, npages: %d\n",
 3190                                             bp->b_vp->v_mount->mnt_stat.f_iosize,
 3191                                             (intmax_t) bp->b_lblkno,
 3192                                             bp->b_flags, bp->b_npages);
 3193                                 else
 3194                                         printf(" VDEV, lblkno: %jd, flags: 0x%x, npages: %d\n",
 3195                                             (intmax_t) bp->b_lblkno,
 3196                                             bp->b_flags, bp->b_npages);
 3197                                 printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n",
 3198                                     m->valid, m->dirty, m->wire_count);
 3199                                 panic("biodone: page busy < 0\n");
 3200                         }
 3201                         vm_page_io_finish(m);
 3202                         vm_object_pip_subtract(obj, 1);
 3203                         foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
 3204                         iosize -= resid;
 3205                 }
 3206                 vm_page_unlock_queues();
 3207                 if (obj != NULL) {
 3208                         vm_object_pip_wakeupn(obj, 0);
 3209                         VM_OBJECT_UNLOCK(obj);
 3210                 }
 3211         }
 3212 
 3213         /*
 3214          * For asynchronous completions, release the buffer now. The brelse
 3215          * will do a wakeup there if necessary - so no need to do a wakeup
 3216          * here in the async case. The sync case always needs to do a wakeup.
 3217          */
 3218 
 3219         if (bp->b_flags & B_ASYNC) {
 3220                 if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_RELBUF)) || (bp->b_ioflags & BIO_ERROR))
 3221                         brelse(bp);
 3222                 else
 3223                         bqrelse(bp);
 3224         } else {
 3225                 bdone(bp);
 3226         }
 3227         splx(s);
 3228 }
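
/*
 * Example (sketch): registering a private completion routine.  When
 * b_iodone is non-NULL, bufdone() calls it and returns immediately, so the
 * callback inherits every remaining completion duty, including releasing
 * the buffer; the cluster and swap-pager code use this hook.  The names
 * below are hypothetical and the per-page work a real callback would do
 * for a B_VMIO buffer is reduced to a comment.
 */
#if 0
static void
example_iodone(struct buf *bp)
{
        if (bp->b_ioflags & BIO_ERROR)
                printf("example: async I/O error %d\n", bp->b_error);
        /*
         * A real callback must finish whatever bufdone() skipped on its
         * behalf (e.g. page valid/busy handling for B_VMIO buffers) and
         * then release the buffer.
         */
        brelse(bp);
}

static void
example_start_async(struct vnode *vp, struct buf *bp)
{
        bp->b_iodone = example_iodone;  /* invoked from bufdone() */
        VOP_STRATEGY(vp, bp);           /* no bufwait(); completion is async */
}
#endif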
 3229 
 3230 /*
 3231  * This routine is called in lieu of iodone in the case of
 3232  * incomplete I/O.  This keeps the busy status for pages
 3233  * consistent.
 3234  */
 3235 void
 3236 vfs_unbusy_pages(struct buf * bp)
 3237 {
 3238         int i;
 3239 
 3240         GIANT_REQUIRED;
 3241 
 3242         runningbufwakeup(bp);
 3243         if (bp->b_flags & B_VMIO) {
 3244                 vm_object_t obj;
 3245 
 3246                 obj = bp->b_object;
 3247                 VM_OBJECT_LOCK(obj);
 3248                 vm_page_lock_queues();
 3249                 for (i = 0; i < bp->b_npages; i++) {
 3250                         vm_page_t m = bp->b_pages[i];
 3251 
 3252                         if (m == bogus_page) {
 3253                                 m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i);
 3254                                 if (!m) {
 3255                                         panic("vfs_unbusy_pages: page missing\n");
 3256                                 }
 3257                                 bp->b_pages[i] = m;
 3258                                 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
 3259                         }
 3260                         vm_object_pip_subtract(obj, 1);
 3261                         vm_page_flag_clear(m, PG_ZERO);
 3262                         vm_page_io_finish(m);
 3263                 }
 3264                 vm_page_unlock_queues();
 3265                 vm_object_pip_wakeupn(obj, 0);
 3266                 VM_OBJECT_UNLOCK(obj);
 3267         }
 3268 }
 3269 
 3270 /*
 3271  * vfs_page_set_valid:
 3272  *
 3273  *      Set the valid bits in a page based on the supplied offset.   The
 3274  *      range is restricted to the buffer's size.
 3275  *
 3276  *      This routine is typically called after a read completes.
 3277  */
 3278 static void
 3279 vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m)
 3280 {
 3281         vm_ooffset_t soff, eoff;
 3282 
 3283         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 3284         /*
 3285          * Start and end offsets in buffer.  eoff - soff may not cross a
 3286          * page boundary or cross the end of the buffer.  The end of the
 3287          * buffer, in this case, is our file EOF, not the allocation size
 3288          * of the buffer.
 3289          */
 3290         soff = off;
 3291         eoff = (off + PAGE_SIZE) & ~(off_t)PAGE_MASK;
 3292         if (eoff > bp->b_offset + bp->b_bcount)
 3293                 eoff = bp->b_offset + bp->b_bcount;
 3294 
 3295         /*
 3296          * Set valid range.  This is typically the entire buffer and thus the
 3297          * entire page.
 3298          */
 3299         if (eoff > soff) {
 3300                 vm_page_set_validclean(
 3301                     m,
 3302                    (vm_offset_t) (soff & PAGE_MASK),
 3303                    (vm_offset_t) (eoff - soff)
 3304                 );
 3305         }
 3306 }
 3307 
 3308 /*
 3309  * This routine is called before a device strategy routine.
 3310  * It is used to tell the VM system that paging I/O is in
 3311  * progress, and treat the pages associated with the buffer
 3312  * almost as being PG_BUSY.  Also the object paging_in_progress
 3313  * flag is handled to make sure that the object doesn't become
 3314  * inconsistent.
 3315  *
 3316  * Since I/O has not been initiated yet, certain buffer flags
 3317  * such as BIO_ERROR or B_INVAL may be in an inconsistent state
 3318  * and should be ignored.
 3319  */
 3320 void
 3321 vfs_busy_pages(struct buf * bp, int clear_modify)
 3322 {
 3323         int i, bogus;
 3324 
 3325         if (bp->b_flags & B_VMIO) {
 3326                 vm_object_t obj;
 3327                 vm_ooffset_t foff;
 3328 
 3329                 obj = bp->b_object;
 3330                 foff = bp->b_offset;
 3331                 KASSERT(bp->b_offset != NOOFFSET,
 3332                     ("vfs_busy_pages: no buffer offset"));
 3333                 vfs_setdirty(bp);
 3334                 if (obj != NULL)
 3335                         VM_OBJECT_LOCK(obj);
 3336 retry:
 3337                 vm_page_lock_queues();
 3338                 for (i = 0; i < bp->b_npages; i++) {
 3339                         vm_page_t m = bp->b_pages[i];
 3340 
 3341                         if (vm_page_sleep_if_busy(m, FALSE, "vbpage"))
 3342                                 goto retry;
 3343                 }
 3344                 bogus = 0;
 3345                 for (i = 0; i < bp->b_npages; i++) {
 3346                         vm_page_t m = bp->b_pages[i];
 3347 
 3348                         vm_page_flag_clear(m, PG_ZERO);
 3349                         if ((bp->b_flags & B_CLUSTER) == 0) {
 3350                                 vm_object_pip_add(obj, 1);
 3351                                 vm_page_io_start(m);
 3352                         }
 3353                         /*
 3354                          * When readying a buffer for a read ( i.e.
 3355                          * clear_modify == 0 ), it is important to do
 3356                          * bogus_page replacement for valid pages in 
 3357                          * partially instantiated buffers.  Partially 
 3358                          * instantiated buffers can, in turn, occur when
 3359                          * reconstituting a buffer from its VM backing store
 3360                          * base.  We only have to do this if B_CACHE is
 3361                          * clear ( which causes the I/O to occur in the
 3362                          * first place ).  The replacement prevents the read
 3363                          * I/O from overwriting potentially dirty VM-backed
 3364                          * pages.  XXX bogus page replacement is, uh, bogus.
 3365                          * It may not work properly with small-block devices.
 3366                          * We need to find a better way.
 3367                          */
 3368                         pmap_remove_all(m);
 3369                         if (clear_modify)
 3370                                 vfs_page_set_valid(bp, foff, i, m);
 3371                         else if (m->valid == VM_PAGE_BITS_ALL &&
 3372                                 (bp->b_flags & B_CACHE) == 0) {
 3373                                 bp->b_pages[i] = bogus_page;
 3374                                 bogus++;
 3375                         }
 3376                         foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
 3377                 }
 3378                 vm_page_unlock_queues();
 3379                 if (obj != NULL)
 3380                         VM_OBJECT_UNLOCK(obj);
 3381                 if (bogus)
 3382                         pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
 3383         }
 3384 }
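
/*
 * Example (sketch): the write-side pairing, roughly as bwrite() does it.
 * Pages are busied with clear_modify == 1 so their valid/dirty bits are
 * brought in line with the data being written, then the buffer goes to
 * the strategy routine.  The helper name is hypothetical and the
 * bookkeeping bwrite() performs (bundirty(), runningbufspace accounting,
 * B_CACHE) is omitted.
 */
#if 0
static int
example_write(struct vnode *vp, struct buf *bp)
{
        bp->b_iocmd = BIO_WRITE;
        bp->b_ioflags &= ~BIO_ERROR;
        vfs_busy_pages(bp, 1);          /* validate and clean the VM pages */
        VOP_STRATEGY(vp, bp);
        return (bufwait(bp));           /* synchronous: wait for bufdone() */
}
#endif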
 3385 
 3386 /*
 3387  * Tell the VM system that the pages associated with this buffer
 3388  * are clean.  This is used for delayed writes where the data is
 3389  * going to go to disk eventually without additional VM intervention.
 3390  *
 3391  * Note that while we only really need to clean through to b_bcount, we
 3392  * just go ahead and clean through to b_bufsize.
 3393  */
 3394 static void
 3395 vfs_clean_pages(struct buf * bp)
 3396 {
 3397         int i;
 3398 
 3399         GIANT_REQUIRED;
 3400 
 3401         if (bp->b_flags & B_VMIO) {
 3402                 vm_ooffset_t foff;
 3403 
 3404                 foff = bp->b_offset;
 3405                 KASSERT(bp->b_offset != NOOFFSET,
 3406                     ("vfs_clean_pages: no buffer offset"));
 3407                 vm_page_lock_queues();
 3408                 for (i = 0; i < bp->b_npages; i++) {
 3409                         vm_page_t m = bp->b_pages[i];
 3410                         vm_ooffset_t noff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
 3411                         vm_ooffset_t eoff = noff;
 3412 
 3413                         if (eoff > bp->b_offset + bp->b_bufsize)
 3414                                 eoff = bp->b_offset + bp->b_bufsize;
 3415                         vfs_page_set_valid(bp, foff, i, m);
 3416                         /* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */
 3417                         foff = noff;
 3418                 }
 3419                 vm_page_unlock_queues();
 3420         }
 3421 }
 3422 
 3423 /*
 3424  *      vfs_bio_set_validclean:
 3425  *
 3426  *      Set the range within the buffer to valid and clean.  The range is 
 3427  *      relative to the beginning of the buffer, b_offset.  Note that b_offset
 3428  *      itself may be offset from the beginning of the first page.
 3429  *
 3430  */
 3431 
 3432 void   
 3433 vfs_bio_set_validclean(struct buf *bp, int base, int size)
 3434 {
 3435         if (bp->b_flags & B_VMIO) {
 3436                 int i;
 3437                 int n;
 3438 
 3439                 /*
 3440                  * Fixup base to be relative to beginning of first page.
 3441                  * Set initial n to be the maximum number of bytes in the
 3442                  * first page that can be validated.
 3443                  */
 3444 
 3445                 base += (bp->b_offset & PAGE_MASK);
 3446                 n = PAGE_SIZE - (base & PAGE_MASK);
 3447 
 3448                 vm_page_lock_queues();
 3449                 for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
 3450                         vm_page_t m = bp->b_pages[i];
 3451 
 3452                         if (n > size)
 3453                                 n = size;
 3454 
 3455                         vm_page_set_validclean(m, base & PAGE_MASK, n);
 3456                         base += n;
 3457                         size -= n;
 3458                         n = PAGE_SIZE;
 3459                 }
 3460                 vm_page_unlock_queues();
 3461         }
 3462 }
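
/*
 * Example (sketch): after copying fresh data into part of a buffer, mark
 * exactly that byte range valid and clean; the NFS client does this when
 * it fills a buffer from a reply.  The range is relative to the start of
 * the buffer and only B_VMIO buffers carry the per-page bits.  The helper
 * name is hypothetical.
 */
#if 0
static void
example_fill_range(struct buf *bp, caddr_t src, int off, int len)
{
        bcopy(src, bp->b_data + off, len);
        vfs_bio_set_validclean(bp, off, len);
}
#endif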
 3463 
 3464 /*
 3465  *      vfs_bio_clrbuf:
 3466  *
 3467  *      clear a buffer.  This routine essentially fakes an I/O, so we need
 3468  *      to clear BIO_ERROR and B_INVAL.
 3469  *
 3470  *      Note that while we only theoretically need to clear through b_bcount,
 3471  *      we go ahead and clear through b_bufsize.
 3472  */
 3473 
 3474 void
 3475 vfs_bio_clrbuf(struct buf *bp) 
 3476 {
 3477         int i, mask = 0;
 3478         caddr_t sa, ea;
 3479 
 3480         GIANT_REQUIRED;
 3481 
 3482         if ((bp->b_flags & (B_VMIO | B_MALLOC)) == B_VMIO) {
 3483                 bp->b_flags &= ~B_INVAL;
 3484                 bp->b_ioflags &= ~BIO_ERROR;
 3485                 if( (bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
 3486                     (bp->b_offset & PAGE_MASK) == 0) {
 3487                         mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
 3488                         if ((bp->b_pages[0]->valid & mask) == mask) {
 3489                                 bp->b_resid = 0;
 3490                                 return;
 3491                         }
 3492                         if (((bp->b_pages[0]->flags & PG_ZERO) == 0) &&
 3493                             ((bp->b_pages[0]->valid & mask) == 0)) {
 3494                                 bzero(bp->b_data, bp->b_bufsize);
 3495                                 bp->b_pages[0]->valid |= mask;
 3496                                 bp->b_resid = 0;
 3497                                 return;
 3498                         }
 3499                 }
 3500                 ea = sa = bp->b_data;
 3501                 for(i=0;i<bp->b_npages;i++,sa=ea) {
 3502                         int j = ((vm_offset_t)sa & PAGE_MASK) / DEV_BSIZE;
 3503                         ea = (caddr_t)trunc_page((vm_offset_t)sa + PAGE_SIZE);
 3504                         ea = (caddr_t)(vm_offset_t)ulmin(
 3505                             (u_long)(vm_offset_t)ea,
 3506                             (u_long)(vm_offset_t)bp->b_data + bp->b_bufsize);
 3507                         mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j;
 3508                         if ((bp->b_pages[i]->valid & mask) == mask)
 3509                                 continue;
 3510                         if ((bp->b_pages[i]->valid & mask) == 0) {
 3511                                 if ((bp->b_pages[i]->flags & PG_ZERO) == 0) {
 3512                                         bzero(sa, ea - sa);
 3513                                 }
 3514                         } else {
 3515                                 for (; sa < ea; sa += DEV_BSIZE, j++) {
 3516                                         if (((bp->b_pages[i]->flags & PG_ZERO) == 0) &&
 3517                                                 (bp->b_pages[i]->valid & (1<<j)) == 0)
 3518                                                 bzero(sa, DEV_BSIZE);
 3519                                 }
 3520                         }
 3521                         bp->b_pages[i]->valid |= mask;
 3522                         vm_page_lock_queues();
 3523                         vm_page_flag_clear(bp->b_pages[i], PG_ZERO);
 3524                         vm_page_unlock_queues();
 3525                 }
 3526                 bp->b_resid = 0;
 3527         } else {
 3528                 clrbuf(bp);
 3529         }
 3530 }
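
/*
 * Example (sketch): a filesystem allocating a brand-new block has nothing
 * to read from disk, so it grabs a buffer with getblk() and lets
 * vfs_bio_clrbuf() zero whatever portions are not already valid, much as
 * ffs_balloc() does for B_CLRBUF requests.  The helper name is
 * hypothetical and the logical-to-device block translation is assumed to
 * have been done by the caller.
 */
#if 0
static struct buf *
example_new_block(struct vnode *vp, daddr_t lbn, daddr_t dblkno, int size)
{
        struct buf *bp;

        bp = getblk(vp, lbn, size, 0, 0);
        bp->b_blkno = dblkno;           /* newly allocated device block */
        vfs_bio_clrbuf(bp);             /* fake the read: zero invalid parts */
        return (bp);
}
#endif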
 3531 
 3532 /*
 3533  * vm_hold_load_pages and vm_hold_free_pages get pages into
 3534  * a buffer's address space.  The pages are anonymous and are
 3535  * not associated with a file object.
 3536  */
 3537 static void
 3538 vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
 3539 {
 3540         vm_offset_t pg;
 3541         vm_page_t p;
 3542         int index;
 3543 
 3544         GIANT_REQUIRED;
 3545 
 3546         to = round_page(to);
 3547         from = round_page(from);
 3548         index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
 3549 
 3550         for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
 3551 tryagain:
 3552                 /*
 3553                  * note: must allocate system pages since blocking here
 3554                  * could interfere with paging I/O, no matter which
 3555                  * process we are.
 3556                  */
 3557                 VM_OBJECT_LOCK(kernel_object);
 3558                 p = vm_page_alloc(kernel_object,
 3559                         ((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
 3560                     VM_ALLOC_SYSTEM | VM_ALLOC_WIRED);
 3561                 VM_OBJECT_UNLOCK(kernel_object);
 3562                 if (!p) {
 3563                         atomic_add_int(&vm_pageout_deficit,
 3564                             (to - pg) >> PAGE_SHIFT);
 3565                         VM_WAIT;
 3566                         goto tryagain;
 3567                 }
 3568                 vm_page_lock_queues();
 3569                 p->valid = VM_PAGE_BITS_ALL;
 3570                 vm_page_unlock_queues();
 3571                 pmap_qenter(pg, &p, 1);
 3572                 bp->b_pages[index] = p;
 3573                 vm_page_lock_queues();
 3574                 vm_page_wakeup(p);
 3575                 vm_page_unlock_queues();
 3576         }
 3577         bp->b_npages = index;
 3578 }
 3579 
 3580 /* Return pages associated with this buf to the vm system */
 3581 static void
 3582 vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
 3583 {
 3584         vm_offset_t pg;
 3585         vm_page_t p;
 3586         int index, newnpages;
 3587 
 3588         GIANT_REQUIRED;
 3589 
 3590         from = round_page(from);
 3591         to = round_page(to);
 3592         newnpages = index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
 3593 
 3594         for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
 3595                 p = bp->b_pages[index];
 3596                 if (p && (index < bp->b_npages)) {
 3597                         if (p->busy) {
 3598                                 printf(
 3599                             "vm_hold_free_pages: blkno: %jd, lblkno: %jd\n",
 3600                                     (intmax_t)bp->b_blkno,
 3601                                     (intmax_t)bp->b_lblkno);
 3602                         }
 3603                         bp->b_pages[index] = NULL;
 3604                         pmap_qremove(pg, 1);
 3605                         vm_page_lock_queues();
 3606                         vm_page_busy(p);
 3607                         vm_page_unwire(p, 0);
 3608                         vm_page_free(p);
 3609                         vm_page_unlock_queues();
 3610                 }
 3611         }
 3612         bp->b_npages = newnpages;
 3613 }
 3614 
 3615 /*
 3616  * Map an IO request into kernel virtual address space.
 3617  *
 3618  * All requests are (re)mapped into kernel VA space.
 3619  * Notice that we use b_bufsize for the size of the buffer
 3620  * to be mapped.  b_bcount might be modified by the driver.
 3621  *
 3622  * Note that even if the caller determines that the address space should
 3623  * be valid, a race or a smaller file mapped into a larger space may
 3624  * actually cause vmapbuf() to fail, so all callers of vmapbuf() MUST
 3625  * check the return value.
 3626  */
 3627 int
 3628 vmapbuf(struct buf *bp)
 3629 {
 3630         caddr_t addr, kva;
 3631         vm_paddr_t pa;
 3632         int pidx, i;
 3633         struct vm_page *m;
 3634         struct pmap *pmap = &curproc->p_vmspace->vm_pmap;
 3635 
 3636         GIANT_REQUIRED;
 3637 
 3638         if ((bp->b_flags & B_PHYS) == 0)
 3639                 panic("vmapbuf");
 3640         if (bp->b_bufsize < 0)
 3641                 return (-1);
 3642         for (addr = (caddr_t)trunc_page((vm_offset_t)bp->b_data), pidx = 0;
 3643              addr < bp->b_data + bp->b_bufsize;
 3644              addr += PAGE_SIZE, pidx++) {
 3645                 /*
 3646                  * Do the vm_fault if needed; do the copy-on-write thing
 3647                  * when reading stuff off device into memory.
 3648                  *
 3649                  * NOTE! Must use pmap_extract() because addr may be in
 3650                  * the userland address space, and kextract is only guaranteed
 3651                  * to work for the kernel address space (see: sparc64 port).
 3652                  */
 3653 retry:
 3654                 i = vm_fault_quick((addr >= bp->b_data) ? addr : bp->b_data,
 3655                         (bp->b_iocmd == BIO_READ) ?
 3656                         (VM_PROT_READ|VM_PROT_WRITE) : VM_PROT_READ);
 3657                 if (i < 0) {
 3658                         vm_page_lock_queues();
 3659                         for (i = 0; i < pidx; ++i) {
 3660                                 vm_page_unhold(bp->b_pages[i]);
 3661                                 bp->b_pages[i] = NULL;
 3662                         }
 3663                         vm_page_unlock_queues();
 3664                         return(-1);
 3665                 }
 3666                 pa = pmap_extract(pmap, (vm_offset_t)addr);
 3667                 if (pa == 0) {
 3668                         printf("vmapbuf: warning, race against user address during I/O\n");
 3669                         goto retry;
 3670                 }
 3671                 m = PHYS_TO_VM_PAGE(pa);
 3672                 vm_page_lock_queues();
 3673                 vm_page_hold(m);
 3674                 vm_page_unlock_queues();
 3675                 bp->b_pages[pidx] = m;
 3676         }
 3677         if (pidx > btoc(MAXPHYS))
 3678                 panic("vmapbuf: mapped more than MAXPHYS");
 3679         pmap_qenter((vm_offset_t)bp->b_saveaddr, bp->b_pages, pidx);
 3680         
 3681         kva = bp->b_saveaddr;
 3682         bp->b_npages = pidx;
 3683         bp->b_saveaddr = bp->b_data;
 3684         bp->b_data = kva + (((vm_offset_t) bp->b_data) & PAGE_MASK);
 3685         return(0);
 3686 }
 3687 
 3688 /*
 3689  * Free the io map PTEs associated with this IO operation.
 3690  * We also invalidate the TLB entries and restore the original b_addr.
 3691  */
 3692 void
 3693 vunmapbuf(struct buf *bp)
 3694 {
 3695         int pidx;
 3696         int npages;
 3697 
 3698         GIANT_REQUIRED;
 3699 
 3700         if ((bp->b_flags & B_PHYS) == 0)
 3701                 panic("vunmapbuf");
 3702 
 3703         npages = bp->b_npages;
 3704         pmap_qremove(trunc_page((vm_offset_t)bp->b_data),
 3705                      npages);
 3706         vm_page_lock_queues();
 3707         for (pidx = 0; pidx < npages; pidx++)
 3708                 vm_page_unhold(bp->b_pages[pidx]);
 3709         vm_page_unlock_queues();
 3710 
 3711         bp->b_data = bp->b_saveaddr;
 3712 }
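
/*
 * Example (sketch): the vmapbuf()/vunmapbuf() pairing used for raw
 * transfers straight to or from a user buffer, loosely following
 * physio(9).  b_data must point at kernel KVA backing the buffer before
 * the user address is substituted, and the transfer may not exceed
 * MAXPHYS.  The helper name is hypothetical; splitting the request and
 * the actual hand-off to the driver (physio() uses DEV_STRATEGY() there)
 * are reduced to a comment.
 */
#if 0
static int
example_raw_io(struct buf *bp, caddr_t udata, int len, int iocmd)
{
        int error;

        bp->b_iocmd = iocmd;            /* BIO_READ or BIO_WRITE */
        bp->b_flags |= B_PHYS;
        bp->b_saveaddr = bp->b_data;    /* kernel KVA backing this buffer */
        bp->b_data = udata;             /* user address to wire and map */
        bp->b_bufsize = bp->b_bcount = len;     /* len <= MAXPHYS */
        if (vmapbuf(bp) < 0)
                return (EFAULT);        /* user pages could not be wired */
        /* ... hand bp to the device driver here ... */
        error = bufwait(bp);
        vunmapbuf(bp);                  /* unwire pages, restore b_data */
        bp->b_flags &= ~B_PHYS;
        return (error);
}
#endif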
 3713 
 3714 void
 3715 bdone(struct buf *bp)
 3716 {
 3717         mtx_lock(&bdonelock);
 3718         bp->b_flags |= B_DONE;
 3719         wakeup(bp);
 3720         mtx_unlock(&bdonelock);
 3721 }
 3722 
 3723 void
 3724 bwait(struct buf *bp, u_char pri, const char *wchan)
 3725 {
 3726         mtx_lock(&bdonelock);
 3727         while ((bp->b_flags & B_DONE) == 0)
 3728                 msleep(bp, &bdonelock, pri, wchan, 0);
 3729         mtx_unlock(&bdonelock);
 3730 }
 3731 
 3732 #include "opt_ddb.h"
 3733 #ifdef DDB
 3734 #include <ddb/ddb.h>
 3735 
 3736 /* DDB command to show buffer data */
 3737 DB_SHOW_COMMAND(buffer, db_show_buffer)
 3738 {
 3739         /* get args */
 3740         struct buf *bp = (struct buf *)addr;
 3741 
 3742         if (!have_addr) {
 3743                 db_printf("usage: show buffer <addr>\n");
 3744                 return;
 3745         }
 3746 
 3747         db_printf("b_flags = 0x%b\n", (u_int)bp->b_flags, PRINT_BUF_FLAGS);
 3748         db_printf(
 3749             "b_error = %d, b_bufsize = %ld, b_bcount = %ld, b_resid = %ld\n"
 3750             "b_dev = (%d,%d), b_data = %p, b_blkno = %jd, b_pblkno = %jd\n",
 3751             bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
 3752             major(bp->b_dev), minor(bp->b_dev), bp->b_data,
 3753             (intmax_t)bp->b_blkno, (intmax_t)bp->b_pblkno);
 3754         if (bp->b_npages) {
 3755                 int i;
 3756                 db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
 3757                 for (i = 0; i < bp->b_npages; i++) {
 3758                         vm_page_t m;
 3759                         m = bp->b_pages[i];
 3760                         db_printf("(%p, 0x%lx, 0x%lx)", (void *)m->object,
 3761                             (u_long)m->pindex, (u_long)VM_PAGE_TO_PHYS(m));
 3762                         if ((i + 1) < bp->b_npages)
 3763                                 db_printf(",");
 3764                 }
 3765                 db_printf("\n");
 3766         }
 3767 }
 3768 #endif /* DDB */
