FreeBSD/Linux Kernel Cross Reference
sys/kern/vfs_bio.c


    1 /*
    2  * Copyright (c) 1994,1997 John S. Dyson
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice immediately at the beginning of the file, without modification,
   10  *    this list of conditions, and the following disclaimer.
   11  * 2. Absolutely no warranty of function or purpose is made by the author
   12  *              John S. Dyson.
   13  */
   14 
   15 /*
    16  * This file implements a buffer I/O scheme providing a coherent
    17  * VM object and buffer cache.  Pains have been taken to make
    18  * sure that the performance degradation associated with such schemes
    19  * is not realized.
   20  *
   21  * Author:  John S. Dyson
   22  * Significant help during the development and debugging phases
    23  * was provided by David Greenman, also of the FreeBSD core team.
   24  *
   25  * see man buf(9) for more info.
   26  */
   27 
   28 #include <sys/cdefs.h>
   29 __FBSDID("$FreeBSD: releng/5.2/sys/kern/vfs_bio.c 122747 2003-11-15 09:28:09Z phk $");
   30 
   31 #include <sys/param.h>
   32 #include <sys/systm.h>
   33 #include <sys/bio.h>
   34 #include <sys/conf.h>
   35 #include <sys/buf.h>
   36 #include <sys/devicestat.h>
   37 #include <sys/eventhandler.h>
   38 #include <sys/lock.h>
   39 #include <sys/malloc.h>
   40 #include <sys/mount.h>
   41 #include <sys/mutex.h>
   42 #include <sys/kernel.h>
   43 #include <sys/kthread.h>
   44 #include <sys/proc.h>
   45 #include <sys/resourcevar.h>
   46 #include <sys/sysctl.h>
   47 #include <sys/vmmeter.h>
   48 #include <sys/vnode.h>
   49 #include <vm/vm.h>
   50 #include <vm/vm_param.h>
   51 #include <vm/vm_kern.h>
   52 #include <vm/vm_pageout.h>
   53 #include <vm/vm_page.h>
   54 #include <vm/vm_object.h>
   55 #include <vm/vm_extern.h>
   56 #include <vm/vm_map.h>
   57 #include "opt_directio.h"
   58 #include "opt_swap.h"
   59 
   60 static MALLOC_DEFINE(M_BIOBUF, "BIO buffer", "BIO buffer");
   61 
   62 struct  bio_ops bioops;         /* I/O operation notification */
   63 
   64 struct  buf_ops buf_ops_bio = {
   65         "buf_ops_bio",
   66         bwrite
   67 };
   68 
   69 /*
    70  * XXX buf is global because kern_shutdown.c and ffs_checkoverlap have
   71  * carnal knowledge of buffers.  This knowledge should be moved to vfs_bio.c.
   72  */
   73 struct buf *buf;                /* buffer header pool */
   74 
   75 static struct proc *bufdaemonproc;
   76 
   77 static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
   78                 vm_offset_t to);
   79 static void vm_hold_load_pages(struct buf * bp, vm_offset_t from,
   80                 vm_offset_t to);
   81 static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off,
   82                                int pageno, vm_page_t m);
   83 static void vfs_clean_pages(struct buf * bp);
   84 static void vfs_setdirty(struct buf *bp);
   85 static void vfs_vmio_release(struct buf *bp);
   86 static void vfs_backgroundwritedone(struct buf *bp);
   87 static int vfs_bio_clcheck(struct vnode *vp, int size,
   88                 daddr_t lblkno, daddr_t blkno);
   89 static int flushbufqueues(int flushdeps);
   90 static void buf_daemon(void);
   91 void bremfreel(struct buf * bp);
   92 
   93 int vmiodirenable = TRUE;
   94 SYSCTL_INT(_vfs, OID_AUTO, vmiodirenable, CTLFLAG_RW, &vmiodirenable, 0,
   95     "Use the VM system for directory writes");
   96 int runningbufspace;
   97 SYSCTL_INT(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD, &runningbufspace, 0,
   98     "Amount of presently outstanding async buffer io");
   99 static int bufspace;
  100 SYSCTL_INT(_vfs, OID_AUTO, bufspace, CTLFLAG_RD, &bufspace, 0,
  101     "KVA memory used for bufs");
  102 static int maxbufspace;
  103 SYSCTL_INT(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RD, &maxbufspace, 0,
  104     "Maximum allowed value of bufspace (including buf_daemon)");
  105 static int bufmallocspace;
  106 SYSCTL_INT(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD, &bufmallocspace, 0,
  107     "Amount of malloced memory for buffers");
  108 static int maxbufmallocspace;
  109 SYSCTL_INT(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW, &maxbufmallocspace, 0,
  110     "Maximum amount of malloced memory for buffers");
  111 static int lobufspace;
  112 SYSCTL_INT(_vfs, OID_AUTO, lobufspace, CTLFLAG_RD, &lobufspace, 0,
  113     "Minimum amount of buffers we want to have");
  114 static int hibufspace;
  115 SYSCTL_INT(_vfs, OID_AUTO, hibufspace, CTLFLAG_RD, &hibufspace, 0,
  116     "Maximum allowed value of bufspace (excluding buf_daemon)");
  117 static int bufreusecnt;
  118 SYSCTL_INT(_vfs, OID_AUTO, bufreusecnt, CTLFLAG_RW, &bufreusecnt, 0,
  119     "Number of times we have reused a buffer");
  120 static int buffreekvacnt;
  121 SYSCTL_INT(_vfs, OID_AUTO, buffreekvacnt, CTLFLAG_RW, &buffreekvacnt, 0,
  122     "Number of times we have freed the KVA space from some buffer");
  123 static int bufdefragcnt;
  124 SYSCTL_INT(_vfs, OID_AUTO, bufdefragcnt, CTLFLAG_RW, &bufdefragcnt, 0,
  125     "Number of times we have had to repeat buffer allocation to defragment");
  126 static int lorunningspace;
  127 SYSCTL_INT(_vfs, OID_AUTO, lorunningspace, CTLFLAG_RW, &lorunningspace, 0,
  128     "Minimum preferred space used for in-progress I/O");
  129 static int hirunningspace;
  130 SYSCTL_INT(_vfs, OID_AUTO, hirunningspace, CTLFLAG_RW, &hirunningspace, 0,
  131     "Maximum amount of space to use for in-progress I/O");
  132 static int dirtybufferflushes;
  133 SYSCTL_INT(_vfs, OID_AUTO, dirtybufferflushes, CTLFLAG_RW, &dirtybufferflushes,
  134     0, "Number of bdwrite to bawrite conversions to limit dirty buffers");
  135 static int altbufferflushes;
  136 SYSCTL_INT(_vfs, OID_AUTO, altbufferflushes, CTLFLAG_RW, &altbufferflushes,
  137     0, "Number of fsync flushes to limit dirty buffers");
  138 static int recursiveflushes;
  139 SYSCTL_INT(_vfs, OID_AUTO, recursiveflushes, CTLFLAG_RW, &recursiveflushes,
  140     0, "Number of flushes skipped due to being recursive");
  141 static int numdirtybuffers;
  142 SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD, &numdirtybuffers, 0,
  143     "Number of buffers that are dirty (has unwritten changes) at the moment");
  144 static int lodirtybuffers;
  145 SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW, &lodirtybuffers, 0,
  146     "How many buffers we want to have free before bufdaemon can sleep");
  147 static int hidirtybuffers;
  148 SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW, &hidirtybuffers, 0,
  149     "When the number of dirty buffers is considered severe");
  150 static int dirtybufthresh;
  151 SYSCTL_INT(_vfs, OID_AUTO, dirtybufthresh, CTLFLAG_RW, &dirtybufthresh,
  152     0, "Number of bdwrite to bawrite conversions to clear dirty buffers");
  153 static int numfreebuffers;
  154 SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD, &numfreebuffers, 0,
  155     "Number of free buffers");
  156 static int lofreebuffers;
  157 SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW, &lofreebuffers, 0,
  158    "XXX Unused");
  159 static int hifreebuffers;
  160 SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW, &hifreebuffers, 0,
  161    "XXX Complicatedly unused");
  162 static int getnewbufcalls;
  163 SYSCTL_INT(_vfs, OID_AUTO, getnewbufcalls, CTLFLAG_RW, &getnewbufcalls, 0,
  164    "Number of calls to getnewbuf");
  165 static int getnewbufrestarts;
  166 SYSCTL_INT(_vfs, OID_AUTO, getnewbufrestarts, CTLFLAG_RW, &getnewbufrestarts, 0,
  167     "Number of times getnewbuf has had to restart a buffer aquisition");
  168 static int dobkgrdwrite = 1;
  169 SYSCTL_INT(_debug, OID_AUTO, dobkgrdwrite, CTLFLAG_RW, &dobkgrdwrite, 0,
  170     "Do background writes (honoring the BV_BKGRDWRITE flag)?");
  171 
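/*
 * Illustrative sketch, not part of vfs_bio.c: the counters and knobs
 * defined above appear under the vfs (and debug) sysctl trees and can be
 * read from userland with sysctlbyname(3).  A minimal reader; only the
 * sysctl name is taken from this file:
 *
 *        #include <sys/types.h>
 *        #include <sys/sysctl.h>
 *        #include <stdio.h>
 *
 *        int
 *        main(void)
 *        {
 *                int dirty;
 *                size_t len = sizeof(dirty);
 *
 *                if (sysctlbyname("vfs.numdirtybuffers", &dirty, &len,
 *                    NULL, 0) == -1) {
 *                        perror("sysctlbyname");
 *                        return (1);
 *                }
 *                printf("dirty buffers: %d\n", dirty);
 *                return (0);
 *        }
 */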
  172 /*
  173  * Wakeup point for bufdaemon, as well as indicator of whether it is already
  174  * active.  Set to 1 when the bufdaemon is already "on" the queue, 0 when it
  175  * is idling.
  176  */
  177 static int bd_request;
  178 
  179 /*
  180  * This lock synchronizes access to bd_request.
  181  */
  182 static struct mtx bdlock;
  183 
  184 /*
  185  * bogus page -- for I/O to/from partially complete buffers
  186  * this is a temporary solution to the problem, but it is not
  187  * really that bad.  it would be better to split the buffer
  188  * for input in the case of buffers partially already in memory,
  189  * but the code is intricate enough already.
  190  */
  191 vm_page_t bogus_page;
  192 
  193 /*
  194  * Synchronization (sleep/wakeup) variable for active buffer space requests.
  195  * Set when wait starts, cleared prior to wakeup().
  196  * Used in runningbufwakeup() and waitrunningbufspace().
  197  */
  198 static int runningbufreq;
  199 
  200 /*
  201  * This lock protects the runningbufreq and synchronizes runningbufwakeup and
  202  * waitrunningbufspace().
  203  */
  204 static struct mtx rbreqlock;
  205 
  206 /* 
  207  * Synchronization (sleep/wakeup) variable for buffer requests.
  208  * Can contain the VFS_BIO_NEED flags defined below; setting/clearing is done
  209  * by and/or.
  210  * Used in numdirtywakeup(), bufspacewakeup(), bufcountwakeup(), bwillwrite(),
  211  * getnewbuf(), and getblk().
  212  */
  213 static int needsbuffer;
  214 
  215 /*
  216  * Lock that protects needsbuffer and the sleeps/wakeups surrounding it.
  217  */
  218 static struct mtx nblock;
  219 
  220 /*
  221  * Lock that protects against bwait()/bdone()/B_DONE races.
  222  */
  223 
  224 static struct mtx bdonelock;
  225 
  226 /*
  227  * Definitions for the buffer free lists.
  228  */
  229 #define BUFFER_QUEUES   5       /* number of free buffer queues */
  230 
  231 #define QUEUE_NONE      0       /* on no queue */
  232 #define QUEUE_CLEAN     1       /* non-B_DELWRI buffers */
  233 #define QUEUE_DIRTY     2       /* B_DELWRI buffers */
  234 #define QUEUE_EMPTYKVA  3       /* empty buffer headers w/KVA assignment */
  235 #define QUEUE_EMPTY     4       /* empty buffer headers */
  236 
  237 /* Queues for free buffers with various properties */
  238 static TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES] = { { 0 } };
  239 
  240 /* Lock for the bufqueues */
  241 static struct mtx bqlock;
  242 
  243 /*
  244  * Single global constant for BUF_WMESG, to avoid getting multiple references.
   245  * buf_wmesg is referenced from macros.
  246  */
  247 const char *buf_wmesg = BUF_WMESG;
  248 
  249 #define VFS_BIO_NEED_ANY        0x01    /* any freeable buffer */
  250 #define VFS_BIO_NEED_DIRTYFLUSH 0x02    /* waiting for dirty buffer flush */
  251 #define VFS_BIO_NEED_FREE       0x04    /* wait for free bufs, hi hysteresis */
  252 #define VFS_BIO_NEED_BUFSPACE   0x08    /* wait for buf space, lo hysteresis */
  253 
  254 #ifdef DIRECTIO
  255 extern void ffs_rawread_setup(void);
  256 #endif /* DIRECTIO */
  257 /*
  258  *      numdirtywakeup:
  259  *
  260  *      If someone is blocked due to there being too many dirty buffers,
  261  *      and numdirtybuffers is now reasonable, wake them up.
  262  */
  263 
  264 static __inline void
  265 numdirtywakeup(int level)
  266 {
  267         if (numdirtybuffers <= level) {
  268                 mtx_lock(&nblock);
  269                 if (needsbuffer & VFS_BIO_NEED_DIRTYFLUSH) {
  270                         needsbuffer &= ~VFS_BIO_NEED_DIRTYFLUSH;
  271                         wakeup(&needsbuffer);
  272                 }
  273                 mtx_unlock(&nblock);
  274         }
  275 }
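/*
 * Illustrative sketch, not original to this spot in the file: the waiting
 * side of this handshake (see bwillwrite() further below) sets the need
 * bit and sleeps on &needsbuffer under the same nblock mutex that
 * numdirtywakeup() takes before clearing it:
 *
 *        mtx_lock(&nblock);
 *        while (numdirtybuffers >= hidirtybuffers) {
 *                bd_wakeup(1);
 *                needsbuffer |= VFS_BIO_NEED_DIRTYFLUSH;
 *                msleep(&needsbuffer, &nblock, PRIBIO + 4, "flswai", 0);
 *        }
 *        mtx_unlock(&nblock);
 */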
  276 
  277 /*
  278  *      bufspacewakeup:
  279  *
  280  *      Called when buffer space is potentially available for recovery.
  281  *      getnewbuf() will block on this flag when it is unable to free 
  282  *      sufficient buffer space.  Buffer space becomes recoverable when 
  283  *      bp's get placed back in the queues.
  284  */
  285 
  286 static __inline void
  287 bufspacewakeup(void)
  288 {
  289         /*
  290          * If someone is waiting for BUF space, wake them up.  Even
  291          * though we haven't freed the kva space yet, the waiting
  292          * process will be able to now.
  293          */
  294         mtx_lock(&nblock);
  295         if (needsbuffer & VFS_BIO_NEED_BUFSPACE) {
  296                 needsbuffer &= ~VFS_BIO_NEED_BUFSPACE;
  297                 wakeup(&needsbuffer);
  298         }
  299         mtx_unlock(&nblock);
  300 }
  301 
  302 /*
  303  * runningbufwakeup() - in-progress I/O accounting.
  304  *
  305  */
  306 static __inline void
  307 runningbufwakeup(struct buf *bp)
  308 {
  309         if (bp->b_runningbufspace) {
  310                 atomic_subtract_int(&runningbufspace, bp->b_runningbufspace);
  311                 bp->b_runningbufspace = 0;
  312                 mtx_lock(&rbreqlock);
  313                 if (runningbufreq && runningbufspace <= lorunningspace) {
  314                         runningbufreq = 0;
  315                         wakeup(&runningbufreq);
  316                 }
  317                 mtx_unlock(&rbreqlock);
  318         }
  319 }
  320 
  321 /*
  322  *      bufcountwakeup:
  323  *
  324  *      Called when a buffer has been added to one of the free queues to
  325  *      account for the buffer and to wakeup anyone waiting for free buffers.
  326  *      This typically occurs when large amounts of metadata are being handled
  327  *      by the buffer cache ( else buffer space runs out first, usually ).
  328  */
  329 
  330 static __inline void
  331 bufcountwakeup(void) 
  332 {
  333         atomic_add_int(&numfreebuffers, 1);
  334         mtx_lock(&nblock);
  335         if (needsbuffer) {
  336                 needsbuffer &= ~VFS_BIO_NEED_ANY;
  337                 if (numfreebuffers >= hifreebuffers)
  338                         needsbuffer &= ~VFS_BIO_NEED_FREE;
  339                 wakeup(&needsbuffer);
  340         }
  341         mtx_unlock(&nblock);
  342 }
  343 
  344 /*
  345  *      waitrunningbufspace()
  346  *
  347  *      runningbufspace is a measure of the amount of I/O currently
  348  *      running.  This routine is used in async-write situations to
  349  *      prevent creating huge backups of pending writes to a device.
  350  *      Only asynchronous writes are governed by this function.
  351  *
  352  *      Reads will adjust runningbufspace, but will not block based on it.
  353  *      The read load has a side effect of reducing the allowed write load.
  354  *
  355  *      This does NOT turn an async write into a sync write.  It waits  
  356  *      for earlier writes to complete and generally returns before the
  357  *      caller's write has reached the device.
  358  */
  359 static __inline void
  360 waitrunningbufspace(void)
  361 {
  362         mtx_lock(&rbreqlock);
  363         while (runningbufspace > hirunningspace) {
  364                 ++runningbufreq;
  365                 msleep(&runningbufreq, &rbreqlock, PVM, "wdrain", 0);
  366         }
  367         mtx_unlock(&rbreqlock);
  368 }
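/*
 * Illustrative sketch, not original to this spot in the file: the async
 * write path (cf. bwrite() below) charges a buffer against runningbufspace
 * before starting the I/O and then throttles itself here; the completion
 * side undoes the charge via runningbufwakeup():
 *
 *        bp->b_runningbufspace = bp->b_bufsize;
 *        atomic_add_int(&runningbufspace, bp->b_runningbufspace);
 *        ... hand the buffer to the driver ...
 *        waitrunningbufspace();
 */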
  369 
  370 
  371 /*
  372  *      vfs_buf_test_cache:
  373  *
  374  *      Called when a buffer is extended.  This function clears the B_CACHE
  375  *      bit if the newly extended portion of the buffer does not contain
  376  *      valid data.
  377  */
  378 static __inline__
  379 void
  380 vfs_buf_test_cache(struct buf *bp,
  381                   vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
  382                   vm_page_t m)
  383 {
  384         GIANT_REQUIRED;
  385 
  386         VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
  387         if (bp->b_flags & B_CACHE) {
  388                 int base = (foff + off) & PAGE_MASK;
  389                 if (vm_page_is_valid(m, base, size) == 0)
  390                         bp->b_flags &= ~B_CACHE;
  391         }
  392 }
  393 
   394 /* Wake up the buffer daemon if necessary */
  395 static __inline__
  396 void
  397 bd_wakeup(int dirtybuflevel)
  398 {
  399         mtx_lock(&bdlock);
  400         if (bd_request == 0 && numdirtybuffers >= dirtybuflevel) {
  401                 bd_request = 1;
  402                 wakeup(&bd_request);
  403         }
  404         mtx_unlock(&bdlock);
  405 }
  406 
  407 /*
  408  * bd_speedup - speedup the buffer cache flushing code
  409  */
  410 
  411 static __inline__
  412 void
  413 bd_speedup(void)
  414 {
  415         bd_wakeup(1);
  416 }
  417 
  418 /*
   419  * Calculate buffer cache scaling values and reserve space for buffer
   420  * headers.  This is called during low-level kernel initialization and
   421  * may be called more than once.  We CANNOT write to the memory area
  422  * being reserved at this time.
  423  */
  424 caddr_t
  425 kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est)
  426 {
  427         /*
  428          * physmem_est is in pages.  Convert it to kilobytes (assumes
  429          * PAGE_SIZE is >= 1K)
  430          */
  431         physmem_est = physmem_est * (PAGE_SIZE / 1024);
  432 
  433         /*
  434          * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
  435          * For the first 64MB of ram nominally allocate sufficient buffers to
  436          * cover 1/4 of our ram.  Beyond the first 64MB allocate additional
   437  * buffers to cover 1/10 of our ram over 64MB.  When auto-sizing
  438          * the buffer cache we limit the eventual kva reservation to
  439          * maxbcache bytes.
  440          *
  441          * factor represents the 1/4 x ram conversion.
  442          */
  443         if (nbuf == 0) {
  444                 int factor = 4 * BKVASIZE / 1024;
  445 
  446                 nbuf = 50;
  447                 if (physmem_est > 4096)
  448                         nbuf += min((physmem_est - 4096) / factor,
  449                             65536 / factor);
  450                 if (physmem_est > 65536)
  451                         nbuf += (physmem_est - 65536) * 2 / (factor * 5);
  452 
  453                 if (maxbcache && nbuf > maxbcache / BKVASIZE)
  454                         nbuf = maxbcache / BKVASIZE;
  455         }
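        /*
         * Worked example, illustrative only and assuming BKVASIZE is 16K
         * (so factor == 64): with 256MB of RAM, physmem_est is 262144KB.
         * The first term adds min((262144 - 4096) / 64, 65536 / 64) == 1024
         * buffers, the second adds (262144 - 65536) * 2 / 320 == 1228, so
         * nbuf is roughly 50 + 1024 + 1228 == 2302 before any maxbcache
         * clamping.
         */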
  456 
  457 #if 0
  458         /*
   459  * Do not allow the buffer_map to be more than 1/2 the size of the
  460          * kernel_map.
  461          */
  462         if (nbuf > (kernel_map->max_offset - kernel_map->min_offset) / 
  463             (BKVASIZE * 2)) {
  464                 nbuf = (kernel_map->max_offset - kernel_map->min_offset) / 
  465                     (BKVASIZE * 2);
  466                 printf("Warning: nbufs capped at %d\n", nbuf);
  467         }
  468 #endif
  469 
  470         /*
  471          * swbufs are used as temporary holders for I/O, such as paging I/O.
   472  * We have no fewer than 16 and no more than 256.
  473          */
  474         nswbuf = max(min(nbuf/4, 256), 16);
  475 #ifdef NSWBUF_MIN
  476         if (nswbuf < NSWBUF_MIN)
  477                 nswbuf = NSWBUF_MIN;
  478 #endif
  479 #ifdef DIRECTIO
  480         ffs_rawread_setup();
  481 #endif
  482 
  483         /*
  484          * Reserve space for the buffer cache buffers
  485          */
  486         swbuf = (void *)v;
  487         v = (caddr_t)(swbuf + nswbuf);
  488         buf = (void *)v;
  489         v = (caddr_t)(buf + nbuf);
  490 
  491         return(v);
  492 }
  493 
  494 /* Initialize the buffer subsystem.  Called before use of any buffers. */
  495 void
  496 bufinit(void)
  497 {
  498         struct buf *bp;
  499         int i;
  500 
  501         GIANT_REQUIRED;
  502 
  503         mtx_init(&bqlock, "buf queue lock", NULL, MTX_DEF);
  504         mtx_init(&rbreqlock, "runningbufspace lock", NULL, MTX_DEF);
  505         mtx_init(&nblock, "needsbuffer lock", NULL, MTX_DEF);
  506         mtx_init(&bdlock, "buffer daemon lock", NULL, MTX_DEF);
  507         mtx_init(&bdonelock, "bdone lock", NULL, MTX_DEF);
  508 
  509         /* next, make a null set of free lists */
  510         for (i = 0; i < BUFFER_QUEUES; i++)
  511                 TAILQ_INIT(&bufqueues[i]);
  512 
  513         /* finally, initialize each buffer header and stick on empty q */
  514         for (i = 0; i < nbuf; i++) {
  515                 bp = &buf[i];
  516                 bzero(bp, sizeof *bp);
  517                 bp->b_flags = B_INVAL;  /* we're just an empty header */
  518                 bp->b_dev = NODEV;
  519                 bp->b_rcred = NOCRED;
  520                 bp->b_wcred = NOCRED;
  521                 bp->b_qindex = QUEUE_EMPTY;
  522                 bp->b_vflags = 0;
  523                 bp->b_xflags = 0;
  524                 LIST_INIT(&bp->b_dep);
  525                 BUF_LOCKINIT(bp);
  526                 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
  527         }
  528 
  529         /*
  530          * maxbufspace is the absolute maximum amount of buffer space we are 
  531          * allowed to reserve in KVM and in real terms.  The absolute maximum
  532          * is nominally used by buf_daemon.  hibufspace is the nominal maximum
  533          * used by most other processes.  The differential is required to 
  534          * ensure that buf_daemon is able to run when other processes might 
  535          * be blocked waiting for buffer space.
  536          *
   537  * maxbufspace is based on BKVASIZE.  Allocating buffers larger than
  538          * this may result in KVM fragmentation which is not handled optimally
  539          * by the system.
  540          */
  541         maxbufspace = nbuf * BKVASIZE;
  542         hibufspace = imax(3 * maxbufspace / 4, maxbufspace - MAXBSIZE * 10);
  543         lobufspace = hibufspace - MAXBSIZE;
  544 
  545         lorunningspace = 512 * 1024;
  546         hirunningspace = 1024 * 1024;
  547 
  548 /*
  549  * Limit the amount of malloc memory since it is wired permanently into
  550  * the kernel space.  Even though this is accounted for in the buffer
  551  * allocation, we don't want the malloced region to grow uncontrolled.
   552  * The malloc scheme improves memory utilization significantly for average
  553  * (small) directories.
  554  */
  555         maxbufmallocspace = hibufspace / 20;
  556 
  557 /*
   558  * Reduce the chance of a deadlock occurring by limiting the number
  559  * of delayed-write dirty buffers we allow to stack up.
  560  */
  561         hidirtybuffers = nbuf / 4 + 20;
  562         dirtybufthresh = hidirtybuffers * 9 / 10;
  563         numdirtybuffers = 0;
  564 /*
  565  * To support extreme low-memory systems, make sure hidirtybuffers cannot
  566  * eat up all available buffer space.  This occurs when our minimum cannot
   567  * be met.  We try to size hidirtybuffers to 3/4 of our buffer space,
   568  * assuming BKVASIZE'd buffers.
  569  */
  570         while (hidirtybuffers * BKVASIZE > 3 * hibufspace / 4) {
  571                 hidirtybuffers >>= 1;
  572         }
  573         lodirtybuffers = hidirtybuffers / 2;
  574 
  575 /*
  576  * Try to keep the number of free buffers in the specified range,
   577  * and give special processes (e.g., buf_daemon) access to an
  578  * emergency reserve.
  579  */
  580         lofreebuffers = nbuf / 18 + 5;
  581         hifreebuffers = 2 * lofreebuffers;
  582         numfreebuffers = nbuf;
  583 
  584 /*
  585  * Maximum number of async ops initiated per buf_daemon loop.  This is
  586  * somewhat of a hack at the moment, we really need to limit ourselves
  587  * based on the number of bytes of I/O in-transit that were initiated
  588  * from buf_daemon.
  589  */
  590 
  591         bogus_page = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ |
  592             VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
  593 }
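/*
 * Worked example, illustrative only: with nbuf == 2302, a 16K BKVASIZE and
 * the usual 64K MAXBSIZE, maxbufspace is about 36MB, hibufspace sits
 * MAXBSIZE * 10 below it, hidirtybuffers == 2302 / 4 + 20 == 595,
 * dirtybufthresh == 535, lodirtybuffers == 297, lofreebuffers ==
 * 2302 / 18 + 5 == 132 and hifreebuffers == 264.  Exact figures depend on
 * the tuning constants of the particular kernel.
 */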
  594 
  595 /*
  596  * bfreekva() - free the kva allocation for a buffer.
  597  *
  598  *      Must be called at splbio() or higher as this is the only locking for
  599  *      buffer_map.
  600  *
  601  *      Since this call frees up buffer space, we call bufspacewakeup().
  602  */
  603 static void
  604 bfreekva(struct buf * bp)
  605 {
  606         GIANT_REQUIRED;
  607 
  608         if (bp->b_kvasize) {
  609                 atomic_add_int(&buffreekvacnt, 1);
  610                 atomic_subtract_int(&bufspace, bp->b_kvasize);
  611                 vm_map_delete(buffer_map,
  612                     (vm_offset_t) bp->b_kvabase,
  613                     (vm_offset_t) bp->b_kvabase + bp->b_kvasize
  614                 );
  615                 bp->b_kvasize = 0;
  616                 bufspacewakeup();
  617         }
  618 }
  619 
  620 /*
  621  *      bremfree:
  622  *
  623  *      Remove the buffer from the appropriate free list.
  624  */
  625 void
  626 bremfree(struct buf * bp)
  627 {
  628         mtx_lock(&bqlock);
  629         bremfreel(bp);
  630         mtx_unlock(&bqlock);
  631 }
  632 
  633 void
  634 bremfreel(struct buf * bp)
  635 {
  636         int s = splbio();
  637         int old_qindex = bp->b_qindex;
  638 
  639         GIANT_REQUIRED;
  640 
  641         if (bp->b_qindex != QUEUE_NONE) {
  642                 KASSERT(BUF_REFCNT(bp) == 1, ("bremfree: bp %p not locked",bp));
  643                 TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
  644                 bp->b_qindex = QUEUE_NONE;
  645         } else {
  646                 if (BUF_REFCNT(bp) <= 1)
  647                         panic("bremfree: removing a buffer not on a queue");
  648         }
  649 
  650         /*
   651          * Fixup numfreebuffers count.  If the buffer is invalid or not
   652          * delayed-write and was on the CLEAN, DIRTY, EMPTY, or EMPTYKVA
   653          * queue, it was counted as free and we must decrement numfreebuffers.
  654          */
  655         if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0) {
  656                 switch(old_qindex) {
  657                 case QUEUE_DIRTY:
  658                 case QUEUE_CLEAN:
  659                 case QUEUE_EMPTY:
  660                 case QUEUE_EMPTYKVA:
  661                         atomic_subtract_int(&numfreebuffers, 1);
  662                         break;
  663                 default:
  664                         break;
  665                 }
  666         }
  667         splx(s);
  668 }
  669 
  670 
  671 /*
  672  * Get a buffer with the specified data.  Look in the cache first.  We
  673  * must clear BIO_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE
  674  * is set, the buffer is valid and we do not have to do anything ( see
  675  * getblk() ).  This is really just a special case of breadn().
  676  */
  677 int
  678 bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
  679     struct buf ** bpp)
  680 {
  681 
  682         return (breadn(vp, blkno, size, 0, 0, 0, cred, bpp));
  683 }
  684 
  685 /*
  686  * Operates like bread, but also starts asynchronous I/O on
  687  * read-ahead blocks.  We must clear BIO_ERROR and B_INVAL prior
   688  * to initiating I/O.  If B_CACHE is set, the buffer is valid
  689  * and we do not have to do anything.
  690  */
  691 int
  692 breadn(struct vnode * vp, daddr_t blkno, int size,
  693     daddr_t * rablkno, int *rabsize,
  694     int cnt, struct ucred * cred, struct buf ** bpp)
  695 {
  696         struct buf *bp, *rabp;
  697         int i;
  698         int rv = 0, readwait = 0;
  699 
  700         *bpp = bp = getblk(vp, blkno, size, 0, 0, 0);
  701 
  702         /* if not found in cache, do some I/O */
  703         if ((bp->b_flags & B_CACHE) == 0) {
  704                 if (curthread != PCPU_GET(idlethread))
  705                         curthread->td_proc->p_stats->p_ru.ru_inblock++;
  706                 bp->b_iocmd = BIO_READ;
  707                 bp->b_flags &= ~B_INVAL;
  708                 bp->b_ioflags &= ~BIO_ERROR;
  709                 if (bp->b_rcred == NOCRED && cred != NOCRED)
  710                         bp->b_rcred = crhold(cred);
  711                 vfs_busy_pages(bp, 0);
  712                 bp->b_iooffset = dbtob(bp->b_blkno);
  713                 if (vp->v_type == VCHR)
  714                         VOP_SPECSTRATEGY(vp, bp);
  715                 else
  716                         VOP_STRATEGY(vp, bp);
  717                 ++readwait;
  718         }
  719 
  720         for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
  721                 if (inmem(vp, *rablkno))
  722                         continue;
  723                 rabp = getblk(vp, *rablkno, *rabsize, 0, 0, 0);
  724 
  725                 if ((rabp->b_flags & B_CACHE) == 0) {
  726                         if (curthread != PCPU_GET(idlethread))
  727                                 curthread->td_proc->p_stats->p_ru.ru_inblock++;
  728                         rabp->b_flags |= B_ASYNC;
  729                         rabp->b_flags &= ~B_INVAL;
  730                         rabp->b_ioflags &= ~BIO_ERROR;
  731                         rabp->b_iocmd = BIO_READ;
  732                         if (rabp->b_rcred == NOCRED && cred != NOCRED)
  733                                 rabp->b_rcred = crhold(cred);
  734                         vfs_busy_pages(rabp, 0);
  735                         BUF_KERNPROC(rabp);
  736                         rabp->b_iooffset = dbtob(rabp->b_blkno);
  737                         if (vp->v_type == VCHR)
  738                                 VOP_SPECSTRATEGY(vp, rabp);
  739                         else
  740                                 VOP_STRATEGY(vp, rabp);
  741                 } else {
  742                         brelse(rabp);
  743                 }
  744         }
  745 
  746         if (readwait) {
  747                 rv = bufwait(bp);
  748         }
  749         return (rv);
  750 }
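/*
 * Illustrative sketch, not part of this file: the usual bread()/brelse()
 * calling pattern from a filesystem, per buf(9).  The function name and
 * arguments below are placeholders:
 *
 *        static int
 *        example_read_block(struct vnode *vp, daddr_t lbn, int size,
 *            struct ucred *cred)
 *        {
 *                struct buf *bp;
 *                int error;
 *
 *                error = bread(vp, lbn, size, cred, &bp);
 *                if (error) {
 *                        brelse(bp);
 *                        return (error);
 *                }
 *                ... examine or copy out bp->b_data ...
 *                brelse(bp);
 *                return (0);
 *        }
 */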
  751 
  752 /*
  753  * Write, release buffer on completion.  (Done by iodone
  754  * if async).  Do not bother writing anything if the buffer
  755  * is invalid.
  756  *
  757  * Note that we set B_CACHE here, indicating that buffer is
  758  * fully valid and thus cacheable.  This is true even of NFS
  759  * now so we set it generally.  This could be set either here 
  760  * or in biodone() since the I/O is synchronous.  We put it
  761  * here.
  762  */
  763 
  764 int
  765 bwrite(struct buf * bp)
  766 {
  767         int oldflags, s;
  768         struct buf *newbp;
  769 
  770         if (bp->b_flags & B_INVAL) {
  771                 brelse(bp);
  772                 return (0);
  773         }
  774 
  775         oldflags = bp->b_flags;
  776 
  777         if (BUF_REFCNT(bp) == 0)
  778                 panic("bwrite: buffer is not busy???");
  779         s = splbio();
  780         /*
  781          * If a background write is already in progress, delay
  782          * writing this block if it is asynchronous. Otherwise
  783          * wait for the background write to complete.
  784          */
  785         VI_LOCK(bp->b_vp);
  786         if (bp->b_vflags & BV_BKGRDINPROG) {
  787                 if (bp->b_flags & B_ASYNC) {
  788                         VI_UNLOCK(bp->b_vp);
  789                         splx(s);
  790                         bdwrite(bp);
  791                         return (0);
  792                 }
  793                 bp->b_vflags |= BV_BKGRDWAIT;
  794                 msleep(&bp->b_xflags, VI_MTX(bp->b_vp), PRIBIO, "bwrbg", 0);
  795                 if (bp->b_vflags & BV_BKGRDINPROG)
  796                         panic("bwrite: still writing");
  797         }
  798         VI_UNLOCK(bp->b_vp);
  799 
  800         /* Mark the buffer clean */
  801         bundirty(bp);
  802 
  803         /*
  804          * If this buffer is marked for background writing and we
  805          * do not have to wait for it, make a copy and write the
  806          * copy so as to leave this buffer ready for further use.
  807          *
  808          * This optimization eats a lot of memory.  If we have a page
  809          * or buffer shortfall we can't do it.
  810          */
  811         if (dobkgrdwrite && (bp->b_xflags & BX_BKGRDWRITE) && 
  812             (bp->b_flags & B_ASYNC) &&
  813             !vm_page_count_severe() &&
  814             !buf_dirty_count_severe()) {
  815                 if (bp->b_iodone != NULL) {
  816                         printf("bp->b_iodone = %p\n", bp->b_iodone);
  817                         panic("bwrite: need chained iodone");
  818                 }
  819 
  820                 /* get a new block */
  821                 newbp = geteblk(bp->b_bufsize);
  822 
  823                 /*
  824                  * set it to be identical to the old block.  We have to
  825                  * set b_lblkno and BKGRDMARKER before calling bgetvp()
  826                  * to avoid confusing the splay tree and gbincore().
  827                  */
  828                 memcpy(newbp->b_data, bp->b_data, bp->b_bufsize);
  829                 newbp->b_lblkno = bp->b_lblkno;
  830                 newbp->b_xflags |= BX_BKGRDMARKER;
  831                 VI_LOCK(bp->b_vp);
  832                 bp->b_vflags |= BV_BKGRDINPROG;
  833                 bgetvp(bp->b_vp, newbp);
  834                 VI_UNLOCK(bp->b_vp);
  835                 newbp->b_blkno = bp->b_blkno;
  836                 newbp->b_offset = bp->b_offset;
  837                 newbp->b_iodone = vfs_backgroundwritedone;
  838                 newbp->b_flags |= B_ASYNC;
  839                 newbp->b_flags &= ~B_INVAL;
  840 
  841                 /* move over the dependencies */
  842                 if (LIST_FIRST(&bp->b_dep) != NULL)
  843                         buf_movedeps(bp, newbp);
  844 
  845                 /*
  846                  * Initiate write on the copy, release the original to
  847                  * the B_LOCKED queue so that it cannot go away until
  848                  * the background write completes. If not locked it could go
  849                  * away and then be reconstituted while it was being written.
  850                  * If the reconstituted buffer were written, we could end up
  851                  * with two background copies being written at the same time.
  852                  */
  853                 bqrelse(bp);
  854                 bp = newbp;
  855         }
  856 
  857         bp->b_flags &= ~B_DONE;
  858         bp->b_ioflags &= ~BIO_ERROR;
  859         bp->b_flags |= B_WRITEINPROG | B_CACHE;
  860         bp->b_iocmd = BIO_WRITE;
  861 
  862         VI_LOCK(bp->b_vp);
  863         bp->b_vp->v_numoutput++;
  864         VI_UNLOCK(bp->b_vp);
  865         vfs_busy_pages(bp, 1);
  866 
  867         /*
  868          * Normal bwrites pipeline writes
  869          */
  870         bp->b_runningbufspace = bp->b_bufsize;
  871         atomic_add_int(&runningbufspace, bp->b_runningbufspace);
  872 
  873         if (curthread != PCPU_GET(idlethread))
  874                 curthread->td_proc->p_stats->p_ru.ru_oublock++;
  875         splx(s);
  876         if (oldflags & B_ASYNC)
  877                 BUF_KERNPROC(bp);
  878         bp->b_iooffset = dbtob(bp->b_blkno);
  879         if (bp->b_vp->v_type == VCHR)
  880                 VOP_SPECSTRATEGY(bp->b_vp, bp);
  881         else
  882                 VOP_STRATEGY(bp->b_vp, bp);
  883 
  884         if ((oldflags & B_ASYNC) == 0) {
  885                 int rtval = bufwait(bp);
  886                 brelse(bp);
  887                 return (rtval);
  888         } else {
  889                 /*
  890                  * don't allow the async write to saturate the I/O
  891                  * system.  We will not deadlock here because
  892                  * we are blocking waiting for I/O that is already in-progress
  893                  * to complete. We do not block here if it is the update
  894                  * or syncer daemon trying to clean up as that can lead
  895                  * to deadlock.
  896                  */
  897                 if (curthread->td_proc != bufdaemonproc &&
  898                     curthread->td_proc != updateproc)
  899                         waitrunningbufspace();
  900         }
  901 
  902         return (0);
  903 }
  904 
  905 /*
  906  * Complete a background write started from bwrite.
  907  */
  908 static void
  909 vfs_backgroundwritedone(bp)
  910         struct buf *bp;
  911 {
  912         struct buf *origbp;
  913 
  914         /*
  915          * Find the original buffer that we are writing.
  916          */
  917         VI_LOCK(bp->b_vp);
  918         if ((origbp = gbincore(bp->b_vp, bp->b_lblkno)) == NULL)
  919                 panic("backgroundwritedone: lost buffer");
  920 
  921         /*
  922          * Clear the BV_BKGRDINPROG flag in the original buffer
  923          * and awaken it if it is waiting for the write to complete.
  924          * If BV_BKGRDINPROG is not set in the original buffer it must
  925          * have been released and re-instantiated - which is not legal.
  926          */
  927         KASSERT((origbp->b_vflags & BV_BKGRDINPROG),
  928             ("backgroundwritedone: lost buffer2"));
  929         origbp->b_vflags &= ~BV_BKGRDINPROG;
  930         if (origbp->b_vflags & BV_BKGRDWAIT) {
  931                 origbp->b_vflags &= ~BV_BKGRDWAIT;
  932                 wakeup(&origbp->b_xflags);
  933         }
  934         VI_UNLOCK(bp->b_vp);
  935         /*
  936          * Process dependencies then return any unfinished ones.
  937          */
  938         if (LIST_FIRST(&bp->b_dep) != NULL)
  939                 buf_complete(bp);
  940         if (LIST_FIRST(&bp->b_dep) != NULL)
  941                 buf_movedeps(bp, origbp);
  942 
  943         /*
  944          * This buffer is marked B_NOCACHE, so when it is released
  945          * by biodone, it will be tossed. We mark it with BIO_READ
  946          * to avoid biodone doing a second vwakeup.
  947          */
  948         bp->b_flags |= B_NOCACHE;
  949         bp->b_iocmd = BIO_READ;
  950         bp->b_flags &= ~(B_CACHE | B_DONE);
  951         bp->b_iodone = 0;
  952         bufdone(bp);
  953 }
  954 
  955 /*
  956  * Delayed write. (Buffer is marked dirty).  Do not bother writing
  957  * anything if the buffer is marked invalid.
  958  *
  959  * Note that since the buffer must be completely valid, we can safely
   960  * set B_CACHE.  In fact, we have to set B_CACHE here rather than in
  961  * biodone() in order to prevent getblk from writing the buffer
  962  * out synchronously.
  963  */
  964 void
  965 bdwrite(struct buf * bp)
  966 {
  967         struct thread *td = curthread;
  968         struct vnode *vp;
  969         struct buf *nbp;
  970 
  971         GIANT_REQUIRED;
  972 
  973         if (BUF_REFCNT(bp) == 0)
  974                 panic("bdwrite: buffer is not busy");
  975 
  976         if (bp->b_flags & B_INVAL) {
  977                 brelse(bp);
  978                 return;
  979         }
  980 
  981         /*
  982          * If we have too many dirty buffers, don't create any more.
  983          * If we are wildly over our limit, then force a complete
  984          * cleanup. Otherwise, just keep the situation from getting
  985          * out of control. Note that we have to avoid a recursive
  986          * disaster and not try to clean up after our own cleanup!
  987          */
  988         vp = bp->b_vp;
  989         VI_LOCK(vp);
  990         if (td->td_pflags & TDP_COWINPROGRESS) {
  991                 recursiveflushes++;
  992         } else if (vp != NULL && vp->v_dirtybufcnt > dirtybufthresh + 10) {
  993                 VI_UNLOCK(vp);
  994                 (void) VOP_FSYNC(vp, td->td_ucred, MNT_NOWAIT, td);
  995                 VI_LOCK(vp);
  996                 altbufferflushes++;
  997         } else if (vp != NULL && vp->v_dirtybufcnt > dirtybufthresh) {
  998                 /*
  999                  * Try to find a buffer to flush.
 1000                  */
 1001                 TAILQ_FOREACH(nbp, &vp->v_dirtyblkhd, b_vnbufs) {
 1002                         if ((nbp->b_vflags & BV_BKGRDINPROG) ||
 1003                             buf_countdeps(nbp, 0) ||
 1004                             BUF_LOCK(nbp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
 1005                                 continue;
 1006                         if (bp == nbp)
 1007                                 panic("bdwrite: found ourselves");
 1008                         VI_UNLOCK(vp);
 1009                         if (nbp->b_flags & B_CLUSTEROK) {
 1010                                 vfs_bio_awrite(nbp);
 1011                         } else {
 1012                                 bremfree(nbp);
 1013                                 bawrite(nbp);
 1014                         }
 1015                         VI_LOCK(vp);
 1016                         dirtybufferflushes++;
 1017                         break;
 1018                 }
 1019         }
 1020         VI_UNLOCK(vp);
 1021 
 1022         bdirty(bp);
 1023         /*
 1024          * Set B_CACHE, indicating that the buffer is fully valid.  This is
 1025          * true even of NFS now.
 1026          */
 1027         bp->b_flags |= B_CACHE;
 1028 
 1029         /*
 1030          * This bmap keeps the system from needing to do the bmap later,
 1031          * perhaps when the system is attempting to do a sync.  Since it
  1032  * is likely that the indirect block -- or whatever other data structure
  1033  * the filesystem needs -- is still in memory now, it is a good
 1034          * thing to do this.  Note also, that if the pageout daemon is
 1035          * requesting a sync -- there might not be enough memory to do
 1036          * the bmap then...  So, this is important to do.
 1037          */
 1038         if (vp->v_type != VCHR && bp->b_lblkno == bp->b_blkno) {
 1039                 VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
 1040         }
 1041 
 1042         /*
 1043          * Set the *dirty* buffer range based upon the VM system dirty pages.
 1044          */
 1045         vfs_setdirty(bp);
 1046 
 1047         /*
 1048          * We need to do this here to satisfy the vnode_pager and the
 1049          * pageout daemon, so that it thinks that the pages have been
 1050          * "cleaned".  Note that since the pages are in a delayed write
 1051          * buffer -- the VFS layer "will" see that the pages get written
 1052          * out on the next sync, or perhaps the cluster will be completed.
 1053          */
 1054         vfs_clean_pages(bp);
 1055         bqrelse(bp);
 1056 
 1057         /*
 1058          * Wakeup the buffer flushing daemon if we have a lot of dirty
 1059          * buffers (midpoint between our recovery point and our stall
 1060          * point).
 1061          */
 1062         bd_wakeup((lodirtybuffers + hidirtybuffers) / 2);
 1063 
 1064         /*
 1065          * note: we cannot initiate I/O from a bdwrite even if we wanted to,
 1066          * due to the softdep code.
 1067          */
 1068 }
 1069 
 1070 /*
 1071  *      bdirty:
 1072  *
 1073  *      Turn buffer into delayed write request.  We must clear BIO_READ and
 1074  *      B_RELBUF, and we must set B_DELWRI.  We reassign the buffer to 
 1075  *      itself to properly update it in the dirty/clean lists.  We mark it
 1076  *      B_DONE to ensure that any asynchronization of the buffer properly
 1077  *      clears B_DONE ( else a panic will occur later ).  
 1078  *
 1079  *      bdirty() is kinda like bdwrite() - we have to clear B_INVAL which
 1080  *      might have been set pre-getblk().  Unlike bwrite/bdwrite, bdirty()
 1081  *      should only be called if the buffer is known-good.
 1082  *
 1083  *      Since the buffer is not on a queue, we do not update the numfreebuffers
 1084  *      count.
 1085  *
 1086  *      Must be called at splbio().
 1087  *      The buffer must be on QUEUE_NONE.
 1088  */
 1089 void
 1090 bdirty(bp)
 1091         struct buf *bp;
 1092 {
 1093         KASSERT(bp->b_qindex == QUEUE_NONE,
 1094             ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
 1095         bp->b_flags &= ~(B_RELBUF);
 1096         bp->b_iocmd = BIO_WRITE;
 1097 
 1098         if ((bp->b_flags & B_DELWRI) == 0) {
 1099                 bp->b_flags |= B_DONE | B_DELWRI;
 1100                 reassignbuf(bp, bp->b_vp);
 1101                 atomic_add_int(&numdirtybuffers, 1);
 1102                 bd_wakeup((lodirtybuffers + hidirtybuffers) / 2);
 1103         }
 1104 }
 1105 
 1106 /*
 1107  *      bundirty:
 1108  *
 1109  *      Clear B_DELWRI for buffer.
 1110  *
 1111  *      Since the buffer is not on a queue, we do not update the numfreebuffers
 1112  *      count.
 1113  *      
 1114  *      Must be called at splbio().
 1115  *      The buffer must be on QUEUE_NONE.
 1116  */
 1117 
 1118 void
 1119 bundirty(bp)
 1120         struct buf *bp;
 1121 {
 1122         KASSERT(bp->b_qindex == QUEUE_NONE,
 1123             ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex));
 1124 
 1125         if (bp->b_flags & B_DELWRI) {
 1126                 bp->b_flags &= ~B_DELWRI;
 1127                 reassignbuf(bp, bp->b_vp);
 1128                 atomic_subtract_int(&numdirtybuffers, 1);
 1129                 numdirtywakeup(lodirtybuffers);
 1130         }
 1131         /*
 1132          * Since it is now being written, we can clear its deferred write flag.
 1133          */
 1134         bp->b_flags &= ~B_DEFERRED;
 1135 }
 1136 
 1137 /*
 1138  *      bawrite:
 1139  *
 1140  *      Asynchronous write.  Start output on a buffer, but do not wait for
 1141  *      it to complete.  The buffer is released when the output completes.
 1142  *
 1143  *      bwrite() ( or the VOP routine anyway ) is responsible for handling 
 1144  *      B_INVAL buffers.  Not us.
 1145  */
 1146 void
 1147 bawrite(struct buf * bp)
 1148 {
 1149         bp->b_flags |= B_ASYNC;
 1150         (void) BUF_WRITE(bp);
 1151 }
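/*
 * Illustrative summary, not part of this file: how a caller chooses among
 * the write flavors defined above (see buf(9)):
 *
 *        error = bwrite(bp);     synchronous; waits for the I/O and
 *                                returns its error status
 *        bawrite(bp);            asynchronous; starts the I/O and the
 *                                buffer is released on completion
 *        bdwrite(bp);            delayed; marks the buffer dirty and
 *                                lets the syncer or buf_daemon write it
 */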
 1152 
 1153 /*
 1154  *      bwillwrite:
 1155  *
 1156  *      Called prior to the locking of any vnodes when we are expecting to
 1157  *      write.  We do not want to starve the buffer cache with too many
 1158  *      dirty buffers so we block here.  By blocking prior to the locking
 1159  *      of any vnodes we attempt to avoid the situation where a locked vnode
 1160  *      prevents the various system daemons from flushing related buffers.
 1161  */
 1162 
 1163 void
 1164 bwillwrite(void)
 1165 {
 1166         if (numdirtybuffers >= hidirtybuffers) {
 1167                 int s;
 1168 
 1169                 mtx_lock(&Giant);
 1170                 s = splbio();
 1171                 mtx_lock(&nblock);
 1172                 while (numdirtybuffers >= hidirtybuffers) {
 1173                         bd_wakeup(1);
 1174                         needsbuffer |= VFS_BIO_NEED_DIRTYFLUSH;
 1175                         msleep(&needsbuffer, &nblock,
 1176                             (PRIBIO + 4), "flswai", 0);
 1177                 }
 1178                 splx(s);
 1179                 mtx_unlock(&nblock);
 1180                 mtx_unlock(&Giant);
 1181         }
 1182 }
 1183 
 1184 /*
 1185  * Return true if we have too many dirty buffers.
 1186  */
 1187 int
 1188 buf_dirty_count_severe(void)
 1189 {
 1190         return(numdirtybuffers >= hidirtybuffers);
 1191 }
 1192 
 1193 /*
 1194  *      brelse:
 1195  *
 1196  *      Release a busy buffer and, if requested, free its resources.  The
 1197  *      buffer will be stashed in the appropriate bufqueue[] allowing it
 1198  *      to be accessed later as a cache entity or reused for other purposes.
 1199  */
 1200 void
 1201 brelse(struct buf * bp)
 1202 {
 1203         int s;
 1204 
 1205         GIANT_REQUIRED;
 1206 
 1207         KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
 1208             ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
 1209 
 1210         s = splbio();
 1211 
 1212         if (bp->b_iocmd == BIO_WRITE &&
 1213             (bp->b_ioflags & BIO_ERROR) &&
 1214             !(bp->b_flags & B_INVAL)) {
 1215                 /*
 1216                  * Failed write, redirty.  Must clear BIO_ERROR to prevent
 1217                  * pages from being scrapped.  If B_INVAL is set then
 1218                  * this case is not run and the next case is run to 
 1219                  * destroy the buffer.  B_INVAL can occur if the buffer
 1220                  * is outside the range supported by the underlying device.
 1221                  */
 1222                 bp->b_ioflags &= ~BIO_ERROR;
 1223                 bdirty(bp);
 1224         } else if ((bp->b_flags & (B_NOCACHE | B_INVAL)) ||
 1225             (bp->b_ioflags & BIO_ERROR) ||
 1226             bp->b_iocmd == BIO_DELETE || (bp->b_bufsize <= 0)) {
 1227                 /*
 1228                  * Either a failed I/O or we were asked to free or not
 1229                  * cache the buffer.
 1230                  */
 1231                 bp->b_flags |= B_INVAL;
 1232                 if (LIST_FIRST(&bp->b_dep) != NULL)
 1233                         buf_deallocate(bp);
 1234                 if (bp->b_flags & B_DELWRI) {
 1235                         atomic_subtract_int(&numdirtybuffers, 1);
 1236                         numdirtywakeup(lodirtybuffers);
 1237                 }
 1238                 bp->b_flags &= ~(B_DELWRI | B_CACHE);
 1239                 if ((bp->b_flags & B_VMIO) == 0) {
 1240                         if (bp->b_bufsize)
 1241                                 allocbuf(bp, 0);
 1242                         if (bp->b_vp)
 1243                                 brelvp(bp);
 1244                 }
 1245         }
 1246 
 1247         /*
 1248          * We must clear B_RELBUF if B_DELWRI is set.  If vfs_vmio_release() 
 1249          * is called with B_DELWRI set, the underlying pages may wind up
 1250          * getting freed causing a previous write (bdwrite()) to get 'lost'
 1251          * because pages associated with a B_DELWRI bp are marked clean.
 1252          * 
 1253          * We still allow the B_INVAL case to call vfs_vmio_release(), even
 1254          * if B_DELWRI is set.
 1255          *
 1256          * If B_DELWRI is not set we may have to set B_RELBUF if we are low
 1257          * on pages to return pages to the VM page queues.
 1258          */
 1259         if (bp->b_flags & B_DELWRI)
 1260                 bp->b_flags &= ~B_RELBUF;
 1261         else if (vm_page_count_severe()) {
 1262                 /*
 1263                  * XXX This lock may not be necessary since BKGRDINPROG
 1264                  * cannot be set while we hold the buf lock, it can only be
 1265                  * cleared if it is already pending.
 1266                  */
 1267                 if (bp->b_vp) {
 1268                         VI_LOCK(bp->b_vp);
 1269                         if (!(bp->b_vflags & BV_BKGRDINPROG))
 1270                                 bp->b_flags |= B_RELBUF;
 1271                         VI_UNLOCK(bp->b_vp);
 1272                 } else
 1273                         bp->b_flags |= B_RELBUF;
 1274         }
 1275 
 1276         /*
  1277          * VMIO buffer rundown.  It is not normally necessary to keep a VMIO buffer
  1278          * constituted, not even NFS buffers now.  Two flags affect this.  If
 1279          * B_INVAL, the struct buf is invalidated but the VM object is kept
 1280          * around ( i.e. so it is trivial to reconstitute the buffer later ).
 1281          *
 1282          * If BIO_ERROR or B_NOCACHE is set, pages in the VM object will be
 1283          * invalidated.  BIO_ERROR cannot be set for a failed write unless the
 1284          * buffer is also B_INVAL because it hits the re-dirtying code above.
 1285          *
 1286          * Normally we can do this whether a buffer is B_DELWRI or not.  If
 1287          * the buffer is an NFS buffer, it is tracking piecemeal writes or
 1288          * the commit state and we cannot afford to lose the buffer. If the
 1289          * buffer has a background write in progress, we need to keep it
 1290          * around to prevent it from being reconstituted and starting a second
 1291          * background write.
 1292          */
 1293         if ((bp->b_flags & B_VMIO)
 1294             && !(bp->b_vp->v_mount != NULL &&
 1295                  (bp->b_vp->v_mount->mnt_vfc->vfc_flags & VFCF_NETWORK) != 0 &&
 1296                  !vn_isdisk(bp->b_vp, NULL) &&
 1297                  (bp->b_flags & B_DELWRI))
 1298             ) {
 1299 
 1300                 int i, j, resid;
 1301                 vm_page_t m;
 1302                 off_t foff;
 1303                 vm_pindex_t poff;
 1304                 vm_object_t obj;
 1305                 struct vnode *vp;
 1306 
 1307                 vp = bp->b_vp;
 1308                 obj = bp->b_object;
 1309 
 1310                 /*
 1311                  * Get the base offset and length of the buffer.  Note that 
 1312                  * in the VMIO case if the buffer block size is not
  1313                  * page-aligned then the b_data pointer may not be page-aligned.
 1314                  * But our b_pages[] array *IS* page aligned.
 1315                  *
  1316                  * Block sizes less than DEV_BSIZE (usually 512) are not
 1317                  * supported due to the page granularity bits (m->valid,
 1318                  * m->dirty, etc...). 
 1319                  *
 1320                  * See man buf(9) for more information
 1321                  */
 1322                 resid = bp->b_bufsize;
 1323                 foff = bp->b_offset;
 1324                 VM_OBJECT_LOCK(obj);
 1325                 for (i = 0; i < bp->b_npages; i++) {
 1326                         int had_bogus = 0;
 1327 
 1328                         m = bp->b_pages[i];
 1329                         vm_page_lock_queues();
 1330                         vm_page_flag_clear(m, PG_ZERO);
 1331                         vm_page_unlock_queues();
 1332 
 1333                         /*
 1334                          * If we hit a bogus page, fixup *all* the bogus pages
 1335                          * now.
 1336                          */
 1337                         if (m == bogus_page) {
 1338                                 poff = OFF_TO_IDX(bp->b_offset);
 1339                                 had_bogus = 1;
 1340 
 1341                                 for (j = i; j < bp->b_npages; j++) {
 1342                                         vm_page_t mtmp;
 1343                                         mtmp = bp->b_pages[j];
 1344                                         if (mtmp == bogus_page) {
 1345                                                 mtmp = vm_page_lookup(obj, poff + j);
 1346                                                 if (!mtmp) {
 1347                                                         panic("brelse: page missing\n");
 1348                                                 }
 1349                                                 bp->b_pages[j] = mtmp;
 1350                                         }
 1351                                 }
 1352 
 1353                                 if ((bp->b_flags & B_INVAL) == 0) {
 1354                                         pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
 1355                                 }
 1356                                 m = bp->b_pages[i];
 1357                         }
 1358                         if ((bp->b_flags & B_NOCACHE) || (bp->b_ioflags & BIO_ERROR)) {
 1359                                 int poffset = foff & PAGE_MASK;
 1360                                 int presid = resid > (PAGE_SIZE - poffset) ?
 1361                                         (PAGE_SIZE - poffset) : resid;
 1362 
 1363                                 KASSERT(presid >= 0, ("brelse: extra page"));
 1364                                 vm_page_lock_queues();
 1365                                 vm_page_set_invalid(m, poffset, presid);
 1366                                 vm_page_unlock_queues();
 1367                                 if (had_bogus)
 1368                                         printf("avoided corruption bug in bogus_page/brelse code\n");
 1369                         }
 1370                         resid -= PAGE_SIZE - (foff & PAGE_MASK);
 1371                         foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
 1372                 }
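                      /*
                       * Editorial worked example (not part of the original source),
                       * assuming 4 KB pages: with b_offset = 0x1200 and b_bufsize = 8192,
                       * the first pass covers 4096 - 0x200 = 3584 bytes of page 0
                       * (poffset = 0x200), foff advances to 0x2000 and resid drops to
                       * 4608; the next two passes cover 4096 and then 512 bytes, so the
                       * presid clamp keeps the invalidation inside the buffer.
                       */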
 1373                 VM_OBJECT_UNLOCK(obj);
 1374                 if (bp->b_flags & (B_INVAL | B_RELBUF))
 1375                         vfs_vmio_release(bp);
 1376 
 1377         } else if (bp->b_flags & B_VMIO) {
 1378 
 1379                 if (bp->b_flags & (B_INVAL | B_RELBUF)) {
 1380                         vfs_vmio_release(bp);
 1381                 }
 1382 
 1383         }
 1384                         
 1385         if (bp->b_qindex != QUEUE_NONE)
 1386                 panic("brelse: free buffer onto another queue???");
 1387         if (BUF_REFCNT(bp) > 1) {
 1388                 /* do not release to free list */
 1389                 BUF_UNLOCK(bp);
 1390                 splx(s);
 1391                 return;
 1392         }
 1393 
 1394         /* enqueue */
 1395         mtx_lock(&bqlock);
 1396 
 1397         /* buffers with no memory */
 1398         if (bp->b_bufsize == 0) {
 1399                 bp->b_flags |= B_INVAL;
 1400                 bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA);
 1401                 if (bp->b_vflags & BV_BKGRDINPROG)
 1402                         panic("losing buffer 1");
 1403                 if (bp->b_kvasize) {
 1404                         bp->b_qindex = QUEUE_EMPTYKVA;
 1405                 } else {
 1406                         bp->b_qindex = QUEUE_EMPTY;
 1407                 }
 1408                 TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist);
 1409                 bp->b_dev = NODEV;
 1410         /* buffers with junk contents */
 1411         } else if (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF) ||
 1412             (bp->b_ioflags & BIO_ERROR)) {
 1413                 bp->b_flags |= B_INVAL;
 1414                 bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA);
 1415                 if (bp->b_vflags & BV_BKGRDINPROG)
 1416                         panic("losing buffer 2");
 1417                 bp->b_qindex = QUEUE_CLEAN;
 1418                 TAILQ_INSERT_HEAD(&bufqueues[QUEUE_CLEAN], bp, b_freelist);
 1419                 bp->b_dev = NODEV;
 1420         /* remaining buffers */
 1421         } else {
 1422                 if (bp->b_flags & B_DELWRI)
 1423                         bp->b_qindex = QUEUE_DIRTY;
 1424                 else
 1425                         bp->b_qindex = QUEUE_CLEAN;
 1426                 if (bp->b_flags & B_AGE)
 1427                         TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist);
 1428                 else
 1429                         TAILQ_INSERT_TAIL(&bufqueues[bp->b_qindex], bp, b_freelist);
 1430         }
 1431         mtx_unlock(&bqlock);
 1432 
 1433         /*
 1434          * If B_INVAL and B_DELWRI is set, clear B_DELWRI.  We have already
 1435          * placed the buffer on the correct queue.  We must also disassociate
 1436          * the device and vnode for a B_INVAL buffer so gbincore() doesn't
 1437          * find it.
 1438          */
 1439         if (bp->b_flags & B_INVAL) {
 1440                 if (bp->b_flags & B_DELWRI)
 1441                         bundirty(bp);
 1442                 if (bp->b_vp)
 1443                         brelvp(bp);
 1444         }
 1445 
 1446         /*
 1447          * Fixup numfreebuffers count.  The bp is on an appropriate queue
 1448          * unless locked.  We then bump numfreebuffers if it is not B_DELWRI.
 1449          * We've already handled the B_INVAL case ( B_DELWRI will be clear
 1450          * if B_INVAL is set ).
 1451          */
 1452 
 1453         if (!(bp->b_flags & B_DELWRI))
 1454                 bufcountwakeup();
 1455 
 1456         /*
 1457          * Something we can maybe free or reuse
 1458          */
 1459         if (bp->b_bufsize || bp->b_kvasize)
 1460                 bufspacewakeup();
 1461 
 1462         bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF | B_DIRECT);
 1463         if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
 1464                 panic("brelse: not dirty");
 1465         /* unlock */
 1466         BUF_UNLOCK(bp);
 1467         splx(s);
 1468 }
 1469 
 1470 /*
 1471  * Release a buffer back to the appropriate queue but do not try to free
 1472  * it.  The buffer is expected to be used again soon.
 1473  *
 1474  * bqrelse() is used by bdwrite() to requeue a delayed write, and used by
 1475  * biodone() to requeue an async I/O on completion.  It is also used when
 1476  * known good buffers need to be requeued but we think we may need the data
 1477  * again soon.
 1478  *
 1479  * XXX we should be able to leave the B_RELBUF hint set on completion.
 1480  */
 1481 void
 1482 bqrelse(struct buf * bp)
 1483 {
 1484         int s;
 1485 
 1486         s = splbio();
 1487 
 1488         KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)), ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
 1489 
 1490         if (bp->b_qindex != QUEUE_NONE)
 1491                 panic("bqrelse: free buffer onto another queue???");
 1492         if (BUF_REFCNT(bp) > 1) {
 1493                 /* do not release to free list */
 1494                 BUF_UNLOCK(bp);
 1495                 splx(s);
 1496                 return;
 1497         }
 1498         mtx_lock(&bqlock);
 1499         /* buffers with stale but valid contents */
 1500         if (bp->b_flags & B_DELWRI) {
 1501                 bp->b_qindex = QUEUE_DIRTY;
 1502                 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_DIRTY], bp, b_freelist);
 1503         } else {
 1504                 /*
 1505                  * XXX This lock may not be necessary since BKGRDINPROG
 1506                  * cannot be set while we hold the buf lock, it can only be
 1507                  * cleared if it is already pending.
 1508                  */
 1509                 VI_LOCK(bp->b_vp);
 1510                 if (!vm_page_count_severe() || bp->b_vflags & BV_BKGRDINPROG) {
 1511                         VI_UNLOCK(bp->b_vp);
 1512                         bp->b_qindex = QUEUE_CLEAN;
 1513                         TAILQ_INSERT_TAIL(&bufqueues[QUEUE_CLEAN], bp,
 1514                             b_freelist);
 1515                 } else {
 1516                         /*
 1517                          * We are too low on memory, we have to try to free
 1518                          * the buffer (most importantly: the wired pages
 1519                          * making up its backing store) *now*.
 1520                          */
 1521                         VI_UNLOCK(bp->b_vp);
 1522                         mtx_unlock(&bqlock);
 1523                         splx(s);
 1524                         brelse(bp);
 1525                         return;
 1526                 }
 1527         }
 1528         mtx_unlock(&bqlock);
 1529 
 1530         if ((bp->b_flags & B_INVAL) || !(bp->b_flags & B_DELWRI))
 1531                 bufcountwakeup();
 1532 
 1533         /*
 1534          * Something we can maybe free or reuse.
 1535          */
 1536         if (bp->b_bufsize && !(bp->b_flags & B_DELWRI))
 1537                 bufspacewakeup();
 1538 
 1539         bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
 1540         if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
 1541                 panic("bqrelse: not dirty");
 1542         /* unlock */
 1543         BUF_UNLOCK(bp);
 1544         splx(s);
 1545 }
 1546 
 1547 /* Give pages used by the bp back to the VM system (where possible) */
 1548 static void
 1549 vfs_vmio_release(bp)
 1550         struct buf *bp;
 1551 {
 1552         int i;
 1553         vm_page_t m;
 1554 
 1555         GIANT_REQUIRED;
 1556         VM_OBJECT_LOCK(bp->b_object);
 1557         vm_page_lock_queues();
 1558         for (i = 0; i < bp->b_npages; i++) {
 1559                 m = bp->b_pages[i];
 1560                 bp->b_pages[i] = NULL;
 1561                 /*
 1562                  * In order to keep page LRU ordering consistent, put
 1563                  * everything on the inactive queue.
 1564                  */
 1565                 vm_page_unwire(m, 0);
 1566                 /*
 1567                  * We don't mess with busy pages, it is
 1568                  * the responsibility of the process that
 1569                  * busied the pages to deal with them.
 1570                  */
 1571                 if ((m->flags & PG_BUSY) || (m->busy != 0))
 1572                         continue;
 1573                         
 1574                 if (m->wire_count == 0) {
 1575                         vm_page_flag_clear(m, PG_ZERO);
 1576                         /*
 1577                          * Might as well free the page if we can and it has
 1578                          * no valid data.  We also free the page if the
 1579                          * buffer was used for direct I/O
 1580                          */
 1581                         if ((bp->b_flags & B_ASYNC) == 0 && !m->valid &&
 1582                             m->hold_count == 0) {
 1583                                 vm_page_busy(m);
 1584                                 pmap_remove_all(m);
 1585                                 vm_page_free(m);
 1586                         } else if (bp->b_flags & B_DIRECT) {
 1587                                 vm_page_try_to_free(m);
 1588                         } else if (vm_page_count_severe()) {
 1589                                 vm_page_try_to_cache(m);
 1590                         }
 1591                 }
 1592         }
 1593         vm_page_unlock_queues();
 1594         VM_OBJECT_UNLOCK(bp->b_object);
 1595         pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
 1596         
 1597         if (bp->b_bufsize) {
 1598                 bufspacewakeup();
 1599                 bp->b_bufsize = 0;
 1600         }
 1601         bp->b_npages = 0;
 1602         bp->b_flags &= ~B_VMIO;
 1603         if (bp->b_vp)
 1604                 brelvp(bp);
 1605 }
 1606 
 1607 /*
 1608  * Check to see if a block at a particular lbn is available for a clustered
 1609  * write.
 1610  */
 1611 static int
 1612 vfs_bio_clcheck(struct vnode *vp, int size, daddr_t lblkno, daddr_t blkno)
 1613 {
 1614         struct buf *bpa;
 1615         int match;
 1616 
 1617         match = 0;
 1618 
 1619         /* If the buf isn't in core skip it */
 1620         if ((bpa = gbincore(vp, lblkno)) == NULL)
 1621                 return (0);
 1622 
 1623         /* If the buf is busy we don't want to wait for it */
 1624         if (BUF_LOCK(bpa, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
 1625                 return (0);
 1626 
 1627         /* Only cluster with valid clusterable delayed write buffers */
 1628         if ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) !=
 1629             (B_DELWRI | B_CLUSTEROK))
 1630                 goto done;
 1631 
 1632         if (bpa->b_bufsize != size)
 1633                 goto done;
 1634 
 1635         /*
 1636          * Check that the block has been mapped (b_blkno != b_lblkno) and
 1637          * that it is in the expected place on disk.
 1638          */
 1639         if ((bpa->b_blkno != bpa->b_lblkno) && (bpa->b_blkno == blkno))
 1640                 match = 1;
 1641 done:
 1642         BUF_UNLOCK(bpa);
 1643         return (match);
 1644 }
 1645 
 1646 /*
 1647  *      vfs_bio_awrite:
 1648  *
 1649  *      Implement clustered async writes for clearing out B_DELWRI buffers.
 1650  *      This is much better than the old way of writing only one buffer at
 1651  *      a time.  Note that we may not be presented with the buffers in the 
 1652  *      correct order, so we search for the cluster in both directions.
 1653  */
 1654 int
 1655 vfs_bio_awrite(struct buf * bp)
 1656 {
 1657         int i;
 1658         int j;
 1659         daddr_t lblkno = bp->b_lblkno;
 1660         struct vnode *vp = bp->b_vp;
 1661         int s;
 1662         int ncl;
 1663         int nwritten;
 1664         int size;
 1665         int maxcl;
 1666 
 1667         s = splbio();
 1668         /*
 1669          * right now we support clustered writing only to regular files.  If
 1670          * we find a clusterable block we could be in the middle of a cluster
 1671          * rather then at the beginning.
 1672          * rather than at the beginning.
 1673         if ((vp->v_type == VREG) && 
 1674             (vp->v_mount != 0) && /* Only on nodes that have the size info */
 1675             (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
 1676 
 1677                 size = vp->v_mount->mnt_stat.f_iosize;
 1678                 maxcl = MAXPHYS / size;
 1679 
 1680                 VI_LOCK(vp);
 1681                 for (i = 1; i < maxcl; i++)
 1682                         if (vfs_bio_clcheck(vp, size, lblkno + i,
 1683                             bp->b_blkno + ((i * size) >> DEV_BSHIFT)) == 0)
 1684                                 break;
 1685 
 1686                 for (j = 1; i + j <= maxcl && j <= lblkno; j++) 
 1687                         if (vfs_bio_clcheck(vp, size, lblkno - j,
 1688                             bp->b_blkno - ((j * size) >> DEV_BSHIFT)) == 0)
 1689                                 break;
 1690 
 1691                 VI_UNLOCK(vp);
 1692                 --j;
 1693                 ncl = i + j;
 1694                 /*
 1695                  * this is a possible cluster write
 1696                  */
 1697                 if (ncl != 1) {
 1698                         BUF_UNLOCK(bp);
 1699                         nwritten = cluster_wbuild(vp, size, lblkno - j, ncl);
 1700                         splx(s);
 1701                         return nwritten;
 1702                 }
 1703         }
 1704 
 1705         bremfree(bp);
 1706         bp->b_flags |= B_ASYNC;
 1707 
 1708         splx(s);
 1709         /*
 1710          * default (old) behavior, writing out only one block
 1711          *
 1712          * XXX returns b_bufsize instead of b_bcount for nwritten?
 1713          */
 1714         nwritten = bp->b_bufsize;
 1715         (void) BUF_WRITE(bp);
 1716 
 1717         return nwritten;
 1718 }
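      /*
       * Editorial sketch (not part of the original vfs_bio.c): the candidate-block
       * arithmetic used by the two scans in vfs_bio_awrite() above.  A filesystem
       * block of "size" bytes spans (size >> DEV_BSHIFT) device sectors, so the
       * buffer that is "i" logical blocks ahead of bp should start at
       * b_blkno + ((i * size) >> DEV_BSHIFT); the backward scan subtracts the same
       * quantity.  The helper name below is hypothetical and shows only the
       * forward case; with size = 16384 and DEV_BSHIFT = 9 it advances 32 sectors
       * per logical block.
       */
      #if 0
      static daddr_t
      cluster_forward_blkno(daddr_t blkno, int size, int i)
      {
              return (blkno + (((daddr_t)i * size) >> DEV_BSHIFT));
      }
      #endif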
 1719 
 1720 /*
 1721  *      getnewbuf:
 1722  *
 1723  *      Find and initialize a new buffer header, freeing up existing buffers 
 1724  *      in the bufqueues as necessary.  The new buffer is returned locked.
 1725  *
 1726  *      Important:  B_INVAL is not set.  If the caller wishes to throw the
 1727  *      buffer away, the caller must set B_INVAL prior to calling brelse().
 1728  *
 1729  *      We block if:
 1730  *              We have insufficient buffer headers
 1731  *              We have insufficient buffer space
 1732  *              buffer_map is too fragmented ( space reservation fails )
 1733  *              If we have to flush dirty buffers ( but we try to avoid this )
 1734  *
 1735  *      To avoid VFS layer recursion we do not flush dirty buffers ourselves.
 1736  *      Instead we ask the buf daemon to do it for us.  We attempt to
 1737  *      avoid piecemeal wakeups of the pageout daemon.
 1738  */
 1739 
 1740 static struct buf *
 1741 getnewbuf(int slpflag, int slptimeo, int size, int maxsize)
 1742 {
 1743         struct buf *bp;
 1744         struct buf *nbp;
 1745         int defrag = 0;
 1746         int nqindex;
 1747         static int flushingbufs;
 1748 
 1749         GIANT_REQUIRED;
 1750 
 1751         /*
 1752          * We can't afford to block since we might be holding a vnode lock,
 1753          * which may prevent system daemons from running.  We deal with
 1754          * low-memory situations by proactively returning memory and running
 1755          * async I/O rather than sync I/O.
 1756          */
 1757 
 1758         atomic_add_int(&getnewbufcalls, 1);
 1759         atomic_subtract_int(&getnewbufrestarts, 1);
 1760 restart:
 1761         atomic_add_int(&getnewbufrestarts, 1);
 1762 
 1763         /*
 1764          * Setup for scan.  If we do not have enough free buffers,
 1765          * we set up a degenerate case that immediately fails.  Note
 1766          * that if we are a specially marked process, we are allowed to
 1767          * dip into our reserves.
 1768          *
 1769          * The scanning sequence is nominally:  EMPTY->EMPTYKVA->CLEAN
 1770          *
 1771          * We start with EMPTYKVA.  If the list is empty we backup to EMPTY.
 1772          * However, there are a number of cases (defragging, reusing, ...)
 1773          * where we cannot back up.
 1774          */
 1775         mtx_lock(&bqlock);
 1776         nqindex = QUEUE_EMPTYKVA;
 1777         nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTYKVA]);
 1778 
 1779         if (nbp == NULL) {
 1780                 /*
 1781                  * If no EMPTYKVA buffers and we are either
 1782                  * defragging or reusing, locate a CLEAN buffer
 1783                  * to free or reuse.  If bufspace usage is low
 1784                  * skip this step so we can allocate a new buffer.
 1785                  */
 1786                 if (defrag || bufspace >= lobufspace) {
 1787                         nqindex = QUEUE_CLEAN;
 1788                         nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN]);
 1789                 }
 1790 
 1791                 /*
 1792                  * If we could not find or were not allowed to reuse a
 1793                  * CLEAN buffer, check to see if it is ok to use an EMPTY
 1794                  * buffer.  We can only use an EMPTY buffer if allocating
 1795                  * its KVA would not otherwise run us out of buffer space.
 1796                  */
 1797                 if (nbp == NULL && defrag == 0 &&
 1798                     bufspace + maxsize < hibufspace) {
 1799                         nqindex = QUEUE_EMPTY;
 1800                         nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]);
 1801                 }
 1802         }
 1803 
 1804         /*
 1805          * Run scan, possibly freeing data and/or kva mappings on the fly
 1806          * depending.
 1807          */
 1808 
 1809         while ((bp = nbp) != NULL) {
 1810                 int qindex = nqindex;
 1811 
 1812                 /*
 1813                  * Calculate next bp ( we can only use it if we do not block
 1814                  * or do other fancy things ).
 1815                  */
 1816                 if ((nbp = TAILQ_NEXT(bp, b_freelist)) == NULL) {
 1817                         switch(qindex) {
 1818                         case QUEUE_EMPTY:
 1819                                 nqindex = QUEUE_EMPTYKVA;
 1820                                 if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTYKVA])))
 1821                                         break;
 1822                                 /* FALLTHROUGH */
 1823                         case QUEUE_EMPTYKVA:
 1824                                 nqindex = QUEUE_CLEAN;
 1825                                 if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN])))
 1826                                         break;
 1827                                 /* FALLTHROUGH */
 1828                         case QUEUE_CLEAN:
 1829                                 /*
 1830                                  * nbp is NULL. 
 1831                                  */
 1832                                 break;
 1833                         }
 1834                 }
 1835                 if (bp->b_vp) {
 1836                         VI_LOCK(bp->b_vp);
 1837                         if (bp->b_vflags & BV_BKGRDINPROG) {
 1838                                 VI_UNLOCK(bp->b_vp);
 1839                                 continue;
 1840                         }
 1841                         VI_UNLOCK(bp->b_vp);
 1842                 }
 1843 
 1844                 /*
 1845                  * Sanity Checks
 1846                  */
 1847                 KASSERT(bp->b_qindex == qindex, ("getnewbuf: inconsistent queue %d bp %p", qindex, bp));
 1848 
 1849                 /*
 1850                  * Note: we no longer distinguish between VMIO and non-VMIO
 1851                  * buffers.
 1852                  */
 1853 
 1854                 KASSERT((bp->b_flags & B_DELWRI) == 0, ("delwri buffer %p found in queue %d", bp, qindex));
 1855 
 1856                 /*
 1857                  * If we are defragging then we need a buffer with 
 1858                  * b_kvasize != 0.  XXX this situation should no longer
 1859                  * occur, if defrag is non-zero the buffer's b_kvasize
 1860                  * should also be non-zero at this point.  XXX
 1861                  */
 1862                 if (defrag && bp->b_kvasize == 0) {
 1863                         printf("Warning: defrag empty buffer %p\n", bp);
 1864                         continue;
 1865                 }
 1866 
 1867                 /*
 1868                  * Start freeing the bp.  This is somewhat involved.  nbp
 1869                  * remains valid only for QUEUE_EMPTY[KVA] bp's.
 1870                  */
 1871 
 1872                 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
 1873                         panic("getnewbuf: locked buf");
 1874                 bremfreel(bp);
 1875                 mtx_unlock(&bqlock);
 1876 
 1877                 if (qindex == QUEUE_CLEAN) {
 1878                         if (bp->b_flags & B_VMIO) {
 1879                                 bp->b_flags &= ~B_ASYNC;
 1880                                 vfs_vmio_release(bp);
 1881                         }
 1882                         if (bp->b_vp)
 1883                                 brelvp(bp);
 1884                 }
 1885 
 1886                 /*
 1887                  * NOTE:  nbp is now entirely invalid.  We can only restart
 1888                  * the scan from this point on.
 1889                  *
 1890                  * Get the rest of the buffer freed up.  b_kva* is still
 1891                  * valid after this operation.
 1892                  */
 1893 
 1894                 if (bp->b_rcred != NOCRED) {
 1895                         crfree(bp->b_rcred);
 1896                         bp->b_rcred = NOCRED;
 1897                 }
 1898                 if (bp->b_wcred != NOCRED) {
 1899                         crfree(bp->b_wcred);
 1900                         bp->b_wcred = NOCRED;
 1901                 }
 1902                 if (LIST_FIRST(&bp->b_dep) != NULL)
 1903                         buf_deallocate(bp);
 1904                 if (bp->b_vflags & BV_BKGRDINPROG)
 1905                         panic("losing buffer 3");
 1906 
 1907                 if (bp->b_bufsize)
 1908                         allocbuf(bp, 0);
 1909 
 1910                 bp->b_flags = 0;
 1911                 bp->b_ioflags = 0;
 1912                 bp->b_xflags = 0;
 1913                 bp->b_vflags = 0;
 1914                 bp->b_dev = NODEV;
 1915                 bp->b_vp = NULL;
 1916                 bp->b_blkno = bp->b_lblkno = 0;
 1917                 bp->b_offset = NOOFFSET;
 1918                 bp->b_iodone = 0;
 1919                 bp->b_error = 0;
 1920                 bp->b_resid = 0;
 1921                 bp->b_bcount = 0;
 1922                 bp->b_npages = 0;
 1923                 bp->b_dirtyoff = bp->b_dirtyend = 0;
 1924                 bp->b_magic = B_MAGIC_BIO;
 1925                 bp->b_op = &buf_ops_bio;
 1926                 bp->b_object = NULL;
 1927 
 1928                 LIST_INIT(&bp->b_dep);
 1929 
 1930                 /*
 1931                  * If we are defragging then free the buffer.
 1932                  */
 1933                 if (defrag) {
 1934                         bp->b_flags |= B_INVAL;
 1935                         bfreekva(bp);
 1936                         brelse(bp);
 1937                         defrag = 0;
 1938                         goto restart;
 1939                 }
 1940 
 1941                 /*
 1942          * If we are overcommitted then recover the buffer and its
 1943                  * KVM space.  This occurs in rare situations when multiple
 1944                  * processes are blocked in getnewbuf() or allocbuf().
 1945                  */
 1946                 if (bufspace >= hibufspace)
 1947                         flushingbufs = 1;
 1948                 if (flushingbufs && bp->b_kvasize != 0) {
 1949                         bp->b_flags |= B_INVAL;
 1950                         bfreekva(bp);
 1951                         brelse(bp);
 1952                         goto restart;
 1953                 }
 1954                 if (bufspace < lobufspace)
 1955                         flushingbufs = 0;
 1956                 break;
 1957         }
 1958 
 1959         /*
 1960          * If we exhausted our list, sleep as appropriate.  We may have to
 1961          * wakeup various daemons and write out some dirty buffers.
 1962          *
 1963          * Generally we are sleeping due to insufficient buffer space.
 1964          */
 1965 
 1966         if (bp == NULL) {
 1967                 int flags;
 1968                 char *waitmsg;
 1969 
 1970                 mtx_unlock(&bqlock);
 1971                 if (defrag) {
 1972                         flags = VFS_BIO_NEED_BUFSPACE;
 1973                         waitmsg = "nbufkv";
 1974                 } else if (bufspace >= hibufspace) {
 1975                         waitmsg = "nbufbs";
 1976                         flags = VFS_BIO_NEED_BUFSPACE;
 1977                 } else {
 1978                         waitmsg = "newbuf";
 1979                         flags = VFS_BIO_NEED_ANY;
 1980                 }
 1981 
 1982                 bd_speedup();   /* heeeelp */
 1983 
 1984                 mtx_lock(&nblock);
 1985                 needsbuffer |= flags;
 1986                 while (needsbuffer & flags) {
 1987                         if (msleep(&needsbuffer, &nblock,
 1988                             (PRIBIO + 4) | slpflag, waitmsg, slptimeo)) {
 1989                                 mtx_unlock(&nblock);
 1990                                 return (NULL);
 1991                         }
 1992                 }
 1993                 mtx_unlock(&nblock);
 1994         } else {
 1995                 /*
 1996                  * We finally have a valid bp.  We aren't quite out of the
 1997                  * woods, we still have to reserve kva space.  In order
 1998                  * to keep fragmentation sane we only allocate kva in
 1999                  * BKVASIZE chunks.
 2000                  */
 2001                 maxsize = (maxsize + BKVAMASK) & ~BKVAMASK;
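                      /*
                       * Editorial note (not part of the original source): BKVAMASK is
                       * BKVASIZE - 1, so this rounds maxsize up to the next multiple of
                       * BKVASIZE.  With the typical BKVASIZE of 16384, for example, a
                       * 10000-byte request reserves 16384 bytes of KVA, which keeps
                       * buffer_map fragmentation manageable.
                       */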
 2002 
 2003                 if (maxsize != bp->b_kvasize) {
 2004                         vm_offset_t addr = 0;
 2005 
 2006                         bfreekva(bp);
 2007 
 2008                         if (vm_map_findspace(buffer_map,
 2009                                 vm_map_min(buffer_map), maxsize, &addr)) {
 2010                                 /*
 2011                                  * Uh oh.  Buffer map is too fragmented.  We
 2012                                  * must defragment the map.
 2013                                  */
 2014                                 atomic_add_int(&bufdefragcnt, 1);
 2015                                 defrag = 1;
 2016                                 bp->b_flags |= B_INVAL;
 2017                                 brelse(bp);
 2018                                 goto restart;
 2019                         }
 2020                         if (addr) {
 2021                                 vm_map_insert(buffer_map, NULL, 0,
 2022                                         addr, addr + maxsize,
 2023                                         VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
 2024 
 2025                                 bp->b_kvabase = (caddr_t) addr;
 2026                                 bp->b_kvasize = maxsize;
 2027                                 atomic_add_int(&bufspace, bp->b_kvasize);
 2028                                 atomic_add_int(&bufreusecnt, 1);
 2029                         }
 2030                 }
 2031                 bp->b_saveaddr = bp->b_kvabase;
 2032                 bp->b_data = bp->b_saveaddr;
 2033         }
 2034         return(bp);
 2035 }
 2036 
 2037 /*
 2038  *      buf_daemon:
 2039  *
 2040  *      buffer flushing daemon.  Buffers are normally flushed by the
 2041  *      update daemon but if it cannot keep up this process starts to
 2042  *      take the load in an attempt to prevent getnewbuf() from blocking.
 2043  */
 2044 
 2045 static struct kproc_desc buf_kp = {
 2046         "bufdaemon",
 2047         buf_daemon,
 2048         &bufdaemonproc
 2049 };
 2050 SYSINIT(bufdaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, kproc_start, &buf_kp)
 2051 
 2052 static void
 2053 buf_daemon()
 2054 {
 2055         int s;
 2056 
 2057         mtx_lock(&Giant);
 2058 
 2059         /*
 2060          * This process needs to be suspended prior to shutdown sync.
 2061          */
 2062         EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, bufdaemonproc,
 2063             SHUTDOWN_PRI_LAST);
 2064 
 2065         /*
 2066          * This process is allowed to take the buffer cache to the limit
 2067          */
 2068         s = splbio();
 2069         mtx_lock(&bdlock);
 2070 
 2071         for (;;) {
 2072                 bd_request = 0;
 2073                 mtx_unlock(&bdlock);
 2074 
 2075                 kthread_suspend_check(bufdaemonproc);
 2076 
 2077                 /*
 2078                  * Do the flush.  Limit the amount of in-transit I/O we
 2079                  * allow to build up, otherwise we would completely saturate
 2080                  * the I/O system.  Wakeup any waiting processes before we
 2081                  * normally would so they can run in parallel with our drain.
 2082                  */
 2083                 while (numdirtybuffers > lodirtybuffers) {
 2084                         if (flushbufqueues(0) == 0) {
 2085                                 /*
 2086                                  * Could not find any buffers without rollback
 2087                                  * dependencies, so just write the first one
 2088                                  * in the hopes of eventually making progress.
 2089                                  */
 2090                                 flushbufqueues(1);
 2091                                 break;
 2092                         }
 2093                         waitrunningbufspace();
 2094                         numdirtywakeup((lodirtybuffers + hidirtybuffers) / 2);
 2095                 }
 2096 
 2097                 /*
 2098                  * Only clear bd_request if we have reached our low water
 2099                  * mark.  The buf_daemon normally waits 1 second and
 2100                  * then incrementally flushes any dirty buffers that have
 2101                  * built up, within reason.
 2102                  *
 2103                  * If we were unable to hit our low water mark and couldn't
 2104          * find any flushable buffers, we sleep briefly (hz / 10).
 2105                  * Otherwise we loop immediately.
 2106                  */
 2107                 mtx_lock(&bdlock);
 2108                 if (numdirtybuffers <= lodirtybuffers) {
 2109                         /*
 2110                          * We reached our low water mark, reset the
 2111                          * request and sleep until we are needed again.
 2112                          * The sleep is just so the suspend code works.
 2113                          */
 2114                         bd_request = 0;
 2115                         msleep(&bd_request, &bdlock, PVM, "psleep", hz);
 2116                 } else {
 2117                         /*
 2118                          * We couldn't find any flushable dirty buffers but
 2119                          * still have too many dirty buffers, so we
 2120                          * have to sleep and try again.  (rare)
 2121                          */
 2122                         msleep(&bd_request, &bdlock, PVM, "qsleep", hz / 10);
 2123                 }
 2124         }
 2125 }
 2126 
 2127 /*
 2128  *      flushbufqueues:
 2129  *
 2130  *      Try to flush a buffer in the dirty queue.  We must be careful to
 2131  *      free up B_INVAL buffers instead of writing them, which NFS is
 2132  *      particularly sensitive to.
 2133  */
 2134 int flushwithdeps = 0;
 2135 SYSCTL_INT(_vfs, OID_AUTO, flushwithdeps, CTLFLAG_RW, &flushwithdeps,
 2136     0, "Number of buffers flushed with dependencies that require rollbacks");
 2137 static int
 2138 flushbufqueues(int flushdeps)
 2139 {
 2140         struct thread *td = curthread;
 2141         struct vnode *vp;
 2142         struct mount *mp;
 2143         struct buf *bp;
 2144         int hasdeps;
 2145 
 2146         mtx_lock(&bqlock);
 2147         TAILQ_FOREACH(bp, &bufqueues[QUEUE_DIRTY], b_freelist) {
 2148                 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
 2149                         continue;
 2150                 KASSERT((bp->b_flags & B_DELWRI),
 2151                     ("unexpected clean buffer %p", bp));
 2152                 VI_LOCK(bp->b_vp);
 2153                 if ((bp->b_vflags & BV_BKGRDINPROG) != 0) {
 2154                         VI_UNLOCK(bp->b_vp);
 2155                         BUF_UNLOCK(bp);
 2156                         continue;
 2157                 }
 2158                 VI_UNLOCK(bp->b_vp);
 2159                 if (bp->b_flags & B_INVAL) {
 2160                         bremfreel(bp);
 2161                         mtx_unlock(&bqlock);
 2162                         brelse(bp);
 2163                         return (1);
 2164                 }
 2165 
 2166                 if (LIST_FIRST(&bp->b_dep) != NULL && buf_countdeps(bp, 0)) {
 2167                         if (flushdeps == 0) {
 2168                                 BUF_UNLOCK(bp);
 2169                                 continue;
 2170                         }
 2171                         hasdeps = 1;
 2172                 } else
 2173                         hasdeps = 0;
 2174                 /*
 2175                  * We must hold the lock on a vnode before writing
 2176                  * one of its buffers. Otherwise we may confuse, or
 2177                  * in the case of a snapshot vnode, deadlock the
 2178                  * system.
 2179                  *
 2180                  * The lock order here is the reverse of the normal order
 2181                  * (vnode lock followed by buf lock).  This is ok because
 2182                  * the NOWAIT will prevent deadlock.
 2183                  */
 2184                 vp = bp->b_vp;
 2185                 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
 2186                         BUF_UNLOCK(bp);
 2187                         continue;
 2188                 }
 2189                 if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT, td) == 0) {
 2190                         mtx_unlock(&bqlock);
 2191                         vfs_bio_awrite(bp);
 2192                         vn_finished_write(mp);
 2193                         VOP_UNLOCK(vp, 0, td);
 2194                         flushwithdeps += hasdeps;
 2195                         return (1);
 2196                 }
 2197                 vn_finished_write(mp);
 2198                 BUF_UNLOCK(bp);
 2199         }
 2200         mtx_unlock(&bqlock);
 2201         return (0);
 2202 }
 2203 
 2204 /*
 2205  * Check to see if a block is currently memory resident.
 2206  */
 2207 struct buf *
 2208 incore(struct vnode * vp, daddr_t blkno)
 2209 {
 2210         struct buf *bp;
 2211 
 2212         int s = splbio();
 2213         VI_LOCK(vp);
 2214         bp = gbincore(vp, blkno);
 2215         VI_UNLOCK(vp);
 2216         splx(s);
 2217         return (bp);
 2218 }
 2219 
 2220 /*
 2221  * Returns true if no I/O is needed to access the
 2222  * associated VM object.  This is like incore except
 2223  * it also hunts around in the VM system for the data.
 2224  */
 2225 
 2226 int
 2227 inmem(struct vnode * vp, daddr_t blkno)
 2228 {
 2229         vm_object_t obj;
 2230         vm_offset_t toff, tinc, size;
 2231         vm_page_t m;
 2232         vm_ooffset_t off;
 2233 
 2234         GIANT_REQUIRED;
 2235         ASSERT_VOP_LOCKED(vp, "inmem");
 2236 
 2237         if (incore(vp, blkno))
 2238                 return 1;
 2239         if (vp->v_mount == NULL)
 2240                 return 0;
 2241         if (VOP_GETVOBJECT(vp, &obj) != 0 || (vp->v_vflag & VV_OBJBUF) == 0)
 2242                 return 0;
 2243 
 2244         size = PAGE_SIZE;
 2245         if (size > vp->v_mount->mnt_stat.f_iosize)
 2246                 size = vp->v_mount->mnt_stat.f_iosize;
 2247         off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;
 2248 
 2249         VM_OBJECT_LOCK(obj);
 2250         for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
 2251                 m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
 2252                 if (!m)
 2253                         goto notinmem;
 2254                 tinc = size;
 2255                 if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK))
 2256                         tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK);
 2257                 if (vm_page_is_valid(m,
 2258                     (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
 2259                         goto notinmem;
 2260         }
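              /*
               * Editorial worked example (not part of the original source), assuming
               * 4 KB pages: with f_iosize = 16384 and blkno = 5, off is 81920 and the
               * loop above makes four passes, each checking a whole page
               * (tinc == PAGE_SIZE).  With f_iosize = 2048, size is clamped to 2048
               * and a single pass checks only the 2048-byte slice of the page
               * starting at (off & PAGE_MASK).
               */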
 2261         VM_OBJECT_UNLOCK(obj);
 2262         return 1;
 2263 
 2264 notinmem:
 2265         VM_OBJECT_UNLOCK(obj);
 2266         return (0);
 2267 }
 2268 
 2269 /*
 2270  *      vfs_setdirty:
 2271  *
 2272  *      Sets the dirty range for a buffer based on the status of the dirty
 2273  *      bits in the pages comprising the buffer.
 2274  *
 2275  *      The range is limited to the size of the buffer.
 2276  *
 2277  *      This routine is primarily used by NFS, but is generalized for the
 2278  *      B_VMIO case.
 2279  */
 2280 static void
 2281 vfs_setdirty(struct buf *bp) 
 2282 {
 2283         int i;
 2284         vm_object_t object;
 2285 
 2286         GIANT_REQUIRED;
 2287         /*
 2288          * Degenerate case - empty buffer
 2289          */
 2290 
 2291         if (bp->b_bufsize == 0)
 2292                 return;
 2293 
 2294         /*
 2295          * We qualify the scan for modified pages on whether the
 2296          * object has been flushed yet.  The OBJ_WRITEABLE flag
 2297          * is not cleared simply by protecting pages off.
 2298          */
 2299 
 2300         if ((bp->b_flags & B_VMIO) == 0)
 2301                 return;
 2302 
 2303         object = bp->b_pages[0]->object;
 2304         VM_OBJECT_LOCK(object);
 2305         if ((object->flags & OBJ_WRITEABLE) && !(object->flags & OBJ_MIGHTBEDIRTY))
 2306                 printf("Warning: object %p writeable but not mightbedirty\n", object);
 2307         if (!(object->flags & OBJ_WRITEABLE) && (object->flags & OBJ_MIGHTBEDIRTY))
 2308                 printf("Warning: object %p mightbedirty but not writeable\n", object);
 2309 
 2310         if (object->flags & (OBJ_MIGHTBEDIRTY|OBJ_CLEANING)) {
 2311                 vm_offset_t boffset;
 2312                 vm_offset_t eoffset;
 2313 
 2314                 vm_page_lock_queues();
 2315                 /*
 2316                  * test the pages to see if they have been modified directly
 2317                  * by users through the VM system.
 2318                  */
 2319                 for (i = 0; i < bp->b_npages; i++) {
 2320                         vm_page_flag_clear(bp->b_pages[i], PG_ZERO);
 2321                         vm_page_test_dirty(bp->b_pages[i]);
 2322                 }
 2323 
 2324                 /*
 2325                  * Calculate the encompassing dirty range, boffset and eoffset,
 2326                  * (eoffset - boffset) bytes.
 2327                  */
 2328 
 2329                 for (i = 0; i < bp->b_npages; i++) {
 2330                         if (bp->b_pages[i]->dirty)
 2331                                 break;
 2332                 }
 2333                 boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
 2334 
 2335                 for (i = bp->b_npages - 1; i >= 0; --i) {
 2336                         if (bp->b_pages[i]->dirty) {
 2337                                 break;
 2338                         }
 2339                 }
 2340                 eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
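                      /*
                       * Editorial worked example (not part of the original source),
                       * assuming 4 KB pages and (b_offset & PAGE_MASK) == 512: if the
                       * first dirty page is i == 1 and the last is i == 2, then
                       * boffset = 4096 - 512 = 3584 and eoffset = 3 * 4096 - 512 = 11776,
                       * which is clamped to b_bcount below.
                       */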
 2341 
 2342                 vm_page_unlock_queues();
 2343                 /*
 2344                  * Fit it to the buffer.
 2345                  */
 2346 
 2347                 if (eoffset > bp->b_bcount)
 2348                         eoffset = bp->b_bcount;
 2349 
 2350                 /*
 2351                  * If we have a good dirty range, merge with the existing
 2352                  * dirty range.
 2353                  */
 2354 
 2355                 if (boffset < eoffset) {
 2356                         if (bp->b_dirtyoff > boffset)
 2357                                 bp->b_dirtyoff = boffset;
 2358                         if (bp->b_dirtyend < eoffset)
 2359                                 bp->b_dirtyend = eoffset;
 2360                 }
 2361         }
 2362         VM_OBJECT_UNLOCK(object);
 2363 }
 2364 
 2365 /*
 2366  *      getblk:
 2367  *
 2368  *      Get a block given a specified block and offset into a file/device.
 2369  *      The buffers B_DONE bit will be cleared on return, making it almost
 2370  *      ready for an I/O initiation.  B_INVAL may or may not be set on 
 2371  *      return.  The caller should clear B_INVAL prior to initiating a
 2372  *      READ.
 2373  *
 2374  *      For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for
 2375  *      an existing buffer.
 2376  *
 2377  *      For a VMIO buffer, B_CACHE is modified according to the backing VM.
 2378  *      If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set
 2379  *      and then cleared based on the backing VM.  If the previous buffer is
 2380  *      non-0-sized but invalid, B_CACHE will be cleared.
 2381  *
 2382  *      If getblk() must create a new buffer, the new buffer is returned with
 2383  *      both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which
 2384  *      case it is returned with B_INVAL clear and B_CACHE set based on the
 2385  *      backing VM.
 2386  *
 2387  *      getblk() also forces a BUF_WRITE() for any B_DELWRI buffer whose
 2388  *      B_CACHE bit is clear.
 2389  *      
 2390  *      What this means, basically, is that the caller should use B_CACHE to
 2391  *      determine whether the buffer is fully valid or not and should clear
 2392  *      B_INVAL prior to issuing a read.  If the caller intends to validate
 2393  *      the buffer by loading its data area with something, the caller needs
 2394  *      to clear B_INVAL.  If the caller does this without issuing an I/O, 
 2395  *      the caller should set B_CACHE ( as an optimization ), else the caller
 2396  *      should issue the I/O and biodone() will set B_CACHE if the I/O was
 2397  *      a write attempt or if it was a successful read.  If the caller
 2398  *      intends to issue a READ, the caller must clear B_INVAL and BIO_ERROR
 2399  *      prior to issuing the READ.  biodone() will *not* clear B_INVAL.
 2400  */
 2401 struct buf *
 2402 getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo,
 2403     int flags)
 2404 {
 2405         struct buf *bp;
 2406         int s;
 2407         int error;
 2408         ASSERT_VOP_LOCKED(vp, "getblk");
 2409 
 2410         if (size > MAXBSIZE)
 2411                 panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE);
 2412 
 2413         s = splbio();
 2414 loop:
 2415         /*
 2416          * Block if we are low on buffers.   Certain processes are allowed
 2417          * to completely exhaust the buffer cache.
 2418          *
 2419          * If this check ever becomes a bottleneck it may be better to
 2420          * move it into the else, when gbincore() fails.  At the moment
 2421          * it isn't a problem.
 2422          *
 2423          * XXX remove if 0 sections (clean this up after it's proven)
 2424          */
 2425         if (numfreebuffers == 0) {
 2426                 if (curthread == PCPU_GET(idlethread))
 2427                         return NULL;
 2428                 mtx_lock(&nblock);
 2429                 needsbuffer |= VFS_BIO_NEED_ANY;
 2430                 mtx_unlock(&nblock);
 2431         }
 2432 
 2433         VI_LOCK(vp);
 2434         if ((bp = gbincore(vp, blkno))) {
 2435                 int lockflags;
 2436                 /*
 2437                  * Buffer is in-core.  If the buffer is not busy, it must
 2438                  * be on a queue.
 2439                  */
 2440                 lockflags = LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK;
 2441 
 2442                 if (flags & GB_LOCK_NOWAIT)
 2443                         lockflags |= LK_NOWAIT;
 2444 
 2445                 error = BUF_TIMELOCK(bp, lockflags,
 2446                     VI_MTX(vp), "getblk", slpflag, slptimeo);
 2447 
 2448                 /*
 2449                  * If we slept and got the lock we have to restart in case
 2450                  * the buffer changed identities.
 2451                  */
 2452                 if (error == ENOLCK)
 2453                         goto loop;
 2454                 /* We timed out or were interrupted. */
 2455                 else if (error)
 2456                         return (NULL);
 2457 
 2458                 /*
 2459                  * The buffer is locked.  B_CACHE is cleared if the buffer is 
 2460                  * invalid.  Otherwise, for a non-VMIO buffer, B_CACHE is set
 2461                  * and for a VMIO buffer B_CACHE is adjusted according to the
 2462                  * backing VM cache.
 2463                  */
 2464                 if (bp->b_flags & B_INVAL)
 2465                         bp->b_flags &= ~B_CACHE;
 2466                 else if ((bp->b_flags & (B_VMIO | B_INVAL)) == 0)
 2467                         bp->b_flags |= B_CACHE;
 2468                 bremfree(bp);
 2469 
 2470                 /*
 2471                  * check for size inconsistencies for the non-VMIO case.
 2472                  */
 2473 
 2474                 if (bp->b_bcount != size) {
 2475                         if ((bp->b_flags & B_VMIO) == 0 ||
 2476                             (size > bp->b_kvasize)) {
 2477                                 if (bp->b_flags & B_DELWRI) {
 2478                                         bp->b_flags |= B_NOCACHE;
 2479                                         BUF_WRITE(bp);
 2480                                 } else {
 2481                                         if ((bp->b_flags & B_VMIO) &&
 2482                                            (LIST_FIRST(&bp->b_dep) == NULL)) {
 2483                                                 bp->b_flags |= B_RELBUF;
 2484                                                 brelse(bp);
 2485                                         } else {
 2486                                                 bp->b_flags |= B_NOCACHE;
 2487                                                 BUF_WRITE(bp);
 2488                                         }
 2489                                 }
 2490                                 goto loop;
 2491                         }
 2492                 }
 2493 
 2494                 /*
 2495                  * If the size is inconsistent in the VMIO case, we can resize
 2496                  * the buffer.  This might lead to B_CACHE getting set or
 2497                  * cleared.  If the size has not changed, B_CACHE remains
 2498                  * unchanged from its previous state.
 2499                  */
 2500 
 2501                 if (bp->b_bcount != size)
 2502                         allocbuf(bp, size);
 2503 
 2504                 KASSERT(bp->b_offset != NOOFFSET, 
 2505                     ("getblk: no buffer offset"));
 2506 
 2507                 /*
 2508                  * A buffer with B_DELWRI set and B_CACHE clear must
 2509                  * be committed before we can return the buffer in
 2510                  * order to prevent the caller from issuing a read
 2511                  * ( due to B_CACHE not being set ) and overwriting
 2512                  * it.
 2513                  *
 2514                  * Most callers, including NFS and FFS, need this to
 2515                  * operate properly either because they assume they
 2516                  * can issue a read if B_CACHE is not set, or because
 2517                  * ( for example ) an uncached B_DELWRI might loop due 
 2518                  * to softupdates re-dirtying the buffer.  In the latter
 2519                  * case, B_CACHE is set after the first write completes,
 2520                  * preventing further loops.
 2521                  * NOTE!  b*write() sets B_CACHE.  If we cleared B_CACHE
 2522                  * above while extending the buffer, we cannot allow the
 2523                  * buffer to remain with B_CACHE set after the write
 2524                  * completes or it will represent a corrupt state.  To
 2525                  * deal with this we set B_NOCACHE to scrap the buffer
 2526                  * after the write.
 2527                  *
 2528                  * We might be able to do something fancy, like setting
 2529                  * B_CACHE in bwrite() except if B_DELWRI is already set,
 2530                  * so the below call doesn't set B_CACHE, but that gets real
 2531                  * confusing.  This is much easier.
 2532                  */
 2533 
 2534                 if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) {
 2535                         bp->b_flags |= B_NOCACHE;
 2536                         BUF_WRITE(bp);
 2537                         goto loop;
 2538                 }
 2539 
 2540                 splx(s);
 2541                 bp->b_flags &= ~B_DONE;
 2542         } else {
 2543                 int bsize, maxsize, vmio;
 2544                 off_t offset;
 2545 
 2546                 /*
 2547                  * Buffer is not in-core, create new buffer.  The buffer
 2548                  * returned by getnewbuf() is locked.  Note that the returned
 2549                  * buffer is also considered valid (not marked B_INVAL).
 2550                  */
 2551                 VI_UNLOCK(vp);
 2552                 /*
 2553                  * If the user does not want us to create the buffer, bail out
 2554                  * here.
 2555                  */
 2556                 if (flags & GB_NOCREAT) {
 2557                         splx(s);
 2558                         return NULL;
 2559                 }
 2560                 if (vn_isdisk(vp, NULL))
 2561                         bsize = DEV_BSIZE;
 2562                 else if (vp->v_mountedhere)
 2563                         bsize = vp->v_mountedhere->mnt_stat.f_iosize;
 2564                 else if (vp->v_mount)
 2565                         bsize = vp->v_mount->mnt_stat.f_iosize;
 2566                 else
 2567                         bsize = size;
 2568 
 2569                 offset = blkno * bsize;
 2570                 vmio = (VOP_GETVOBJECT(vp, NULL) == 0) &&
 2571                     (vp->v_vflag & VV_OBJBUF);
 2572                 maxsize = vmio ? size + (offset & PAGE_MASK) : size;
 2573                 maxsize = imax(maxsize, bsize);
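                      /*
                       * Editorial worked example (not part of the original source),
                       * assuming 4 KB pages: with bsize = 2048 and blkno = 3, offset is
                       * 6144, so a VMIO request for 2048 bytes gets
                       * maxsize = 2048 + (6144 & PAGE_MASK) = 4096.  The extra slack
                       * ensures the KVA reservation covers every page the buffer's
                       * data can touch.
                       */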
 2574 
 2575                 if ((bp = getnewbuf(slpflag, slptimeo, size, maxsize)) == NULL) {
 2576                         if (slpflag || slptimeo) {
 2577                                 splx(s);
 2578                                 return NULL;
 2579                         }
 2580                         goto loop;
 2581                 }
 2582 
 2583                 /*
 2584                  * This code is used to make sure that a buffer is not
 2585                  * created while the getnewbuf routine is blocked.
 2586                  * This can be a problem whether the vnode is locked or not.
 2587                  * If the buffer is created out from under us, we have to
 2588                  * throw away the one we just created.  There is no race
 2589                  * window because we are safely running at splbio() from the
 2590                  * point of the duplicate buffer creation through to here,
 2591                  * and we've locked the buffer.
 2592                  *
 2593                  * Note: this must occur before we associate the buffer
 2594                  * with the vp, especially considering limitations in
 2595                  * the splay tree implementation when dealing with duplicate
 2596                  * lblkno's.
 2597                  */
 2598                 VI_LOCK(vp);
 2599                 if (gbincore(vp, blkno)) {
 2600                         VI_UNLOCK(vp);
 2601                         bp->b_flags |= B_INVAL;
 2602                         brelse(bp);
 2603                         goto loop;
 2604                 }
 2605 
 2606                 /*
 2607                  * Associate the buffer with the vnode so that it can
 2608                  * be found by incore.
 2609                  */
 2610                 bp->b_blkno = bp->b_lblkno = blkno;
 2611                 bp->b_offset = offset;
 2612 
 2613                 bgetvp(vp, bp);
 2614                 VI_UNLOCK(vp);
 2615 
 2616                 /*
 2617                  * set B_VMIO bit.  allocbuf() the buffer bigger.  Since the
 2618                  * buffer size starts out as 0, B_CACHE will be set by
 2619                  * allocbuf() for the VMIO case prior to it testing the
 2620                  * backing store for validity.
 2621                  */
 2622 
 2623                 if (vmio) {
 2624                         bp->b_flags |= B_VMIO;
 2625 #if defined(VFS_BIO_DEBUG)
 2626                         if (vp->v_type != VREG)
 2627                                 printf("getblk: vmioing file type %d???\n", vp->v_type);
 2628 #endif
 2629                         VOP_GETVOBJECT(vp, &bp->b_object);
 2630                 } else {
 2631                         bp->b_flags &= ~B_VMIO;
 2632                         bp->b_object = NULL;
 2633                 }
 2634 
 2635                 allocbuf(bp, size);
 2636 
 2637                 splx(s);
 2638                 bp->b_flags &= ~B_DONE;
 2639         }
 2640         KASSERT(BUF_REFCNT(bp) == 1, ("getblk: bp %p not locked",bp));
 2641         return (bp);
 2642 }
 2643 
 2644 /*
 2645  * Get an empty, disassociated buffer of given size.  The buffer is initially
 2646  * set to B_INVAL.
 2647  */
 2648 struct buf *
 2649 geteblk(int size)
 2650 {
 2651         struct buf *bp;
 2652         int s;
 2653         int maxsize;
 2654 
 2655         maxsize = (size + BKVAMASK) & ~BKVAMASK;
 2656 
 2657         s = splbio();
 2658         while ((bp = getnewbuf(0, 0, size, maxsize)) == 0)
 2659                 continue;
 2660         splx(s);
 2661         allocbuf(bp, size);
 2662         bp->b_flags |= B_INVAL; /* b_dep cleared by getnewbuf() */
 2663         KASSERT(BUF_REFCNT(bp) == 1, ("geteblk: bp %p not locked",bp));
 2664         return (bp);
 2665 }
 2666 
 2667 
 2668 /*
 2669  * This code constructs the buffer memory from either anonymous system
 2670  * memory (in the case of non-VMIO operations) or from an associated
 2671  * VM object (in the case of VMIO operations).  This code is able to
 2672  * resize a buffer up or down.
 2673  *
 2674  * Note that this code is tricky, and has many complications to resolve
 2675  * deadlock or inconsistent data situations.  Tread lightly!!!
 2676  * There are B_CACHE and B_DELWRI interactions that must be dealt with by 
 2677  * the caller.  Calling this code willy nilly can result in the loss of data.
 2678  *
 2679  * allocbuf() only adjusts B_CACHE for VMIO buffers.  getblk() deals with
 2680  * B_CACHE for the non-VMIO case.
 2681  */
 2682 
 2683 int
 2684 allocbuf(struct buf *bp, int size)
 2685 {
 2686         int newbsize, mbsize;
 2687         int i;
 2688 
 2689         GIANT_REQUIRED;
 2690 
 2691         if (BUF_REFCNT(bp) == 0)
 2692                 panic("allocbuf: buffer not busy");
 2693 
 2694         if (bp->b_kvasize < size)
 2695                 panic("allocbuf: buffer too small");
 2696 
 2697         if ((bp->b_flags & B_VMIO) == 0) {
 2698                 caddr_t origbuf;
 2699                 int origbufsize;
 2700                 /*
 2701                  * Just get anonymous memory from the kernel.  Don't
 2702                  * mess with B_CACHE.
 2703                  */
 2704                 mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
 2705                 if (bp->b_flags & B_MALLOC)
 2706                         newbsize = mbsize;
 2707                 else
 2708                         newbsize = round_page(size);
 2709 
 2710                 if (newbsize < bp->b_bufsize) {
 2711                         /*
 2712                          * malloced buffers are not shrunk
 2713                          */
 2714                         if (bp->b_flags & B_MALLOC) {
 2715                                 if (newbsize) {
 2716                                         bp->b_bcount = size;
 2717                                 } else {
 2718                                         free(bp->b_data, M_BIOBUF);
 2719                                         if (bp->b_bufsize) {
 2720                                                 atomic_subtract_int(
 2721                                                     &bufmallocspace,
 2722                                                     bp->b_bufsize);
 2723                                                 bufspacewakeup();
 2724                                                 bp->b_bufsize = 0;
 2725                                         }
 2726                                         bp->b_saveaddr = bp->b_kvabase;
 2727                                         bp->b_data = bp->b_saveaddr;
 2728                                         bp->b_bcount = 0;
 2729                                         bp->b_flags &= ~B_MALLOC;
 2730                                 }
 2731                                 return 1;
 2732                         }               
 2733                         vm_hold_free_pages(
 2734                             bp,
 2735                             (vm_offset_t) bp->b_data + newbsize,
 2736                             (vm_offset_t) bp->b_data + bp->b_bufsize);
 2737                 } else if (newbsize > bp->b_bufsize) {
 2738                         /*
 2739                          * We only use malloced memory on the first allocation,
 2740                          * and revert to page-allocated memory when the buffer
 2741                          * grows.
 2742                          */
 2743                         /*
 2744                          * There is a potential SMP race here that could lead
 2745                          * to bufmallocspace slightly passing the max.  It
 2746                          * is probably extremely rare and not worth worrying
 2747                          * over.
 2748                          */
 2749                         if ( (bufmallocspace < maxbufmallocspace) &&
 2750                                 (bp->b_bufsize == 0) &&
 2751                                 (mbsize <= PAGE_SIZE/2)) {
 2752 
 2753                                 bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
 2754                                 bp->b_bufsize = mbsize;
 2755                                 bp->b_bcount = size;
 2756                                 bp->b_flags |= B_MALLOC;
 2757                                 atomic_add_int(&bufmallocspace, mbsize);
 2758                                 return 1;
 2759                         }
 2760                         origbuf = NULL;
 2761                         origbufsize = 0;
 2762                         /*
 2763                          * If the buffer is growing on its other-than-first allocation,
 2764                          * then we revert to the page-allocation scheme.
 2765                          */
 2766                         if (bp->b_flags & B_MALLOC) {
 2767                                 origbuf = bp->b_data;
 2768                                 origbufsize = bp->b_bufsize;
 2769                                 bp->b_data = bp->b_kvabase;
 2770                                 if (bp->b_bufsize) {
 2771                                         atomic_subtract_int(&bufmallocspace,
 2772                                             bp->b_bufsize);
 2773                                         bufspacewakeup();
 2774                                         bp->b_bufsize = 0;
 2775                                 }
 2776                                 bp->b_flags &= ~B_MALLOC;
 2777                                 newbsize = round_page(newbsize);
 2778                         }
 2779                         vm_hold_load_pages(
 2780                             bp,
 2781                             (vm_offset_t) bp->b_data + bp->b_bufsize,
 2782                             (vm_offset_t) bp->b_data + newbsize);
 2783                         if (origbuf) {
 2784                                 bcopy(origbuf, bp->b_data, origbufsize);
 2785                                 free(origbuf, M_BIOBUF);
 2786                         }
 2787                 }
 2788         } else {
 2789                 int desiredpages;
 2790 
 2791                 newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
 2792                 desiredpages = (size == 0) ? 0 :
 2793                         num_pages((bp->b_offset & PAGE_MASK) + newbsize);
 2794 
 2795                 if (bp->b_flags & B_MALLOC)
 2796                         panic("allocbuf: VMIO buffer can't be malloced");
 2797                 /*
 2798                  * Set B_CACHE initially if buffer is 0 length or will become
 2799                  * 0-length.
 2800                  */
 2801                 if (size == 0 || bp->b_bufsize == 0)
 2802                         bp->b_flags |= B_CACHE;
 2803 
 2804                 if (newbsize < bp->b_bufsize) {
 2805                         /*
 2806                          * DEV_BSIZE aligned new buffer size is less than the
 2807                          * DEV_BSIZE aligned existing buffer size.  Figure out
 2808                          * if we have to remove any pages.
 2809                          */
 2810                         if (desiredpages < bp->b_npages) {
 2811                                 vm_page_t m;
 2812 
 2813                                 vm_page_lock_queues();
 2814                                 for (i = desiredpages; i < bp->b_npages; i++) {
 2815                                         /*
 2816                                          * the page is not freed here -- it
 2817                                          * is the responsibility of 
 2818                                          * vnode_pager_setsize
 2819                                          */
 2820                                         m = bp->b_pages[i];
 2821                                         KASSERT(m != bogus_page,
 2822                                             ("allocbuf: bogus page found"));
 2823                                         while (vm_page_sleep_if_busy(m, TRUE, "biodep"))
 2824                                                 vm_page_lock_queues();
 2825 
 2826                                         bp->b_pages[i] = NULL;
 2827                                         vm_page_unwire(m, 0);
 2828                                 }
 2829                                 vm_page_unlock_queues();
 2830                                 pmap_qremove((vm_offset_t) trunc_page((vm_offset_t)bp->b_data) +
 2831                                     (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
 2832                                 bp->b_npages = desiredpages;
 2833                         }
 2834                 } else if (size > bp->b_bcount) {
 2835                         /*
 2836                          * We are growing the buffer, possibly in a 
 2837                          * byte-granular fashion.
 2838                          */
 2839                         struct vnode *vp;
 2840                         vm_object_t obj;
 2841                         vm_offset_t toff;
 2842                         vm_offset_t tinc;
 2843 
 2844                         /*
 2845                          * Step 1, bring in the VM pages from the object, 
 2846                          * allocating them if necessary.  We must clear
 2847                          * B_CACHE if these pages are not valid for the 
 2848                          * range covered by the buffer.
 2849                          */
 2850 
 2851                         vp = bp->b_vp;
 2852                         obj = bp->b_object;
 2853 
 2854                         VM_OBJECT_LOCK(obj);
 2855                         while (bp->b_npages < desiredpages) {
 2856                                 vm_page_t m;
 2857                                 vm_pindex_t pi;
 2858 
 2859                                 pi = OFF_TO_IDX(bp->b_offset) + bp->b_npages;
 2860                                 if ((m = vm_page_lookup(obj, pi)) == NULL) {
 2861                                         /*
 2862                                          * note: must allocate system pages
 2863                                          * since blocking here could interfere
 2864                                          * with paging I/O, no matter which
 2865                                          * process we are.
 2866                                          */
 2867                                         m = vm_page_alloc(obj, pi,
 2868                                             VM_ALLOC_SYSTEM | VM_ALLOC_WIRED);
 2869                                         if (m == NULL) {
 2870                                                 atomic_add_int(&vm_pageout_deficit,
 2871                                                     desiredpages - bp->b_npages);
 2872                                                 VM_OBJECT_UNLOCK(obj);
 2873                                                 VM_WAIT;
 2874                                                 VM_OBJECT_LOCK(obj);
 2875                                         } else {
 2876                                                 vm_page_lock_queues();
 2877                                                 vm_page_wakeup(m);
 2878                                                 vm_page_unlock_queues();
 2879                                                 bp->b_flags &= ~B_CACHE;
 2880                                                 bp->b_pages[bp->b_npages] = m;
 2881                                                 ++bp->b_npages;
 2882                                         }
 2883                                         continue;
 2884                                 }
 2885 
 2886                                 /*
 2887                                  * We found a page.  If we have to sleep on it,
 2888                                  * retry because it might have gotten freed out
 2889                                  * from under us.
 2890                                  *
 2891                                  * We can only test PG_BUSY here.  Blocking on
 2892                                  * m->busy might lead to a deadlock:
 2893                                  *
 2894                                  *  vm_fault->getpages->cluster_read->allocbuf
 2895                                  *
 2896                                  */
 2897                                 vm_page_lock_queues();
 2898                                 if (vm_page_sleep_if_busy(m, FALSE, "pgtblk"))
 2899                                         continue;
 2900 
 2901                                 /*
 2902                                  * We have a good page.  Should we wakeup the
 2903                                  * page daemon?
 2904                                  */
 2905                                 if ((curproc != pageproc) &&
 2906                                     ((m->queue - m->pc) == PQ_CACHE) &&
 2907                                     ((cnt.v_free_count + cnt.v_cache_count) <
 2908                                         (cnt.v_free_min + cnt.v_cache_min))) {
 2909                                         pagedaemon_wakeup();
 2910                                 }
 2911                                 vm_page_flag_clear(m, PG_ZERO);
 2912                                 vm_page_wire(m);
 2913                                 vm_page_unlock_queues();
 2914                                 bp->b_pages[bp->b_npages] = m;
 2915                                 ++bp->b_npages;
 2916                         }
 2917 
 2918                         /*
 2919                          * Step 2.  We've loaded the pages into the buffer;
 2920                          * now we have to figure out if we can still have B_CACHE
 2921                          * set.  Note that B_CACHE is set according to the
 2922                          * byte-granular range ( bcount and size ), not the
 2923                          * aligned range ( newbsize ).
 2924                          *
 2925                          * The VM test is against m->valid, which is DEV_BSIZE
 2926                          * aligned.  Needless to say, the validity of the data
 2927                          * needs to also be DEV_BSIZE aligned.  Note that this
 2928                          * fails with NFS if the server or some other client
 2929                          * extends the file's EOF.  If our buffer is resized, 
 2930                          * B_CACHE may remain set! XXX
 2931                          */
 2932 
 2933                         toff = bp->b_bcount;
 2934                         tinc = PAGE_SIZE - ((bp->b_offset + toff) & PAGE_MASK);
 2935 
 2936                         while ((bp->b_flags & B_CACHE) && toff < size) {
 2937                                 vm_pindex_t pi;
 2938 
 2939                                 if (tinc > (size - toff))
 2940                                         tinc = size - toff;
 2941 
 2942                                 pi = ((bp->b_offset & PAGE_MASK) + toff) >> 
 2943                                     PAGE_SHIFT;
 2944 
 2945                                 vfs_buf_test_cache(
 2946                                     bp, 
 2947                                     bp->b_offset,
 2948                                     toff, 
 2949                                     tinc, 
 2950                                     bp->b_pages[pi]
 2951                                 );
 2952                                 toff += tinc;
 2953                                 tinc = PAGE_SIZE;
 2954                         }
 2955                         VM_OBJECT_UNLOCK(obj);
 2956 
 2957                         /*
 2958                          * Step 3, fixup the KVM pmap.  Remember that
 2959                          * bp->b_data is relative to bp->b_offset, but 
 2960                          * bp->b_offset may be offset into the first page.
 2961                          */
 2962 
 2963                         bp->b_data = (caddr_t)
 2964                             trunc_page((vm_offset_t)bp->b_data);
 2965                         pmap_qenter(
 2966                             (vm_offset_t)bp->b_data,
 2967                             bp->b_pages, 
 2968                             bp->b_npages
 2969                         );
 2970                         
 2971                         bp->b_data = (caddr_t)((vm_offset_t)bp->b_data | 
 2972                             (vm_offset_t)(bp->b_offset & PAGE_MASK));
 2973                 }
 2974         }
 2975         if (newbsize < bp->b_bufsize)
 2976                 bufspacewakeup();
 2977         bp->b_bufsize = newbsize;       /* actual buffer allocation     */
 2978         bp->b_bcount = size;            /* requested buffer size        */
 2979         return 1;
 2980 }
 2981 
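/*
 * A minimal sketch of the intended allocbuf() calling pattern: the buffer
 * must already be owned (locked) by the caller, normally via getblk().
 * For a VMIO buffer the resize may clear B_CACHE when the pages backing
 * the grown range are not fully valid, so the caller still has to check
 * B_CACHE and read the block before trusting its contents.  Assumes the
 * 5.x getblk() signature (vp, blkno, size, slpflag, slptimeo, flags).
 */
#if 0
static struct buf *
example_grow_buffer(struct vnode *vp, daddr_t blkno, int oldsize, int newsize)
{
	struct buf *bp;

	/* getblk() returns the buffer locked, as allocbuf() requires. */
	bp = getblk(vp, blkno, oldsize, 0, 0, 0);
	allocbuf(bp, newsize);
	if ((bp->b_flags & B_CACHE) == 0) {
		/* ... grown range is not valid; issue a read before use ... */
	}
	return (bp);
}
#endif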
 2982 void
 2983 biodone(struct bio *bp)
 2984 {
 2985         mtx_lock(&bdonelock);
 2986         bp->bio_flags |= BIO_DONE;
 2987         if (bp->bio_done == NULL)
 2988                 wakeup(bp);
 2989         mtx_unlock(&bdonelock);
 2990         if (bp->bio_done != NULL)
 2991                 bp->bio_done(bp);
 2992 }
 2993 
 2994 /*
 2995  * Wait for a BIO to finish.
 2996  *
 2997  * XXX: resort to a timeout for now.  The optimal locking (if any) for this
 2998  * case is not yet clear.
 2999  */
 3000 int
 3001 biowait(struct bio *bp, const char *wchan)
 3002 {
 3003 
 3004         mtx_lock(&bdonelock);
 3005         while ((bp->bio_flags & BIO_DONE) == 0)
 3006                 msleep(bp, &bdonelock, PRIBIO, wchan, hz / 10);
 3007         mtx_unlock(&bdonelock);
 3008         if (bp->bio_error != 0)
 3009                 return (bp->bio_error);
 3010         if (!(bp->bio_flags & BIO_ERROR))
 3011                 return (0);
 3012         return (EIO);
 3013 }
 3014 
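/*
 * A minimal sketch of synchronous struct bio completion: the initiator
 * leaves bio_done NULL, hands the request to a driver strategy routine
 * (the xx_strategy() name is hypothetical), and sleeps in biowait() until
 * the driver's completion path calls biodone().
 */
#if 0
static int
example_bio_sync(struct bio *bip)
{

	bip->bio_done = NULL;		/* biodone() will wakeup() the waiter */
	xx_strategy(bip);		/* hypothetical driver entry point */
	return (biowait(bip, "exbio"));
}
#endif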
 3015 void
 3016 biofinish(struct bio *bp, struct devstat *stat, int error)
 3017 {
 3018         
 3019         if (error) {
 3020                 bp->bio_error = error;
 3021                 bp->bio_flags |= BIO_ERROR;
 3022         }
 3023         if (stat != NULL)
 3024                 devstat_end_transaction_bio(stat, bp);
 3025         biodone(bp);
 3026 }
 3027 
 3028 /*
 3029  *      bufwait:
 3030  *
 3031  *      Wait for buffer I/O completion, returning error status.  The buffer
 3032  *      is left locked and B_DONE on return.  B_EINTR is converted into an EINTR
 3033  *      error and cleared.
 3034  */
 3035 int
 3036 bufwait(register struct buf * bp)
 3037 {
 3038         int s;
 3039 
 3040         s = splbio();
 3041         if (bp->b_iocmd == BIO_READ)
 3042                 bwait(bp, PRIBIO, "biord");
 3043         else
 3044                 bwait(bp, PRIBIO, "biowr");
 3045         splx(s);
 3046         if (bp->b_flags & B_EINTR) {
 3047                 bp->b_flags &= ~B_EINTR;
 3048                 return (EINTR);
 3049         }
 3050         if (bp->b_ioflags & BIO_ERROR) {
 3051                 return (bp->b_error ? bp->b_error : EIO);
 3052         } else {
 3053                 return (0);
 3054         }
 3055 }
 3056 
 3057  /*
 3058   * Callback function from struct bio back up to struct buf.
 3059   * The corresponding initialization lives in sys/conf.h:DEV_STRATEGY().
 3060   */
 3061 static void
 3062 bufdonebio(struct bio *bp)
 3063 {
 3064 
 3065         /* Device drivers may or may not hold Giant; acquire it here. */
 3066         mtx_lock(&Giant);
 3067         bufdone(bp->bio_caller2);
 3068         mtx_unlock(&Giant);
 3069 }
 3070 
 3071 void
 3072 dev_strategy(struct buf *bp)
 3073 {
 3074 
 3075         if ((!bp->b_iocmd) || (bp->b_iocmd & (bp->b_iocmd - 1)))
 3076                 panic("b_iocmd botch");
 3077         bp->b_io.bio_done = bufdonebio;
 3078         bp->b_io.bio_caller2 = bp;
 3079         (*devsw(bp->b_io.bio_dev)->d_strategy)(&bp->b_io);
 3080 }
 3081 
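/*
 * A minimal sketch of a dev_strategy() caller.  Exactly one command bit
 * may be set in b_iocmd or the "b_iocmd botch" panic above fires.  The
 * sketch assumes the caller holds the buffer lock and has already filled
 * in b_data, b_bcount and b_blkno for the transfer.
 */
#if 0
static int
example_dev_write(struct buf *bp, dev_t dev)
{

	bp->b_iocmd = BIO_WRITE;	/* one of BIO_READ, BIO_WRITE, BIO_DELETE */
	bp->b_dev = dev;
	dev_strategy(bp);
	return (bufwait(bp));		/* sleep until bufdone() completes the buf */
}
#endif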
 3082 /*
 3083  *      bufdone:
 3084  *
 3085  *      Finish I/O on a buffer, optionally calling a completion function.
 3086  *      This is usually called from an interrupt so process blocking is
 3087  *      not allowed.
 3088  *
 3089  *      biodone is also responsible for setting B_CACHE in a B_VMIO bp.
 3090  *      In a non-VMIO bp, B_CACHE will be set on the next getblk() 
 3091  *      assuming B_INVAL is clear.
 3092  *
 3093  *      For the VMIO case, we set B_CACHE if the op was a read and no
 3094  *      read error occurred, or if the op was a write.  B_CACHE is never
 3095  *      set if the buffer is invalid or otherwise uncacheable.
 3096  *
 3097  *      biodone does not mess with B_INVAL, allowing the I/O routine or the
 3098  *      initiator to leave B_INVAL set to brelse the buffer out of existence
 3099  *      in the biodone routine.
 3100  */
 3101 void
 3102 bufdone(struct buf *bp)
 3103 {
 3104         int s;
 3105         void    (*biodone)(struct buf *);
 3106 
 3107         GIANT_REQUIRED;
 3108 
 3109         s = splbio();
 3110 
 3111         KASSERT(BUF_REFCNT(bp) > 0, ("biodone: bp %p not busy %d", bp, BUF_REFCNT(bp)));
 3112         KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp));
 3113 
 3114         bp->b_flags |= B_DONE;
 3115         runningbufwakeup(bp);
 3116 
 3117         if (bp->b_iocmd == BIO_DELETE) {
 3118                 brelse(bp);
 3119                 splx(s);
 3120                 return;
 3121         }
 3122 
 3123         if (bp->b_iocmd == BIO_WRITE) {
 3124                 vwakeup(bp);
 3125         }
 3126 
 3127         /* call optional completion function if requested */
 3128         if (bp->b_iodone != NULL) {
 3129                 biodone = bp->b_iodone;
 3130                 bp->b_iodone = NULL;
 3131                 (*biodone) (bp);
 3132                 splx(s);
 3133                 return;
 3134         }
 3135         if (LIST_FIRST(&bp->b_dep) != NULL)
 3136                 buf_complete(bp);
 3137 
 3138         if (bp->b_flags & B_VMIO) {
 3139                 int i;
 3140                 vm_ooffset_t foff;
 3141                 vm_page_t m;
 3142                 vm_object_t obj;
 3143                 int iosize;
 3144                 struct vnode *vp = bp->b_vp;
 3145 
 3146                 obj = bp->b_object;
 3147 
 3148 #if defined(VFS_BIO_DEBUG)
 3149                 mp_fixme("usecount and vflag accessed without locks.");
 3150                 if (vp->v_usecount == 0) {
 3151                         panic("biodone: zero vnode ref count");
 3152                 }
 3153 
 3154                 if ((vp->v_vflag & VV_OBJBUF) == 0) {
 3155                         panic("biodone: vnode is not setup for merged cache");
 3156                 }
 3157 #endif
 3158 
 3159                 foff = bp->b_offset;
 3160                 KASSERT(bp->b_offset != NOOFFSET,
 3161                     ("biodone: no buffer offset"));
 3162 
 3163                 VM_OBJECT_LOCK(obj);
 3164 #if defined(VFS_BIO_DEBUG)
 3165                 if (obj->paging_in_progress < bp->b_npages) {
 3166                         printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
 3167                             obj->paging_in_progress, bp->b_npages);
 3168                 }
 3169 #endif
 3170 
 3171                 /*
 3172                  * Set B_CACHE if the op was a normal read and no error
 3173                  * occurred.  B_CACHE is set for writes in the b*write()
 3174                  * routines.
 3175                  */
 3176                 iosize = bp->b_bcount - bp->b_resid;
 3177                 if (bp->b_iocmd == BIO_READ &&
 3178                     !(bp->b_flags & (B_INVAL|B_NOCACHE)) &&
 3179                     !(bp->b_ioflags & BIO_ERROR)) {
 3180                         bp->b_flags |= B_CACHE;
 3181                 }
 3182                 vm_page_lock_queues();
 3183                 for (i = 0; i < bp->b_npages; i++) {
 3184                         int bogusflag = 0;
 3185                         int resid;
 3186 
 3187                         resid = ((foff + PAGE_SIZE) & ~(off_t)PAGE_MASK) - foff;
 3188                         if (resid > iosize)
 3189                                 resid = iosize;
 3190 
 3191                         /*
 3192                          * cleanup bogus pages, restoring the originals
 3193                          */
 3194                         m = bp->b_pages[i];
 3195                         if (m == bogus_page) {
 3196                                 bogusflag = 1;
 3197                                 m = vm_page_lookup(obj, OFF_TO_IDX(foff));
 3198                                 if (m == NULL)
 3199                                         panic("biodone: page disappeared!");
 3200                                 bp->b_pages[i] = m;
 3201                                 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
 3202                         }
 3203 #if defined(VFS_BIO_DEBUG)
 3204                         if (OFF_TO_IDX(foff) != m->pindex) {
 3205                                 printf(
 3206 "biodone: foff(%jd)/m->pindex(%ju) mismatch\n",
 3207                                     (intmax_t)foff, (uintmax_t)m->pindex);
 3208                         }
 3209 #endif
 3210 
 3211                         /*
 3212                          * In the write case, the valid and clean bits are
 3213                          * already changed correctly ( see bdwrite() ), so we 
 3214                          * only need to do this here in the read case.
 3215                          */
 3216                         if ((bp->b_iocmd == BIO_READ) && !bogusflag && resid > 0) {
 3217                                 vfs_page_set_valid(bp, foff, i, m);
 3218                         }
 3219                         vm_page_flag_clear(m, PG_ZERO);
 3220 
 3221                         /*
 3222                          * when debugging new filesystems or buffer I/O methods, this
 3223                          * is the most common error that pops up.  if you see this, you
 3224                          * have not set the page busy flag correctly!!!
 3225                          */
 3226                         if (m->busy == 0) {
 3227                                 printf("biodone: page busy < 0, "
 3228                                     "pindex: %d, foff: 0x(%x,%x), "
 3229                                     "resid: %d, index: %d\n",
 3230                                     (int) m->pindex, (int)(foff >> 32),
 3231                                                 (int) foff & 0xffffffff, resid, i);
 3232                                 if (!vn_isdisk(vp, NULL))
 3233                                         printf(" iosize: %jd, lblkno: %jd, flags: 0x%x, npages: %d\n",
 3234                                             (intmax_t)bp->b_vp->v_mount->mnt_stat.f_iosize,
 3235                                             (intmax_t) bp->b_lblkno,
 3236                                             bp->b_flags, bp->b_npages);
 3237                                 else
 3238                                         printf(" VDEV, lblkno: %jd, flags: 0x%x, npages: %d\n",
 3239                                             (intmax_t) bp->b_lblkno,
 3240                                             bp->b_flags, bp->b_npages);
 3241                                 printf(" valid: 0x%lx, dirty: 0x%lx, wired: %d\n",
 3242                                     (u_long)m->valid, (u_long)m->dirty,
 3243                                     m->wire_count);
 3244                                 panic("biodone: page busy < 0\n");
 3245                         }
 3246                         vm_page_io_finish(m);
 3247                         vm_object_pip_subtract(obj, 1);
 3248                         foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
 3249                         iosize -= resid;
 3250                 }
 3251                 vm_page_unlock_queues();
 3252                 vm_object_pip_wakeupn(obj, 0);
 3253                 VM_OBJECT_UNLOCK(obj);
 3254         }
 3255 
 3256         /*
 3257          * For asynchronous completions, release the buffer now. The brelse
 3258          * will do a wakeup there if necessary - so no need to do a wakeup
 3259          * here in the async case. The sync case always needs to do a wakeup.
 3260          */
 3261 
 3262         if (bp->b_flags & B_ASYNC) {
 3263                 if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_RELBUF)) || (bp->b_ioflags & BIO_ERROR))
 3264                         brelse(bp);
 3265                 else
 3266                         bqrelse(bp);
 3267         } else {
 3268                 bdone(bp);
 3269         }
 3270         splx(s);
 3271 }
 3272 
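/*
 * A minimal sketch of a b_iodone handler.  When b_iodone is set, bufdone()
 * returns right after invoking the callback, so the handler owns the buffer
 * and must release it itself.  The sketch assumes a simple non-VMIO buffer
 * with no dependencies.
 */
#if 0
static void
example_iodone(struct buf *bp)
{

	if (bp->b_ioflags & BIO_ERROR) {
		printf("example_iodone: error %d on blkno %jd\n",
		    bp->b_error, (intmax_t)bp->b_blkno);
		bp->b_flags |= B_INVAL;		/* do not cache bad data */
	}
	brelse(bp);
}
#endif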
 3273 /*
 3274  * This routine is called in lieu of iodone in the case of
 3275  * incomplete I/O.  This keeps the busy status for pages
 3276  * consistent.
 3277  */
 3278 void
 3279 vfs_unbusy_pages(struct buf * bp)
 3280 {
 3281         int i;
 3282 
 3283         runningbufwakeup(bp);
 3284         if (bp->b_flags & B_VMIO) {
 3285                 vm_object_t obj;
 3286 
 3287                 obj = bp->b_object;
 3288                 VM_OBJECT_LOCK(obj);
 3289                 vm_page_lock_queues();
 3290                 for (i = 0; i < bp->b_npages; i++) {
 3291                         vm_page_t m = bp->b_pages[i];
 3292 
 3293                         if (m == bogus_page) {
 3294                                 m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i);
 3295                                 if (!m) {
 3296                                         panic("vfs_unbusy_pages: page missing\n");
 3297                                 }
 3298                                 bp->b_pages[i] = m;
 3299                                 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
 3300                         }
 3301                         vm_object_pip_subtract(obj, 1);
 3302                         vm_page_flag_clear(m, PG_ZERO);
 3303                         vm_page_io_finish(m);
 3304                 }
 3305                 vm_page_unlock_queues();
 3306                 vm_object_pip_wakeupn(obj, 0);
 3307                 VM_OBJECT_UNLOCK(obj);
 3308         }
 3309 }
 3310 
 3311 /*
 3312  * vfs_page_set_valid:
 3313  *
 3314  *      Set the valid bits in a page based on the supplied offset.   The
 3315  *      range is restricted to the buffer's size.
 3316  *
 3317  *      This routine is typically called after a read completes.
 3318  */
 3319 static void
 3320 vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m)
 3321 {
 3322         vm_ooffset_t soff, eoff;
 3323 
 3324         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 3325         /*
 3326          * Start and end offsets in buffer.  eoff - soff may not cross a
 3327          * page boundary or cross the end of the buffer.  The end of the
 3328          * buffer, in this case, is our file EOF, not the allocation size
 3329          * of the buffer.
 3330          */
 3331         soff = off;
 3332         eoff = (off + PAGE_SIZE) & ~(off_t)PAGE_MASK;
 3333         if (eoff > bp->b_offset + bp->b_bcount)
 3334                 eoff = bp->b_offset + bp->b_bcount;
 3335 
 3336         /*
 3337          * Set valid range.  This is typically the entire buffer and thus the
 3338          * entire page.
 3339          */
 3340         if (eoff > soff) {
 3341                 vm_page_set_validclean(
 3342                     m,
 3343                    (vm_offset_t) (soff & PAGE_MASK),
 3344                    (vm_offset_t) (eoff - soff)
 3345                 );
 3346         }
 3347 }
 3348 
 3349 /*
 3350  * This routine is called before a device strategy routine.
 3351  * It is used to tell the VM system that paging I/O is in
 3352  * progress, and treat the pages associated with the buffer
 3353  * almost as being PG_BUSY.  Also the object paging_in_progress
 3354  * flag is handled to make sure that the object doesn't become
 3355  * inconsistent.
 3356  *
 3357  * Since I/O has not been initiated yet, certain buffer flags
 3358  * such as BIO_ERROR or B_INVAL may be in an inconsistent state
 3359  * and should be ignored.
 3360  */
 3361 void
 3362 vfs_busy_pages(struct buf * bp, int clear_modify)
 3363 {
 3364         int i, bogus;
 3365 
 3366         if (bp->b_flags & B_VMIO) {
 3367                 vm_object_t obj;
 3368                 vm_ooffset_t foff;
 3369 
 3370                 obj = bp->b_object;
 3371                 foff = bp->b_offset;
 3372                 KASSERT(bp->b_offset != NOOFFSET,
 3373                     ("vfs_busy_pages: no buffer offset"));
 3374                 vfs_setdirty(bp);
 3375                 VM_OBJECT_LOCK(obj);
 3376 retry:
 3377                 vm_page_lock_queues();
 3378                 for (i = 0; i < bp->b_npages; i++) {
 3379                         vm_page_t m = bp->b_pages[i];
 3380 
 3381                         if (vm_page_sleep_if_busy(m, FALSE, "vbpage"))
 3382                                 goto retry;
 3383                 }
 3384                 bogus = 0;
 3385                 for (i = 0; i < bp->b_npages; i++) {
 3386                         vm_page_t m = bp->b_pages[i];
 3387 
 3388                         vm_page_flag_clear(m, PG_ZERO);
 3389                         if ((bp->b_flags & B_CLUSTER) == 0) {
 3390                                 vm_object_pip_add(obj, 1);
 3391                                 vm_page_io_start(m);
 3392                         }
 3393                         /*
 3394                          * When readying a buffer for a read ( i.e.,
 3395                          * clear_modify == 0 ), it is important to do
 3396                          * bogus_page replacement for valid pages in 
 3397                          * partially instantiated buffers.  Partially 
 3398                          * instantiated buffers can, in turn, occur when
 3399                          * reconstituting a buffer from its VM backing store
 3400                          * base.  We only have to do this if B_CACHE is
 3401                          * clear ( which causes the I/O to occur in the
 3402                          * first place ).  The replacement prevents the read
 3403                          * I/O from overwriting potentially dirty VM-backed
 3404                          * pages.  XXX bogus page replacement is, uh, bogus.
 3405                          * It may not work properly with small-block devices.
 3406                          * We need to find a better way.
 3407                          */
 3408                         pmap_remove_all(m);
 3409                         if (clear_modify)
 3410                                 vfs_page_set_valid(bp, foff, i, m);
 3411                         else if (m->valid == VM_PAGE_BITS_ALL &&
 3412                                 (bp->b_flags & B_CACHE) == 0) {
 3413                                 bp->b_pages[i] = bogus_page;
 3414                                 bogus++;
 3415                         }
 3416                         foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
 3417                 }
 3418                 vm_page_unlock_queues();
 3419                 VM_OBJECT_UNLOCK(obj);
 3420                 if (bogus)
 3421                         pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
 3422         }
 3423 }
 3424 
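/*
 * A minimal sketch of the read path that brackets the I/O with
 * vfs_busy_pages() and the page cleanup done later in bufdone() --
 * essentially what bread() does.  Assumes a regular (non-VCHR) vnode, so
 * VOP_STRATEGY() is the right way to start the I/O, and the 5.x getblk()
 * signature.
 */
#if 0
static int
example_read_block(struct vnode *vp, daddr_t blkno, int size, struct buf **bpp)
{
	struct buf *bp;
	int error;

	bp = getblk(vp, blkno, size, 0, 0, 0);
	if ((bp->b_flags & B_CACHE) == 0) {
		bp->b_iocmd = BIO_READ;
		bp->b_flags &= ~B_INVAL;
		bp->b_ioflags &= ~BIO_ERROR;
		vfs_busy_pages(bp, 0);		/* pages busied for the read */
		VOP_STRATEGY(vp, bp);
		error = bufwait(bp);
		if (error != 0) {
			brelse(bp);
			return (error);
		}
	}
	*bpp = bp;
	return (0);
}
#endif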
 3425 /*
 3426  * Tell the VM system that the pages associated with this buffer
 3427  * are clean.  This is used for delayed writes where the data is
 3428  * going to go to disk eventually without additional VM intervention.
 3429  *
 3430  * Note that while we only really need to clean through to b_bcount, we
 3431  * just go ahead and clean through to b_bufsize.
 3432  */
 3433 static void
 3434 vfs_clean_pages(struct buf * bp)
 3435 {
 3436         int i;
 3437 
 3438         if (bp->b_flags & B_VMIO) {
 3439                 vm_ooffset_t foff;
 3440 
 3441                 foff = bp->b_offset;
 3442                 KASSERT(bp->b_offset != NOOFFSET,
 3443                     ("vfs_clean_pages: no buffer offset"));
 3444                 VM_OBJECT_LOCK(bp->b_object);
 3445                 vm_page_lock_queues();
 3446                 for (i = 0; i < bp->b_npages; i++) {
 3447                         vm_page_t m = bp->b_pages[i];
 3448                         vm_ooffset_t noff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
 3449                         vm_ooffset_t eoff = noff;
 3450 
 3451                         if (eoff > bp->b_offset + bp->b_bufsize)
 3452                                 eoff = bp->b_offset + bp->b_bufsize;
 3453                         vfs_page_set_valid(bp, foff, i, m);
 3454                         /* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */
 3455                         foff = noff;
 3456                 }
 3457                 vm_page_unlock_queues();
 3458                 VM_OBJECT_UNLOCK(bp->b_object);
 3459         }
 3460 }
 3461 
 3462 /*
 3463  *      vfs_bio_set_validclean:
 3464  *
 3465  *      Set the range within the buffer to valid and clean.  The range is 
 3466  *      relative to the beginning of the buffer, b_offset.  Note that b_offset
 3467  *      itself may be offset from the beginning of the first page.
 3468  *
 3469  */
 3470 
 3471 void   
 3472 vfs_bio_set_validclean(struct buf *bp, int base, int size)
 3473 {
 3474         if (bp->b_flags & B_VMIO) {
 3475                 int i;
 3476                 int n;
 3477 
 3478                 /*
 3479                  * Fixup base to be relative to beginning of first page.
 3480                  * Set initial n to be the maximum number of bytes in the
 3481                  * first page that can be validated.
 3482                  */
 3483 
 3484                 base += (bp->b_offset & PAGE_MASK);
 3485                 n = PAGE_SIZE - (base & PAGE_MASK);
 3486 
 3487                 VM_OBJECT_LOCK(bp->b_object);
 3488                 vm_page_lock_queues();
 3489                 for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
 3490                         vm_page_t m = bp->b_pages[i];
 3491 
 3492                         if (n > size)
 3493                                 n = size;
 3494 
 3495                         vm_page_set_validclean(m, base & PAGE_MASK, n);
 3496                         base += n;
 3497                         size -= n;
 3498                         n = PAGE_SIZE;
 3499                 }
 3500                 vm_page_unlock_queues();
 3501                 VM_OBJECT_UNLOCK(bp->b_object);
 3502         }
 3503 }
 3504 
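/*
 * A minimal sketch of a vfs_bio_set_validclean() caller: after copying
 * data into part of a buffer from a source that already matches what is
 * on disk, the caller marks just that byte range valid and clean so no
 * read or write is needed for it.  The offset is relative to b_offset,
 * i.e. to the start of the buffer.
 */
#if 0
static void
example_mark_range(struct buf *bp, caddr_t src, int off, int len)
{

	bcopy(src, bp->b_data + off, len);
	vfs_bio_set_validclean(bp, off, len);
}
#endif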
 3505 /*
 3506  *      vfs_bio_clrbuf:
 3507  *
 3508  *      clear a buffer.  This routine essentially fakes an I/O, so we need
 3509  *      to clear BIO_ERROR and B_INVAL.
 3510  *
 3511  *      Note that while we only theoretically need to clear through b_bcount,
 3512  *      we go ahead and clear through b_bufsize.
 3513  */
 3514 
 3515 void
 3516 vfs_bio_clrbuf(struct buf *bp) 
 3517 {
 3518         int i, mask = 0;
 3519         caddr_t sa, ea;
 3520 
 3521         GIANT_REQUIRED;
 3522 
 3523         if ((bp->b_flags & (B_VMIO | B_MALLOC)) == B_VMIO) {
 3524                 bp->b_flags &= ~B_INVAL;
 3525                 bp->b_ioflags &= ~BIO_ERROR;
 3526                 VM_OBJECT_LOCK(bp->b_object);
 3527                 if( (bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
 3528                     (bp->b_offset & PAGE_MASK) == 0) {
 3529                         mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
 3530                         if (bp->b_pages[0] != bogus_page)
 3531                                 VM_OBJECT_LOCK_ASSERT(bp->b_pages[0]->object, MA_OWNED);
 3532                         if ((bp->b_pages[0]->valid & mask) == mask)
 3533                                 goto unlock;
 3534                         if (((bp->b_pages[0]->flags & PG_ZERO) == 0) &&
 3535                             ((bp->b_pages[0]->valid & mask) == 0)) {
 3536                                 bzero(bp->b_data, bp->b_bufsize);
 3537                                 bp->b_pages[0]->valid |= mask;
 3538                                 goto unlock;
 3539                         }
 3540                 }
 3541                 ea = sa = bp->b_data;
 3542                 for(i=0;i<bp->b_npages;i++,sa=ea) {
 3543                         int j = ((vm_offset_t)sa & PAGE_MASK) / DEV_BSIZE;
 3544                         ea = (caddr_t)trunc_page((vm_offset_t)sa + PAGE_SIZE);
 3545                         ea = (caddr_t)(vm_offset_t)ulmin(
 3546                             (u_long)(vm_offset_t)ea,
 3547                             (u_long)(vm_offset_t)bp->b_data + bp->b_bufsize);
 3548                         mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j;
 3549                         if (bp->b_pages[i] != bogus_page)
 3550                                 VM_OBJECT_LOCK_ASSERT(bp->b_pages[i]->object, MA_OWNED);
 3551                         if ((bp->b_pages[i]->valid & mask) == mask)
 3552                                 continue;
 3553                         if ((bp->b_pages[i]->valid & mask) == 0) {
 3554                                 if ((bp->b_pages[i]->flags & PG_ZERO) == 0) {
 3555                                         bzero(sa, ea - sa);
 3556                                 }
 3557                         } else {
 3558                                 for (; sa < ea; sa += DEV_BSIZE, j++) {
 3559                                         if (((bp->b_pages[i]->flags & PG_ZERO) == 0) &&
 3560                                                 (bp->b_pages[i]->valid & (1<<j)) == 0)
 3561                                                 bzero(sa, DEV_BSIZE);
 3562                                 }
 3563                         }
 3564                         bp->b_pages[i]->valid |= mask;
 3565                         vm_page_lock_queues();
 3566                         vm_page_flag_clear(bp->b_pages[i], PG_ZERO);
 3567                         vm_page_unlock_queues();
 3568                 }
 3569 unlock:
 3570                 VM_OBJECT_UNLOCK(bp->b_object);
 3571                 bp->b_resid = 0;
 3572         } else {
 3573                 clrbuf(bp);
 3574         }
 3575 }
 3576 
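/*
 * A minimal sketch of a vfs_bio_clrbuf() caller: a filesystem that has
 * just allocated a brand-new block zeroes the buffer instead of reading
 * stale contents from disk, much as the FFS block-allocation path does.
 * Assumes the 5.x getblk() signature.
 */
#if 0
static struct buf *
example_new_block(struct vnode *vp, daddr_t blkno, int size)
{
	struct buf *bp;

	bp = getblk(vp, blkno, size, 0, 0, 0);
	vfs_bio_clrbuf(bp);		/* fakes a successful read of zeroes */
	return (bp);
}
#endif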
 3577 /*
 3578  * vm_hold_load_pages and vm_hold_free_pages get pages into
 3579  * a buffer's address space.  The pages are anonymous and are
 3580  * not associated with a file object.
 3581  */
 3582 static void
 3583 vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
 3584 {
 3585         vm_offset_t pg;
 3586         vm_page_t p;
 3587         int index;
 3588 
 3589         to = round_page(to);
 3590         from = round_page(from);
 3591         index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
 3592 
 3593         VM_OBJECT_LOCK(kernel_object);
 3594         for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
 3595 tryagain:
 3596                 /*
 3597                  * note: must allocate system pages since blocking here
 3598                  * could interfere with paging I/O, no matter which
 3599                  * process we are.
 3600                  */
 3601                 p = vm_page_alloc(kernel_object,
 3602                         ((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
 3603                     VM_ALLOC_SYSTEM | VM_ALLOC_WIRED);
 3604                 if (!p) {
 3605                         atomic_add_int(&vm_pageout_deficit,
 3606                             (to - pg) >> PAGE_SHIFT);
 3607                         VM_OBJECT_UNLOCK(kernel_object);
 3608                         VM_WAIT;
 3609                         VM_OBJECT_LOCK(kernel_object);
 3610                         goto tryagain;
 3611                 }
 3612                 p->valid = VM_PAGE_BITS_ALL;
 3613                 pmap_qenter(pg, &p, 1);
 3614                 bp->b_pages[index] = p;
 3615                 vm_page_lock_queues();
 3616                 vm_page_wakeup(p);
 3617                 vm_page_unlock_queues();
 3618         }
 3619         VM_OBJECT_UNLOCK(kernel_object);
 3620         bp->b_npages = index;
 3621 }
 3622 
 3623 /* Return pages associated with this buf to the vm system */
 3624 static void
 3625 vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
 3626 {
 3627         vm_offset_t pg;
 3628         vm_page_t p;
 3629         int index, newnpages;
 3630 
 3631         GIANT_REQUIRED;
 3632 
 3633         from = round_page(from);
 3634         to = round_page(to);
 3635         newnpages = index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
 3636 
 3637         VM_OBJECT_LOCK(kernel_object);
 3638         for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
 3639                 p = bp->b_pages[index];
 3640                 if (p && (index < bp->b_npages)) {
 3641                         if (p->busy) {
 3642                                 printf(
 3643                             "vm_hold_free_pages: blkno: %jd, lblkno: %jd\n",
 3644                                     (intmax_t)bp->b_blkno,
 3645                                     (intmax_t)bp->b_lblkno);
 3646                         }
 3647                         bp->b_pages[index] = NULL;
 3648                         pmap_qremove(pg, 1);
 3649                         vm_page_lock_queues();
 3650                         vm_page_busy(p);
 3651                         vm_page_unwire(p, 0);
 3652                         vm_page_free(p);
 3653                         vm_page_unlock_queues();
 3654                 }
 3655         }
 3656         VM_OBJECT_UNLOCK(kernel_object);
 3657         bp->b_npages = newnpages;
 3658 }
 3659 
 3660 /*
 3661  * Map an IO request into kernel virtual address space.
 3662  *
 3663  * All requests are (re)mapped into kernel VA space.
 3664  * Notice that we use b_bufsize for the size of the buffer
 3665  * to be mapped.  b_bcount might be modified by the driver.
 3666  *
 3667  * Note that even if the caller determines that the address space should
 3668  * be valid, a race or a smaller file mapped into a larger space may
 3669  * actually cause vmapbuf() to fail, so all callers of vmapbuf() MUST
 3670  * check the return value.
 3671  */
 3672 int
 3673 vmapbuf(struct buf *bp)
 3674 {
 3675         caddr_t addr, kva;
 3676         vm_prot_t prot;
 3677         int pidx, i;
 3678         struct vm_page *m;
 3679         struct pmap *pmap = &curproc->p_vmspace->vm_pmap;
 3680 
 3681         GIANT_REQUIRED;
 3682 
 3683         if (bp->b_bufsize < 0)
 3684                 return (-1);
 3685         prot = (bp->b_iocmd == BIO_READ) ? VM_PROT_READ | VM_PROT_WRITE :
 3686             VM_PROT_READ;
 3687         for (addr = (caddr_t)trunc_page((vm_offset_t)bp->b_data), pidx = 0;
 3688              addr < bp->b_data + bp->b_bufsize;
 3689              addr += PAGE_SIZE, pidx++) {
 3690                 /*
 3691                  * Do the vm_fault if needed; do the copy-on-write thing
 3692                  * when reading stuff off device into memory.
 3693                  *
 3694                  * NOTE! Must use pmap_extract() because addr may be in
 3695                  * the userland address space, and kextract is only guaranteed
 3696                  * to work for the kernel address space (see: sparc64 port).
 3697                  */
 3698 retry:
 3699                 if (vm_fault_quick(addr >= bp->b_data ? addr : bp->b_data,
 3700                     prot) < 0) {
 3701                         vm_page_lock_queues();
 3702                         for (i = 0; i < pidx; ++i) {
 3703                                 vm_page_unhold(bp->b_pages[i]);
 3704                                 bp->b_pages[i] = NULL;
 3705                         }
 3706                         vm_page_unlock_queues();
 3707                         return(-1);
 3708                 }
 3709                 m = pmap_extract_and_hold(pmap, (vm_offset_t)addr, prot);
 3710                 if (m == NULL)
 3711                         goto retry;
 3712                 bp->b_pages[pidx] = m;
 3713         }
 3714         if (pidx > btoc(MAXPHYS))
 3715                 panic("vmapbuf: mapped more than MAXPHYS");
 3716         pmap_qenter((vm_offset_t)bp->b_saveaddr, bp->b_pages, pidx);
 3717         
 3718         kva = bp->b_saveaddr;
 3719         bp->b_npages = pidx;
 3720         bp->b_saveaddr = bp->b_data;
 3721         bp->b_data = kva + (((vm_offset_t) bp->b_data) & PAGE_MASK);
 3722         return(0);
 3723 }
 3724 
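/*
 * A minimal sketch of the vmapbuf()/vunmapbuf() pairing used for raw I/O
 * on a user buffer (the physio() pattern).  The vmapbuf() return value
 * must be checked: the user pages may be unmappable even if the address
 * range looked valid to the caller.
 */
#if 0
static int
example_user_transfer(struct buf *bp)
{

	/* b_data points at the user address, b_bufsize gives its length */
	if (vmapbuf(bp) < 0)
		return (EFAULT);
	/* ... b_data now points at wired kernel VA; run the transfer ... */
	vunmapbuf(bp);			/* restores the original b_data */
	return (0);
}
#endif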
 3725 /*
 3726  * Free the I/O map PTEs associated with this I/O operation.
 3727  * We also invalidate the TLB entries and restore the original b_data.
 3728  */
 3729 void
 3730 vunmapbuf(struct buf *bp)
 3731 {
 3732         int pidx;
 3733         int npages;
 3734 
 3735         GIANT_REQUIRED;
 3736 
 3737         npages = bp->b_npages;
 3738         pmap_qremove(trunc_page((vm_offset_t)bp->b_data),
 3739                      npages);
 3740         vm_page_lock_queues();
 3741         for (pidx = 0; pidx < npages; pidx++)
 3742                 vm_page_unhold(bp->b_pages[pidx]);
 3743         vm_page_unlock_queues();
 3744 
 3745         bp->b_data = bp->b_saveaddr;
 3746 }
 3747 
 3748 void
 3749 bdone(struct buf *bp)
 3750 {
 3751         mtx_lock(&bdonelock);
 3752         bp->b_flags |= B_DONE;
 3753         wakeup(bp);
 3754         mtx_unlock(&bdonelock);
 3755 }
 3756 
 3757 void
 3758 bwait(struct buf *bp, u_char pri, const char *wchan)
 3759 {
 3760         mtx_lock(&bdonelock);
 3761         while ((bp->b_flags & B_DONE) == 0)
 3762                 msleep(bp, &bdonelock, pri, wchan, 0);
 3763         mtx_unlock(&bdonelock);
 3764 }
 3765 
 3766 #include "opt_ddb.h"
 3767 #ifdef DDB
 3768 #include <ddb/ddb.h>
 3769 
 3770 /* DDB command to show buffer data */
 3771 DB_SHOW_COMMAND(buffer, db_show_buffer)
 3772 {
 3773         /* get args */
 3774         struct buf *bp = (struct buf *)addr;
 3775 
 3776         if (!have_addr) {
 3777                 db_printf("usage: show buffer <addr>\n");
 3778                 return;
 3779         }
 3780 
 3781         db_printf("b_flags = 0x%b\n", (u_int)bp->b_flags, PRINT_BUF_FLAGS);
 3782         db_printf(
 3783             "b_error = %d, b_bufsize = %ld, b_bcount = %ld, b_resid = %ld\n"
 3784             "b_dev = (%d,%d), b_data = %p, b_blkno = %jd\n",
 3785             bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
 3786             major(bp->b_dev), minor(bp->b_dev), bp->b_data,
 3787             (intmax_t)bp->b_blkno);
 3788         if (bp->b_npages) {
 3789                 int i;
 3790                 db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
 3791                 for (i = 0; i < bp->b_npages; i++) {
 3792                         vm_page_t m;
 3793                         m = bp->b_pages[i];
 3794                         db_printf("(%p, 0x%lx, 0x%lx)", (void *)m->object,
 3795                             (u_long)m->pindex, (u_long)VM_PAGE_TO_PHYS(m));
 3796                         if ((i + 1) < bp->b_npages)
 3797                                 db_printf(",");
 3798                 }
 3799                 db_printf("\n");
 3800         }
 3801 }
 3802 #endif /* DDB */
