The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/kern/vfs_bio.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * Copyright (c) 2004 Poul-Henning Kamp
    3  * Copyright (c) 1994,1997 John S. Dyson
    4  * Copyright (c) 2013 The FreeBSD Foundation
    5  * All rights reserved.
    6  *
    7  * Portions of this software were developed by Konstantin Belousov
    8  * under sponsorship from the FreeBSD Foundation.
    9  *
   10  * Redistribution and use in source and binary forms, with or without
   11  * modification, are permitted provided that the following conditions
   12  * are met:
   13  * 1. Redistributions of source code must retain the above copyright
   14  *    notice, this list of conditions and the following disclaimer.
   15  * 2. Redistributions in binary form must reproduce the above copyright
   16  *    notice, this list of conditions and the following disclaimer in the
   17  *    documentation and/or other materials provided with the distribution.
   18  *
   19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   29  * SUCH DAMAGE.
   30  */
   31 
   32 /*
   33  * this file contains a new buffer I/O scheme implementing a coherent
   34  * VM object and buffer cache scheme.  Pains have been taken to make
   35  * sure that the performance degradation associated with schemes such
   36  * as this is not realized.
   37  *
   38  * Author:  John S. Dyson
   39  * Significant help during the development and debugging phases
   40  * had been provided by David Greenman, also of the FreeBSD core team.
   41  *
   42  * see man buf(9) for more info.
   43  */
   44 
   45 #include <sys/cdefs.h>
   46 __FBSDID("$FreeBSD: releng/10.3/sys/kern/vfs_bio.c 286397 2015-08-07 04:33:06Z kib $");
   47 
   48 #include <sys/param.h>
   49 #include <sys/systm.h>
   50 #include <sys/bio.h>
   51 #include <sys/conf.h>
   52 #include <sys/buf.h>
   53 #include <sys/devicestat.h>
   54 #include <sys/eventhandler.h>
   55 #include <sys/fail.h>
   56 #include <sys/limits.h>
   57 #include <sys/lock.h>
   58 #include <sys/malloc.h>
   59 #include <sys/mount.h>
   60 #include <sys/mutex.h>
   61 #include <sys/kernel.h>
   62 #include <sys/kthread.h>
   63 #include <sys/proc.h>
   64 #include <sys/resourcevar.h>
   65 #include <sys/rwlock.h>
   66 #include <sys/sysctl.h>
   67 #include <sys/vmem.h>
   68 #include <sys/vmmeter.h>
   69 #include <sys/vnode.h>
   70 #include <geom/geom.h>
   71 #include <vm/vm.h>
   72 #include <vm/vm_param.h>
   73 #include <vm/vm_kern.h>
   74 #include <vm/vm_pageout.h>
   75 #include <vm/vm_page.h>
   76 #include <vm/vm_object.h>
   77 #include <vm/vm_extern.h>
   78 #include <vm/vm_map.h>
   79 #include "opt_compat.h"
   80 #include "opt_swap.h"
   81 
   82 static MALLOC_DEFINE(M_BIOBUF, "biobuf", "BIO buffer");
   83 
   84 struct  bio_ops bioops;         /* I/O operation notification */
   85 
   86 struct  buf_ops buf_ops_bio = {
   87         .bop_name       =       "buf_ops_bio",
   88         .bop_write      =       bufwrite,
   89         .bop_strategy   =       bufstrategy,
   90         .bop_sync       =       bufsync,
   91         .bop_bdflush    =       bufbdflush,
   92 };
   93 
   94 /*
   95  * XXX buf is global because kern_shutdown.c and ffs_checkoverlap has
   96  * carnal knowledge of buffers.  This knowledge should be moved to vfs_bio.c.
   97  */
   98 struct buf *buf;                /* buffer header pool */
   99 caddr_t unmapped_buf;
  100 
  101 /* Used below and for softdep flushing threads in ufs/ffs/ffs_softdep.c */
  102 struct proc *bufdaemonproc;
  103 
  104 static int inmem(struct vnode *vp, daddr_t blkno);
  105 static void vm_hold_free_pages(struct buf *bp, int newbsize);
  106 static void vm_hold_load_pages(struct buf *bp, vm_offset_t from,
  107                 vm_offset_t to);
  108 static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m);
  109 static void vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off,
  110                 vm_page_t m);
  111 static void vfs_clean_pages_dirty_buf(struct buf *bp);
  112 static void vfs_setdirty_locked_object(struct buf *bp);
  113 static void vfs_vmio_release(struct buf *bp);
  114 static int vfs_bio_clcheck(struct vnode *vp, int size,
  115                 daddr_t lblkno, daddr_t blkno);
  116 static int buf_flush(struct vnode *vp, int);
  117 static int flushbufqueues(struct vnode *, int, int);
  118 static void buf_daemon(void);
  119 static void bremfreel(struct buf *bp);
  120 static __inline void bd_wakeup(void);
  121 #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
  122     defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
  123 static int sysctl_bufspace(SYSCTL_HANDLER_ARGS);
  124 #endif
  125 
  126 int vmiodirenable = TRUE;
  127 SYSCTL_INT(_vfs, OID_AUTO, vmiodirenable, CTLFLAG_RW, &vmiodirenable, 0,
  128     "Use the VM system for directory writes");
  129 long runningbufspace;
  130 SYSCTL_LONG(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD, &runningbufspace, 0,
  131     "Amount of presently outstanding async buffer io");
  132 static long bufspace;
  133 #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
  134     defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
  135 SYSCTL_PROC(_vfs, OID_AUTO, bufspace, CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RD,
  136     &bufspace, 0, sysctl_bufspace, "L", "Virtual memory used for buffers");
  137 #else
  138 SYSCTL_LONG(_vfs, OID_AUTO, bufspace, CTLFLAG_RD, &bufspace, 0,
  139     "Virtual memory used for buffers");
  140 #endif
  141 static long unmapped_bufspace;
  142 SYSCTL_LONG(_vfs, OID_AUTO, unmapped_bufspace, CTLFLAG_RD,
  143     &unmapped_bufspace, 0,
  144     "Amount of unmapped buffers, inclusive in the bufspace");
  145 static long maxbufspace;
  146 SYSCTL_LONG(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RD, &maxbufspace, 0,
  147     "Maximum allowed value of bufspace (including buf_daemon)");
  148 static long bufmallocspace;
  149 SYSCTL_LONG(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD, &bufmallocspace, 0,
  150     "Amount of malloced memory for buffers");
  151 static long maxbufmallocspace;
  152 SYSCTL_LONG(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW, &maxbufmallocspace, 0,
  153     "Maximum amount of malloced memory for buffers");
  154 static long lobufspace;
  155 SYSCTL_LONG(_vfs, OID_AUTO, lobufspace, CTLFLAG_RD, &lobufspace, 0,
  156     "Minimum amount of buffers we want to have");
  157 long hibufspace;
  158 SYSCTL_LONG(_vfs, OID_AUTO, hibufspace, CTLFLAG_RD, &hibufspace, 0,
  159     "Maximum allowed value of bufspace (excluding buf_daemon)");
  160 static int bufreusecnt;
  161 SYSCTL_INT(_vfs, OID_AUTO, bufreusecnt, CTLFLAG_RW, &bufreusecnt, 0,
  162     "Number of times we have reused a buffer");
  163 static int buffreekvacnt;
  164 SYSCTL_INT(_vfs, OID_AUTO, buffreekvacnt, CTLFLAG_RW, &buffreekvacnt, 0,
  165     "Number of times we have freed the KVA space from some buffer");
  166 static int bufdefragcnt;
  167 SYSCTL_INT(_vfs, OID_AUTO, bufdefragcnt, CTLFLAG_RW, &bufdefragcnt, 0,
  168     "Number of times we have had to repeat buffer allocation to defragment");
  169 static long lorunningspace;
  170 SYSCTL_LONG(_vfs, OID_AUTO, lorunningspace, CTLFLAG_RW, &lorunningspace, 0,
  171     "Minimum preferred space used for in-progress I/O");
  172 static long hirunningspace;
  173 SYSCTL_LONG(_vfs, OID_AUTO, hirunningspace, CTLFLAG_RW, &hirunningspace, 0,
  174     "Maximum amount of space to use for in-progress I/O");
  175 int dirtybufferflushes;
  176 SYSCTL_INT(_vfs, OID_AUTO, dirtybufferflushes, CTLFLAG_RW, &dirtybufferflushes,
  177     0, "Number of bdwrite to bawrite conversions to limit dirty buffers");
  178 int bdwriteskip;
  179 SYSCTL_INT(_vfs, OID_AUTO, bdwriteskip, CTLFLAG_RW, &bdwriteskip,
  180     0, "Number of buffers supplied to bdwrite with snapshot deadlock risk");
  181 int altbufferflushes;
  182 SYSCTL_INT(_vfs, OID_AUTO, altbufferflushes, CTLFLAG_RW, &altbufferflushes,
  183     0, "Number of fsync flushes to limit dirty buffers");
  184 static int recursiveflushes;
  185 SYSCTL_INT(_vfs, OID_AUTO, recursiveflushes, CTLFLAG_RW, &recursiveflushes,
  186     0, "Number of flushes skipped due to being recursive");
  187 static int numdirtybuffers;
  188 SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD, &numdirtybuffers, 0,
  189     "Number of buffers that are dirty (has unwritten changes) at the moment");
  190 static int lodirtybuffers;
  191 SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW, &lodirtybuffers, 0,
  192     "How many buffers we want to have free before bufdaemon can sleep");
  193 static int hidirtybuffers;
  194 SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW, &hidirtybuffers, 0,
  195     "When the number of dirty buffers is considered severe");
  196 int dirtybufthresh;
  197 SYSCTL_INT(_vfs, OID_AUTO, dirtybufthresh, CTLFLAG_RW, &dirtybufthresh,
  198     0, "Number of bdwrite to bawrite conversions to clear dirty buffers");
  199 static int numfreebuffers;
  200 SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD, &numfreebuffers, 0,
  201     "Number of free buffers");
  202 static int lofreebuffers;
  203 SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW, &lofreebuffers, 0,
  204    "XXX Unused");
  205 static int hifreebuffers;
  206 SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW, &hifreebuffers, 0,
  207    "XXX Complicatedly unused");
  208 static int getnewbufcalls;
  209 SYSCTL_INT(_vfs, OID_AUTO, getnewbufcalls, CTLFLAG_RW, &getnewbufcalls, 0,
  210    "Number of calls to getnewbuf");
  211 static int getnewbufrestarts;
  212 SYSCTL_INT(_vfs, OID_AUTO, getnewbufrestarts, CTLFLAG_RW, &getnewbufrestarts, 0,
  213     "Number of times getnewbuf has had to restart a buffer aquisition");
  214 static int mappingrestarts;
  215 SYSCTL_INT(_vfs, OID_AUTO, mappingrestarts, CTLFLAG_RW, &mappingrestarts, 0,
  216     "Number of times getblk has had to restart a buffer mapping for "
  217     "unmapped buffer");
  218 static int flushbufqtarget = 100;
  219 SYSCTL_INT(_vfs, OID_AUTO, flushbufqtarget, CTLFLAG_RW, &flushbufqtarget, 0,
  220     "Amount of work to do in flushbufqueues when helping bufdaemon");
  221 static long notbufdflushes;
  222 SYSCTL_LONG(_vfs, OID_AUTO, notbufdflushes, CTLFLAG_RD, &notbufdflushes, 0,
  223     "Number of dirty buffer flushes done by the bufdaemon helpers");
  224 static long barrierwrites;
  225 SYSCTL_LONG(_vfs, OID_AUTO, barrierwrites, CTLFLAG_RW, &barrierwrites, 0,
  226     "Number of barrier writes");
  227 SYSCTL_INT(_vfs, OID_AUTO, unmapped_buf_allowed, CTLFLAG_RD,
  228     &unmapped_buf_allowed, 0,
  229     "Permit the use of the unmapped i/o");
  230 
  231 /*
  232  * Lock for the non-dirty bufqueues
  233  */
  234 static struct mtx_padalign bqclean;
  235 
  236 /*
  237  * Lock for the dirty queue.
  238  */
  239 static struct mtx_padalign bqdirty;
  240 
  241 /*
  242  * This lock synchronizes access to bd_request.
  243  */
  244 static struct mtx_padalign bdlock;
  245 
  246 /*
  247  * This lock protects the runningbufreq and synchronizes runningbufwakeup and
  248  * waitrunningbufspace().
  249  */
  250 static struct mtx_padalign rbreqlock;
  251 
  252 /*
  253  * Lock that protects needsbuffer and the sleeps/wakeups surrounding it.
  254  */
  255 static struct rwlock_padalign nblock;
  256 
  257 /*
  258  * Lock that protects bdirtywait.
  259  */
  260 static struct mtx_padalign bdirtylock;
  261 
  262 /*
  263  * Wakeup point for bufdaemon, as well as indicator of whether it is already
  264  * active.  Set to 1 when the bufdaemon is already "on" the queue, 0 when it
  265  * is idling.
  266  */
  267 static int bd_request;
  268 
  269 /*
  270  * Request for the buf daemon to write more buffers than is indicated by
  271  * lodirtybuf.  This may be necessary to push out excess dependencies or
  272  * defragment the address space where a simple count of the number of dirty
  273  * buffers is insufficient to characterize the demand for flushing them.
  274  */
  275 static int bd_speedupreq;
  276 
  277 /*
  278  * bogus page -- for I/O to/from partially complete buffers
  279  * this is a temporary solution to the problem, but it is not
  280  * really that bad.  it would be better to split the buffer
  281  * for input in the case of buffers partially already in memory,
  282  * but the code is intricate enough already.
  283  */
  284 vm_page_t bogus_page;
  285 
  286 /*
  287  * Synchronization (sleep/wakeup) variable for active buffer space requests.
  288  * Set when wait starts, cleared prior to wakeup().
  289  * Used in runningbufwakeup() and waitrunningbufspace().
  290  */
  291 static int runningbufreq;
  292 
  293 /* 
  294  * Synchronization (sleep/wakeup) variable for buffer requests.
  295  * Can contain the VFS_BIO_NEED flags defined below; setting/clearing is done
  296  * by and/or.
  297  * Used in numdirtywakeup(), bufspacewakeup(), bufcountadd(), bwillwrite(),
  298  * getnewbuf(), and getblk().
  299  */
  300 static volatile int needsbuffer;
  301 
  302 /*
  303  * Synchronization for bwillwrite() waiters.
  304  */
  305 static int bdirtywait;
  306 
  307 /*
  308  * Definitions for the buffer free lists.
  309  */
  310 #define BUFFER_QUEUES   5       /* number of free buffer queues */
  311 
  312 #define QUEUE_NONE      0       /* on no queue */
  313 #define QUEUE_CLEAN     1       /* non-B_DELWRI buffers */
  314 #define QUEUE_DIRTY     2       /* B_DELWRI buffers */
  315 #define QUEUE_EMPTYKVA  3       /* empty buffer headers w/KVA assignment */
  316 #define QUEUE_EMPTY     4       /* empty buffer headers */
  317 #define QUEUE_SENTINEL  1024    /* not an queue index, but mark for sentinel */
  318 
  319 /* Queues for free buffers with various properties */
  320 static TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES] = { { 0 } };
  321 #ifdef INVARIANTS
  322 static int bq_len[BUFFER_QUEUES];
  323 #endif
  324 
  325 /*
  326  * Single global constant for BUF_WMESG, to avoid getting multiple references.
  327  * buf_wmesg is referred from macros.
  328  */
  329 const char *buf_wmesg = BUF_WMESG;
  330 
  331 #define VFS_BIO_NEED_ANY        0x01    /* any freeable buffer */
  332 #define VFS_BIO_NEED_FREE       0x04    /* wait for free bufs, hi hysteresis */
  333 #define VFS_BIO_NEED_BUFSPACE   0x08    /* wait for buf space, lo hysteresis */
  334 
  335 #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
  336     defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
  337 static int
  338 sysctl_bufspace(SYSCTL_HANDLER_ARGS)
  339 {
  340         long lvalue;
  341         int ivalue;
  342 
  343         if (sizeof(int) == sizeof(long) || req->oldlen >= sizeof(long))
  344                 return (sysctl_handle_long(oidp, arg1, arg2, req));
  345         lvalue = *(long *)arg1;
  346         if (lvalue > INT_MAX)
  347                 /* On overflow, still write out a long to trigger ENOMEM. */
  348                 return (sysctl_handle_long(oidp, &lvalue, 0, req));
  349         ivalue = lvalue;
  350         return (sysctl_handle_int(oidp, &ivalue, 0, req));
  351 }
  352 #endif
  353 
  354 /*
  355  *      bqlock:
  356  *
  357  *      Return the appropriate queue lock based on the index.
  358  */
  359 static inline struct mtx *
  360 bqlock(int qindex)
  361 {
  362 
  363         if (qindex == QUEUE_DIRTY)
  364                 return (struct mtx *)(&bqdirty);
  365         return (struct mtx *)(&bqclean);
  366 }
  367 
  368 /*
  369  *      bdirtywakeup:
  370  *
  371  *      Wakeup any bwillwrite() waiters.
  372  */
  373 static void
  374 bdirtywakeup(void)
  375 {
  376         mtx_lock(&bdirtylock);
  377         if (bdirtywait) {
  378                 bdirtywait = 0;
  379                 wakeup(&bdirtywait);
  380         }
  381         mtx_unlock(&bdirtylock);
  382 }
  383 
  384 /*
  385  *      bdirtysub:
  386  *
  387  *      Decrement the numdirtybuffers count by one and wakeup any
  388  *      threads blocked in bwillwrite().
  389  */
  390 static void
  391 bdirtysub(void)
  392 {
  393 
  394         if (atomic_fetchadd_int(&numdirtybuffers, -1) ==
  395             (lodirtybuffers + hidirtybuffers) / 2)
  396                 bdirtywakeup();
  397 }
  398 
  399 /*
  400  *      bdirtyadd:
  401  *
  402  *      Increment the numdirtybuffers count by one and wakeup the buf 
  403  *      daemon if needed.
  404  */
  405 static void
  406 bdirtyadd(void)
  407 {
  408 
  409         /*
  410          * Only do the wakeup once as we cross the boundary.  The
  411          * buf daemon will keep running until the condition clears.
  412          */
  413         if (atomic_fetchadd_int(&numdirtybuffers, 1) ==
  414             (lodirtybuffers + hidirtybuffers) / 2)
  415                 bd_wakeup();
  416 }
  417 
  418 /*
  419  *      bufspacewakeup:
  420  *
  421  *      Called when buffer space is potentially available for recovery.
  422  *      getnewbuf() will block on this flag when it is unable to free 
  423  *      sufficient buffer space.  Buffer space becomes recoverable when 
  424  *      bp's get placed back in the queues.
  425  */
  426 
  427 static __inline void
  428 bufspacewakeup(void)
  429 {
  430         int need_wakeup, on;
  431 
  432         /*
  433          * If someone is waiting for BUF space, wake them up.  Even
  434          * though we haven't freed the kva space yet, the waiting
  435          * process will be able to now.
  436          */
  437         rw_rlock(&nblock);
  438         for (;;) {
  439                 need_wakeup = 0;
  440                 on = needsbuffer;
  441                 if ((on & VFS_BIO_NEED_BUFSPACE) == 0)
  442                         break;
  443                 need_wakeup = 1;
  444                 if (atomic_cmpset_rel_int(&needsbuffer, on,
  445                     on & ~VFS_BIO_NEED_BUFSPACE))
  446                         break;
  447         }
  448         if (need_wakeup)
  449                 wakeup(__DEVOLATILE(void *, &needsbuffer));
  450         rw_runlock(&nblock);
  451 }
  452 
  453 /*
  454  *      runningwakeup:
  455  *
  456  *      Wake up processes that are waiting on asynchronous writes to fall
  457  *      below lorunningspace.
  458  */
  459 static void
  460 runningwakeup(void)
  461 {
  462 
  463         mtx_lock(&rbreqlock);
  464         if (runningbufreq) {
  465                 runningbufreq = 0;
  466                 wakeup(&runningbufreq);
  467         }
  468         mtx_unlock(&rbreqlock);
  469 }
  470 
  471 /*
  472  *      runningbufwakeup:
  473  *
  474  *      Decrement the outstanding write count according.
  475  */
  476 void
  477 runningbufwakeup(struct buf *bp)
  478 {
  479         long space, bspace;
  480 
  481         bspace = bp->b_runningbufspace;
  482         if (bspace == 0)
  483                 return;
  484         space = atomic_fetchadd_long(&runningbufspace, -bspace);
  485         KASSERT(space >= bspace, ("runningbufspace underflow %ld %ld",
  486             space, bspace));
  487         bp->b_runningbufspace = 0;
  488         /*
  489          * Only acquire the lock and wakeup on the transition from exceeding
  490          * the threshold to falling below it.
  491          */
  492         if (space < lorunningspace)
  493                 return;
  494         if (space - bspace > lorunningspace)
  495                 return;
  496         runningwakeup();
  497 }
  498 
  499 /*
  500  *      bufcountadd:
  501  *
  502  *      Called when a buffer has been added to one of the free queues to
  503  *      account for the buffer and to wakeup anyone waiting for free buffers.
  504  *      This typically occurs when large amounts of metadata are being handled
  505  *      by the buffer cache ( else buffer space runs out first, usually ).
  506  */
  507 static __inline void
  508 bufcountadd(struct buf *bp)
  509 {
  510         int mask, need_wakeup, old, on;
  511 
  512         KASSERT((bp->b_flags & B_INFREECNT) == 0,
  513             ("buf %p already counted as free", bp));
  514         bp->b_flags |= B_INFREECNT;
  515         old = atomic_fetchadd_int(&numfreebuffers, 1);
  516         KASSERT(old >= 0 && old < nbuf,
  517             ("numfreebuffers climbed to %d", old + 1));
  518         mask = VFS_BIO_NEED_ANY;
  519         if (numfreebuffers >= hifreebuffers)
  520                 mask |= VFS_BIO_NEED_FREE;
  521         rw_rlock(&nblock);
  522         for (;;) {
  523                 need_wakeup = 0;
  524                 on = needsbuffer;
  525                 if (on == 0)
  526                         break;
  527                 need_wakeup = 1;
  528                 if (atomic_cmpset_rel_int(&needsbuffer, on, on & ~mask))
  529                         break;
  530         }
  531         if (need_wakeup)
  532                 wakeup(__DEVOLATILE(void *, &needsbuffer));
  533         rw_runlock(&nblock);
  534 }
  535 
  536 /*
  537  *      bufcountsub:
  538  *
  539  *      Decrement the numfreebuffers count as needed.
  540  */
  541 static void
  542 bufcountsub(struct buf *bp)
  543 {
  544         int old;
  545 
  546         /*
  547          * Fixup numfreebuffers count.  If the buffer is invalid or not
  548          * delayed-write, the buffer was free and we must decrement
  549          * numfreebuffers.
  550          */
  551         if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0) {
  552                 KASSERT((bp->b_flags & B_INFREECNT) != 0,
  553                     ("buf %p not counted in numfreebuffers", bp));
  554                 bp->b_flags &= ~B_INFREECNT;
  555                 old = atomic_fetchadd_int(&numfreebuffers, -1);
  556                 KASSERT(old > 0, ("numfreebuffers dropped to %d", old - 1));
  557         }
  558 }
  559 
  560 /*
  561  *      waitrunningbufspace()
  562  *
  563  *      runningbufspace is a measure of the amount of I/O currently
  564  *      running.  This routine is used in async-write situations to
  565  *      prevent creating huge backups of pending writes to a device.
  566  *      Only asynchronous writes are governed by this function.
  567  *
  568  *      This does NOT turn an async write into a sync write.  It waits  
  569  *      for earlier writes to complete and generally returns before the
  570  *      caller's write has reached the device.
  571  */
  572 void
  573 waitrunningbufspace(void)
  574 {
  575 
  576         mtx_lock(&rbreqlock);
  577         while (runningbufspace > hirunningspace) {
  578                 runningbufreq = 1;
  579                 msleep(&runningbufreq, &rbreqlock, PVM, "wdrain", 0);
  580         }
  581         mtx_unlock(&rbreqlock);
  582 }
  583 
  584 
  585 /*
  586  *      vfs_buf_test_cache:
  587  *
  588  *      Called when a buffer is extended.  This function clears the B_CACHE
  589  *      bit if the newly extended portion of the buffer does not contain
  590  *      valid data.
  591  */
  592 static __inline
  593 void
  594 vfs_buf_test_cache(struct buf *bp,
  595                   vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
  596                   vm_page_t m)
  597 {
  598 
  599         VM_OBJECT_ASSERT_LOCKED(m->object);
  600         if (bp->b_flags & B_CACHE) {
  601                 int base = (foff + off) & PAGE_MASK;
  602                 if (vm_page_is_valid(m, base, size) == 0)
  603                         bp->b_flags &= ~B_CACHE;
  604         }
  605 }
  606 
  607 /* Wake up the buffer daemon if necessary */
  608 static __inline void
  609 bd_wakeup(void)
  610 {
  611 
  612         mtx_lock(&bdlock);
  613         if (bd_request == 0) {
  614                 bd_request = 1;
  615                 wakeup(&bd_request);
  616         }
  617         mtx_unlock(&bdlock);
  618 }
  619 
  620 /*
  621  * bd_speedup - speedup the buffer cache flushing code
  622  */
  623 void
  624 bd_speedup(void)
  625 {
  626         int needwake;
  627 
  628         mtx_lock(&bdlock);
  629         needwake = 0;
  630         if (bd_speedupreq == 0 || bd_request == 0)
  631                 needwake = 1;
  632         bd_speedupreq = 1;
  633         bd_request = 1;
  634         if (needwake)
  635                 wakeup(&bd_request);
  636         mtx_unlock(&bdlock);
  637 }
  638 
  639 #ifndef NSWBUF_MIN
  640 #define NSWBUF_MIN      16
  641 #endif
  642 
  643 #ifdef __i386__
  644 #define TRANSIENT_DENOM 5
  645 #else
  646 #define TRANSIENT_DENOM 10
  647 #endif
  648 
  649 /*
  650  * Calculating buffer cache scaling values and reserve space for buffer
  651  * headers.  This is called during low level kernel initialization and
  652  * may be called more then once.  We CANNOT write to the memory area
  653  * being reserved at this time.
  654  */
  655 caddr_t
  656 kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est)
  657 {
  658         int tuned_nbuf;
  659         long maxbuf, maxbuf_sz, buf_sz, biotmap_sz;
  660 
  661         /*
  662          * physmem_est is in pages.  Convert it to kilobytes (assumes
  663          * PAGE_SIZE is >= 1K)
  664          */
  665         physmem_est = physmem_est * (PAGE_SIZE / 1024);
  666 
  667         /*
  668          * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
  669          * For the first 64MB of ram nominally allocate sufficient buffers to
  670          * cover 1/4 of our ram.  Beyond the first 64MB allocate additional
  671          * buffers to cover 1/10 of our ram over 64MB.  When auto-sizing
  672          * the buffer cache we limit the eventual kva reservation to
  673          * maxbcache bytes.
  674          *
  675          * factor represents the 1/4 x ram conversion.
  676          */
  677         if (nbuf == 0) {
  678                 int factor = 4 * BKVASIZE / 1024;
  679 
  680                 nbuf = 50;
  681                 if (physmem_est > 4096)
  682                         nbuf += min((physmem_est - 4096) / factor,
  683                             65536 / factor);
  684                 if (physmem_est > 65536)
  685                         nbuf += min((physmem_est - 65536) * 2 / (factor * 5),
  686                             32 * 1024 * 1024 / (factor * 5));
  687 
  688                 if (maxbcache && nbuf > maxbcache / BKVASIZE)
  689                         nbuf = maxbcache / BKVASIZE;
  690                 tuned_nbuf = 1;
  691         } else
  692                 tuned_nbuf = 0;
  693 
  694         /* XXX Avoid unsigned long overflows later on with maxbufspace. */
  695         maxbuf = (LONG_MAX / 3) / BKVASIZE;
  696         if (nbuf > maxbuf) {
  697                 if (!tuned_nbuf)
  698                         printf("Warning: nbufs lowered from %d to %ld\n", nbuf,
  699                             maxbuf);
  700                 nbuf = maxbuf;
  701         }
  702 
  703         /*
  704          * Ideal allocation size for the transient bio submap if 10%
  705          * of the maximal space buffer map.  This roughly corresponds
  706          * to the amount of the buffer mapped for typical UFS load.
  707          *
  708          * Clip the buffer map to reserve space for the transient
  709          * BIOs, if its extent is bigger than 90% (80% on i386) of the
  710          * maximum buffer map extent on the platform.
  711          *
  712          * The fall-back to the maxbuf in case of maxbcache unset,
  713          * allows to not trim the buffer KVA for the architectures
  714          * with ample KVA space.
  715          */
  716         if (bio_transient_maxcnt == 0 && unmapped_buf_allowed) {
  717                 maxbuf_sz = maxbcache != 0 ? maxbcache : maxbuf * BKVASIZE;
  718                 buf_sz = (long)nbuf * BKVASIZE;
  719                 if (buf_sz < maxbuf_sz / TRANSIENT_DENOM *
  720                     (TRANSIENT_DENOM - 1)) {
  721                         /*
  722                          * There is more KVA than memory.  Do not
  723                          * adjust buffer map size, and assign the rest
  724                          * of maxbuf to transient map.
  725                          */
  726                         biotmap_sz = maxbuf_sz - buf_sz;
  727                 } else {
  728                         /*
  729                          * Buffer map spans all KVA we could afford on
  730                          * this platform.  Give 10% (20% on i386) of
  731                          * the buffer map to the transient bio map.
  732                          */
  733                         biotmap_sz = buf_sz / TRANSIENT_DENOM;
  734                         buf_sz -= biotmap_sz;
  735                 }
  736                 if (biotmap_sz / INT_MAX > MAXPHYS)
  737                         bio_transient_maxcnt = INT_MAX;
  738                 else
  739                         bio_transient_maxcnt = biotmap_sz / MAXPHYS;
  740                 /*
  741                  * Artifically limit to 1024 simultaneous in-flight I/Os
  742                  * using the transient mapping.
  743                  */
  744                 if (bio_transient_maxcnt > 1024)
  745                         bio_transient_maxcnt = 1024;
  746                 if (tuned_nbuf)
  747                         nbuf = buf_sz / BKVASIZE;
  748         }
  749 
  750         /*
  751          * swbufs are used as temporary holders for I/O, such as paging I/O.
  752          * We have no less then 16 and no more then 256.
  753          */
  754         nswbuf = min(nbuf / 4, 256);
  755         TUNABLE_INT_FETCH("kern.nswbuf", &nswbuf);
  756         if (nswbuf < NSWBUF_MIN)
  757                 nswbuf = NSWBUF_MIN;
  758 
  759         /*
  760          * Reserve space for the buffer cache buffers
  761          */
  762         swbuf = (void *)v;
  763         v = (caddr_t)(swbuf + nswbuf);
  764         buf = (void *)v;
  765         v = (caddr_t)(buf + nbuf);
  766 
  767         return(v);
  768 }
  769 
  770 /* Initialize the buffer subsystem.  Called before use of any buffers. */
  771 void
  772 bufinit(void)
  773 {
  774         struct buf *bp;
  775         int i;
  776 
  777         CTASSERT(MAXBCACHEBUF >= MAXBSIZE);
  778         mtx_init(&bqclean, "bufq clean lock", NULL, MTX_DEF);
  779         mtx_init(&bqdirty, "bufq dirty lock", NULL, MTX_DEF);
  780         mtx_init(&rbreqlock, "runningbufspace lock", NULL, MTX_DEF);
  781         rw_init(&nblock, "needsbuffer lock");
  782         mtx_init(&bdlock, "buffer daemon lock", NULL, MTX_DEF);
  783         mtx_init(&bdirtylock, "dirty buf lock", NULL, MTX_DEF);
  784 
  785         /* next, make a null set of free lists */
  786         for (i = 0; i < BUFFER_QUEUES; i++)
  787                 TAILQ_INIT(&bufqueues[i]);
  788 
  789         /* finally, initialize each buffer header and stick on empty q */
  790         for (i = 0; i < nbuf; i++) {
  791                 bp = &buf[i];
  792                 bzero(bp, sizeof *bp);
  793                 bp->b_flags = B_INVAL | B_INFREECNT;
  794                 bp->b_rcred = NOCRED;
  795                 bp->b_wcred = NOCRED;
  796                 bp->b_qindex = QUEUE_EMPTY;
  797                 bp->b_xflags = 0;
  798                 LIST_INIT(&bp->b_dep);
  799                 BUF_LOCKINIT(bp);
  800                 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
  801 #ifdef INVARIANTS
  802                 bq_len[QUEUE_EMPTY]++;
  803 #endif
  804         }
  805 
  806         /*
  807          * maxbufspace is the absolute maximum amount of buffer space we are 
  808          * allowed to reserve in KVM and in real terms.  The absolute maximum
  809          * is nominally used by buf_daemon.  hibufspace is the nominal maximum
  810          * used by most other processes.  The differential is required to 
  811          * ensure that buf_daemon is able to run when other processes might 
  812          * be blocked waiting for buffer space.
  813          *
  814          * maxbufspace is based on BKVASIZE.  Allocating buffers larger then
  815          * this may result in KVM fragmentation which is not handled optimally
  816          * by the system.
  817          */
  818         maxbufspace = (long)nbuf * BKVASIZE;
  819         hibufspace = lmax(3 * maxbufspace / 4, maxbufspace - MAXBCACHEBUF * 10);
  820         lobufspace = hibufspace - MAXBCACHEBUF;
  821 
  822         /*
  823          * Note: The 16 MiB upper limit for hirunningspace was chosen
  824          * arbitrarily and may need further tuning. It corresponds to
  825          * 128 outstanding write IO requests (if IO size is 128 KiB),
  826          * which fits with many RAID controllers' tagged queuing limits.
  827          * The lower 1 MiB limit is the historical upper limit for
  828          * hirunningspace.
  829          */
  830         hirunningspace = lmax(lmin(roundup(hibufspace / 64, MAXBCACHEBUF),
  831             16 * 1024 * 1024), 1024 * 1024);
  832         lorunningspace = roundup((hirunningspace * 2) / 3, MAXBCACHEBUF);
  833 
  834 /*
  835  * Limit the amount of malloc memory since it is wired permanently into
  836  * the kernel space.  Even though this is accounted for in the buffer
  837  * allocation, we don't want the malloced region to grow uncontrolled.
  838  * The malloc scheme improves memory utilization significantly on average
  839  * (small) directories.
  840  */
  841         maxbufmallocspace = hibufspace / 20;
  842 
  843 /*
  844  * Reduce the chance of a deadlock occuring by limiting the number
  845  * of delayed-write dirty buffers we allow to stack up.
  846  */
  847         hidirtybuffers = nbuf / 4 + 20;
  848         dirtybufthresh = hidirtybuffers * 9 / 10;
  849         numdirtybuffers = 0;
  850 /*
  851  * To support extreme low-memory systems, make sure hidirtybuffers cannot
  852  * eat up all available buffer space.  This occurs when our minimum cannot
  853  * be met.  We try to size hidirtybuffers to 3/4 our buffer space assuming
  854  * BKVASIZE'd buffers.
  855  */
  856         while ((long)hidirtybuffers * BKVASIZE > 3 * hibufspace / 4) {
  857                 hidirtybuffers >>= 1;
  858         }
  859         lodirtybuffers = hidirtybuffers / 2;
  860 
  861 /*
  862  * Try to keep the number of free buffers in the specified range,
  863  * and give special processes (e.g. like buf_daemon) access to an 
  864  * emergency reserve.
  865  */
  866         lofreebuffers = nbuf / 18 + 5;
  867         hifreebuffers = 2 * lofreebuffers;
  868         numfreebuffers = nbuf;
  869 
  870         bogus_page = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ |
  871             VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
  872         unmapped_buf = (caddr_t)kva_alloc(MAXPHYS);
  873 }
  874 
  875 #ifdef INVARIANTS
  876 static inline void
  877 vfs_buf_check_mapped(struct buf *bp)
  878 {
  879 
  880         KASSERT((bp->b_flags & B_UNMAPPED) == 0,
  881             ("mapped buf %p %x", bp, bp->b_flags));
  882         KASSERT(bp->b_kvabase != unmapped_buf,
  883             ("mapped buf: b_kvabase was not updated %p", bp));
  884         KASSERT(bp->b_data != unmapped_buf,
  885             ("mapped buf: b_data was not updated %p", bp));
  886 }
  887 
  888 static inline void
  889 vfs_buf_check_unmapped(struct buf *bp)
  890 {
  891 
  892         KASSERT((bp->b_flags & B_UNMAPPED) == B_UNMAPPED,
  893             ("unmapped buf %p %x", bp, bp->b_flags));
  894         KASSERT(bp->b_kvabase == unmapped_buf,
  895             ("unmapped buf: corrupted b_kvabase %p", bp));
  896         KASSERT(bp->b_data == unmapped_buf,
  897             ("unmapped buf: corrupted b_data %p", bp));
  898 }
  899 
  900 #define BUF_CHECK_MAPPED(bp) vfs_buf_check_mapped(bp)
  901 #define BUF_CHECK_UNMAPPED(bp) vfs_buf_check_unmapped(bp)
  902 #else
  903 #define BUF_CHECK_MAPPED(bp) do {} while (0)
  904 #define BUF_CHECK_UNMAPPED(bp) do {} while (0)
  905 #endif
  906 
  907 static void
  908 bpmap_qenter(struct buf *bp)
  909 {
  910 
  911         BUF_CHECK_MAPPED(bp);
  912 
  913         /*
  914          * bp->b_data is relative to bp->b_offset, but
  915          * bp->b_offset may be offset into the first page.
  916          */
  917         bp->b_data = (caddr_t)trunc_page((vm_offset_t)bp->b_data);
  918         pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages, bp->b_npages);
  919         bp->b_data = (caddr_t)((vm_offset_t)bp->b_data |
  920             (vm_offset_t)(bp->b_offset & PAGE_MASK));
  921 }
  922 
  923 /*
  924  * bfreekva() - free the kva allocation for a buffer.
  925  *
  926  *      Since this call frees up buffer space, we call bufspacewakeup().
  927  */
  928 static void
  929 bfreekva(struct buf *bp)
  930 {
  931 
  932         if (bp->b_kvasize == 0)
  933                 return;
  934 
  935         atomic_add_int(&buffreekvacnt, 1);
  936         atomic_subtract_long(&bufspace, bp->b_kvasize);
  937         if ((bp->b_flags & B_UNMAPPED) == 0) {
  938                 BUF_CHECK_MAPPED(bp);
  939                 vmem_free(buffer_arena, (vm_offset_t)bp->b_kvabase,
  940                     bp->b_kvasize);
  941         } else {
  942                 BUF_CHECK_UNMAPPED(bp);
  943                 if ((bp->b_flags & B_KVAALLOC) != 0) {
  944                         vmem_free(buffer_arena, (vm_offset_t)bp->b_kvaalloc,
  945                             bp->b_kvasize);
  946                 }
  947                 atomic_subtract_long(&unmapped_bufspace, bp->b_kvasize);
  948                 bp->b_flags &= ~(B_UNMAPPED | B_KVAALLOC);
  949         }
  950         bp->b_kvasize = 0;
  951         bufspacewakeup();
  952 }
  953 
  954 /*
  955  *      binsfree:
  956  *
  957  *      Insert the buffer into the appropriate free list.
  958  */
  959 static void
  960 binsfree(struct buf *bp, int qindex)
  961 {
  962         struct mtx *olock, *nlock;
  963 
  964         BUF_ASSERT_XLOCKED(bp);
  965 
  966         nlock = bqlock(qindex);
  967         /* Handle delayed bremfree() processing. */
  968         if (bp->b_flags & B_REMFREE) {
  969                 olock = bqlock(bp->b_qindex);
  970                 mtx_lock(olock);
  971                 bremfreel(bp);
  972                 if (olock != nlock) {
  973                         mtx_unlock(olock);
  974                         mtx_lock(nlock);
  975                 }
  976         } else
  977                 mtx_lock(nlock);
  978 
  979         if (bp->b_qindex != QUEUE_NONE)
  980                 panic("binsfree: free buffer onto another queue???");
  981 
  982         bp->b_qindex = qindex;
  983         if (bp->b_flags & B_AGE)
  984                 TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist);
  985         else
  986                 TAILQ_INSERT_TAIL(&bufqueues[bp->b_qindex], bp, b_freelist);
  987 #ifdef INVARIANTS
  988         bq_len[bp->b_qindex]++;
  989 #endif
  990         mtx_unlock(nlock);
  991 
  992         /*
  993          * Something we can maybe free or reuse.
  994          */
  995         if (bp->b_bufsize && !(bp->b_flags & B_DELWRI))
  996                 bufspacewakeup();
  997 
  998         if ((bp->b_flags & B_INVAL) || !(bp->b_flags & B_DELWRI))
  999                 bufcountadd(bp);
 1000 }
 1001 
 1002 /*
 1003  *      bremfree:
 1004  *
 1005  *      Mark the buffer for removal from the appropriate free list.
 1006  *      
 1007  */
 1008 void
 1009 bremfree(struct buf *bp)
 1010 {
 1011 
 1012         CTR3(KTR_BUF, "bremfree(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
 1013         KASSERT((bp->b_flags & B_REMFREE) == 0,
 1014             ("bremfree: buffer %p already marked for delayed removal.", bp));
 1015         KASSERT(bp->b_qindex != QUEUE_NONE,
 1016             ("bremfree: buffer %p not on a queue.", bp));
 1017         BUF_ASSERT_XLOCKED(bp);
 1018 
 1019         bp->b_flags |= B_REMFREE;
 1020         bufcountsub(bp);
 1021 }
 1022 
 1023 /*
 1024  *      bremfreef:
 1025  *
 1026  *      Force an immediate removal from a free list.  Used only in nfs when
 1027  *      it abuses the b_freelist pointer.
 1028  */
 1029 void
 1030 bremfreef(struct buf *bp)
 1031 {
 1032         struct mtx *qlock;
 1033 
 1034         qlock = bqlock(bp->b_qindex);
 1035         mtx_lock(qlock);
 1036         bremfreel(bp);
 1037         mtx_unlock(qlock);
 1038 }
 1039 
 1040 /*
 1041  *      bremfreel:
 1042  *
 1043  *      Removes a buffer from the free list, must be called with the
 1044  *      correct qlock held.
 1045  */
 1046 static void
 1047 bremfreel(struct buf *bp)
 1048 {
 1049 
 1050         CTR3(KTR_BUF, "bremfreel(%p) vp %p flags %X",
 1051             bp, bp->b_vp, bp->b_flags);
 1052         KASSERT(bp->b_qindex != QUEUE_NONE,
 1053             ("bremfreel: buffer %p not on a queue.", bp));
 1054         BUF_ASSERT_XLOCKED(bp);
 1055         mtx_assert(bqlock(bp->b_qindex), MA_OWNED);
 1056 
 1057         TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
 1058 #ifdef INVARIANTS
 1059         KASSERT(bq_len[bp->b_qindex] >= 1, ("queue %d underflow",
 1060             bp->b_qindex));
 1061         bq_len[bp->b_qindex]--;
 1062 #endif
 1063         bp->b_qindex = QUEUE_NONE;
 1064         /*
 1065          * If this was a delayed bremfree() we only need to remove the buffer
 1066          * from the queue and return the stats are already done.
 1067          */
 1068         if (bp->b_flags & B_REMFREE) {
 1069                 bp->b_flags &= ~B_REMFREE;
 1070                 return;
 1071         }
 1072         bufcountsub(bp);
 1073 }
 1074 
 1075 /*
 1076  * Attempt to initiate asynchronous I/O on read-ahead blocks.  We must
 1077  * clear BIO_ERROR and B_INVAL prior to initiating I/O . If B_CACHE is set,
 1078  * the buffer is valid and we do not have to do anything.
 1079  */
 1080 void
 1081 breada(struct vnode * vp, daddr_t * rablkno, int * rabsize,
 1082     int cnt, struct ucred * cred)
 1083 {
 1084         struct buf *rabp;
 1085         int i;
 1086 
 1087         for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
 1088                 if (inmem(vp, *rablkno))
 1089                         continue;
 1090                 rabp = getblk(vp, *rablkno, *rabsize, 0, 0, 0);
 1091 
 1092                 if ((rabp->b_flags & B_CACHE) == 0) {
 1093                         if (!TD_IS_IDLETHREAD(curthread))
 1094                                 curthread->td_ru.ru_inblock++;
 1095                         rabp->b_flags |= B_ASYNC;
 1096                         rabp->b_flags &= ~B_INVAL;
 1097                         rabp->b_ioflags &= ~BIO_ERROR;
 1098                         rabp->b_iocmd = BIO_READ;
 1099                         if (rabp->b_rcred == NOCRED && cred != NOCRED)
 1100                                 rabp->b_rcred = crhold(cred);
 1101                         vfs_busy_pages(rabp, 0);
 1102                         BUF_KERNPROC(rabp);
 1103                         rabp->b_iooffset = dbtob(rabp->b_blkno);
 1104                         bstrategy(rabp);
 1105                 } else {
 1106                         brelse(rabp);
 1107                 }
 1108         }
 1109 }
 1110 
 1111 /*
 1112  * Entry point for bread() and breadn() via #defines in sys/buf.h.
 1113  *
 1114  * Get a buffer with the specified data.  Look in the cache first.  We
 1115  * must clear BIO_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE
 1116  * is set, the buffer is valid and we do not have to do anything, see
 1117  * getblk(). Also starts asynchronous I/O on read-ahead blocks.
 1118  */
 1119 int
 1120 breadn_flags(struct vnode *vp, daddr_t blkno, int size, daddr_t *rablkno,
 1121     int *rabsize, int cnt, struct ucred *cred, int flags, struct buf **bpp)
 1122 {
 1123         struct buf *bp;
 1124         int rv = 0, readwait = 0;
 1125 
 1126         CTR3(KTR_BUF, "breadn(%p, %jd, %d)", vp, blkno, size);
 1127         /*
 1128          * Can only return NULL if GB_LOCK_NOWAIT flag is specified.
 1129          */
 1130         *bpp = bp = getblk(vp, blkno, size, 0, 0, flags);
 1131         if (bp == NULL)
 1132                 return (EBUSY);
 1133 
 1134         /* if not found in cache, do some I/O */
 1135         if ((bp->b_flags & B_CACHE) == 0) {
 1136                 if (!TD_IS_IDLETHREAD(curthread))
 1137                         curthread->td_ru.ru_inblock++;
 1138                 bp->b_iocmd = BIO_READ;
 1139                 bp->b_flags &= ~B_INVAL;
 1140                 bp->b_ioflags &= ~BIO_ERROR;
 1141                 if (bp->b_rcred == NOCRED && cred != NOCRED)
 1142                         bp->b_rcred = crhold(cred);
 1143                 vfs_busy_pages(bp, 0);
 1144                 bp->b_iooffset = dbtob(bp->b_blkno);
 1145                 bstrategy(bp);
 1146                 ++readwait;
 1147         }
 1148 
 1149         breada(vp, rablkno, rabsize, cnt, cred);
 1150 
 1151         if (readwait) {
 1152                 rv = bufwait(bp);
 1153         }
 1154         return (rv);
 1155 }
 1156 
 1157 /*
 1158  * Write, release buffer on completion.  (Done by iodone
 1159  * if async).  Do not bother writing anything if the buffer
 1160  * is invalid.
 1161  *
 1162  * Note that we set B_CACHE here, indicating that buffer is
 1163  * fully valid and thus cacheable.  This is true even of NFS
 1164  * now so we set it generally.  This could be set either here 
 1165  * or in biodone() since the I/O is synchronous.  We put it
 1166  * here.
 1167  */
 1168 int
 1169 bufwrite(struct buf *bp)
 1170 {
 1171         int oldflags;
 1172         struct vnode *vp;
 1173         long space;
 1174         int vp_md;
 1175 
 1176         CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
 1177         if ((bp->b_bufobj->bo_flag & BO_DEAD) != 0) {
 1178                 bp->b_flags |= B_INVAL | B_RELBUF;
 1179                 bp->b_flags &= ~B_CACHE;
 1180                 brelse(bp);
 1181                 return (ENXIO);
 1182         }
 1183         if (bp->b_flags & B_INVAL) {
 1184                 brelse(bp);
 1185                 return (0);
 1186         }
 1187 
 1188         if (bp->b_flags & B_BARRIER)
 1189                 barrierwrites++;
 1190 
 1191         oldflags = bp->b_flags;
 1192 
 1193         BUF_ASSERT_HELD(bp);
 1194 
 1195         if (bp->b_pin_count > 0)
 1196                 bunpin_wait(bp);
 1197 
 1198         KASSERT(!(bp->b_vflags & BV_BKGRDINPROG),
 1199             ("FFS background buffer should not get here %p", bp));
 1200 
 1201         vp = bp->b_vp;
 1202         if (vp)
 1203                 vp_md = vp->v_vflag & VV_MD;
 1204         else
 1205                 vp_md = 0;
 1206 
 1207         /*
 1208          * Mark the buffer clean.  Increment the bufobj write count
 1209          * before bundirty() call, to prevent other thread from seeing
 1210          * empty dirty list and zero counter for writes in progress,
 1211          * falsely indicating that the bufobj is clean.
 1212          */
 1213         bufobj_wref(bp->b_bufobj);
 1214         bundirty(bp);
 1215 
 1216         bp->b_flags &= ~B_DONE;
 1217         bp->b_ioflags &= ~BIO_ERROR;
 1218         bp->b_flags |= B_CACHE;
 1219         bp->b_iocmd = BIO_WRITE;
 1220 
 1221         vfs_busy_pages(bp, 1);
 1222 
 1223         /*
 1224          * Normal bwrites pipeline writes
 1225          */
 1226         bp->b_runningbufspace = bp->b_bufsize;
 1227         space = atomic_fetchadd_long(&runningbufspace, bp->b_runningbufspace);
 1228 
 1229         if (!TD_IS_IDLETHREAD(curthread))
 1230                 curthread->td_ru.ru_oublock++;
 1231         if (oldflags & B_ASYNC)
 1232                 BUF_KERNPROC(bp);
 1233         bp->b_iooffset = dbtob(bp->b_blkno);
 1234         bstrategy(bp);
 1235 
 1236         if ((oldflags & B_ASYNC) == 0) {
 1237                 int rtval = bufwait(bp);
 1238                 brelse(bp);
 1239                 return (rtval);
 1240         } else if (space > hirunningspace) {
 1241                 /*
 1242                  * don't allow the async write to saturate the I/O
 1243                  * system.  We will not deadlock here because
 1244                  * we are blocking waiting for I/O that is already in-progress
 1245                  * to complete. We do not block here if it is the update
 1246                  * or syncer daemon trying to clean up as that can lead
 1247                  * to deadlock.
 1248                  */
 1249                 if ((curthread->td_pflags & TDP_NORUNNINGBUF) == 0 && !vp_md)
 1250                         waitrunningbufspace();
 1251         }
 1252 
 1253         return (0);
 1254 }
 1255 
 1256 void
 1257 bufbdflush(struct bufobj *bo, struct buf *bp)
 1258 {
 1259         struct buf *nbp;
 1260 
 1261         if (bo->bo_dirty.bv_cnt > dirtybufthresh + 10) {
 1262                 (void) VOP_FSYNC(bp->b_vp, MNT_NOWAIT, curthread);
 1263                 altbufferflushes++;
 1264         } else if (bo->bo_dirty.bv_cnt > dirtybufthresh) {
 1265                 BO_LOCK(bo);
 1266                 /*
 1267                  * Try to find a buffer to flush.
 1268                  */
 1269                 TAILQ_FOREACH(nbp, &bo->bo_dirty.bv_hd, b_bobufs) {
 1270                         if ((nbp->b_vflags & BV_BKGRDINPROG) ||
 1271                             BUF_LOCK(nbp,
 1272                                      LK_EXCLUSIVE | LK_NOWAIT, NULL))
 1273                                 continue;
 1274                         if (bp == nbp)
 1275                                 panic("bdwrite: found ourselves");
 1276                         BO_UNLOCK(bo);
 1277                         /* Don't countdeps with the bo lock held. */
 1278                         if (buf_countdeps(nbp, 0)) {
 1279                                 BO_LOCK(bo);
 1280                                 BUF_UNLOCK(nbp);
 1281                                 continue;
 1282                         }
 1283                         if (nbp->b_flags & B_CLUSTEROK) {
 1284                                 vfs_bio_awrite(nbp);
 1285                         } else {
 1286                                 bremfree(nbp);
 1287                                 bawrite(nbp);
 1288                         }
 1289                         dirtybufferflushes++;
 1290                         break;
 1291                 }
 1292                 if (nbp == NULL)
 1293                         BO_UNLOCK(bo);
 1294         }
 1295 }
 1296 
 1297 /*
 1298  * Delayed write. (Buffer is marked dirty).  Do not bother writing
 1299  * anything if the buffer is marked invalid.
 1300  *
 1301  * Note that since the buffer must be completely valid, we can safely
 1302  * set B_CACHE.  In fact, we have to set B_CACHE here rather then in
 1303  * biodone() in order to prevent getblk from writing the buffer
 1304  * out synchronously.
 1305  */
 1306 void
 1307 bdwrite(struct buf *bp)
 1308 {
 1309         struct thread *td = curthread;
 1310         struct vnode *vp;
 1311         struct bufobj *bo;
 1312 
 1313         CTR3(KTR_BUF, "bdwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
 1314         KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
 1315         KASSERT((bp->b_flags & B_BARRIER) == 0,
 1316             ("Barrier request in delayed write %p", bp));
 1317         BUF_ASSERT_HELD(bp);
 1318 
 1319         if (bp->b_flags & B_INVAL) {
 1320                 brelse(bp);
 1321                 return;
 1322         }
 1323 
 1324         /*
 1325          * If we have too many dirty buffers, don't create any more.
 1326          * If we are wildly over our limit, then force a complete
 1327          * cleanup. Otherwise, just keep the situation from getting
 1328          * out of control. Note that we have to avoid a recursive
 1329          * disaster and not try to clean up after our own cleanup!
 1330          */
 1331         vp = bp->b_vp;
 1332         bo = bp->b_bufobj;
 1333         if ((td->td_pflags & (TDP_COWINPROGRESS|TDP_INBDFLUSH)) == 0) {
 1334                 td->td_pflags |= TDP_INBDFLUSH;
 1335                 BO_BDFLUSH(bo, bp);
 1336                 td->td_pflags &= ~TDP_INBDFLUSH;
 1337         } else
 1338                 recursiveflushes++;
 1339 
 1340         bdirty(bp);
 1341         /*
 1342          * Set B_CACHE, indicating that the buffer is fully valid.  This is
 1343          * true even of NFS now.
 1344          */
 1345         bp->b_flags |= B_CACHE;
 1346 
 1347         /*
 1348          * This bmap keeps the system from needing to do the bmap later,
 1349          * perhaps when the system is attempting to do a sync.  Since it
 1350          * is likely that the indirect block -- or whatever other datastructure
 1351          * that the filesystem needs is still in memory now, it is a good
 1352          * thing to do this.  Note also, that if the pageout daemon is
 1353          * requesting a sync -- there might not be enough memory to do
 1354          * the bmap then...  So, this is important to do.
 1355          */
 1356         if (vp->v_type != VCHR && bp->b_lblkno == bp->b_blkno) {
 1357                 VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
 1358         }
 1359 
 1360         /*
 1361          * Set the *dirty* buffer range based upon the VM system dirty
 1362          * pages.
 1363          *
 1364          * Mark the buffer pages as clean.  We need to do this here to
 1365          * satisfy the vnode_pager and the pageout daemon, so that it
 1366          * thinks that the pages have been "cleaned".  Note that since
 1367          * the pages are in a delayed write buffer -- the VFS layer
 1368          * "will" see that the pages get written out on the next sync,
 1369          * or perhaps the cluster will be completed.
 1370          */
 1371         vfs_clean_pages_dirty_buf(bp);
 1372         bqrelse(bp);
 1373 
 1374         /*
 1375          * note: we cannot initiate I/O from a bdwrite even if we wanted to,
 1376          * due to the softdep code.
 1377          */
 1378 }
 1379 
 1380 /*
 1381  *      bdirty:
 1382  *
 1383  *      Turn buffer into delayed write request.  We must clear BIO_READ and
 1384  *      B_RELBUF, and we must set B_DELWRI.  We reassign the buffer to 
 1385  *      itself to properly update it in the dirty/clean lists.  We mark it
 1386  *      B_DONE to ensure that any asynchronization of the buffer properly
 1387  *      clears B_DONE ( else a panic will occur later ).  
 1388  *
 1389  *      bdirty() is kinda like bdwrite() - we have to clear B_INVAL which
 1390  *      might have been set pre-getblk().  Unlike bwrite/bdwrite, bdirty()
 1391  *      should only be called if the buffer is known-good.
 1392  *
 1393  *      Since the buffer is not on a queue, we do not update the numfreebuffers
 1394  *      count.
 1395  *
 1396  *      The buffer must be on QUEUE_NONE.
 1397  */
 1398 void
 1399 bdirty(struct buf *bp)
 1400 {
 1401 
 1402         CTR3(KTR_BUF, "bdirty(%p) vp %p flags %X",
 1403             bp, bp->b_vp, bp->b_flags);
 1404         KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
 1405         KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
 1406             ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
 1407         BUF_ASSERT_HELD(bp);
 1408         bp->b_flags &= ~(B_RELBUF);
 1409         bp->b_iocmd = BIO_WRITE;
 1410 
 1411         if ((bp->b_flags & B_DELWRI) == 0) {
 1412                 bp->b_flags |= /* XXX B_DONE | */ B_DELWRI;
 1413                 reassignbuf(bp);
 1414                 bdirtyadd();
 1415         }
 1416 }
 1417 
 1418 /*
 1419  *      bundirty:
 1420  *
 1421  *      Clear B_DELWRI for buffer.
 1422  *
 1423  *      Since the buffer is not on a queue, we do not update the numfreebuffers
 1424  *      count.
 1425  *      
 1426  *      The buffer must be on QUEUE_NONE.
 1427  */
 1428 
 1429 void
 1430 bundirty(struct buf *bp)
 1431 {
 1432 
 1433         CTR3(KTR_BUF, "bundirty(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
 1434         KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
 1435         KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
 1436             ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex));
 1437         BUF_ASSERT_HELD(bp);
 1438 
 1439         if (bp->b_flags & B_DELWRI) {
 1440                 bp->b_flags &= ~B_DELWRI;
 1441                 reassignbuf(bp);
 1442                 bdirtysub();
 1443         }
 1444         /*
 1445          * Since it is now being written, we can clear its deferred write flag.
 1446          */
 1447         bp->b_flags &= ~B_DEFERRED;
 1448 }
 1449 
 1450 /*
 1451  *      bawrite:
 1452  *
 1453  *      Asynchronous write.  Start output on a buffer, but do not wait for
 1454  *      it to complete.  The buffer is released when the output completes.
 1455  *
 1456  *      bwrite() ( or the VOP routine anyway ) is responsible for handling 
 1457  *      B_INVAL buffers.  Not us.
 1458  */
 1459 void
 1460 bawrite(struct buf *bp)
 1461 {
 1462 
 1463         bp->b_flags |= B_ASYNC;
 1464         (void) bwrite(bp);
 1465 }
 1466 
 1467 /*
 1468  *      babarrierwrite:
 1469  *
 1470  *      Asynchronous barrier write.  Start output on a buffer, but do not
 1471  *      wait for it to complete.  Place a write barrier after this write so
 1472  *      that this buffer and all buffers written before it are committed to
 1473  *      the disk before any buffers written after this write are committed
 1474  *      to the disk.  The buffer is released when the output completes.
 1475  */
 1476 void
 1477 babarrierwrite(struct buf *bp)
 1478 {
 1479 
 1480         bp->b_flags |= B_ASYNC | B_BARRIER;
 1481         (void) bwrite(bp);
 1482 }
 1483 
 1484 /*
 1485  *      bbarrierwrite:
 1486  *
 1487  *      Synchronous barrier write.  Start output on a buffer and wait for
 1488  *      it to complete.  Place a write barrier after this write so that
 1489  *      this buffer and all buffers written before it are committed to 
 1490  *      the disk before any buffers written after this write are committed
 1491  *      to the disk.  The buffer is released when the output completes.
 1492  */
 1493 int
 1494 bbarrierwrite(struct buf *bp)
 1495 {
 1496 
 1497         bp->b_flags |= B_BARRIER;
 1498         return (bwrite(bp));
 1499 }
 1500 
 1501 /*
 1502  *      bwillwrite:
 1503  *
 1504  *      Called prior to the locking of any vnodes when we are expecting to
 1505  *      write.  We do not want to starve the buffer cache with too many
 1506  *      dirty buffers so we block here.  By blocking prior to the locking
 1507  *      of any vnodes we attempt to avoid the situation where a locked vnode
 1508  *      prevents the various system daemons from flushing related buffers.
 1509  */
 1510 void
 1511 bwillwrite(void)
 1512 {
 1513 
 1514         if (numdirtybuffers >= hidirtybuffers) {
 1515                 mtx_lock(&bdirtylock);
 1516                 while (numdirtybuffers >= hidirtybuffers) {
 1517                         bdirtywait = 1;
 1518                         msleep(&bdirtywait, &bdirtylock, (PRIBIO + 4),
 1519                             "flswai", 0);
 1520                 }
 1521                 mtx_unlock(&bdirtylock);
 1522         }
 1523 }
 1524 
 1525 /*
 1526  * Return true if we have too many dirty buffers.
 1527  */
 1528 int
 1529 buf_dirty_count_severe(void)
 1530 {
 1531 
 1532         return(numdirtybuffers >= hidirtybuffers);
 1533 }
 1534 
 1535 static __noinline int
 1536 buf_vm_page_count_severe(void)
 1537 {
 1538 
 1539         KFAIL_POINT_CODE(DEBUG_FP, buf_pressure, return 1);
 1540 
 1541         return vm_page_count_severe();
 1542 }
 1543 
 1544 /*
 1545  *      brelse:
 1546  *
 1547  *      Release a busy buffer and, if requested, free its resources.  The
 1548  *      buffer will be stashed in the appropriate bufqueue[] allowing it
 1549  *      to be accessed later as a cache entity or reused for other purposes.
 1550  */
 1551 void
 1552 brelse(struct buf *bp)
 1553 {
 1554         int qindex;
 1555 
 1556         CTR3(KTR_BUF, "brelse(%p) vp %p flags %X",
 1557             bp, bp->b_vp, bp->b_flags);
 1558         KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
 1559             ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
 1560 
 1561         if (BUF_LOCKRECURSED(bp)) {
 1562                 /*
 1563                  * Do not process, in particular, do not handle the
 1564                  * B_INVAL/B_RELBUF and do not release to free list.
 1565                  */
 1566                 BUF_UNLOCK(bp);
 1567                 return;
 1568         }
 1569 
 1570         if (bp->b_flags & B_MANAGED) {
 1571                 bqrelse(bp);
 1572                 return;
 1573         }
 1574 
 1575         if ((bp->b_vflags & (BV_BKGRDINPROG | BV_BKGRDERR)) == BV_BKGRDERR) {
 1576                 BO_LOCK(bp->b_bufobj);
 1577                 bp->b_vflags &= ~BV_BKGRDERR;
 1578                 BO_UNLOCK(bp->b_bufobj);
 1579                 bdirty(bp);
 1580         }
 1581         if (bp->b_iocmd == BIO_WRITE && (bp->b_ioflags & BIO_ERROR) &&
 1582             bp->b_error == EIO && !(bp->b_flags & B_INVAL)) {
 1583                 /*
 1584                  * Failed write, redirty.  Must clear BIO_ERROR to prevent
 1585                  * pages from being scrapped.  If the error is anything
 1586                  * other than an I/O error (EIO), assume that retrying
 1587                  * is futile.
 1588                  */
 1589                 bp->b_ioflags &= ~BIO_ERROR;
 1590                 bdirty(bp);
 1591         } else if ((bp->b_flags & (B_NOCACHE | B_INVAL)) ||
 1592             (bp->b_ioflags & BIO_ERROR) || (bp->b_bufsize <= 0)) {
 1593                 /*
 1594                  * Either a failed I/O or we were asked to free or not
 1595                  * cache the buffer.
 1596                  */
 1597                 bp->b_flags |= B_INVAL;
 1598                 if (!LIST_EMPTY(&bp->b_dep))
 1599                         buf_deallocate(bp);
 1600                 if (bp->b_flags & B_DELWRI)
 1601                         bdirtysub();
 1602                 bp->b_flags &= ~(B_DELWRI | B_CACHE);
 1603                 if ((bp->b_flags & B_VMIO) == 0) {
 1604                         if (bp->b_bufsize)
 1605                                 allocbuf(bp, 0);
 1606                         if (bp->b_vp)
 1607                                 brelvp(bp);
 1608                 }
 1609         }
 1610 
 1611         /*
 1612          * We must clear B_RELBUF if B_DELWRI is set.  If vfs_vmio_release() 
 1613          * is called with B_DELWRI set, the underlying pages may wind up
 1614          * getting freed causing a previous write (bdwrite()) to get 'lost'
 1615          * because pages associated with a B_DELWRI bp are marked clean.
 1616          * 
 1617          * We still allow the B_INVAL case to call vfs_vmio_release(), even
 1618          * if B_DELWRI is set.
 1619          *
 1620          * If B_DELWRI is not set we may have to set B_RELBUF if we are low
 1621          * on pages to return pages to the VM page queues.
 1622          */
 1623         if (bp->b_flags & B_DELWRI)
 1624                 bp->b_flags &= ~B_RELBUF;
 1625         else if (buf_vm_page_count_severe()) {
 1626                 /*
 1627                  * BKGRDINPROG can only be set with the buf and bufobj
 1628                  * locks both held.  We tolerate a race to clear it here.
 1629                  */
 1630                 if (!(bp->b_vflags & BV_BKGRDINPROG))
 1631                         bp->b_flags |= B_RELBUF;
 1632         }
 1633 
 1634         /*
 1635          * VMIO buffer rundown.  It is not very necessary to keep a VMIO buffer
 1636          * constituted, not even NFS buffers now.  Two flags effect this.  If
 1637          * B_INVAL, the struct buf is invalidated but the VM object is kept
 1638          * around ( i.e. so it is trivial to reconstitute the buffer later ).
 1639          *
 1640          * If BIO_ERROR or B_NOCACHE is set, pages in the VM object will be
 1641          * invalidated.  BIO_ERROR cannot be set for a failed write unless the
 1642          * buffer is also B_INVAL because it hits the re-dirtying code above.
 1643          *
 1644          * Normally we can do this whether a buffer is B_DELWRI or not.  If
 1645          * the buffer is an NFS buffer, it is tracking piecemeal writes or
 1646          * the commit state and we cannot afford to lose the buffer. If the
 1647          * buffer has a background write in progress, we need to keep it
 1648          * around to prevent it from being reconstituted and starting a second
 1649          * background write.
 1650          */
 1651         if ((bp->b_flags & B_VMIO)
 1652             && !(bp->b_vp->v_mount != NULL &&
 1653                  (bp->b_vp->v_mount->mnt_vfc->vfc_flags & VFCF_NETWORK) != 0 &&
 1654                  !vn_isdisk(bp->b_vp, NULL) &&
 1655                  (bp->b_flags & B_DELWRI))
 1656             ) {
 1657 
 1658                 int i, j, resid;
 1659                 vm_page_t m;
 1660                 off_t foff;
 1661                 vm_pindex_t poff;
 1662                 vm_object_t obj;
 1663 
 1664                 obj = bp->b_bufobj->bo_object;
 1665 
 1666                 /*
 1667                  * Get the base offset and length of the buffer.  Note that 
 1668                  * in the VMIO case if the buffer block size is not
 1669                  * page-aligned then b_data pointer may not be page-aligned.
 1670                  * But our b_pages[] array *IS* page aligned.
 1671                  *
 1672                  * block sizes less then DEV_BSIZE (usually 512) are not 
 1673                  * supported due to the page granularity bits (m->valid,
 1674                  * m->dirty, etc...). 
 1675                  *
 1676                  * See man buf(9) for more information
 1677                  */
 1678                 resid = bp->b_bufsize;
 1679                 foff = bp->b_offset;
 1680                 for (i = 0; i < bp->b_npages; i++) {
 1681                         int had_bogus = 0;
 1682 
 1683                         m = bp->b_pages[i];
 1684 
 1685                         /*
 1686                          * If we hit a bogus page, fixup *all* the bogus pages
 1687                          * now.
 1688                          */
 1689                         if (m == bogus_page) {
 1690                                 poff = OFF_TO_IDX(bp->b_offset);
 1691                                 had_bogus = 1;
 1692 
 1693                                 VM_OBJECT_RLOCK(obj);
 1694                                 for (j = i; j < bp->b_npages; j++) {
 1695                                         vm_page_t mtmp;
 1696                                         mtmp = bp->b_pages[j];
 1697                                         if (mtmp == bogus_page) {
 1698                                                 mtmp = vm_page_lookup(obj, poff + j);
 1699                                                 if (!mtmp) {
 1700                                                         panic("brelse: page missing\n");
 1701                                                 }
 1702                                                 bp->b_pages[j] = mtmp;
 1703                                         }
 1704                                 }
 1705                                 VM_OBJECT_RUNLOCK(obj);
 1706 
 1707                                 if ((bp->b_flags & (B_INVAL | B_UNMAPPED)) == 0) {
 1708                                         BUF_CHECK_MAPPED(bp);
 1709                                         pmap_qenter(
 1710                                             trunc_page((vm_offset_t)bp->b_data),
 1711                                             bp->b_pages, bp->b_npages);
 1712                                 }
 1713                                 m = bp->b_pages[i];
 1714                         }
 1715                         if ((bp->b_flags & B_NOCACHE) ||
 1716                             (bp->b_ioflags & BIO_ERROR &&
 1717                              bp->b_iocmd == BIO_READ)) {
 1718                                 int poffset = foff & PAGE_MASK;
 1719                                 int presid = resid > (PAGE_SIZE - poffset) ?
 1720                                         (PAGE_SIZE - poffset) : resid;
 1721 
 1722                                 KASSERT(presid >= 0, ("brelse: extra page"));
 1723                                 VM_OBJECT_WLOCK(obj);
 1724                                 while (vm_page_xbusied(m)) {
 1725                                         vm_page_lock(m);
 1726                                         VM_OBJECT_WUNLOCK(obj);
 1727                                         vm_page_busy_sleep(m, "mbncsh");
 1728                                         VM_OBJECT_WLOCK(obj);
 1729                                 }
 1730                                 if (pmap_page_wired_mappings(m) == 0)
 1731                                         vm_page_set_invalid(m, poffset, presid);
 1732                                 VM_OBJECT_WUNLOCK(obj);
 1733                                 if (had_bogus)
 1734                                         printf("avoided corruption bug in bogus_page/brelse code\n");
 1735                         }
 1736                         resid -= PAGE_SIZE - (foff & PAGE_MASK);
 1737                         foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
 1738                 }
 1739                 if (bp->b_flags & (B_INVAL | B_RELBUF))
 1740                         vfs_vmio_release(bp);
 1741 
 1742         } else if (bp->b_flags & B_VMIO) {
 1743 
 1744                 if (bp->b_flags & (B_INVAL | B_RELBUF)) {
 1745                         vfs_vmio_release(bp);
 1746                 }
 1747 
 1748         } else if ((bp->b_flags & (B_INVAL | B_RELBUF)) != 0) {
 1749                 if (bp->b_bufsize != 0)
 1750                         allocbuf(bp, 0);
 1751                 if (bp->b_vp != NULL)
 1752                         brelvp(bp);
 1753         }
 1754                         
 1755         /*
 1756          * If the buffer has junk contents signal it and eventually
 1757          * clean up B_DELWRI and diassociate the vnode so that gbincore()
 1758          * doesn't find it.
 1759          */
 1760         if (bp->b_bufsize == 0 || (bp->b_ioflags & BIO_ERROR) != 0 ||
 1761             (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF)) != 0)
 1762                 bp->b_flags |= B_INVAL;
 1763         if (bp->b_flags & B_INVAL) {
 1764                 if (bp->b_flags & B_DELWRI)
 1765                         bundirty(bp);
 1766                 if (bp->b_vp)
 1767                         brelvp(bp);
 1768         }
 1769 
 1770         /* buffers with no memory */
 1771         if (bp->b_bufsize == 0) {
 1772                 bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA);
 1773                 if (bp->b_vflags & BV_BKGRDINPROG)
 1774                         panic("losing buffer 1");
 1775                 if (bp->b_kvasize)
 1776                         qindex = QUEUE_EMPTYKVA;
 1777                 else
 1778                         qindex = QUEUE_EMPTY;
 1779                 bp->b_flags |= B_AGE;
 1780         /* buffers with junk contents */
 1781         } else if (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF) ||
 1782             (bp->b_ioflags & BIO_ERROR)) {
 1783                 bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA);
 1784                 if (bp->b_vflags & BV_BKGRDINPROG)
 1785                         panic("losing buffer 2");
 1786                 qindex = QUEUE_CLEAN;
 1787                 bp->b_flags |= B_AGE;
 1788         /* remaining buffers */
 1789         } else if (bp->b_flags & B_DELWRI)
 1790                 qindex = QUEUE_DIRTY;
 1791         else
 1792                 qindex = QUEUE_CLEAN;
 1793 
 1794         binsfree(bp, qindex);
 1795 
 1796         bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF | B_DIRECT);
 1797         if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
 1798                 panic("brelse: not dirty");
 1799         /* unlock */
 1800         BUF_UNLOCK(bp);
 1801 }
 1802 
 1803 /*
 1804  * Release a buffer back to the appropriate queue but do not try to free
 1805  * it.  The buffer is expected to be used again soon.
 1806  *
 1807  * bqrelse() is used by bdwrite() to requeue a delayed write, and used by
 1808  * biodone() to requeue an async I/O on completion.  It is also used when
 1809  * known good buffers need to be requeued but we think we may need the data
 1810  * again soon.
 1811  *
 1812  * XXX we should be able to leave the B_RELBUF hint set on completion.
 1813  */
 1814 void
 1815 bqrelse(struct buf *bp)
 1816 {
 1817         int qindex;
 1818 
 1819         CTR3(KTR_BUF, "bqrelse(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
 1820         KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
 1821             ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
 1822 
 1823         if (BUF_LOCKRECURSED(bp)) {
 1824                 /* do not release to free list */
 1825                 BUF_UNLOCK(bp);
 1826                 return;
 1827         }
 1828         bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
 1829 
 1830         if (bp->b_flags & B_MANAGED) {
 1831                 if (bp->b_flags & B_REMFREE)
 1832                         bremfreef(bp);
 1833                 goto out;
 1834         }
 1835 
 1836         /* buffers with stale but valid contents */
 1837         if ((bp->b_flags & B_DELWRI) != 0 || (bp->b_vflags & (BV_BKGRDINPROG |
 1838             BV_BKGRDERR)) == BV_BKGRDERR) {
 1839                 BO_LOCK(bp->b_bufobj);
 1840                 bp->b_vflags &= ~BV_BKGRDERR;
 1841                 BO_UNLOCK(bp->b_bufobj);
 1842                 qindex = QUEUE_DIRTY;
 1843         } else {
 1844                 if ((bp->b_flags & B_DELWRI) == 0 &&
 1845                     (bp->b_xflags & BX_VNDIRTY))
 1846                         panic("bqrelse: not dirty");
 1847                 /*
 1848                  * BKGRDINPROG can only be set with the buf and bufobj
 1849                  * locks both held.  We tolerate a race to clear it here.
 1850                  */
 1851                 if (buf_vm_page_count_severe() &&
 1852                     (bp->b_vflags & BV_BKGRDINPROG) == 0) {
 1853                         /*
 1854                          * We are too low on memory, we have to try to free
 1855                          * the buffer (most importantly: the wired pages
 1856                          * making up its backing store) *now*.
 1857                          */
 1858                         brelse(bp);
 1859                         return;
 1860                 }
 1861                 qindex = QUEUE_CLEAN;
 1862         }
 1863         binsfree(bp, qindex);
 1864 
 1865 out:
 1866         /* unlock */
 1867         BUF_UNLOCK(bp);
 1868 }
 1869 
 1870 /* Give pages used by the bp back to the VM system (where possible) */
 1871 static void
 1872 vfs_vmio_release(struct buf *bp)
 1873 {
 1874         vm_object_t obj;
 1875         vm_page_t m;
 1876         int i;
 1877 
 1878         if ((bp->b_flags & B_UNMAPPED) == 0) {
 1879                 BUF_CHECK_MAPPED(bp);
 1880                 pmap_qremove(trunc_page((vm_offset_t)bp->b_data), bp->b_npages);
 1881         } else
 1882                 BUF_CHECK_UNMAPPED(bp);
 1883         obj = bp->b_bufobj->bo_object;
 1884         if (obj != NULL)
 1885                 VM_OBJECT_WLOCK(obj);
 1886         for (i = 0; i < bp->b_npages; i++) {
 1887                 m = bp->b_pages[i];
 1888                 bp->b_pages[i] = NULL;
 1889                 /*
 1890                  * In order to keep page LRU ordering consistent, put
 1891                  * everything on the inactive queue.
 1892                  */
 1893                 vm_page_lock(m);
 1894                 vm_page_unwire(m, 0);
 1895 
 1896                 /*
 1897                  * Might as well free the page if we can and it has
 1898                  * no valid data.  We also free the page if the
 1899                  * buffer was used for direct I/O
 1900                  */
 1901                 if ((bp->b_flags & B_ASYNC) == 0 && !m->valid) {
 1902                         if (m->wire_count == 0 && !vm_page_busied(m))
 1903                                 vm_page_free(m);
 1904                 } else if (bp->b_flags & B_DIRECT)
 1905                         vm_page_try_to_free(m);
 1906                 else if (buf_vm_page_count_severe())
 1907                         vm_page_try_to_cache(m);
 1908                 vm_page_unlock(m);
 1909         }
 1910         if (obj != NULL)
 1911                 VM_OBJECT_WUNLOCK(obj);
 1912         
 1913         if (bp->b_bufsize) {
 1914                 bufspacewakeup();
 1915                 bp->b_bufsize = 0;
 1916         }
 1917         bp->b_npages = 0;
 1918         bp->b_flags &= ~B_VMIO;
 1919         if (bp->b_vp)
 1920                 brelvp(bp);
 1921 }
 1922 
 1923 /*
 1924  * Check to see if a block at a particular lbn is available for a clustered
 1925  * write.
 1926  */
 1927 static int
 1928 vfs_bio_clcheck(struct vnode *vp, int size, daddr_t lblkno, daddr_t blkno)
 1929 {
 1930         struct buf *bpa;
 1931         int match;
 1932 
 1933         match = 0;
 1934 
 1935         /* If the buf isn't in core skip it */
 1936         if ((bpa = gbincore(&vp->v_bufobj, lblkno)) == NULL)
 1937                 return (0);
 1938 
 1939         /* If the buf is busy we don't want to wait for it */
 1940         if (BUF_LOCK(bpa, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
 1941                 return (0);
 1942 
 1943         /* Only cluster with valid clusterable delayed write buffers */
 1944         if ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) !=
 1945             (B_DELWRI | B_CLUSTEROK))
 1946                 goto done;
 1947 
 1948         if (bpa->b_bufsize != size)
 1949                 goto done;
 1950 
 1951         /*
 1952          * Check to see if it is in the expected place on disk and that the
 1953          * block has been mapped.
 1954          */
 1955         if ((bpa->b_blkno != bpa->b_lblkno) && (bpa->b_blkno == blkno))
 1956                 match = 1;
 1957 done:
 1958         BUF_UNLOCK(bpa);
 1959         return (match);
 1960 }
 1961 
 1962 /*
 1963  *      vfs_bio_awrite:
 1964  *
 1965  *      Implement clustered async writes for clearing out B_DELWRI buffers.
 1966  *      This is much better then the old way of writing only one buffer at
 1967  *      a time.  Note that we may not be presented with the buffers in the 
 1968  *      correct order, so we search for the cluster in both directions.
 1969  */
 1970 int
 1971 vfs_bio_awrite(struct buf *bp)
 1972 {
 1973         struct bufobj *bo;
 1974         int i;
 1975         int j;
 1976         daddr_t lblkno = bp->b_lblkno;
 1977         struct vnode *vp = bp->b_vp;
 1978         int ncl;
 1979         int nwritten;
 1980         int size;
 1981         int maxcl;
 1982         int gbflags;
 1983 
 1984         bo = &vp->v_bufobj;
 1985         gbflags = (bp->b_flags & B_UNMAPPED) != 0 ? GB_UNMAPPED : 0;
 1986         /*
 1987          * right now we support clustered writing only to regular files.  If
 1988          * we find a clusterable block we could be in the middle of a cluster
 1989          * rather then at the beginning.
 1990          */
 1991         if ((vp->v_type == VREG) && 
 1992             (vp->v_mount != 0) && /* Only on nodes that have the size info */
 1993             (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
 1994 
 1995                 size = vp->v_mount->mnt_stat.f_iosize;
 1996                 maxcl = MAXPHYS / size;
 1997 
 1998                 BO_RLOCK(bo);
 1999                 for (i = 1; i < maxcl; i++)
 2000                         if (vfs_bio_clcheck(vp, size, lblkno + i,
 2001                             bp->b_blkno + ((i * size) >> DEV_BSHIFT)) == 0)
 2002                                 break;
 2003 
 2004                 for (j = 1; i + j <= maxcl && j <= lblkno; j++) 
 2005                         if (vfs_bio_clcheck(vp, size, lblkno - j,
 2006                             bp->b_blkno - ((j * size) >> DEV_BSHIFT)) == 0)
 2007                                 break;
 2008                 BO_RUNLOCK(bo);
 2009                 --j;
 2010                 ncl = i + j;
 2011                 /*
 2012                  * this is a possible cluster write
 2013                  */
 2014                 if (ncl != 1) {
 2015                         BUF_UNLOCK(bp);
 2016                         nwritten = cluster_wbuild(vp, size, lblkno - j, ncl,
 2017                             gbflags);
 2018                         return (nwritten);
 2019                 }
 2020         }
 2021         bremfree(bp);
 2022         bp->b_flags |= B_ASYNC;
 2023         /*
 2024          * default (old) behavior, writing out only one block
 2025          *
 2026          * XXX returns b_bufsize instead of b_bcount for nwritten?
 2027          */
 2028         nwritten = bp->b_bufsize;
 2029         (void) bwrite(bp);
 2030 
 2031         return (nwritten);
 2032 }
 2033 
 2034 static void
 2035 setbufkva(struct buf *bp, vm_offset_t addr, int maxsize, int gbflags)
 2036 {
 2037 
 2038         KASSERT((bp->b_flags & (B_UNMAPPED | B_KVAALLOC)) == 0 &&
 2039             bp->b_kvasize == 0, ("call bfreekva(%p)", bp));
 2040         if ((gbflags & GB_UNMAPPED) == 0) {
 2041                 bp->b_kvabase = (caddr_t)addr;
 2042         } else if ((gbflags & GB_KVAALLOC) != 0) {
 2043                 KASSERT((gbflags & GB_UNMAPPED) != 0,
 2044                     ("GB_KVAALLOC without GB_UNMAPPED"));
 2045                 bp->b_kvaalloc = (caddr_t)addr;
 2046                 bp->b_flags |= B_UNMAPPED | B_KVAALLOC;
 2047                 atomic_add_long(&unmapped_bufspace, bp->b_kvasize);
 2048         }
 2049         bp->b_kvasize = maxsize;
 2050 }
 2051 
 2052 /*
 2053  * Allocate the buffer KVA and set b_kvasize. Also set b_kvabase if
 2054  * needed.
 2055  */
 2056 static int
 2057 allocbufkva(struct buf *bp, int maxsize, int gbflags)
 2058 {
 2059         vm_offset_t addr;
 2060 
 2061         bfreekva(bp);
 2062         addr = 0;
 2063 
 2064         if (vmem_alloc(buffer_arena, maxsize, M_BESTFIT | M_NOWAIT, &addr)) {
 2065                 /*
 2066                  * Buffer map is too fragmented.  Request the caller
 2067                  * to defragment the map.
 2068                  */
 2069                 atomic_add_int(&bufdefragcnt, 1);
 2070                 return (1);
 2071         }
 2072         setbufkva(bp, addr, maxsize, gbflags);
 2073         atomic_add_long(&bufspace, bp->b_kvasize);
 2074         return (0);
 2075 }
 2076 
 2077 /*
 2078  * Ask the bufdaemon for help, or act as bufdaemon itself, when a
 2079  * locked vnode is supplied.
 2080  */
 2081 static void
 2082 getnewbuf_bufd_help(struct vnode *vp, int gbflags, int slpflag, int slptimeo,
 2083     int defrag)
 2084 {
 2085         struct thread *td;
 2086         char *waitmsg;
 2087         int error, fl, flags, norunbuf;
 2088 
 2089         mtx_assert(&bqclean, MA_OWNED);
 2090 
 2091         if (defrag) {
 2092                 flags = VFS_BIO_NEED_BUFSPACE;
 2093                 waitmsg = "nbufkv";
 2094         } else if (bufspace >= hibufspace) {
 2095                 waitmsg = "nbufbs";
 2096                 flags = VFS_BIO_NEED_BUFSPACE;
 2097         } else {
 2098                 waitmsg = "newbuf";
 2099                 flags = VFS_BIO_NEED_ANY;
 2100         }
 2101         atomic_set_int(&needsbuffer, flags);
 2102         mtx_unlock(&bqclean);
 2103 
 2104         bd_speedup();   /* heeeelp */
 2105         if ((gbflags & GB_NOWAIT_BD) != 0)
 2106                 return;
 2107 
 2108         td = curthread;
 2109         rw_wlock(&nblock);
 2110         while ((needsbuffer & flags) != 0) {
 2111                 if (vp != NULL && vp->v_type != VCHR &&
 2112                     (td->td_pflags & TDP_BUFNEED) == 0) {
 2113                         rw_wunlock(&nblock);
 2114                         /*
 2115                          * getblk() is called with a vnode locked, and
 2116                          * some majority of the dirty buffers may as
 2117                          * well belong to the vnode.  Flushing the
 2118                          * buffers there would make a progress that
 2119                          * cannot be achieved by the buf_daemon, that
 2120                          * cannot lock the vnode.
 2121                          */
 2122                         norunbuf = ~(TDP_BUFNEED | TDP_NORUNNINGBUF) |
 2123                             (td->td_pflags & TDP_NORUNNINGBUF);
 2124 
 2125                         /*
 2126                          * Play bufdaemon.  The getnewbuf() function
 2127                          * may be called while the thread owns lock
 2128                          * for another dirty buffer for the same
 2129                          * vnode, which makes it impossible to use
 2130                          * VOP_FSYNC() there, due to the buffer lock
 2131                          * recursion.
 2132                          */
 2133                         td->td_pflags |= TDP_BUFNEED | TDP_NORUNNINGBUF;
 2134                         fl = buf_flush(vp, flushbufqtarget);
 2135                         td->td_pflags &= norunbuf;
 2136                         rw_wlock(&nblock);
 2137                         if (fl != 0)
 2138                                 continue;
 2139                         if ((needsbuffer & flags) == 0)
 2140                                 break;
 2141                 }
 2142                 error = rw_sleep(__DEVOLATILE(void *, &needsbuffer), &nblock,
 2143                     (PRIBIO + 4) | slpflag, waitmsg, slptimeo);
 2144                 if (error != 0)
 2145                         break;
 2146         }
 2147         rw_wunlock(&nblock);
 2148 }
 2149 
 2150 static void
 2151 getnewbuf_reuse_bp(struct buf *bp, int qindex)
 2152 {
 2153 
 2154         CTR6(KTR_BUF, "getnewbuf(%p) vp %p flags %X kvasize %d bufsize %d "
 2155             "queue %d (recycling)", bp, bp->b_vp, bp->b_flags,
 2156              bp->b_kvasize, bp->b_bufsize, qindex);
 2157         mtx_assert(&bqclean, MA_NOTOWNED);
 2158 
 2159         /*
 2160          * Note: we no longer distinguish between VMIO and non-VMIO
 2161          * buffers.
 2162          */
 2163         KASSERT((bp->b_flags & B_DELWRI) == 0,
 2164             ("delwri buffer %p found in queue %d", bp, qindex));
 2165 
 2166         if (qindex == QUEUE_CLEAN) {
 2167                 if (bp->b_flags & B_VMIO) {
 2168                         bp->b_flags &= ~B_ASYNC;
 2169                         vfs_vmio_release(bp);
 2170                 }
 2171                 if (bp->b_vp != NULL)
 2172                         brelvp(bp);
 2173         }
 2174 
 2175         /*
 2176          * Get the rest of the buffer freed up.  b_kva* is still valid
 2177          * after this operation.
 2178          */
 2179 
 2180         if (bp->b_rcred != NOCRED) {
 2181                 crfree(bp->b_rcred);
 2182                 bp->b_rcred = NOCRED;
 2183         }
 2184         if (bp->b_wcred != NOCRED) {
 2185                 crfree(bp->b_wcred);
 2186                 bp->b_wcred = NOCRED;
 2187         }
 2188         if (!LIST_EMPTY(&bp->b_dep))
 2189                 buf_deallocate(bp);
 2190         if (bp->b_vflags & BV_BKGRDINPROG)
 2191                 panic("losing buffer 3");
 2192         KASSERT(bp->b_vp == NULL, ("bp: %p still has vnode %p.  qindex: %d",
 2193             bp, bp->b_vp, qindex));
 2194         KASSERT((bp->b_xflags & (BX_VNCLEAN|BX_VNDIRTY)) == 0,
 2195             ("bp: %p still on a buffer list. xflags %X", bp, bp->b_xflags));
 2196 
 2197         if (bp->b_bufsize)
 2198                 allocbuf(bp, 0);
 2199 
 2200         bp->b_flags &= B_UNMAPPED | B_KVAALLOC;
 2201         bp->b_ioflags = 0;
 2202         bp->b_xflags = 0;
 2203         KASSERT((bp->b_flags & B_INFREECNT) == 0,
 2204             ("buf %p still counted as free?", bp));
 2205         bp->b_vflags = 0;
 2206         bp->b_vp = NULL;
 2207         bp->b_blkno = bp->b_lblkno = 0;
 2208         bp->b_offset = NOOFFSET;
 2209         bp->b_iodone = 0;
 2210         bp->b_error = 0;
 2211         bp->b_resid = 0;
 2212         bp->b_bcount = 0;
 2213         bp->b_npages = 0;
 2214         bp->b_dirtyoff = bp->b_dirtyend = 0;
 2215         bp->b_bufobj = NULL;
 2216         bp->b_pin_count = 0;
 2217         bp->b_fsprivate1 = NULL;
 2218         bp->b_fsprivate2 = NULL;
 2219         bp->b_fsprivate3 = NULL;
 2220 
 2221         LIST_INIT(&bp->b_dep);
 2222 }
 2223 
 2224 static int flushingbufs;
 2225 
 2226 static struct buf *
 2227 getnewbuf_scan(int maxsize, int defrag, int unmapped, int metadata)
 2228 {
 2229         struct buf *bp, *nbp;
 2230         int nqindex, qindex, pass;
 2231 
 2232         KASSERT(!unmapped || !defrag, ("both unmapped and defrag"));
 2233 
 2234         pass = 1;
 2235 restart:
 2236         atomic_add_int(&getnewbufrestarts, 1);
 2237 
 2238         /*
 2239          * Setup for scan.  If we do not have enough free buffers,
 2240          * we setup a degenerate case that immediately fails.  Note
 2241          * that if we are specially marked process, we are allowed to
 2242          * dip into our reserves.
 2243          *
 2244          * The scanning sequence is nominally: EMPTY->EMPTYKVA->CLEAN
 2245          * for the allocation of the mapped buffer.  For unmapped, the
 2246          * easiest is to start with EMPTY outright.
 2247          *
 2248          * We start with EMPTYKVA.  If the list is empty we backup to EMPTY.
 2249          * However, there are a number of cases (defragging, reusing, ...)
 2250          * where we cannot backup.
 2251          */
 2252         nbp = NULL;
 2253         mtx_lock(&bqclean);
 2254         if (!defrag && unmapped) {
 2255                 nqindex = QUEUE_EMPTY;
 2256                 nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]);
 2257         }
 2258         if (nbp == NULL) {
 2259                 nqindex = QUEUE_EMPTYKVA;
 2260                 nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTYKVA]);
 2261         }
 2262 
 2263         /*
 2264          * If no EMPTYKVA buffers and we are either defragging or
 2265          * reusing, locate a CLEAN buffer to free or reuse.  If
 2266          * bufspace useage is low skip this step so we can allocate a
 2267          * new buffer.
 2268          */
 2269         if (nbp == NULL && (defrag || bufspace >= lobufspace)) {
 2270                 nqindex = QUEUE_CLEAN;
 2271                 nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN]);
 2272         }
 2273 
 2274         /*
 2275          * If we could not find or were not allowed to reuse a CLEAN
 2276          * buffer, check to see if it is ok to use an EMPTY buffer.
 2277          * We can only use an EMPTY buffer if allocating its KVA would
 2278          * not otherwise run us out of buffer space.  No KVA is needed
 2279          * for the unmapped allocation.
 2280          */
 2281         if (nbp == NULL && defrag == 0 && (bufspace + maxsize < hibufspace ||
 2282             metadata)) {
 2283                 nqindex = QUEUE_EMPTY;
 2284                 nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]);
 2285         }
 2286 
 2287         /*
 2288          * All available buffers might be clean, retry ignoring the
 2289          * lobufspace as the last resort.
 2290          */
 2291         if (nbp == NULL && !TAILQ_EMPTY(&bufqueues[QUEUE_CLEAN])) {
 2292                 nqindex = QUEUE_CLEAN;
 2293                 nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN]);
 2294         }
 2295 
 2296         /*
 2297          * Run scan, possibly freeing data and/or kva mappings on the fly
 2298          * depending.
 2299          */
 2300         while ((bp = nbp) != NULL) {
 2301                 qindex = nqindex;
 2302 
 2303                 /*
 2304                  * Calculate next bp (we can only use it if we do not
 2305                  * block or do other fancy things).
 2306                  */
 2307                 if ((nbp = TAILQ_NEXT(bp, b_freelist)) == NULL) {
 2308                         switch (qindex) {
 2309                         case QUEUE_EMPTY:
 2310                                 nqindex = QUEUE_EMPTYKVA;
 2311                                 nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTYKVA]);
 2312                                 if (nbp != NULL)
 2313                                         break;
 2314                                 /* FALLTHROUGH */
 2315                         case QUEUE_EMPTYKVA:
 2316                                 nqindex = QUEUE_CLEAN;
 2317                                 nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN]);
 2318                                 if (nbp != NULL)
 2319                                         break;
 2320                                 /* FALLTHROUGH */
 2321                         case QUEUE_CLEAN:
 2322                                 if (metadata && pass == 1) {
 2323                                         pass = 2;
 2324                                         nqindex = QUEUE_EMPTY;
 2325                                         nbp = TAILQ_FIRST(
 2326                                             &bufqueues[QUEUE_EMPTY]);
 2327                                 }
 2328                                 /*
 2329                                  * nbp is NULL. 
 2330                                  */
 2331                                 break;
 2332                         }
 2333                 }
 2334                 /*
 2335                  * If we are defragging then we need a buffer with 
 2336                  * b_kvasize != 0.  XXX this situation should no longer
 2337                  * occur, if defrag is non-zero the buffer's b_kvasize
 2338                  * should also be non-zero at this point.  XXX
 2339                  */
 2340                 if (defrag && bp->b_kvasize == 0) {
 2341                         printf("Warning: defrag empty buffer %p\n", bp);
 2342                         continue;
 2343                 }
 2344 
 2345                 /*
 2346                  * Start freeing the bp.  This is somewhat involved.  nbp
 2347                  * remains valid only for QUEUE_EMPTY[KVA] bp's.
 2348                  */
 2349                 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
 2350                         continue;
 2351                 /*
 2352                  * BKGRDINPROG can only be set with the buf and bufobj
 2353                  * locks both held.  We tolerate a race to clear it here.
 2354                  */
 2355                 if (bp->b_vflags & BV_BKGRDINPROG) {
 2356                         BUF_UNLOCK(bp);
 2357                         continue;
 2358                 }
 2359 
 2360                 /*
 2361                  * Requeue the background write buffer with error.
 2362                  */
 2363                 if ((bp->b_vflags & BV_BKGRDERR) != 0) {
 2364                         bremfreel(bp);
 2365                         mtx_unlock(&bqclean);
 2366                         bqrelse(bp);
 2367                         continue;
 2368                 }
 2369 
 2370                 KASSERT(bp->b_qindex == qindex,
 2371                     ("getnewbuf: inconsistent queue %d bp %p", qindex, bp));
 2372 
 2373                 bremfreel(bp);
 2374                 mtx_unlock(&bqclean);
 2375                 /*
 2376                  * NOTE:  nbp is now entirely invalid.  We can only restart
 2377                  * the scan from this point on.
 2378                  */
 2379 
 2380                 getnewbuf_reuse_bp(bp, qindex);
 2381                 mtx_assert(&bqclean, MA_NOTOWNED);
 2382 
 2383                 /*
 2384                  * If we are defragging then free the buffer.
 2385                  */
 2386                 if (defrag) {
 2387                         bp->b_flags |= B_INVAL;
 2388                         bfreekva(bp);
 2389                         brelse(bp);
 2390                         defrag = 0;
 2391                         goto restart;
 2392                 }
 2393 
 2394                 /*
 2395                  * Notify any waiters for the buffer lock about
 2396                  * identity change by freeing the buffer.
 2397                  */
 2398                 if (qindex == QUEUE_CLEAN && BUF_LOCKWAITERS(bp)) {
 2399                         bp->b_flags |= B_INVAL;
 2400                         bfreekva(bp);
 2401                         brelse(bp);
 2402                         goto restart;
 2403                 }
 2404 
 2405                 if (metadata)
 2406                         break;
 2407 
 2408                 /*
 2409                  * If we are overcomitted then recover the buffer and its
 2410                  * KVM space.  This occurs in rare situations when multiple
 2411                  * processes are blocked in getnewbuf() or allocbuf().
 2412                  */
 2413                 if (bufspace >= hibufspace)
 2414                         flushingbufs = 1;
 2415                 if (flushingbufs && bp->b_kvasize != 0) {
 2416                         bp->b_flags |= B_INVAL;
 2417                         bfreekva(bp);
 2418                         brelse(bp);
 2419                         goto restart;
 2420                 }
 2421                 if (bufspace < lobufspace)
 2422                         flushingbufs = 0;
 2423                 break;
 2424         }
 2425         return (bp);
 2426 }
 2427 
 2428 /*
 2429  *      getnewbuf:
 2430  *
 2431  *      Find and initialize a new buffer header, freeing up existing buffers
 2432  *      in the bufqueues as necessary.  The new buffer is returned locked.
 2433  *
 2434  *      Important:  B_INVAL is not set.  If the caller wishes to throw the
 2435  *      buffer away, the caller must set B_INVAL prior to calling brelse().
 2436  *
 2437  *      We block if:
 2438  *              We have insufficient buffer headers
 2439  *              We have insufficient buffer space
 2440  *              buffer_arena is too fragmented ( space reservation fails )
 2441  *              If we have to flush dirty buffers ( but we try to avoid this )
 2442  */
 2443 static struct buf *
 2444 getnewbuf(struct vnode *vp, int slpflag, int slptimeo, int size, int maxsize,
 2445     int gbflags)
 2446 {
 2447         struct buf *bp;
 2448         int defrag, metadata;
 2449 
 2450         KASSERT((gbflags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_KVAALLOC,
 2451             ("GB_KVAALLOC only makes sense with GB_UNMAPPED"));
 2452         if (!unmapped_buf_allowed)
 2453                 gbflags &= ~(GB_UNMAPPED | GB_KVAALLOC);
 2454 
 2455         defrag = 0;
 2456         if (vp == NULL || (vp->v_vflag & (VV_MD | VV_SYSTEM)) != 0 ||
 2457             vp->v_type == VCHR)
 2458                 metadata = 1;
 2459         else
 2460                 metadata = 0;
 2461         /*
 2462          * We can't afford to block since we might be holding a vnode lock,
 2463          * which may prevent system daemons from running.  We deal with
 2464          * low-memory situations by proactively returning memory and running
 2465          * async I/O rather then sync I/O.
 2466          */
 2467         atomic_add_int(&getnewbufcalls, 1);
 2468         atomic_subtract_int(&getnewbufrestarts, 1);
 2469 restart:
 2470         bp = getnewbuf_scan(maxsize, defrag, (gbflags & (GB_UNMAPPED |
 2471             GB_KVAALLOC)) == GB_UNMAPPED, metadata);
 2472         if (bp != NULL)
 2473                 defrag = 0;
 2474 
 2475         /*
 2476          * If we exhausted our list, sleep as appropriate.  We may have to
 2477          * wakeup various daemons and write out some dirty buffers.
 2478          *
 2479          * Generally we are sleeping due to insufficient buffer space.
 2480          */
 2481         if (bp == NULL) {
 2482                 mtx_assert(&bqclean, MA_OWNED);
 2483                 getnewbuf_bufd_help(vp, gbflags, slpflag, slptimeo, defrag);
 2484                 mtx_assert(&bqclean, MA_NOTOWNED);
 2485         } else if ((gbflags & (GB_UNMAPPED | GB_KVAALLOC)) == GB_UNMAPPED) {
 2486                 mtx_assert(&bqclean, MA_NOTOWNED);
 2487 
 2488                 bfreekva(bp);
 2489                 bp->b_flags |= B_UNMAPPED;
 2490                 bp->b_kvabase = bp->b_data = unmapped_buf;
 2491                 bp->b_kvasize = maxsize;
 2492                 atomic_add_long(&bufspace, bp->b_kvasize);
 2493                 atomic_add_long(&unmapped_bufspace, bp->b_kvasize);
 2494                 atomic_add_int(&bufreusecnt, 1);
 2495         } else {
 2496                 mtx_assert(&bqclean, MA_NOTOWNED);
 2497 
 2498                 /*
 2499                  * We finally have a valid bp.  We aren't quite out of the
 2500                  * woods, we still have to reserve kva space.  In order
 2501                  * to keep fragmentation sane we only allocate kva in
 2502                  * BKVASIZE chunks.
 2503                  */
 2504                 maxsize = (maxsize + BKVAMASK) & ~BKVAMASK;
 2505 
 2506                 if (maxsize != bp->b_kvasize || (bp->b_flags & (B_UNMAPPED |
 2507                     B_KVAALLOC)) == B_UNMAPPED) {
 2508                         if (allocbufkva(bp, maxsize, gbflags)) {
 2509                                 defrag = 1;
 2510                                 bp->b_flags |= B_INVAL;
 2511                                 brelse(bp);
 2512                                 goto restart;
 2513                         }
 2514                         atomic_add_int(&bufreusecnt, 1);
 2515                 } else if ((bp->b_flags & B_KVAALLOC) != 0 &&
 2516                     (gbflags & (GB_UNMAPPED | GB_KVAALLOC)) == 0) {
 2517                         /*
 2518                          * If the reused buffer has KVA allocated,
 2519                          * reassign b_kvaalloc to b_kvabase.
 2520                          */
 2521                         bp->b_kvabase = bp->b_kvaalloc;
 2522                         bp->b_flags &= ~B_KVAALLOC;
 2523                         atomic_subtract_long(&unmapped_bufspace,
 2524                             bp->b_kvasize);
 2525                         atomic_add_int(&bufreusecnt, 1);
 2526                 } else if ((bp->b_flags & (B_UNMAPPED | B_KVAALLOC)) == 0 &&
 2527                     (gbflags & (GB_UNMAPPED | GB_KVAALLOC)) == (GB_UNMAPPED |
 2528                     GB_KVAALLOC)) {
 2529                         /*
 2530                          * The case of reused buffer already have KVA
 2531                          * mapped, but the request is for unmapped
 2532                          * buffer with KVA allocated.
 2533                          */
 2534                         bp->b_kvaalloc = bp->b_kvabase;
 2535                         bp->b_data = bp->b_kvabase = unmapped_buf;
 2536                         bp->b_flags |= B_UNMAPPED | B_KVAALLOC;
 2537                         atomic_add_long(&unmapped_bufspace,
 2538                             bp->b_kvasize);
 2539                         atomic_add_int(&bufreusecnt, 1);
 2540                 }
 2541                 if ((gbflags & GB_UNMAPPED) == 0) {
 2542                         bp->b_saveaddr = bp->b_kvabase;
 2543                         bp->b_data = bp->b_saveaddr;
 2544                         bp->b_flags &= ~B_UNMAPPED;
 2545                         BUF_CHECK_MAPPED(bp);
 2546                 }
 2547         }
 2548         return (bp);
 2549 }
 2550 
 2551 /*
 2552  *      buf_daemon:
 2553  *
 2554  *      buffer flushing daemon.  Buffers are normally flushed by the
 2555  *      update daemon but if it cannot keep up this process starts to
 2556  *      take the load in an attempt to prevent getnewbuf() from blocking.
 2557  */
 2558 
 2559 static struct kproc_desc buf_kp = {
 2560         "bufdaemon",
 2561         buf_daemon,
 2562         &bufdaemonproc
 2563 };
 2564 SYSINIT(bufdaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, kproc_start, &buf_kp);
 2565 
 2566 static int
 2567 buf_flush(struct vnode *vp, int target)
 2568 {
 2569         int flushed;
 2570 
 2571         flushed = flushbufqueues(vp, target, 0);
 2572         if (flushed == 0) {
 2573                 /*
 2574                  * Could not find any buffers without rollback
 2575                  * dependencies, so just write the first one
 2576                  * in the hopes of eventually making progress.
 2577                  */
 2578                 if (vp != NULL && target > 2)
 2579                         target /= 2;
 2580                 flushbufqueues(vp, target, 1);
 2581         }
 2582         return (flushed);
 2583 }
 2584 
 2585 static void
 2586 buf_daemon()
 2587 {
 2588         int lodirty;
 2589 
 2590         /*
 2591          * This process needs to be suspended prior to shutdown sync.
 2592          */
 2593         EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, bufdaemonproc,
 2594             SHUTDOWN_PRI_LAST);
 2595 
 2596         /*
 2597          * This process is allowed to take the buffer cache to the limit
 2598          */
 2599         curthread->td_pflags |= TDP_NORUNNINGBUF | TDP_BUFNEED;
 2600         mtx_lock(&bdlock);
 2601         for (;;) {
 2602                 bd_request = 0;
 2603                 mtx_unlock(&bdlock);
 2604 
 2605                 kproc_suspend_check(bufdaemonproc);
 2606                 lodirty = lodirtybuffers;
 2607                 if (bd_speedupreq) {
 2608                         lodirty = numdirtybuffers / 2;
 2609                         bd_speedupreq = 0;
 2610                 }
 2611                 /*
 2612                  * Do the flush.  Limit the amount of in-transit I/O we
 2613                  * allow to build up, otherwise we would completely saturate
 2614                  * the I/O system.
 2615                  */
 2616                 while (numdirtybuffers > lodirty) {
 2617                         if (buf_flush(NULL, numdirtybuffers - lodirty) == 0)
 2618                                 break;
 2619                         kern_yield(PRI_USER);
 2620                 }
 2621 
 2622                 /*
 2623                  * Only clear bd_request if we have reached our low water
 2624                  * mark.  The buf_daemon normally waits 1 second and
 2625                  * then incrementally flushes any dirty buffers that have
 2626                  * built up, within reason.
 2627                  *
 2628                  * If we were unable to hit our low water mark and couldn't
 2629                  * find any flushable buffers, we sleep for a short period
 2630                  * to avoid endless loops on unlockable buffers.
 2631                  */
 2632                 mtx_lock(&bdlock);
 2633                 if (numdirtybuffers <= lodirtybuffers) {
 2634                         /*
 2635                          * We reached our low water mark, reset the
 2636                          * request and sleep until we are needed again.
 2637                          * The sleep is just so the suspend code works.
 2638                          */
 2639                         bd_request = 0;
 2640                         /*
 2641                          * Do an extra wakeup in case dirty threshold
 2642                          * changed via sysctl and the explicit transition
 2643                          * out of shortfall was missed.
 2644                          */
 2645                         bdirtywakeup();
 2646                         if (runningbufspace <= lorunningspace)
 2647                                 runningwakeup();
 2648                         msleep(&bd_request, &bdlock, PVM, "psleep", hz);
 2649                 } else {
 2650                         /*
 2651                          * We couldn't find any flushable dirty buffers but
 2652                          * still have too many dirty buffers, we
 2653                          * have to sleep and try again.  (rare)
 2654                          */
 2655                         msleep(&bd_request, &bdlock, PVM, "qsleep", hz / 10);
 2656                 }
 2657         }
 2658 }
 2659 
 2660 /*
 2661  *      flushbufqueues:
 2662  *
 2663  *      Try to flush a buffer in the dirty queue.  We must be careful to
 2664  *      free up B_INVAL buffers instead of write them, which NFS is 
 2665  *      particularly sensitive to.
 2666  */
 2667 static int flushwithdeps = 0;
 2668 SYSCTL_INT(_vfs, OID_AUTO, flushwithdeps, CTLFLAG_RW, &flushwithdeps,
 2669     0, "Number of buffers flushed with dependecies that require rollbacks");
 2670 
 2671 static int
 2672 flushbufqueues(struct vnode *lvp, int target, int flushdeps)
 2673 {
 2674         struct buf *sentinel;
 2675         struct vnode *vp;
 2676         struct mount *mp;
 2677         struct buf *bp;
 2678         int hasdeps;
 2679         int flushed;
 2680         int queue;
 2681         int error;
 2682         bool unlock;
 2683 
 2684         flushed = 0;
 2685         queue = QUEUE_DIRTY;
 2686         bp = NULL;
 2687         sentinel = malloc(sizeof(struct buf), M_TEMP, M_WAITOK | M_ZERO);
 2688         sentinel->b_qindex = QUEUE_SENTINEL;
 2689         mtx_lock(&bqdirty);
 2690         TAILQ_INSERT_HEAD(&bufqueues[queue], sentinel, b_freelist);
 2691         mtx_unlock(&bqdirty);
 2692         while (flushed != target) {
 2693                 maybe_yield();
 2694                 mtx_lock(&bqdirty);
 2695                 bp = TAILQ_NEXT(sentinel, b_freelist);
 2696                 if (bp != NULL) {
 2697                         TAILQ_REMOVE(&bufqueues[queue], sentinel, b_freelist);
 2698                         TAILQ_INSERT_AFTER(&bufqueues[queue], bp, sentinel,
 2699                             b_freelist);
 2700                 } else {
 2701                         mtx_unlock(&bqdirty);
 2702                         break;
 2703                 }
 2704                 /*
 2705                  * Skip sentinels inserted by other invocations of the
 2706                  * flushbufqueues(), taking care to not reorder them.
 2707                  *
 2708                  * Only flush the buffers that belong to the
 2709                  * vnode locked by the curthread.
 2710                  */
 2711                 if (bp->b_qindex == QUEUE_SENTINEL || (lvp != NULL &&
 2712                     bp->b_vp != lvp)) {
 2713                         mtx_unlock(&bqdirty);
 2714                         continue;
 2715                 }
 2716                 error = BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL);
 2717                 mtx_unlock(&bqdirty);
 2718                 if (error != 0)
 2719                         continue;
 2720                 if (bp->b_pin_count > 0) {
 2721                         BUF_UNLOCK(bp);
 2722                         continue;
 2723                 }
 2724                 /*
 2725                  * BKGRDINPROG can only be set with the buf and bufobj
 2726                  * locks both held.  We tolerate a race to clear it here.
 2727                  */
 2728                 if ((bp->b_vflags & BV_BKGRDINPROG) != 0 ||
 2729                     (bp->b_flags & B_DELWRI) == 0) {
 2730                         BUF_UNLOCK(bp);
 2731                         continue;
 2732                 }
 2733                 if (bp->b_flags & B_INVAL) {
 2734                         bremfreef(bp);
 2735                         brelse(bp);
 2736                         flushed++;
 2737                         continue;
 2738                 }
 2739 
 2740                 if (!LIST_EMPTY(&bp->b_dep) && buf_countdeps(bp, 0)) {
 2741                         if (flushdeps == 0) {
 2742                                 BUF_UNLOCK(bp);
 2743                                 continue;
 2744                         }
 2745                         hasdeps = 1;
 2746                 } else
 2747                         hasdeps = 0;
 2748                 /*
 2749                  * We must hold the lock on a vnode before writing
 2750                  * one of its buffers. Otherwise we may confuse, or
 2751                  * in the case of a snapshot vnode, deadlock the
 2752                  * system.
 2753                  *
 2754                  * The lock order here is the reverse of the normal
 2755                  * of vnode followed by buf lock.  This is ok because
 2756                  * the NOWAIT will prevent deadlock.
 2757                  */
 2758                 vp = bp->b_vp;
 2759                 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
 2760                         BUF_UNLOCK(bp);
 2761                         continue;
 2762                 }
 2763                 if (lvp == NULL) {
 2764                         unlock = true;
 2765                         error = vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT);
 2766                 } else {
 2767                         ASSERT_VOP_LOCKED(vp, "getbuf");
 2768                         unlock = false;
 2769                         error = VOP_ISLOCKED(vp) == LK_EXCLUSIVE ? 0 :
 2770                             vn_lock(vp, LK_TRYUPGRADE);
 2771                 }
 2772                 if (error == 0) {
 2773                         CTR3(KTR_BUF, "flushbufqueue(%p) vp %p flags %X",
 2774                             bp, bp->b_vp, bp->b_flags);
 2775                         if (curproc == bufdaemonproc) {
 2776                                 vfs_bio_awrite(bp);
 2777                         } else {
 2778                                 bremfree(bp);
 2779                                 bwrite(bp);
 2780                                 notbufdflushes++;
 2781                         }
 2782                         vn_finished_write(mp);
 2783                         if (unlock)
 2784                                 VOP_UNLOCK(vp, 0);
 2785                         flushwithdeps += hasdeps;
 2786                         flushed++;
 2787 
 2788                         /*
 2789                          * Sleeping on runningbufspace while holding
 2790                          * vnode lock leads to deadlock.
 2791                          */
 2792                         if (curproc == bufdaemonproc &&
 2793                             runningbufspace > hirunningspace)
 2794                                 waitrunningbufspace();
 2795                         continue;
 2796                 }
 2797                 vn_finished_write(mp);
 2798                 BUF_UNLOCK(bp);
 2799         }
 2800         mtx_lock(&bqdirty);
 2801         TAILQ_REMOVE(&bufqueues[queue], sentinel, b_freelist);
 2802         mtx_unlock(&bqdirty);
 2803         free(sentinel, M_TEMP);
 2804         return (flushed);
 2805 }
 2806 
 2807 /*
 2808  * Check to see if a block is currently memory resident.
 2809  */
 2810 struct buf *
 2811 incore(struct bufobj *bo, daddr_t blkno)
 2812 {
 2813         struct buf *bp;
 2814 
 2815         BO_RLOCK(bo);
 2816         bp = gbincore(bo, blkno);
 2817         BO_RUNLOCK(bo);
 2818         return (bp);
 2819 }
 2820 
 2821 /*
 2822  * Returns true if no I/O is needed to access the
 2823  * associated VM object.  This is like incore except
 2824  * it also hunts around in the VM system for the data.
 2825  */
 2826 
 2827 static int
 2828 inmem(struct vnode * vp, daddr_t blkno)
 2829 {
 2830         vm_object_t obj;
 2831         vm_offset_t toff, tinc, size;
 2832         vm_page_t m;
 2833         vm_ooffset_t off;
 2834 
 2835         ASSERT_VOP_LOCKED(vp, "inmem");
 2836 
 2837         if (incore(&vp->v_bufobj, blkno))
 2838                 return 1;
 2839         if (vp->v_mount == NULL)
 2840                 return 0;
 2841         obj = vp->v_object;
 2842         if (obj == NULL)
 2843                 return (0);
 2844 
 2845         size = PAGE_SIZE;
 2846         if (size > vp->v_mount->mnt_stat.f_iosize)
 2847                 size = vp->v_mount->mnt_stat.f_iosize;
 2848         off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;
 2849 
 2850         VM_OBJECT_RLOCK(obj);
 2851         for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
 2852                 m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
 2853                 if (!m)
 2854                         goto notinmem;
 2855                 tinc = size;
 2856                 if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK))
 2857                         tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK);
 2858                 if (vm_page_is_valid(m,
 2859                     (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
 2860                         goto notinmem;
 2861         }
 2862         VM_OBJECT_RUNLOCK(obj);
 2863         return 1;
 2864 
 2865 notinmem:
 2866         VM_OBJECT_RUNLOCK(obj);
 2867         return (0);
 2868 }
 2869 
 2870 /*
 2871  * Set the dirty range for a buffer based on the status of the dirty
 2872  * bits in the pages comprising the buffer.  The range is limited
 2873  * to the size of the buffer.
 2874  *
 2875  * Tell the VM system that the pages associated with this buffer
 2876  * are clean.  This is used for delayed writes where the data is
 2877  * going to go to disk eventually without additional VM intevention.
 2878  *
 2879  * Note that while we only really need to clean through to b_bcount, we
 2880  * just go ahead and clean through to b_bufsize.
 2881  */
 2882 static void
 2883 vfs_clean_pages_dirty_buf(struct buf *bp)
 2884 {
 2885         vm_ooffset_t foff, noff, eoff;
 2886         vm_page_t m;
 2887         int i;
 2888 
 2889         if ((bp->b_flags & B_VMIO) == 0 || bp->b_bufsize == 0)
 2890                 return;
 2891 
 2892         foff = bp->b_offset;
 2893         KASSERT(bp->b_offset != NOOFFSET,
 2894             ("vfs_clean_pages_dirty_buf: no buffer offset"));
 2895 
 2896         VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
 2897         vfs_drain_busy_pages(bp);
 2898         vfs_setdirty_locked_object(bp);
 2899         for (i = 0; i < bp->b_npages; i++) {
 2900                 noff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
 2901                 eoff = noff;
 2902                 if (eoff > bp->b_offset + bp->b_bufsize)
 2903                         eoff = bp->b_offset + bp->b_bufsize;
 2904                 m = bp->b_pages[i];
 2905                 vfs_page_set_validclean(bp, foff, m);
 2906                 /* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */
 2907                 foff = noff;
 2908         }
 2909         VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
 2910 }
 2911 
 2912 static void
 2913 vfs_setdirty_locked_object(struct buf *bp)
 2914 {
 2915         vm_object_t object;
 2916         int i;
 2917 
 2918         object = bp->b_bufobj->bo_object;
 2919         VM_OBJECT_ASSERT_WLOCKED(object);
 2920 
 2921         /*
 2922          * We qualify the scan for modified pages on whether the
 2923          * object has been flushed yet.
 2924          */
 2925         if ((object->flags & OBJ_MIGHTBEDIRTY) != 0) {
 2926                 vm_offset_t boffset;
 2927                 vm_offset_t eoffset;
 2928 
 2929                 /*
 2930                  * test the pages to see if they have been modified directly
 2931                  * by users through the VM system.
 2932                  */
 2933                 for (i = 0; i < bp->b_npages; i++)
 2934                         vm_page_test_dirty(bp->b_pages[i]);
 2935 
 2936                 /*
 2937                  * Calculate the encompassing dirty range, boffset and eoffset,
 2938                  * (eoffset - boffset) bytes.
 2939                  */
 2940 
 2941                 for (i = 0; i < bp->b_npages; i++) {
 2942                         if (bp->b_pages[i]->dirty)
 2943                                 break;
 2944                 }
 2945                 boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
 2946 
 2947                 for (i = bp->b_npages - 1; i >= 0; --i) {
 2948                         if (bp->b_pages[i]->dirty) {
 2949                                 break;
 2950                         }
 2951                 }
 2952                 eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
 2953 
 2954                 /*
 2955                  * Fit it to the buffer.
 2956                  */
 2957 
 2958                 if (eoffset > bp->b_bcount)
 2959                         eoffset = bp->b_bcount;
 2960 
 2961                 /*
 2962                  * If we have a good dirty range, merge with the existing
 2963                  * dirty range.
 2964                  */
 2965 
 2966                 if (boffset < eoffset) {
 2967                         if (bp->b_dirtyoff > boffset)
 2968                                 bp->b_dirtyoff = boffset;
 2969                         if (bp->b_dirtyend < eoffset)
 2970                                 bp->b_dirtyend = eoffset;
 2971                 }
 2972         }
 2973 }
 2974 
 2975 /*
 2976  * Allocate the KVA mapping for an existing buffer. It handles the
 2977  * cases of both B_UNMAPPED buffer, and buffer with the preallocated
 2978  * KVA which is not mapped (B_KVAALLOC).
 2979  */
 2980 static void
 2981 bp_unmapped_get_kva(struct buf *bp, daddr_t blkno, int size, int gbflags)
 2982 {
 2983         struct buf *scratch_bp;
 2984         int bsize, maxsize, need_mapping, need_kva;
 2985         off_t offset;
 2986 
 2987         need_mapping = (bp->b_flags & B_UNMAPPED) != 0 &&
 2988             (gbflags & GB_UNMAPPED) == 0;
 2989         need_kva = (bp->b_flags & (B_KVAALLOC | B_UNMAPPED)) == B_UNMAPPED &&
 2990             (gbflags & GB_KVAALLOC) != 0;
 2991         if (!need_mapping && !need_kva)
 2992                 return;
 2993 
 2994         BUF_CHECK_UNMAPPED(bp);
 2995 
 2996         if (need_mapping && (bp->b_flags & B_KVAALLOC) != 0) {
 2997                 /*
 2998                  * Buffer is not mapped, but the KVA was already
 2999                  * reserved at the time of the instantiation.  Use the
 3000                  * allocated space.
 3001                  */
 3002                 bp->b_flags &= ~B_KVAALLOC;
 3003                 KASSERT(bp->b_kvaalloc != 0, ("kvaalloc == 0"));
 3004                 bp->b_kvabase = bp->b_kvaalloc;
 3005                 atomic_subtract_long(&unmapped_bufspace, bp->b_kvasize);
 3006                 goto has_addr;
 3007         }
 3008 
 3009         /*
 3010          * Calculate the amount of the address space we would reserve
 3011          * if the buffer was mapped.
 3012          */
 3013         bsize = vn_isdisk(bp->b_vp, NULL) ? DEV_BSIZE : bp->b_bufobj->bo_bsize;
 3014         offset = blkno * bsize;
 3015         maxsize = size + (offset & PAGE_MASK);
 3016         maxsize = imax(maxsize, bsize);
 3017 
 3018 mapping_loop:
 3019         if (allocbufkva(bp, maxsize, gbflags)) {
 3020                 /*
 3021                  * Request defragmentation. getnewbuf() returns us the
 3022                  * allocated space by the scratch buffer KVA.
 3023                  */
 3024                 scratch_bp = getnewbuf(bp->b_vp, 0, 0, size, maxsize, gbflags |
 3025                     (GB_UNMAPPED | GB_KVAALLOC));
 3026                 if (scratch_bp == NULL) {
 3027                         if ((gbflags & GB_NOWAIT_BD) != 0) {
 3028                                 /*
 3029                                  * XXXKIB: defragmentation cannot
 3030                                  * succeed, not sure what else to do.
 3031                                  */
 3032                                 panic("GB_NOWAIT_BD and B_UNMAPPED %p", bp);
 3033                         }
 3034                         atomic_add_int(&mappingrestarts, 1);
 3035                         goto mapping_loop;
 3036                 }
 3037                 KASSERT((scratch_bp->b_flags & B_KVAALLOC) != 0,
 3038                     ("scratch bp !B_KVAALLOC %p", scratch_bp));
 3039                 setbufkva(bp, (vm_offset_t)scratch_bp->b_kvaalloc,
 3040                     scratch_bp->b_kvasize, gbflags);
 3041 
 3042                 /* Get rid of the scratch buffer. */
 3043                 scratch_bp->b_kvasize = 0;
 3044                 scratch_bp->b_flags |= B_INVAL;
 3045                 scratch_bp->b_flags &= ~(B_UNMAPPED | B_KVAALLOC);
 3046                 brelse(scratch_bp);
 3047         }
 3048         if (!need_mapping)
 3049                 return;
 3050 
 3051 has_addr:
 3052         bp->b_saveaddr = bp->b_kvabase;
 3053         bp->b_data = bp->b_saveaddr; /* b_offset is handled by bpmap_qenter */
 3054         bp->b_flags &= ~B_UNMAPPED;
 3055         BUF_CHECK_MAPPED(bp);
 3056         bpmap_qenter(bp);
 3057 }
 3058 
 3059 /*
 3060  *      getblk:
 3061  *
 3062  *      Get a block given a specified block and offset into a file/device.
 3063  *      The buffers B_DONE bit will be cleared on return, making it almost
 3064  *      ready for an I/O initiation.  B_INVAL may or may not be set on 
 3065  *      return.  The caller should clear B_INVAL prior to initiating a
 3066  *      READ.
 3067  *
 3068  *      For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for
 3069  *      an existing buffer.
 3070  *
 3071  *      For a VMIO buffer, B_CACHE is modified according to the backing VM.
 3072  *      If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set
 3073  *      and then cleared based on the backing VM.  If the previous buffer is
 3074  *      non-0-sized but invalid, B_CACHE will be cleared.
 3075  *
 3076  *      If getblk() must create a new buffer, the new buffer is returned with
 3077  *      both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which
 3078  *      case it is returned with B_INVAL clear and B_CACHE set based on the
 3079  *      backing VM.
 3080  *
 3081  *      getblk() also forces a bwrite() for any B_DELWRI buffer whos
 3082  *      B_CACHE bit is clear.
 3083  *      
 3084  *      What this means, basically, is that the caller should use B_CACHE to
 3085  *      determine whether the buffer is fully valid or not and should clear
 3086  *      B_INVAL prior to issuing a read.  If the caller intends to validate
 3087  *      the buffer by loading its data area with something, the caller needs
 3088  *      to clear B_INVAL.  If the caller does this without issuing an I/O, 
 3089  *      the caller should set B_CACHE ( as an optimization ), else the caller
 3090  *      should issue the I/O and biodone() will set B_CACHE if the I/O was
 3091  *      a write attempt or if it was a successfull read.  If the caller 
 3092  *      intends to issue a READ, the caller must clear B_INVAL and BIO_ERROR
 3093  *      prior to issuing the READ.  biodone() will *not* clear B_INVAL.
 3094  */
 3095 struct buf *
 3096 getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo,
 3097     int flags)
 3098 {
 3099         struct buf *bp;
 3100         struct bufobj *bo;
 3101         int bsize, error, maxsize, vmio;
 3102         off_t offset;
 3103 
 3104         CTR3(KTR_BUF, "getblk(%p, %ld, %d)", vp, (long)blkno, size);
 3105         KASSERT((flags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_KVAALLOC,
 3106             ("GB_KVAALLOC only makes sense with GB_UNMAPPED"));
 3107         ASSERT_VOP_LOCKED(vp, "getblk");
 3108         if (size > MAXBCACHEBUF)
 3109                 panic("getblk: size(%d) > MAXBCACHEBUF(%d)\n", size,
 3110                     MAXBCACHEBUF);
 3111         if (!unmapped_buf_allowed)
 3112                 flags &= ~(GB_UNMAPPED | GB_KVAALLOC);
 3113 
 3114         bo = &vp->v_bufobj;
 3115 loop:
 3116         BO_RLOCK(bo);
 3117         bp = gbincore(bo, blkno);
 3118         if (bp != NULL) {
 3119                 int lockflags;
 3120                 /*
 3121                  * Buffer is in-core.  If the buffer is not busy nor managed,
 3122                  * it must be on a queue.
 3123                  */
 3124                 lockflags = LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK;
 3125 
 3126                 if (flags & GB_LOCK_NOWAIT)
 3127                         lockflags |= LK_NOWAIT;
 3128 
 3129                 error = BUF_TIMELOCK(bp, lockflags,
 3130                     BO_LOCKPTR(bo), "getblk", slpflag, slptimeo);
 3131 
 3132                 /*
 3133                  * If we slept and got the lock we have to restart in case
 3134                  * the buffer changed identities.
 3135                  */
 3136                 if (error == ENOLCK)
 3137                         goto loop;
 3138                 /* We timed out or were interrupted. */
 3139                 else if (error)
 3140                         return (NULL);
 3141                 /* If recursed, assume caller knows the rules. */
 3142                 else if (BUF_LOCKRECURSED(bp))
 3143                         goto end;
 3144 
 3145                 /*
 3146                  * The buffer is locked.  B_CACHE is cleared if the buffer is 
 3147                  * invalid.  Otherwise, for a non-VMIO buffer, B_CACHE is set
 3148                  * and for a VMIO buffer B_CACHE is adjusted according to the
 3149                  * backing VM cache.
 3150                  */
 3151                 if (bp->b_flags & B_INVAL)
 3152                         bp->b_flags &= ~B_CACHE;
 3153                 else if ((bp->b_flags & (B_VMIO | B_INVAL)) == 0)
 3154                         bp->b_flags |= B_CACHE;
 3155                 if (bp->b_flags & B_MANAGED)
 3156                         MPASS(bp->b_qindex == QUEUE_NONE);
 3157                 else
 3158                         bremfree(bp);
 3159 
 3160                 /*
 3161                  * check for size inconsistencies for non-VMIO case.
 3162                  */
 3163                 if (bp->b_bcount != size) {
 3164                         if ((bp->b_flags & B_VMIO) == 0 ||
 3165                             (size > bp->b_kvasize)) {
 3166                                 if (bp->b_flags & B_DELWRI) {
 3167                                         /*
 3168                                          * If buffer is pinned and caller does
 3169                                          * not want sleep  waiting for it to be
 3170                                          * unpinned, bail out
 3171                                          * */
 3172                                         if (bp->b_pin_count > 0) {
 3173                                                 if (flags & GB_LOCK_NOWAIT) {
 3174                                                         bqrelse(bp);
 3175                                                         return (NULL);
 3176                                                 } else {
 3177                                                         bunpin_wait(bp);
 3178                                                 }
 3179                                         }
 3180                                         bp->b_flags |= B_NOCACHE;
 3181                                         bwrite(bp);
 3182                                 } else {
 3183                                         if (LIST_EMPTY(&bp->b_dep)) {
 3184                                                 bp->b_flags |= B_RELBUF;
 3185                                                 brelse(bp);
 3186                                         } else {
 3187                                                 bp->b_flags |= B_NOCACHE;
 3188                                                 bwrite(bp);
 3189                                         }
 3190                                 }
 3191                                 goto loop;
 3192                         }
 3193                 }
 3194 
 3195                 /*
 3196                  * Handle the case of unmapped buffer which should
 3197                  * become mapped, or the buffer for which KVA
 3198                  * reservation is requested.
 3199                  */
 3200                 bp_unmapped_get_kva(bp, blkno, size, flags);
 3201 
 3202                 /*
 3203                  * If the size is inconsistant in the VMIO case, we can resize
 3204                  * the buffer.  This might lead to B_CACHE getting set or
 3205                  * cleared.  If the size has not changed, B_CACHE remains
 3206                  * unchanged from its previous state.
 3207                  */
 3208                 if (bp->b_bcount != size)
 3209                         allocbuf(bp, size);
 3210 
 3211                 KASSERT(bp->b_offset != NOOFFSET, 
 3212                     ("getblk: no buffer offset"));
 3213 
 3214                 /*
 3215                  * A buffer with B_DELWRI set and B_CACHE clear must
 3216                  * be committed before we can return the buffer in
 3217                  * order to prevent the caller from issuing a read
 3218                  * ( due to B_CACHE not being set ) and overwriting
 3219                  * it.
 3220                  *
 3221                  * Most callers, including NFS and FFS, need this to
 3222                  * operate properly either because they assume they
 3223                  * can issue a read if B_CACHE is not set, or because
 3224                  * ( for example ) an uncached B_DELWRI might loop due 
 3225                  * to softupdates re-dirtying the buffer.  In the latter
 3226                  * case, B_CACHE is set after the first write completes,
 3227                  * preventing further loops.
 3228                  * NOTE!  b*write() sets B_CACHE.  If we cleared B_CACHE
 3229                  * above while extending the buffer, we cannot allow the
 3230                  * buffer to remain with B_CACHE set after the write
 3231                  * completes or it will represent a corrupt state.  To
 3232                  * deal with this we set B_NOCACHE to scrap the buffer
 3233                  * after the write.
 3234                  *
 3235                  * We might be able to do something fancy, like setting
 3236                  * B_CACHE in bwrite() except if B_DELWRI is already set,
 3237                  * so the below call doesn't set B_CACHE, but that gets real
 3238                  * confusing.  This is much easier.
 3239                  */
 3240 
 3241                 if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) {
 3242                         bp->b_flags |= B_NOCACHE;
 3243                         bwrite(bp);
 3244                         goto loop;
 3245                 }
 3246                 bp->b_flags &= ~B_DONE;
 3247         } else {
 3248                 /*
 3249                  * Buffer is not in-core, create new buffer.  The buffer
 3250                  * returned by getnewbuf() is locked.  Note that the returned
 3251                  * buffer is also considered valid (not marked B_INVAL).
 3252                  */
 3253                 BO_RUNLOCK(bo);
 3254                 /*
 3255                  * If the user does not want us to create the buffer, bail out
 3256                  * here.
 3257                  */
 3258                 if (flags & GB_NOCREAT)
 3259                         return NULL;
 3260                 if (numfreebuffers == 0 && TD_IS_IDLETHREAD(curthread))
 3261                         return NULL;
 3262 
 3263                 bsize = vn_isdisk(vp, NULL) ? DEV_BSIZE : bo->bo_bsize;
 3264                 offset = blkno * bsize;
 3265                 vmio = vp->v_object != NULL;
 3266                 if (vmio) {
 3267                         maxsize = size + (offset & PAGE_MASK);
 3268                 } else {
 3269                         maxsize = size;
 3270                         /* Do not allow non-VMIO notmapped buffers. */
 3271                         flags &= ~GB_UNMAPPED;
 3272                 }
 3273                 maxsize = imax(maxsize, bsize);
 3274 
 3275                 bp = getnewbuf(vp, slpflag, slptimeo, size, maxsize, flags);
 3276                 if (bp == NULL) {
 3277                         if (slpflag || slptimeo)
 3278                                 return NULL;
 3279                         goto loop;
 3280                 }
 3281 
 3282                 /*
 3283                  * This code is used to make sure that a buffer is not
 3284                  * created while the getnewbuf routine is blocked.
 3285                  * This can be a problem whether the vnode is locked or not.
 3286                  * If the buffer is created out from under us, we have to
 3287                  * throw away the one we just created.
 3288                  *
 3289                  * Note: this must occur before we associate the buffer
 3290                  * with the vp especially considering limitations in
 3291                  * the splay tree implementation when dealing with duplicate
 3292                  * lblkno's.
 3293                  */
 3294                 BO_LOCK(bo);
 3295                 if (gbincore(bo, blkno)) {
 3296                         BO_UNLOCK(bo);
 3297                         bp->b_flags |= B_INVAL;
 3298                         brelse(bp);
 3299                         goto loop;
 3300                 }
 3301 
 3302                 /*
 3303                  * Insert the buffer into the hash, so that it can
 3304                  * be found by incore.
 3305                  */
 3306                 bp->b_blkno = bp->b_lblkno = blkno;
 3307                 bp->b_offset = offset;
 3308                 bgetvp(vp, bp);
 3309                 BO_UNLOCK(bo);
 3310 
 3311                 /*
 3312                  * set B_VMIO bit.  allocbuf() the buffer bigger.  Since the
 3313                  * buffer size starts out as 0, B_CACHE will be set by
 3314                  * allocbuf() for the VMIO case prior to it testing the
 3315                  * backing store for validity.
 3316                  */
 3317 
 3318                 if (vmio) {
 3319                         bp->b_flags |= B_VMIO;
 3320                         KASSERT(vp->v_object == bp->b_bufobj->bo_object,
 3321                             ("ARGH! different b_bufobj->bo_object %p %p %p\n",
 3322                             bp, vp->v_object, bp->b_bufobj->bo_object));
 3323                 } else {
 3324                         bp->b_flags &= ~B_VMIO;
 3325                         KASSERT(bp->b_bufobj->bo_object == NULL,
 3326                             ("ARGH! has b_bufobj->bo_object %p %p\n",
 3327                             bp, bp->b_bufobj->bo_object));
 3328                         BUF_CHECK_MAPPED(bp);
 3329                 }
 3330 
 3331                 allocbuf(bp, size);
 3332                 bp->b_flags &= ~B_DONE;
 3333         }
 3334         CTR4(KTR_BUF, "getblk(%p, %ld, %d) = %p", vp, (long)blkno, size, bp);
 3335         BUF_ASSERT_HELD(bp);
 3336 end:
 3337         KASSERT(bp->b_bufobj == bo,
 3338             ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
 3339         return (bp);
 3340 }
 3341 
 3342 /*
 3343  * Get an empty, disassociated buffer of given size.  The buffer is initially
 3344  * set to B_INVAL.
 3345  */
 3346 struct buf *
 3347 geteblk(int size, int flags)
 3348 {
 3349         struct buf *bp;
 3350         int maxsize;
 3351 
 3352         maxsize = (size + BKVAMASK) & ~BKVAMASK;
 3353         while ((bp = getnewbuf(NULL, 0, 0, size, maxsize, flags)) == NULL) {
 3354                 if ((flags & GB_NOWAIT_BD) &&
 3355                     (curthread->td_pflags & TDP_BUFNEED) != 0)
 3356                         return (NULL);
 3357         }
 3358         allocbuf(bp, size);
 3359         bp->b_flags |= B_INVAL; /* b_dep cleared by getnewbuf() */
 3360         BUF_ASSERT_HELD(bp);
 3361         return (bp);
 3362 }
 3363 
 3364 
 3365 /*
 3366  * This code constitutes the buffer memory from either anonymous system
 3367  * memory (in the case of non-VMIO operations) or from an associated
 3368  * VM object (in the case of VMIO operations).  This code is able to
 3369  * resize a buffer up or down.
 3370  *
 3371  * Note that this code is tricky, and has many complications to resolve
 3372  * deadlock or inconsistant data situations.  Tread lightly!!! 
 3373  * There are B_CACHE and B_DELWRI interactions that must be dealt with by 
 3374  * the caller.  Calling this code willy nilly can result in the loss of data.
 3375  *
 3376  * allocbuf() only adjusts B_CACHE for VMIO buffers.  getblk() deals with
 3377  * B_CACHE for the non-VMIO case.
 3378  */
 3379 
 3380 int
 3381 allocbuf(struct buf *bp, int size)
 3382 {
 3383         int newbsize, mbsize;
 3384         int i;
 3385 
 3386         BUF_ASSERT_HELD(bp);
 3387 
 3388         if (bp->b_kvasize < size)
 3389                 panic("allocbuf: buffer too small");
 3390 
 3391         if ((bp->b_flags & B_VMIO) == 0) {
 3392                 caddr_t origbuf;
 3393                 int origbufsize;
 3394                 /*
 3395                  * Just get anonymous memory from the kernel.  Don't
 3396                  * mess with B_CACHE.
 3397                  */
 3398                 mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
 3399                 if (bp->b_flags & B_MALLOC)
 3400                         newbsize = mbsize;
 3401                 else
 3402                         newbsize = round_page(size);
 3403 
 3404                 if (newbsize < bp->b_bufsize) {
 3405                         /*
 3406                          * malloced buffers are not shrunk
 3407                          */
 3408                         if (bp->b_flags & B_MALLOC) {
 3409                                 if (newbsize) {
 3410                                         bp->b_bcount = size;
 3411                                 } else {
 3412                                         free(bp->b_data, M_BIOBUF);
 3413                                         if (bp->b_bufsize) {
 3414                                                 atomic_subtract_long(
 3415                                                     &bufmallocspace,
 3416                                                     bp->b_bufsize);
 3417                                                 bufspacewakeup();
 3418                                                 bp->b_bufsize = 0;
 3419                                         }
 3420                                         bp->b_saveaddr = bp->b_kvabase;
 3421                                         bp->b_data = bp->b_saveaddr;
 3422                                         bp->b_bcount = 0;
 3423                                         bp->b_flags &= ~B_MALLOC;
 3424                                 }
 3425                                 return 1;
 3426                         }               
 3427                         vm_hold_free_pages(bp, newbsize);
 3428                 } else if (newbsize > bp->b_bufsize) {
 3429                         /*
 3430                          * We only use malloced memory on the first allocation.
 3431                          * and revert to page-allocated memory when the buffer
 3432                          * grows.
 3433                          */
 3434                         /*
 3435                          * There is a potential smp race here that could lead
 3436                          * to bufmallocspace slightly passing the max.  It
 3437                          * is probably extremely rare and not worth worrying
 3438                          * over.
 3439                          */
 3440                         if ( (bufmallocspace < maxbufmallocspace) &&
 3441                                 (bp->b_bufsize == 0) &&
 3442                                 (mbsize <= PAGE_SIZE/2)) {
 3443 
 3444                                 bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
 3445                                 bp->b_bufsize = mbsize;
 3446                                 bp->b_bcount = size;
 3447                                 bp->b_flags |= B_MALLOC;
 3448                                 atomic_add_long(&bufmallocspace, mbsize);
 3449                                 return 1;
 3450                         }
 3451                         origbuf = NULL;
 3452                         origbufsize = 0;
 3453                         /*
 3454                          * If the buffer is growing on its other-than-first allocation,
 3455                          * then we revert to the page-allocation scheme.
 3456                          */
 3457                         if (bp->b_flags & B_MALLOC) {
 3458                                 origbuf = bp->b_data;
 3459                                 origbufsize = bp->b_bufsize;
 3460                                 bp->b_data = bp->b_kvabase;
 3461                                 if (bp->b_bufsize) {
 3462                                         atomic_subtract_long(&bufmallocspace,
 3463                                             bp->b_bufsize);
 3464                                         bufspacewakeup();
 3465                                         bp->b_bufsize = 0;
 3466                                 }
 3467                                 bp->b_flags &= ~B_MALLOC;
 3468                                 newbsize = round_page(newbsize);
 3469                         }
 3470                         vm_hold_load_pages(
 3471                             bp,
 3472                             (vm_offset_t) bp->b_data + bp->b_bufsize,
 3473                             (vm_offset_t) bp->b_data + newbsize);
 3474                         if (origbuf) {
 3475                                 bcopy(origbuf, bp->b_data, origbufsize);
 3476                                 free(origbuf, M_BIOBUF);
 3477                         }
 3478                 }
 3479         } else {
 3480                 int desiredpages;
 3481 
 3482                 newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
 3483                 desiredpages = (size == 0) ? 0 :
 3484                         num_pages((bp->b_offset & PAGE_MASK) + newbsize);
 3485 
 3486                 if (bp->b_flags & B_MALLOC)
 3487                         panic("allocbuf: VMIO buffer can't be malloced");
 3488                 /*
 3489                  * Set B_CACHE initially if buffer is 0 length or will become
 3490                  * 0-length.
 3491                  */
 3492                 if (size == 0 || bp->b_bufsize == 0)
 3493                         bp->b_flags |= B_CACHE;
 3494 
 3495                 if (newbsize < bp->b_bufsize) {
 3496                         /*
 3497                          * DEV_BSIZE aligned new buffer size is less then the
 3498                          * DEV_BSIZE aligned existing buffer size.  Figure out
 3499                          * if we have to remove any pages.
 3500                          */
 3501                         if (desiredpages < bp->b_npages) {
 3502                                 vm_page_t m;
 3503 
 3504                                 if ((bp->b_flags & B_UNMAPPED) == 0) {
 3505                                         BUF_CHECK_MAPPED(bp);
 3506                                         pmap_qremove((vm_offset_t)trunc_page(
 3507                                             (vm_offset_t)bp->b_data) +
 3508                                             (desiredpages << PAGE_SHIFT),
 3509                                             (bp->b_npages - desiredpages));
 3510                                 } else
 3511                                         BUF_CHECK_UNMAPPED(bp);
 3512                                 VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
 3513                                 for (i = desiredpages; i < bp->b_npages; i++) {
 3514                                         /*
 3515                                          * the page is not freed here -- it
 3516                                          * is the responsibility of 
 3517                                          * vnode_pager_setsize
 3518                                          */
 3519                                         m = bp->b_pages[i];
 3520                                         KASSERT(m != bogus_page,
 3521                                             ("allocbuf: bogus page found"));
 3522                                         while (vm_page_sleep_if_busy(m,
 3523                                             "biodep"))
 3524                                                 continue;
 3525 
 3526                                         bp->b_pages[i] = NULL;
 3527                                         vm_page_lock(m);
 3528                                         vm_page_unwire(m, 0);
 3529                                         vm_page_unlock(m);
 3530                                 }
 3531                                 VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
 3532                                 bp->b_npages = desiredpages;
 3533                         }
 3534                 } else if (size > bp->b_bcount) {
 3535                         /*
 3536                          * We are growing the buffer, possibly in a 
 3537                          * byte-granular fashion.
 3538                          */
 3539                         vm_object_t obj;
 3540                         vm_offset_t toff;
 3541                         vm_offset_t tinc;
 3542 
 3543                         /*
 3544                          * Step 1, bring in the VM pages from the object, 
 3545                          * allocating them if necessary.  We must clear
 3546                          * B_CACHE if these pages are not valid for the 
 3547                          * range covered by the buffer.
 3548                          */
 3549 
 3550                         obj = bp->b_bufobj->bo_object;
 3551 
 3552                         VM_OBJECT_WLOCK(obj);
 3553                         while (bp->b_npages < desiredpages) {
 3554                                 vm_page_t m;
 3555 
 3556                                 /*
 3557                                  * We must allocate system pages since blocking
 3558                                  * here could interfere with paging I/O, no
 3559                                  * matter which process we are.
 3560                                  *
 3561                                  * Only exclusive busy can be tested here.
 3562                                  * Blocking on shared busy might lead to
 3563                                  * deadlocks once allocbuf() is called after
 3564                                  * pages are vfs_busy_pages().
 3565                                  */
 3566                                 m = vm_page_grab(obj, OFF_TO_IDX(bp->b_offset) +
 3567                                     bp->b_npages, VM_ALLOC_NOBUSY |
 3568                                     VM_ALLOC_SYSTEM | VM_ALLOC_WIRED |
 3569                                     VM_ALLOC_IGN_SBUSY |
 3570                                     VM_ALLOC_COUNT(desiredpages - bp->b_npages));
 3571                                 if (m->valid == 0)
 3572                                         bp->b_flags &= ~B_CACHE;
 3573                                 bp->b_pages[bp->b_npages] = m;
 3574                                 ++bp->b_npages;
 3575                         }
 3576 
 3577                         /*
 3578                          * Step 2.  We've loaded the pages into the buffer,
 3579                          * we have to figure out if we can still have B_CACHE
 3580                          * set.  Note that B_CACHE is set according to the
 3581                          * byte-granular range ( bcount and size ), new the
 3582                          * aligned range ( newbsize ).
 3583                          *
 3584                          * The VM test is against m->valid, which is DEV_BSIZE
 3585                          * aligned.  Needless to say, the validity of the data
 3586                          * needs to also be DEV_BSIZE aligned.  Note that this
 3587                          * fails with NFS if the server or some other client
 3588                          * extends the file's EOF.  If our buffer is resized, 
 3589                          * B_CACHE may remain set! XXX
 3590                          */
 3591 
 3592                         toff = bp->b_bcount;
 3593                         tinc = PAGE_SIZE - ((bp->b_offset + toff) & PAGE_MASK);
 3594 
 3595                         while ((bp->b_flags & B_CACHE) && toff < size) {
 3596                                 vm_pindex_t pi;
 3597 
 3598                                 if (tinc > (size - toff))
 3599                                         tinc = size - toff;
 3600 
 3601                                 pi = ((bp->b_offset & PAGE_MASK) + toff) >> 
 3602                                     PAGE_SHIFT;
 3603 
 3604                                 vfs_buf_test_cache(
 3605                                     bp, 
 3606                                     bp->b_offset,
 3607                                     toff, 
 3608                                     tinc, 
 3609                                     bp->b_pages[pi]
 3610                                 );
 3611                                 toff += tinc;
 3612                                 tinc = PAGE_SIZE;
 3613                         }
 3614                         VM_OBJECT_WUNLOCK(obj);
 3615 
 3616                         /*
 3617                          * Step 3, fixup the KVM pmap.
 3618                          */
 3619                         if ((bp->b_flags & B_UNMAPPED) == 0)
 3620                                 bpmap_qenter(bp);
 3621                         else
 3622                                 BUF_CHECK_UNMAPPED(bp);
 3623                 }
 3624         }
 3625         if (newbsize < bp->b_bufsize)
 3626                 bufspacewakeup();
 3627         bp->b_bufsize = newbsize;       /* actual buffer allocation     */
 3628         bp->b_bcount = size;            /* requested buffer size        */
 3629         return 1;
 3630 }
 3631 
 3632 extern int inflight_transient_maps;
 3633 
 3634 void
 3635 biodone(struct bio *bp)
 3636 {
 3637         struct mtx *mtxp;
 3638         void (*done)(struct bio *);
 3639         vm_offset_t start, end;
 3640 
 3641         if ((bp->bio_flags & BIO_TRANSIENT_MAPPING) != 0) {
 3642                 bp->bio_flags &= ~BIO_TRANSIENT_MAPPING;
 3643                 bp->bio_flags |= BIO_UNMAPPED;
 3644                 start = trunc_page((vm_offset_t)bp->bio_data);
 3645                 end = round_page((vm_offset_t)bp->bio_data + bp->bio_length);
 3646                 pmap_qremove(start, OFF_TO_IDX(end - start));
 3647                 vmem_free(transient_arena, start, end - start);
 3648                 atomic_add_int(&inflight_transient_maps, -1);
 3649         }
 3650         done = bp->bio_done;
 3651         if (done == NULL) {
 3652                 mtxp = mtx_pool_find(mtxpool_sleep, bp);
 3653                 mtx_lock(mtxp);
 3654                 bp->bio_flags |= BIO_DONE;
 3655                 wakeup(bp);
 3656                 mtx_unlock(mtxp);
 3657         } else {
 3658                 bp->bio_flags |= BIO_DONE;
 3659                 done(bp);
 3660         }
 3661 }
 3662 
 3663 /*
 3664  * Wait for a BIO to finish.
 3665  */
 3666 int
 3667 biowait(struct bio *bp, const char *wchan)
 3668 {
 3669         struct mtx *mtxp;
 3670 
 3671         mtxp = mtx_pool_find(mtxpool_sleep, bp);
 3672         mtx_lock(mtxp);
 3673         while ((bp->bio_flags & BIO_DONE) == 0)
 3674                 msleep(bp, mtxp, PRIBIO, wchan, 0);
 3675         mtx_unlock(mtxp);
 3676         if (bp->bio_error != 0)
 3677                 return (bp->bio_error);
 3678         if (!(bp->bio_flags & BIO_ERROR))
 3679                 return (0);
 3680         return (EIO);
 3681 }
 3682 
 3683 void
 3684 biofinish(struct bio *bp, struct devstat *stat, int error)
 3685 {
 3686         
 3687         if (error) {
 3688                 bp->bio_error = error;
 3689                 bp->bio_flags |= BIO_ERROR;
 3690         }
 3691         if (stat != NULL)
 3692                 devstat_end_transaction_bio(stat, bp);
 3693         biodone(bp);
 3694 }
 3695 
 3696 /*
 3697  *      bufwait:
 3698  *
 3699  *      Wait for buffer I/O completion, returning error status.  The buffer
 3700  *      is left locked and B_DONE on return.  B_EINTR is converted into an EINTR
 3701  *      error and cleared.
 3702  */
 3703 int
 3704 bufwait(struct buf *bp)
 3705 {
 3706         if (bp->b_iocmd == BIO_READ)
 3707                 bwait(bp, PRIBIO, "biord");
 3708         else
 3709                 bwait(bp, PRIBIO, "biowr");
 3710         if (bp->b_flags & B_EINTR) {
 3711                 bp->b_flags &= ~B_EINTR;
 3712                 return (EINTR);
 3713         }
 3714         if (bp->b_ioflags & BIO_ERROR) {
 3715                 return (bp->b_error ? bp->b_error : EIO);
 3716         } else {
 3717                 return (0);
 3718         }
 3719 }
 3720 
 3721  /*
 3722   * Call back function from struct bio back up to struct buf.
 3723   */
 3724 static void
 3725 bufdonebio(struct bio *bip)
 3726 {
 3727         struct buf *bp;
 3728 
 3729         bp = bip->bio_caller2;
 3730         bp->b_resid = bp->b_bcount - bip->bio_completed;
 3731         bp->b_resid = bip->bio_resid;   /* XXX: remove */
 3732         bp->b_ioflags = bip->bio_flags;
 3733         bp->b_error = bip->bio_error;
 3734         if (bp->b_error)
 3735                 bp->b_ioflags |= BIO_ERROR;
 3736         bufdone(bp);
 3737         g_destroy_bio(bip);
 3738 }
 3739 
 3740 void
 3741 dev_strategy(struct cdev *dev, struct buf *bp)
 3742 {
 3743         struct cdevsw *csw;
 3744         int ref;
 3745 
 3746         KASSERT(dev->si_refcount > 0,
 3747             ("dev_strategy on un-referenced struct cdev *(%s) %p",
 3748             devtoname(dev), dev));
 3749 
 3750         csw = dev_refthread(dev, &ref);
 3751         dev_strategy_csw(dev, csw, bp);
 3752         dev_relthread(dev, ref);
 3753 }
 3754 
 3755 void
 3756 dev_strategy_csw(struct cdev *dev, struct cdevsw *csw, struct buf *bp)
 3757 {
 3758         struct bio *bip;
 3759 
 3760         KASSERT(bp->b_iocmd == BIO_READ || bp->b_iocmd == BIO_WRITE,
 3761             ("b_iocmd botch"));
 3762         KASSERT(((dev->si_flags & SI_ETERNAL) != 0 && csw != NULL) ||
 3763             dev->si_threadcount > 0,
 3764             ("dev_strategy_csw threadcount cdev *(%s) %p", devtoname(dev),
 3765             dev));
 3766         if (csw == NULL) {
 3767                 bp->b_error = ENXIO;
 3768                 bp->b_ioflags = BIO_ERROR;
 3769                 bufdone(bp);
 3770                 return;
 3771         }
 3772         for (;;) {
 3773                 bip = g_new_bio();
 3774                 if (bip != NULL)
 3775                         break;
 3776                 /* Try again later */
 3777                 tsleep(&bp, PRIBIO, "dev_strat", hz/10);
 3778         }
 3779         bip->bio_cmd = bp->b_iocmd;
 3780         bip->bio_offset = bp->b_iooffset;
 3781         bip->bio_length = bp->b_bcount;
 3782         bip->bio_bcount = bp->b_bcount; /* XXX: remove */
 3783         bdata2bio(bp, bip);
 3784         bip->bio_done = bufdonebio;
 3785         bip->bio_caller2 = bp;
 3786         bip->bio_dev = dev;
 3787         (*csw->d_strategy)(bip);
 3788 }
 3789 
 3790 /*
 3791  *      bufdone:
 3792  *
 3793  *      Finish I/O on a buffer, optionally calling a completion function.
 3794  *      This is usually called from an interrupt so process blocking is
 3795  *      not allowed.
 3796  *
 3797  *      biodone is also responsible for setting B_CACHE in a B_VMIO bp.
 3798  *      In a non-VMIO bp, B_CACHE will be set on the next getblk() 
 3799  *      assuming B_INVAL is clear.
 3800  *
 3801  *      For the VMIO case, we set B_CACHE if the op was a read and no
 3802  *      read error occured, or if the op was a write.  B_CACHE is never
 3803  *      set if the buffer is invalid or otherwise uncacheable.
 3804  *
 3805  *      biodone does not mess with B_INVAL, allowing the I/O routine or the
 3806  *      initiator to leave B_INVAL set to brelse the buffer out of existance
 3807  *      in the biodone routine.
 3808  */
 3809 void
 3810 bufdone(struct buf *bp)
 3811 {
 3812         struct bufobj *dropobj;
 3813         void    (*biodone)(struct buf *);
 3814 
 3815         CTR3(KTR_BUF, "bufdone(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
 3816         dropobj = NULL;
 3817 
 3818         KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp));
 3819         BUF_ASSERT_HELD(bp);
 3820 
 3821         runningbufwakeup(bp);
 3822         if (bp->b_iocmd == BIO_WRITE)
 3823                 dropobj = bp->b_bufobj;
 3824         /* call optional completion function if requested */
 3825         if (bp->b_iodone != NULL) {
 3826                 biodone = bp->b_iodone;
 3827                 bp->b_iodone = NULL;
 3828                 (*biodone) (bp);
 3829                 if (dropobj)
 3830                         bufobj_wdrop(dropobj);
 3831                 return;
 3832         }
 3833 
 3834         bufdone_finish(bp);
 3835 
 3836         if (dropobj)
 3837                 bufobj_wdrop(dropobj);
 3838 }
 3839 
 3840 void
 3841 bufdone_finish(struct buf *bp)
 3842 {
 3843         BUF_ASSERT_HELD(bp);
 3844 
 3845         if (!LIST_EMPTY(&bp->b_dep))
 3846                 buf_complete(bp);
 3847 
 3848         if (bp->b_flags & B_VMIO) {
 3849                 vm_ooffset_t foff;
 3850                 vm_page_t m;
 3851                 vm_object_t obj;
 3852                 struct vnode *vp;
 3853                 int bogus, i, iosize;
 3854 
 3855                 obj = bp->b_bufobj->bo_object;
 3856                 KASSERT(obj->paging_in_progress >= bp->b_npages,
 3857                     ("biodone_finish: paging in progress(%d) < b_npages(%d)",
 3858                     obj->paging_in_progress, bp->b_npages));
 3859 
 3860                 vp = bp->b_vp;
 3861                 KASSERT(vp->v_holdcnt > 0,
 3862                     ("biodone_finish: vnode %p has zero hold count", vp));
 3863                 KASSERT(vp->v_object != NULL,
 3864                     ("biodone_finish: vnode %p has no vm_object", vp));
 3865 
 3866                 foff = bp->b_offset;
 3867                 KASSERT(bp->b_offset != NOOFFSET,
 3868                     ("biodone_finish: bp %p has no buffer offset", bp));
 3869 
 3870                 /*
 3871                  * Set B_CACHE if the op was a normal read and no error
 3872                  * occured.  B_CACHE is set for writes in the b*write()
 3873                  * routines.
 3874                  */
 3875                 iosize = bp->b_bcount - bp->b_resid;
 3876                 if (bp->b_iocmd == BIO_READ &&
 3877                     !(bp->b_flags & (B_INVAL|B_NOCACHE)) &&
 3878                     !(bp->b_ioflags & BIO_ERROR)) {
 3879                         bp->b_flags |= B_CACHE;
 3880                 }
 3881                 bogus = 0;
 3882                 VM_OBJECT_WLOCK(obj);
 3883                 for (i = 0; i < bp->b_npages; i++) {
 3884                         int bogusflag = 0;
 3885                         int resid;
 3886 
 3887                         resid = ((foff + PAGE_SIZE) & ~(off_t)PAGE_MASK) - foff;
 3888                         if (resid > iosize)
 3889                                 resid = iosize;
 3890 
 3891                         /*
 3892                          * cleanup bogus pages, restoring the originals
 3893                          */
 3894                         m = bp->b_pages[i];
 3895                         if (m == bogus_page) {
 3896                                 bogus = bogusflag = 1;
 3897                                 m = vm_page_lookup(obj, OFF_TO_IDX(foff));
 3898                                 if (m == NULL)
 3899                                         panic("biodone: page disappeared!");
 3900                                 bp->b_pages[i] = m;
 3901                         }
 3902                         KASSERT(OFF_TO_IDX(foff) == m->pindex,
 3903                             ("biodone_finish: foff(%jd)/pindex(%ju) mismatch",
 3904                             (intmax_t)foff, (uintmax_t)m->pindex));
 3905 
 3906                         /*
 3907                          * In the write case, the valid and clean bits are
 3908                          * already changed correctly ( see bdwrite() ), so we 
 3909                          * only need to do this here in the read case.
 3910                          */
 3911                         if ((bp->b_iocmd == BIO_READ) && !bogusflag && resid > 0) {
 3912                                 KASSERT((m->dirty & vm_page_bits(foff &
 3913                                     PAGE_MASK, resid)) == 0, ("bufdone_finish:"
 3914                                     " page %p has unexpected dirty bits", m));
 3915                                 vfs_page_set_valid(bp, foff, m);
 3916                         }
 3917 
 3918                         vm_page_sunbusy(m);
 3919                         vm_object_pip_subtract(obj, 1);
 3920                         foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
 3921                         iosize -= resid;
 3922                 }
 3923                 vm_object_pip_wakeupn(obj, 0);
 3924                 VM_OBJECT_WUNLOCK(obj);
 3925                 if (bogus && (bp->b_flags & B_UNMAPPED) == 0) {
 3926                         BUF_CHECK_MAPPED(bp);
 3927                         pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
 3928                             bp->b_pages, bp->b_npages);
 3929                 }
 3930         }
 3931 
 3932         /*
 3933          * For asynchronous completions, release the buffer now. The brelse
 3934          * will do a wakeup there if necessary - so no need to do a wakeup
 3935          * here in the async case. The sync case always needs to do a wakeup.
 3936          */
 3937 
 3938         if (bp->b_flags & B_ASYNC) {
 3939                 if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_RELBUF)) || (bp->b_ioflags & BIO_ERROR))
 3940                         brelse(bp);
 3941                 else
 3942                         bqrelse(bp);
 3943         } else
 3944                 bdone(bp);
 3945 }
 3946 
 3947 /*
 3948  * This routine is called in lieu of iodone in the case of
 3949  * incomplete I/O.  This keeps the busy status for pages
 3950  * consistant.
 3951  */
 3952 void
 3953 vfs_unbusy_pages(struct buf *bp)
 3954 {
 3955         int i;
 3956         vm_object_t obj;
 3957         vm_page_t m;
 3958 
 3959         runningbufwakeup(bp);
 3960         if (!(bp->b_flags & B_VMIO))
 3961                 return;
 3962 
 3963         obj = bp->b_bufobj->bo_object;
 3964         VM_OBJECT_WLOCK(obj);
 3965         for (i = 0; i < bp->b_npages; i++) {
 3966                 m = bp->b_pages[i];
 3967                 if (m == bogus_page) {
 3968                         m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i);
 3969                         if (!m)
 3970                                 panic("vfs_unbusy_pages: page missing\n");
 3971                         bp->b_pages[i] = m;
 3972                         if ((bp->b_flags & B_UNMAPPED) == 0) {
 3973                                 BUF_CHECK_MAPPED(bp);
 3974                                 pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
 3975                                     bp->b_pages, bp->b_npages);
 3976                         } else
 3977                                 BUF_CHECK_UNMAPPED(bp);
 3978                 }
 3979                 vm_object_pip_subtract(obj, 1);
 3980                 vm_page_sunbusy(m);
 3981         }
 3982         vm_object_pip_wakeupn(obj, 0);
 3983         VM_OBJECT_WUNLOCK(obj);
 3984 }
 3985 
 3986 /*
 3987  * vfs_page_set_valid:
 3988  *
 3989  *      Set the valid bits in a page based on the supplied offset.   The
 3990  *      range is restricted to the buffer's size.
 3991  *
 3992  *      This routine is typically called after a read completes.
 3993  */
 3994 static void
 3995 vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m)
 3996 {
 3997         vm_ooffset_t eoff;
 3998 
 3999         /*
 4000          * Compute the end offset, eoff, such that [off, eoff) does not span a
 4001          * page boundary and eoff is not greater than the end of the buffer.
 4002          * The end of the buffer, in this case, is our file EOF, not the
 4003          * allocation size of the buffer.
 4004          */
 4005         eoff = (off + PAGE_SIZE) & ~(vm_ooffset_t)PAGE_MASK;
 4006         if (eoff > bp->b_offset + bp->b_bcount)
 4007                 eoff = bp->b_offset + bp->b_bcount;
 4008 
 4009         /*
 4010          * Set valid range.  This is typically the entire buffer and thus the
 4011          * entire page.
 4012          */
 4013         if (eoff > off)
 4014                 vm_page_set_valid_range(m, off & PAGE_MASK, eoff - off);
 4015 }
 4016 
 4017 /*
 4018  * vfs_page_set_validclean:
 4019  *
 4020  *      Set the valid bits and clear the dirty bits in a page based on the
 4021  *      supplied offset.   The range is restricted to the buffer's size.
 4022  */
 4023 static void
 4024 vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off, vm_page_t m)
 4025 {
 4026         vm_ooffset_t soff, eoff;
 4027 
 4028         /*
 4029          * Start and end offsets in buffer.  eoff - soff may not cross a
 4030          * page boundry or cross the end of the buffer.  The end of the
 4031          * buffer, in this case, is our file EOF, not the allocation size
 4032          * of the buffer.
 4033          */
 4034         soff = off;
 4035         eoff = (off + PAGE_SIZE) & ~(off_t)PAGE_MASK;
 4036         if (eoff > bp->b_offset + bp->b_bcount)
 4037                 eoff = bp->b_offset + bp->b_bcount;
 4038 
 4039         /*
 4040          * Set valid range.  This is typically the entire buffer and thus the
 4041          * entire page.
 4042          */
 4043         if (eoff > soff) {
 4044                 vm_page_set_validclean(
 4045                     m,
 4046                    (vm_offset_t) (soff & PAGE_MASK),
 4047                    (vm_offset_t) (eoff - soff)
 4048                 );
 4049         }
 4050 }
 4051 
 4052 /*
 4053  * Ensure that all buffer pages are not exclusive busied.  If any page is
 4054  * exclusive busy, drain it.
 4055  */
 4056 void
 4057 vfs_drain_busy_pages(struct buf *bp)
 4058 {
 4059         vm_page_t m;
 4060         int i, last_busied;
 4061 
 4062         VM_OBJECT_ASSERT_WLOCKED(bp->b_bufobj->bo_object);
 4063         last_busied = 0;
 4064         for (i = 0; i < bp->b_npages; i++) {
 4065                 m = bp->b_pages[i];
 4066                 if (vm_page_xbusied(m)) {
 4067                         for (; last_busied < i; last_busied++)
 4068                                 vm_page_sbusy(bp->b_pages[last_busied]);
 4069                         while (vm_page_xbusied(m)) {
 4070                                 vm_page_lock(m);
 4071                                 VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
 4072                                 vm_page_busy_sleep(m, "vbpage");
 4073                                 VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
 4074                         }
 4075                 }
 4076         }
 4077         for (i = 0; i < last_busied; i++)
 4078                 vm_page_sunbusy(bp->b_pages[i]);
 4079 }
 4080 
 4081 /*
 4082  * This routine is called before a device strategy routine.
 4083  * It is used to tell the VM system that paging I/O is in
 4084  * progress, and treat the pages associated with the buffer
 4085  * almost as being exclusive busy.  Also the object paging_in_progress
 4086  * flag is handled to make sure that the object doesn't become
 4087  * inconsistant.
 4088  *
 4089  * Since I/O has not been initiated yet, certain buffer flags
 4090  * such as BIO_ERROR or B_INVAL may be in an inconsistant state
 4091  * and should be ignored.
 4092  */
 4093 void
 4094 vfs_busy_pages(struct buf *bp, int clear_modify)
 4095 {
 4096         int i, bogus;
 4097         vm_object_t obj;
 4098         vm_ooffset_t foff;
 4099         vm_page_t m;
 4100 
 4101         if (!(bp->b_flags & B_VMIO))
 4102                 return;
 4103 
 4104         obj = bp->b_bufobj->bo_object;
 4105         foff = bp->b_offset;
 4106         KASSERT(bp->b_offset != NOOFFSET,
 4107             ("vfs_busy_pages: no buffer offset"));
 4108         VM_OBJECT_WLOCK(obj);
 4109         vfs_drain_busy_pages(bp);
 4110         if (bp->b_bufsize != 0)
 4111                 vfs_setdirty_locked_object(bp);
 4112         bogus = 0;
 4113         for (i = 0; i < bp->b_npages; i++) {
 4114                 m = bp->b_pages[i];
 4115 
 4116                 if ((bp->b_flags & B_CLUSTER) == 0) {
 4117                         vm_object_pip_add(obj, 1);
 4118                         vm_page_sbusy(m);
 4119                 }
 4120                 /*
 4121                  * When readying a buffer for a read ( i.e
 4122                  * clear_modify == 0 ), it is important to do
 4123                  * bogus_page replacement for valid pages in 
 4124                  * partially instantiated buffers.  Partially 
 4125                  * instantiated buffers can, in turn, occur when
 4126                  * reconstituting a buffer from its VM backing store
 4127                  * base.  We only have to do this if B_CACHE is
 4128                  * clear ( which causes the I/O to occur in the
 4129                  * first place ).  The replacement prevents the read
 4130                  * I/O from overwriting potentially dirty VM-backed
 4131                  * pages.  XXX bogus page replacement is, uh, bogus.
 4132                  * It may not work properly with small-block devices.
 4133                  * We need to find a better way.
 4134                  */
 4135                 if (clear_modify) {
 4136                         pmap_remove_write(m);
 4137                         vfs_page_set_validclean(bp, foff, m);
 4138                 } else if (m->valid == VM_PAGE_BITS_ALL &&
 4139                     (bp->b_flags & B_CACHE) == 0) {
 4140                         bp->b_pages[i] = bogus_page;
 4141                         bogus++;
 4142                 }
 4143                 foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
 4144         }
 4145         VM_OBJECT_WUNLOCK(obj);
 4146         if (bogus && (bp->b_flags & B_UNMAPPED) == 0) {
 4147                 BUF_CHECK_MAPPED(bp);
 4148                 pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
 4149                     bp->b_pages, bp->b_npages);
 4150         }
 4151 }
 4152 
 4153 /*
 4154  *      vfs_bio_set_valid:
 4155  *
 4156  *      Set the range within the buffer to valid.  The range is
 4157  *      relative to the beginning of the buffer, b_offset.  Note that
 4158  *      b_offset itself may be offset from the beginning of the first
 4159  *      page.
 4160  */
 4161 void   
 4162 vfs_bio_set_valid(struct buf *bp, int base, int size)
 4163 {
 4164         int i, n;
 4165         vm_page_t m;
 4166 
 4167         if (!(bp->b_flags & B_VMIO))
 4168                 return;
 4169 
 4170         /*
 4171          * Fixup base to be relative to beginning of first page.
 4172          * Set initial n to be the maximum number of bytes in the
 4173          * first page that can be validated.
 4174          */
 4175         base += (bp->b_offset & PAGE_MASK);
 4176         n = PAGE_SIZE - (base & PAGE_MASK);
 4177 
 4178         VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
 4179         for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
 4180                 m = bp->b_pages[i];
 4181                 if (n > size)
 4182                         n = size;
 4183                 vm_page_set_valid_range(m, base & PAGE_MASK, n);
 4184                 base += n;
 4185                 size -= n;
 4186                 n = PAGE_SIZE;
 4187         }
 4188         VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
 4189 }
 4190 
 4191 /*
 4192  *      vfs_bio_clrbuf:
 4193  *
 4194  *      If the specified buffer is a non-VMIO buffer, clear the entire
 4195  *      buffer.  If the specified buffer is a VMIO buffer, clear and
 4196  *      validate only the previously invalid portions of the buffer.
 4197  *      This routine essentially fakes an I/O, so we need to clear
 4198  *      BIO_ERROR and B_INVAL.
 4199  *
 4200  *      Note that while we only theoretically need to clear through b_bcount,
 4201  *      we go ahead and clear through b_bufsize.
 4202  */
 4203 void
 4204 vfs_bio_clrbuf(struct buf *bp) 
 4205 {
 4206         int i, j, mask, sa, ea, slide;
 4207 
 4208         if ((bp->b_flags & (B_VMIO | B_MALLOC)) != B_VMIO) {
 4209                 clrbuf(bp);
 4210                 return;
 4211         }
 4212         bp->b_flags &= ~B_INVAL;
 4213         bp->b_ioflags &= ~BIO_ERROR;
 4214         VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
 4215         if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
 4216             (bp->b_offset & PAGE_MASK) == 0) {
 4217                 if (bp->b_pages[0] == bogus_page)
 4218                         goto unlock;
 4219                 mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
 4220                 VM_OBJECT_ASSERT_WLOCKED(bp->b_pages[0]->object);
 4221                 if ((bp->b_pages[0]->valid & mask) == mask)
 4222                         goto unlock;
 4223                 if ((bp->b_pages[0]->valid & mask) == 0) {
 4224                         pmap_zero_page_area(bp->b_pages[0], 0, bp->b_bufsize);
 4225                         bp->b_pages[0]->valid |= mask;
 4226                         goto unlock;
 4227                 }
 4228         }
 4229         sa = bp->b_offset & PAGE_MASK;
 4230         slide = 0;
 4231         for (i = 0; i < bp->b_npages; i++, sa = 0) {
 4232                 slide = imin(slide + PAGE_SIZE, bp->b_offset + bp->b_bufsize);
 4233                 ea = slide & PAGE_MASK;
 4234                 if (ea == 0)
 4235                         ea = PAGE_SIZE;
 4236                 if (bp->b_pages[i] == bogus_page)
 4237                         continue;
 4238                 j = sa / DEV_BSIZE;
 4239                 mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j;
 4240                 VM_OBJECT_ASSERT_WLOCKED(bp->b_pages[i]->object);
 4241                 if ((bp->b_pages[i]->valid & mask) == mask)
 4242                         continue;
 4243                 if ((bp->b_pages[i]->valid & mask) == 0)
 4244                         pmap_zero_page_area(bp->b_pages[i], sa, ea - sa);
 4245                 else {
 4246                         for (; sa < ea; sa += DEV_BSIZE, j++) {
 4247                                 if ((bp->b_pages[i]->valid & (1 << j)) == 0) {
 4248                                         pmap_zero_page_area(bp->b_pages[i],
 4249                                             sa, DEV_BSIZE);
 4250                                 }
 4251                         }
 4252                 }
 4253                 bp->b_pages[i]->valid |= mask;
 4254         }
 4255 unlock:
 4256         VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
 4257         bp->b_resid = 0;
 4258 }
 4259 
 4260 void
 4261 vfs_bio_bzero_buf(struct buf *bp, int base, int size)
 4262 {
 4263         vm_page_t m;
 4264         int i, n;
 4265 
 4266         if ((bp->b_flags & B_UNMAPPED) == 0) {
 4267                 BUF_CHECK_MAPPED(bp);
 4268                 bzero(bp->b_data + base, size);
 4269         } else {
 4270                 BUF_CHECK_UNMAPPED(bp);
 4271                 n = PAGE_SIZE - (base & PAGE_MASK);
 4272                 for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
 4273                         m = bp->b_pages[i];
 4274                         if (n > size)
 4275                                 n = size;
 4276                         pmap_zero_page_area(m, base & PAGE_MASK, n);
 4277                         base += n;
 4278                         size -= n;
 4279                         n = PAGE_SIZE;
 4280                 }
 4281         }
 4282 }
 4283 
 4284 /*
 4285  * vm_hold_load_pages and vm_hold_free_pages get pages into
 4286  * a buffers address space.  The pages are anonymous and are
 4287  * not associated with a file object.
 4288  */
 4289 static void
 4290 vm_hold_load_pages(struct buf *bp, vm_offset_t from, vm_offset_t to)
 4291 {
 4292         vm_offset_t pg;
 4293         vm_page_t p;
 4294         int index;
 4295 
 4296         BUF_CHECK_MAPPED(bp);
 4297 
 4298         to = round_page(to);
 4299         from = round_page(from);
 4300         index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
 4301 
 4302         for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
 4303 tryagain:
 4304                 /*
 4305                  * note: must allocate system pages since blocking here
 4306                  * could interfere with paging I/O, no matter which
 4307                  * process we are.
 4308                  */
 4309                 p = vm_page_alloc(NULL, 0, VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ |
 4310                     VM_ALLOC_WIRED | VM_ALLOC_COUNT((to - pg) >> PAGE_SHIFT));
 4311                 if (p == NULL) {
 4312                         VM_WAIT;
 4313                         goto tryagain;
 4314                 }
 4315                 pmap_qenter(pg, &p, 1);
 4316                 bp->b_pages[index] = p;
 4317         }
 4318         bp->b_npages = index;
 4319 }
 4320 
 4321 /* Return pages associated with this buf to the vm system */
 4322 static void
 4323 vm_hold_free_pages(struct buf *bp, int newbsize)
 4324 {
 4325         vm_offset_t from;
 4326         vm_page_t p;
 4327         int index, newnpages;
 4328 
 4329         BUF_CHECK_MAPPED(bp);
 4330 
 4331         from = round_page((vm_offset_t)bp->b_data + newbsize);
 4332         newnpages = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
 4333         if (bp->b_npages > newnpages)
 4334                 pmap_qremove(from, bp->b_npages - newnpages);
 4335         for (index = newnpages; index < bp->b_npages; index++) {
 4336                 p = bp->b_pages[index];
 4337                 bp->b_pages[index] = NULL;
 4338                 if (vm_page_sbusied(p))
 4339                         printf("vm_hold_free_pages: blkno: %jd, lblkno: %jd\n",
 4340                             (intmax_t)bp->b_blkno, (intmax_t)bp->b_lblkno);
 4341                 p->wire_count--;
 4342                 vm_page_free(p);
 4343                 atomic_subtract_int(&cnt.v_wire_count, 1);
 4344         }
 4345         bp->b_npages = newnpages;
 4346 }
 4347 
 4348 /*
 4349  * Map an IO request into kernel virtual address space.
 4350  *
 4351  * All requests are (re)mapped into kernel VA space.
 4352  * Notice that we use b_bufsize for the size of the buffer
 4353  * to be mapped.  b_bcount might be modified by the driver.
 4354  *
 4355  * Note that even if the caller determines that the address space should
 4356  * be valid, a race or a smaller-file mapped into a larger space may
 4357  * actually cause vmapbuf() to fail, so all callers of vmapbuf() MUST
 4358  * check the return value.
 4359  */
 4360 int
 4361 vmapbuf(struct buf *bp, int mapbuf)
 4362 {
 4363         caddr_t kva;
 4364         vm_prot_t prot;
 4365         int pidx;
 4366 
 4367         if (bp->b_bufsize < 0)
 4368                 return (-1);
 4369         prot = VM_PROT_READ;
 4370         if (bp->b_iocmd == BIO_READ)
 4371                 prot |= VM_PROT_WRITE;  /* Less backwards than it looks */
 4372         if ((pidx = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
 4373             (vm_offset_t)bp->b_data, bp->b_bufsize, prot, bp->b_pages,
 4374             btoc(MAXPHYS))) < 0)
 4375                 return (-1);
 4376         bp->b_npages = pidx;
 4377         if (mapbuf || !unmapped_buf_allowed) {
 4378                 pmap_qenter((vm_offset_t)bp->b_saveaddr, bp->b_pages, pidx);
 4379                 kva = bp->b_saveaddr;
 4380                 bp->b_saveaddr = bp->b_data;
 4381                 bp->b_data = kva + (((vm_offset_t)bp->b_data) & PAGE_MASK);
 4382                 bp->b_flags &= ~B_UNMAPPED;
 4383         } else {
 4384                 bp->b_flags |= B_UNMAPPED;
 4385                 bp->b_offset = ((vm_offset_t)bp->b_data) & PAGE_MASK;
 4386                 bp->b_saveaddr = bp->b_data;
 4387                 bp->b_data = unmapped_buf;
 4388         }
 4389         return(0);
 4390 }
 4391 
 4392 /*
 4393  * Free the io map PTEs associated with this IO operation.
 4394  * We also invalidate the TLB entries and restore the original b_addr.
 4395  */
 4396 void
 4397 vunmapbuf(struct buf *bp)
 4398 {
 4399         int npages;
 4400 
 4401         npages = bp->b_npages;
 4402         if (bp->b_flags & B_UNMAPPED)
 4403                 bp->b_flags &= ~B_UNMAPPED;
 4404         else
 4405                 pmap_qremove(trunc_page((vm_offset_t)bp->b_data), npages);
 4406         vm_page_unhold_pages(bp->b_pages, npages);
 4407         
 4408         bp->b_data = bp->b_saveaddr;
 4409 }
 4410 
 4411 void
 4412 bdone(struct buf *bp)
 4413 {
 4414         struct mtx *mtxp;
 4415 
 4416         mtxp = mtx_pool_find(mtxpool_sleep, bp);
 4417         mtx_lock(mtxp);
 4418         bp->b_flags |= B_DONE;
 4419         wakeup(bp);
 4420         mtx_unlock(mtxp);
 4421 }
 4422 
 4423 void
 4424 bwait(struct buf *bp, u_char pri, const char *wchan)
 4425 {
 4426         struct mtx *mtxp;
 4427 
 4428         mtxp = mtx_pool_find(mtxpool_sleep, bp);
 4429         mtx_lock(mtxp);
 4430         while ((bp->b_flags & B_DONE) == 0)
 4431                 msleep(bp, mtxp, pri, wchan, 0);
 4432         mtx_unlock(mtxp);
 4433 }
 4434 
 4435 int
 4436 bufsync(struct bufobj *bo, int waitfor)
 4437 {
 4438 
 4439         return (VOP_FSYNC(bo->__bo_vnode, waitfor, curthread));
 4440 }
 4441 
 4442 void
 4443 bufstrategy(struct bufobj *bo, struct buf *bp)
 4444 {
 4445         int i = 0;
 4446         struct vnode *vp;
 4447 
 4448         vp = bp->b_vp;
 4449         KASSERT(vp == bo->bo_private, ("Inconsistent vnode bufstrategy"));
 4450         KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
 4451             ("Wrong vnode in bufstrategy(bp=%p, vp=%p)", bp, vp));
 4452         i = VOP_STRATEGY(vp, bp);
 4453         KASSERT(i == 0, ("VOP_STRATEGY failed bp=%p vp=%p", bp, bp->b_vp));
 4454 }
 4455 
 4456 void
 4457 bufobj_wrefl(struct bufobj *bo)
 4458 {
 4459 
 4460         KASSERT(bo != NULL, ("NULL bo in bufobj_wref"));
 4461         ASSERT_BO_WLOCKED(bo);
 4462         bo->bo_numoutput++;
 4463 }
 4464 
 4465 void
 4466 bufobj_wref(struct bufobj *bo)
 4467 {
 4468 
 4469         KASSERT(bo != NULL, ("NULL bo in bufobj_wref"));
 4470         BO_LOCK(bo);
 4471         bo->bo_numoutput++;
 4472         BO_UNLOCK(bo);
 4473 }
 4474 
 4475 void
 4476 bufobj_wdrop(struct bufobj *bo)
 4477 {
 4478 
 4479         KASSERT(bo != NULL, ("NULL bo in bufobj_wdrop"));
 4480         BO_LOCK(bo);
 4481         KASSERT(bo->bo_numoutput > 0, ("bufobj_wdrop non-positive count"));
 4482         if ((--bo->bo_numoutput == 0) && (bo->bo_flag & BO_WWAIT)) {
 4483                 bo->bo_flag &= ~BO_WWAIT;
 4484                 wakeup(&bo->bo_numoutput);
 4485         }
 4486         BO_UNLOCK(bo);
 4487 }
 4488 
 4489 int
 4490 bufobj_wwait(struct bufobj *bo, int slpflag, int timeo)
 4491 {
 4492         int error;
 4493 
 4494         KASSERT(bo != NULL, ("NULL bo in bufobj_wwait"));
 4495         ASSERT_BO_WLOCKED(bo);
 4496         error = 0;
 4497         while (bo->bo_numoutput) {
 4498                 bo->bo_flag |= BO_WWAIT;
 4499                 error = msleep(&bo->bo_numoutput, BO_LOCKPTR(bo),
 4500                     slpflag | (PRIBIO + 1), "bo_wwait", timeo);
 4501                 if (error)
 4502                         break;
 4503         }
 4504         return (error);
 4505 }
 4506 
 4507 void
 4508 bpin(struct buf *bp)
 4509 {
 4510         struct mtx *mtxp;
 4511 
 4512         mtxp = mtx_pool_find(mtxpool_sleep, bp);
 4513         mtx_lock(mtxp);
 4514         bp->b_pin_count++;
 4515         mtx_unlock(mtxp);
 4516 }
 4517 
 4518 void
 4519 bunpin(struct buf *bp)
 4520 {
 4521         struct mtx *mtxp;
 4522 
 4523         mtxp = mtx_pool_find(mtxpool_sleep, bp);
 4524         mtx_lock(mtxp);
 4525         if (--bp->b_pin_count == 0)
 4526                 wakeup(bp);
 4527         mtx_unlock(mtxp);
 4528 }
 4529 
 4530 void
 4531 bunpin_wait(struct buf *bp)
 4532 {
 4533         struct mtx *mtxp;
 4534 
 4535         mtxp = mtx_pool_find(mtxpool_sleep, bp);
 4536         mtx_lock(mtxp);
 4537         while (bp->b_pin_count > 0)
 4538                 msleep(bp, mtxp, PRIBIO, "bwunpin", 0);
 4539         mtx_unlock(mtxp);
 4540 }
 4541 
 4542 /*
 4543  * Set bio_data or bio_ma for struct bio from the struct buf.
 4544  */
 4545 void
 4546 bdata2bio(struct buf *bp, struct bio *bip)
 4547 {
 4548 
 4549         if ((bp->b_flags & B_UNMAPPED) != 0) {
 4550                 KASSERT(unmapped_buf_allowed, ("unmapped"));
 4551                 bip->bio_ma = bp->b_pages;
 4552                 bip->bio_ma_n = bp->b_npages;
 4553                 bip->bio_data = unmapped_buf;
 4554                 bip->bio_ma_offset = (vm_offset_t)bp->b_offset & PAGE_MASK;
 4555                 bip->bio_flags |= BIO_UNMAPPED;
 4556                 KASSERT(round_page(bip->bio_ma_offset + bip->bio_length) /
 4557                     PAGE_SIZE == bp->b_npages,
 4558                     ("Buffer %p too short: %d %lld %d", bp, bip->bio_ma_offset,
 4559                     (long long)bip->bio_length, bip->bio_ma_n));
 4560         } else {
 4561                 bip->bio_data = bp->b_data;
 4562                 bip->bio_ma = NULL;
 4563         }
 4564 }
 4565 
 4566 #include "opt_ddb.h"
 4567 #ifdef DDB
 4568 #include <ddb/ddb.h>
 4569 
 4570 /* DDB command to show buffer data */
 4571 DB_SHOW_COMMAND(buffer, db_show_buffer)
 4572 {
 4573         /* get args */
 4574         struct buf *bp = (struct buf *)addr;
 4575 
 4576         if (!have_addr) {
 4577                 db_printf("usage: show buffer <addr>\n");
 4578                 return;
 4579         }
 4580 
 4581         db_printf("buf at %p\n", bp);
 4582         db_printf("b_flags = 0x%b, b_xflags=0x%b, b_vflags=0x%b\n",
 4583             (u_int)bp->b_flags, PRINT_BUF_FLAGS, (u_int)bp->b_xflags,
 4584             PRINT_BUF_XFLAGS, (u_int)bp->b_vflags, PRINT_BUF_VFLAGS);
 4585         db_printf(
 4586             "b_error = %d, b_bufsize = %ld, b_bcount = %ld, b_resid = %ld\n"
 4587             "b_bufobj = (%p), b_data = %p, b_blkno = %jd, b_lblkno = %jd, "
 4588             "b_dep = %p\n",
 4589             bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
 4590             bp->b_bufobj, bp->b_data, (intmax_t)bp->b_blkno,
 4591             (intmax_t)bp->b_lblkno, bp->b_dep.lh_first);
 4592         if (bp->b_npages) {
 4593                 int i;
 4594                 db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
 4595                 for (i = 0; i < bp->b_npages; i++) {
 4596                         vm_page_t m;
 4597                         m = bp->b_pages[i];
 4598                         db_printf("(%p, 0x%lx, 0x%lx)", (void *)m->object,
 4599                             (u_long)m->pindex, (u_long)VM_PAGE_TO_PHYS(m));
 4600                         if ((i + 1) < bp->b_npages)
 4601                                 db_printf(",");
 4602                 }
 4603                 db_printf("\n");
 4604         }
 4605         db_printf(" ");
 4606         BUF_LOCKPRINTINFO(bp);
 4607 }
 4608 
 4609 DB_SHOW_COMMAND(lockedbufs, lockedbufs)
 4610 {
 4611         struct buf *bp;
 4612         int i;
 4613 
 4614         for (i = 0; i < nbuf; i++) {
 4615                 bp = &buf[i];
 4616                 if (BUF_ISLOCKED(bp)) {
 4617                         db_show_buffer((uintptr_t)bp, 1, 0, NULL);
 4618                         db_printf("\n");
 4619                 }
 4620         }
 4621 }
 4622 
 4623 DB_SHOW_COMMAND(vnodebufs, db_show_vnodebufs)
 4624 {
 4625         struct vnode *vp;
 4626         struct buf *bp;
 4627 
 4628         if (!have_addr) {
 4629                 db_printf("usage: show vnodebufs <addr>\n");
 4630                 return;
 4631         }
 4632         vp = (struct vnode *)addr;
 4633         db_printf("Clean buffers:\n");
 4634         TAILQ_FOREACH(bp, &vp->v_bufobj.bo_clean.bv_hd, b_bobufs) {
 4635                 db_show_buffer((uintptr_t)bp, 1, 0, NULL);
 4636                 db_printf("\n");
 4637         }
 4638         db_printf("Dirty buffers:\n");
 4639         TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs) {
 4640                 db_show_buffer((uintptr_t)bp, 1, 0, NULL);
 4641                 db_printf("\n");
 4642         }
 4643 }
 4644 
 4645 DB_COMMAND(countfreebufs, db_coundfreebufs)
 4646 {
 4647         struct buf *bp;
 4648         int i, used = 0, nfree = 0;
 4649 
 4650         if (have_addr) {
 4651                 db_printf("usage: countfreebufs\n");
 4652                 return;
 4653         }
 4654 
 4655         for (i = 0; i < nbuf; i++) {
 4656                 bp = &buf[i];
 4657                 if ((bp->b_flags & B_INFREECNT) != 0)
 4658                         nfree++;
 4659                 else
 4660                         used++;
 4661         }
 4662 
 4663         db_printf("Counted %d free, %d used (%d tot)\n", nfree, used,
 4664             nfree + used);
 4665         db_printf("numfreebuffers is %d\n", numfreebuffers);
 4666 }
 4667 #endif /* DDB */

Cache object: 273e6fb75cd7871181b5fdd5f16f69af


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.