FreeBSD/Linux Kernel Cross Reference
sys/kern/vfs_bio.c


    1 /*-
    2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
    3  *
    4  * Copyright (c) 2004 Poul-Henning Kamp
    5  * Copyright (c) 1994,1997 John S. Dyson
    6  * Copyright (c) 2013 The FreeBSD Foundation
    7  * All rights reserved.
    8  *
    9  * Portions of this software were developed by Konstantin Belousov
   10  * under sponsorship from the FreeBSD Foundation.
   11  *
   12  * Redistribution and use in source and binary forms, with or without
   13  * modification, are permitted provided that the following conditions
   14  * are met:
   15  * 1. Redistributions of source code must retain the above copyright
   16  *    notice, this list of conditions and the following disclaimer.
   17  * 2. Redistributions in binary form must reproduce the above copyright
   18  *    notice, this list of conditions and the following disclaimer in the
   19  *    documentation and/or other materials provided with the distribution.
   20  *
   21  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   24  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   31  * SUCH DAMAGE.
   32  */
   33 
   34 /*
    35  * This file contains a buffer I/O scheme that keeps the VM object
    36  * cache and the buffer cache coherent.  Pains have been taken to
    37  * make sure that the performance degradation associated with such
    38  * schemes is not realized here.
    39  *
    40  * Author:  John S. Dyson
    41  * Significant help during the development and debugging phases
    42  * was provided by David Greenman, also of the FreeBSD core team.
    43  *
    44  * See buf(9) for more information.
   45  */
   46 
   47 #include <sys/cdefs.h>
   48 __FBSDID("$FreeBSD: head/sys/kern/vfs_bio.c 345077 2019-03-12 19:08:41Z mckusick $");
   49 
   50 #include <sys/param.h>
   51 #include <sys/systm.h>
   52 #include <sys/bio.h>
   53 #include <sys/bitset.h>
   54 #include <sys/conf.h>
   55 #include <sys/counter.h>
   56 #include <sys/buf.h>
   57 #include <sys/devicestat.h>
   58 #include <sys/eventhandler.h>
   59 #include <sys/fail.h>
   60 #include <sys/limits.h>
   61 #include <sys/lock.h>
   62 #include <sys/malloc.h>
   63 #include <sys/mount.h>
   64 #include <sys/mutex.h>
   65 #include <sys/kernel.h>
   66 #include <sys/kthread.h>
   67 #include <sys/proc.h>
   68 #include <sys/racct.h>
   69 #include <sys/resourcevar.h>
   70 #include <sys/rwlock.h>
   71 #include <sys/smp.h>
   72 #include <sys/sysctl.h>
   73 #include <sys/sysproto.h>
   74 #include <sys/vmem.h>
   75 #include <sys/vmmeter.h>
   76 #include <sys/vnode.h>
   77 #include <sys/watchdog.h>
   78 #include <geom/geom.h>
   79 #include <vm/vm.h>
   80 #include <vm/vm_param.h>
   81 #include <vm/vm_kern.h>
   82 #include <vm/vm_object.h>
   83 #include <vm/vm_page.h>
   84 #include <vm/vm_pageout.h>
   85 #include <vm/vm_pager.h>
   86 #include <vm/vm_extern.h>
   87 #include <vm/vm_map.h>
   88 #include <vm/swap_pager.h>
   89 
   90 static MALLOC_DEFINE(M_BIOBUF, "biobuf", "BIO buffer");
   91 
   92 struct  bio_ops bioops;         /* I/O operation notification */
   93 
   94 struct  buf_ops buf_ops_bio = {
   95         .bop_name       =       "buf_ops_bio",
   96         .bop_write      =       bufwrite,
   97         .bop_strategy   =       bufstrategy,
   98         .bop_sync       =       bufsync,
   99         .bop_bdflush    =       bufbdflush,
  100 };
  101 
  102 struct bufqueue {
  103         struct mtx_padalign     bq_lock;
  104         TAILQ_HEAD(, buf)       bq_queue;
  105         uint8_t                 bq_index;
  106         uint16_t                bq_subqueue;
  107         int                     bq_len;
  108 } __aligned(CACHE_LINE_SIZE);
  109 
  110 #define BQ_LOCKPTR(bq)          (&(bq)->bq_lock)
  111 #define BQ_LOCK(bq)             mtx_lock(BQ_LOCKPTR((bq)))
  112 #define BQ_UNLOCK(bq)           mtx_unlock(BQ_LOCKPTR((bq)))
  113 #define BQ_ASSERT_LOCKED(bq)    mtx_assert(BQ_LOCKPTR((bq)), MA_OWNED)
  114 
  115 struct bufdomain {
  116         struct bufqueue bd_subq[MAXCPU + 1]; /* Per-cpu sub queues + global */
  117         struct bufqueue bd_dirtyq;
  118         struct bufqueue *bd_cleanq;
  119         struct mtx_padalign bd_run_lock;
  120         /* Constants */
  121         long            bd_maxbufspace;
  122         long            bd_hibufspace;
  123         long            bd_lobufspace;
  124         long            bd_bufspacethresh;
  125         int             bd_hifreebuffers;
  126         int             bd_lofreebuffers;
  127         int             bd_hidirtybuffers;
  128         int             bd_lodirtybuffers;
  129         int             bd_dirtybufthresh;
  130         int             bd_lim;
  131         /* atomics */
  132         int             bd_wanted;
  133         int __aligned(CACHE_LINE_SIZE)  bd_numdirtybuffers;
  134         int __aligned(CACHE_LINE_SIZE)  bd_running;
  135         long __aligned(CACHE_LINE_SIZE) bd_bufspace;
  136         int __aligned(CACHE_LINE_SIZE)  bd_freebuffers;
  137 } __aligned(CACHE_LINE_SIZE);
  138 
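/*
 * A note on the layout above: the per-field __aligned(CACHE_LINE_SIZE)
 * qualifiers give each frequently-written atomic its own cache line,
 * so cross-CPU updates to, e.g., bd_bufspace do not invalidate the
 * line holding bd_numdirtybuffers.  A minimal sketch of the idiom
 * (example_stats is a hypothetical name):
 *
 *	struct example_stats {
 *		long	es_reads __aligned(CACHE_LINE_SIZE);
 *		long	es_writes __aligned(CACHE_LINE_SIZE);
 *	} __aligned(CACHE_LINE_SIZE);
 *
 * The trailing alignment keeps adjacent array elements from sharing a
 * line as well, which is why bufqueue and bufdomain carry it too.
 */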
  139 #define BD_LOCKPTR(bd)          (&(bd)->bd_cleanq->bq_lock)
  140 #define BD_LOCK(bd)             mtx_lock(BD_LOCKPTR((bd)))
  141 #define BD_UNLOCK(bd)           mtx_unlock(BD_LOCKPTR((bd)))
  142 #define BD_ASSERT_LOCKED(bd)    mtx_assert(BD_LOCKPTR((bd)), MA_OWNED)
  143 #define BD_RUN_LOCKPTR(bd)      (&(bd)->bd_run_lock)
  144 #define BD_RUN_LOCK(bd)         mtx_lock(BD_RUN_LOCKPTR((bd)))
  145 #define BD_RUN_UNLOCK(bd)       mtx_unlock(BD_RUN_LOCKPTR((bd)))
  146 #define BD_DOMAIN(bd)           (bd - bdomain)
  147 
  148 static struct buf *buf;         /* buffer header pool */
  149 extern struct buf *swbuf;       /* Swap buffer header pool. */
  150 caddr_t unmapped_buf;
  151 
  152 /* Used below and for softdep flushing threads in ufs/ffs/ffs_softdep.c */
  153 struct proc *bufdaemonproc;
  154 
  155 static int inmem(struct vnode *vp, daddr_t blkno);
  156 static void vm_hold_free_pages(struct buf *bp, int newbsize);
  157 static void vm_hold_load_pages(struct buf *bp, vm_offset_t from,
  158                 vm_offset_t to);
  159 static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m);
  160 static void vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off,
  161                 vm_page_t m);
  162 static void vfs_clean_pages_dirty_buf(struct buf *bp);
  163 static void vfs_setdirty_locked_object(struct buf *bp);
  164 static void vfs_vmio_invalidate(struct buf *bp);
  165 static void vfs_vmio_truncate(struct buf *bp, int npages);
  166 static void vfs_vmio_extend(struct buf *bp, int npages, int size);
  167 static int vfs_bio_clcheck(struct vnode *vp, int size,
  168                 daddr_t lblkno, daddr_t blkno);
  169 static void breada(struct vnode *, daddr_t *, int *, int, struct ucred *, int,
  170                 void (*)(struct buf *));
  171 static int buf_flush(struct vnode *vp, struct bufdomain *, int);
  172 static int flushbufqueues(struct vnode *, struct bufdomain *, int, int);
  173 static void buf_daemon(void);
  174 static __inline void bd_wakeup(void);
  175 static int sysctl_runningspace(SYSCTL_HANDLER_ARGS);
  176 static void bufkva_reclaim(vmem_t *, int);
  177 static void bufkva_free(struct buf *);
  178 static int buf_import(void *, void **, int, int, int);
  179 static void buf_release(void *, void **, int);
  180 static void maxbcachebuf_adjust(void);
  181 static inline struct bufdomain *bufdomain(struct buf *);
  182 static void bq_remove(struct bufqueue *bq, struct buf *bp);
  183 static void bq_insert(struct bufqueue *bq, struct buf *bp, bool unlock);
  184 static int buf_recycle(struct bufdomain *, bool kva);
  185 static void bq_init(struct bufqueue *bq, int qindex, int cpu,
  186             const char *lockname);
  187 static void bd_init(struct bufdomain *bd);
  188 static int bd_flushall(struct bufdomain *bd);
  189 static int sysctl_bufdomain_long(SYSCTL_HANDLER_ARGS);
  190 static int sysctl_bufdomain_int(SYSCTL_HANDLER_ARGS);
  191 
  192 static int sysctl_bufspace(SYSCTL_HANDLER_ARGS);
  193 int vmiodirenable = TRUE;
  194 SYSCTL_INT(_vfs, OID_AUTO, vmiodirenable, CTLFLAG_RW, &vmiodirenable, 0,
  195     "Use the VM system for directory writes");
  196 long runningbufspace;
  197 SYSCTL_LONG(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD, &runningbufspace, 0,
   198     "Amount of presently outstanding async buffer I/O");
  199 SYSCTL_PROC(_vfs, OID_AUTO, bufspace, CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RD,
  200     NULL, 0, sysctl_bufspace, "L", "Physical memory used for buffers");
  201 static counter_u64_t bufkvaspace;
  202 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, bufkvaspace, CTLFLAG_RD, &bufkvaspace,
  203     "Kernel virtual memory used for buffers");
  204 static long maxbufspace;
  205 SYSCTL_PROC(_vfs, OID_AUTO, maxbufspace,
  206     CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RW, &maxbufspace,
  207     __offsetof(struct bufdomain, bd_maxbufspace), sysctl_bufdomain_long, "L",
  208     "Maximum allowed value of bufspace (including metadata)");
  209 static long bufmallocspace;
  210 SYSCTL_LONG(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD, &bufmallocspace, 0,
  211     "Amount of malloced memory for buffers");
  212 static long maxbufmallocspace;
  213 SYSCTL_LONG(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW, &maxbufmallocspace,
  214     0, "Maximum amount of malloced memory for buffers");
  215 static long lobufspace;
  216 SYSCTL_PROC(_vfs, OID_AUTO, lobufspace,
  217     CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RW, &lobufspace,
  218     __offsetof(struct bufdomain, bd_lobufspace), sysctl_bufdomain_long, "L",
   219     "Minimum amount of buffer space we want to have");
  220 long hibufspace;
  221 SYSCTL_PROC(_vfs, OID_AUTO, hibufspace,
  222     CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RW, &hibufspace,
  223     __offsetof(struct bufdomain, bd_hibufspace), sysctl_bufdomain_long, "L",
  224     "Maximum allowed value of bufspace (excluding metadata)");
  225 long bufspacethresh;
  226 SYSCTL_PROC(_vfs, OID_AUTO, bufspacethresh,
  227     CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RW, &bufspacethresh,
  228     __offsetof(struct bufdomain, bd_bufspacethresh), sysctl_bufdomain_long, "L",
  229     "Bufspace consumed before waking the daemon to free some");
  230 static counter_u64_t buffreekvacnt;
  231 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, buffreekvacnt, CTLFLAG_RW, &buffreekvacnt,
  232     "Number of times we have freed the KVA space from some buffer");
  233 static counter_u64_t bufdefragcnt;
  234 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, bufdefragcnt, CTLFLAG_RW, &bufdefragcnt,
  235     "Number of times we have had to repeat buffer allocation to defragment");
  236 static long lorunningspace;
  237 SYSCTL_PROC(_vfs, OID_AUTO, lorunningspace, CTLTYPE_LONG | CTLFLAG_MPSAFE |
  238     CTLFLAG_RW, &lorunningspace, 0, sysctl_runningspace, "L",
  239     "Minimum preferred space used for in-progress I/O");
  240 static long hirunningspace;
  241 SYSCTL_PROC(_vfs, OID_AUTO, hirunningspace, CTLTYPE_LONG | CTLFLAG_MPSAFE |
  242     CTLFLAG_RW, &hirunningspace, 0, sysctl_runningspace, "L",
  243     "Maximum amount of space to use for in-progress I/O");
  244 int dirtybufferflushes;
  245 SYSCTL_INT(_vfs, OID_AUTO, dirtybufferflushes, CTLFLAG_RW, &dirtybufferflushes,
  246     0, "Number of bdwrite to bawrite conversions to limit dirty buffers");
  247 int bdwriteskip;
  248 SYSCTL_INT(_vfs, OID_AUTO, bdwriteskip, CTLFLAG_RW, &bdwriteskip,
  249     0, "Number of buffers supplied to bdwrite with snapshot deadlock risk");
  250 int altbufferflushes;
  251 SYSCTL_INT(_vfs, OID_AUTO, altbufferflushes, CTLFLAG_RW, &altbufferflushes,
  252     0, "Number of fsync flushes to limit dirty buffers");
  253 static int recursiveflushes;
  254 SYSCTL_INT(_vfs, OID_AUTO, recursiveflushes, CTLFLAG_RW, &recursiveflushes,
  255     0, "Number of flushes skipped due to being recursive");
  256 static int sysctl_numdirtybuffers(SYSCTL_HANDLER_ARGS);
  257 SYSCTL_PROC(_vfs, OID_AUTO, numdirtybuffers,
  258     CTLTYPE_INT|CTLFLAG_MPSAFE|CTLFLAG_RD, NULL, 0, sysctl_numdirtybuffers, "I",
   259     "Number of buffers that are dirty (have unwritten changes) at the moment");
  260 static int lodirtybuffers;
  261 SYSCTL_PROC(_vfs, OID_AUTO, lodirtybuffers,
  262     CTLTYPE_INT|CTLFLAG_MPSAFE|CTLFLAG_RW, &lodirtybuffers,
  263     __offsetof(struct bufdomain, bd_lodirtybuffers), sysctl_bufdomain_int, "I",
   264     "Number of dirty buffers below which the buf daemon can sleep");
  265 static int hidirtybuffers;
  266 SYSCTL_PROC(_vfs, OID_AUTO, hidirtybuffers,
  267     CTLTYPE_INT|CTLFLAG_MPSAFE|CTLFLAG_RW, &hidirtybuffers,
  268     __offsetof(struct bufdomain, bd_hidirtybuffers), sysctl_bufdomain_int, "I",
  269     "When the number of dirty buffers is considered severe");
  270 int dirtybufthresh;
  271 SYSCTL_PROC(_vfs, OID_AUTO, dirtybufthresh,
  272     CTLTYPE_INT|CTLFLAG_MPSAFE|CTLFLAG_RW, &dirtybufthresh,
  273     __offsetof(struct bufdomain, bd_dirtybufthresh), sysctl_bufdomain_int, "I",
  274     "Number of bdwrite to bawrite conversions to clear dirty buffers");
  275 static int numfreebuffers;
  276 SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD, &numfreebuffers, 0,
  277     "Number of free buffers");
  278 static int lofreebuffers;
  279 SYSCTL_PROC(_vfs, OID_AUTO, lofreebuffers,
  280     CTLTYPE_INT|CTLFLAG_MPSAFE|CTLFLAG_RW, &lofreebuffers,
  281     __offsetof(struct bufdomain, bd_lofreebuffers), sysctl_bufdomain_int, "I",
  282    "Target number of free buffers");
  283 static int hifreebuffers;
  284 SYSCTL_PROC(_vfs, OID_AUTO, hifreebuffers,
  285     CTLTYPE_INT|CTLFLAG_MPSAFE|CTLFLAG_RW, &hifreebuffers,
  286     __offsetof(struct bufdomain, bd_hifreebuffers), sysctl_bufdomain_int, "I",
  287    "Threshold for clean buffer recycling");
  288 static counter_u64_t getnewbufcalls;
  289 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, getnewbufcalls, CTLFLAG_RD,
  290    &getnewbufcalls, "Number of calls to getnewbuf");
  291 static counter_u64_t getnewbufrestarts;
  292 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, getnewbufrestarts, CTLFLAG_RD,
  293     &getnewbufrestarts,
  294     "Number of times getnewbuf has had to restart a buffer acquisition");
  295 static counter_u64_t mappingrestarts;
  296 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, mappingrestarts, CTLFLAG_RD,
  297     &mappingrestarts,
  298     "Number of times getblk has had to restart a buffer mapping for "
  299     "unmapped buffer");
  300 static counter_u64_t numbufallocfails;
  301 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, numbufallocfails, CTLFLAG_RW,
  302     &numbufallocfails, "Number of times buffer allocations failed");
  303 static int flushbufqtarget = 100;
  304 SYSCTL_INT(_vfs, OID_AUTO, flushbufqtarget, CTLFLAG_RW, &flushbufqtarget, 0,
  305     "Amount of work to do in flushbufqueues when helping bufdaemon");
  306 static counter_u64_t notbufdflushes;
  307 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, notbufdflushes, CTLFLAG_RD, &notbufdflushes,
  308     "Number of dirty buffer flushes done by the bufdaemon helpers");
  309 static long barrierwrites;
  310 SYSCTL_LONG(_vfs, OID_AUTO, barrierwrites, CTLFLAG_RW, &barrierwrites, 0,
  311     "Number of barrier writes");
  312 SYSCTL_INT(_vfs, OID_AUTO, unmapped_buf_allowed, CTLFLAG_RD,
  313     &unmapped_buf_allowed, 0,
   314     "Permit the use of unmapped I/O");
  315 int maxbcachebuf = MAXBCACHEBUF;
  316 SYSCTL_INT(_vfs, OID_AUTO, maxbcachebuf, CTLFLAG_RDTUN, &maxbcachebuf, 0,
  317     "Maximum size of a buffer cache block");
  318 
  319 /*
  320  * This lock synchronizes access to bd_request.
  321  */
  322 static struct mtx_padalign __exclusive_cache_line bdlock;
  323 
  324 /*
  325  * This lock protects the runningbufreq and synchronizes runningbufwakeup and
  326  * waitrunningbufspace().
  327  */
  328 static struct mtx_padalign __exclusive_cache_line rbreqlock;
  329 
  330 /*
  331  * Lock that protects bdirtywait.
  332  */
  333 static struct mtx_padalign __exclusive_cache_line bdirtylock;
  334 
  335 /*
  336  * Wakeup point for bufdaemon, as well as indicator of whether it is already
  337  * active.  Set to 1 when the bufdaemon is already "on" the queue, 0 when it
  338  * is idling.
  339  */
  340 static int bd_request;
  341 
  342 /*
  343  * Request for the buf daemon to write more buffers than is indicated by
   344  * lodirtybuffers.  This may be necessary to push out excess dependencies or
  345  * defragment the address space where a simple count of the number of dirty
  346  * buffers is insufficient to characterize the demand for flushing them.
  347  */
  348 static int bd_speedupreq;
  349 
  350 /*
  351  * Synchronization (sleep/wakeup) variable for active buffer space requests.
  352  * Set when wait starts, cleared prior to wakeup().
  353  * Used in runningbufwakeup() and waitrunningbufspace().
  354  */
  355 static int runningbufreq;
  356 
  357 /*
  358  * Synchronization for bwillwrite() waiters.
  359  */
  360 static int bdirtywait;
  361 
  362 /*
  363  * Definitions for the buffer free lists.
  364  */
  365 #define QUEUE_NONE      0       /* on no queue */
  366 #define QUEUE_EMPTY     1       /* empty buffer headers */
  367 #define QUEUE_DIRTY     2       /* B_DELWRI buffers */
  368 #define QUEUE_CLEAN     3       /* non-B_DELWRI buffers */
   369 #define QUEUE_SENTINEL  4       /* not a queue index, but a sentinel marker */
  370 
  371 /* Maximum number of buffer domains. */
  372 #define BUF_DOMAINS     8
  373 
  374 struct bufdomainset bdlodirty;          /* Domains > lodirty */
  375 struct bufdomainset bdhidirty;          /* Domains > hidirty */
  376 
  377 /* Configured number of clean queues. */
  378 static int __read_mostly buf_domains;
  379 
  380 BITSET_DEFINE(bufdomainset, BUF_DOMAINS);
  381 struct bufdomain __exclusive_cache_line bdomain[BUF_DOMAINS];
  382 struct bufqueue __exclusive_cache_line bqempty;
  383 
  384 /*
   385  * Per-CPU cache of empty buffers.
  386  */
  387 uma_zone_t buf_zone;
  388 
  389 /*
  390  * Single global constant for BUF_WMESG, to avoid getting multiple references.
   391  * buf_wmesg is referenced from macros.
  392  */
  393 const char *buf_wmesg = BUF_WMESG;
  394 
  395 static int
  396 sysctl_runningspace(SYSCTL_HANDLER_ARGS)
  397 {
  398         long value;
  399         int error;
  400 
  401         value = *(long *)arg1;
  402         error = sysctl_handle_long(oidp, &value, 0, req);
  403         if (error != 0 || req->newptr == NULL)
  404                 return (error);
  405         mtx_lock(&rbreqlock);
  406         if (arg1 == &hirunningspace) {
  407                 if (value < lorunningspace)
  408                         error = EINVAL;
  409                 else
  410                         hirunningspace = value;
  411         } else {
  412                 KASSERT(arg1 == &lorunningspace,
  413                     ("%s: unknown arg1", __func__));
  414                 if (value > hirunningspace)
  415                         error = EINVAL;
  416                 else
  417                         lorunningspace = value;
  418         }
  419         mtx_unlock(&rbreqlock);
  420         return (error);
  421 }
  422 
  423 static int
  424 sysctl_bufdomain_int(SYSCTL_HANDLER_ARGS)
  425 {
  426         int error;
  427         int value;
  428         int i;
  429 
  430         value = *(int *)arg1;
  431         error = sysctl_handle_int(oidp, &value, 0, req);
  432         if (error != 0 || req->newptr == NULL)
  433                 return (error);
  434         *(int *)arg1 = value;
  435         for (i = 0; i < buf_domains; i++)
  436                 *(int *)(uintptr_t)(((uintptr_t)&bdomain[i]) + arg2) =
  437                     value / buf_domains;
  438 
  439         return (error);
  440 }
  441 
  442 static int
  443 sysctl_bufdomain_long(SYSCTL_HANDLER_ARGS)
  444 {
  445         long value;
  446         int error;
  447         int i;
  448 
  449         value = *(long *)arg1;
  450         error = sysctl_handle_long(oidp, &value, 0, req);
  451         if (error != 0 || req->newptr == NULL)
  452                 return (error);
  453         *(long *)arg1 = value;
  454         for (i = 0; i < buf_domains; i++)
  455                 *(long *)(uintptr_t)(((uintptr_t)&bdomain[i]) + arg2) =
  456                     value / buf_domains;
  457 
  458         return (error);
  459 }
  460 
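/*
 * The two handlers above rely on arg2 carrying a byte offset
 * (__offsetof(struct bufdomain, field)) so that a single handler can
 * fan a global tunable out to the matching field of every domain.  A
 * minimal userspace sketch of the addressing trick, with hypothetical
 * example_ names:
 *
 *	#include <stddef.h>
 *	#include <stdint.h>
 *
 *	struct example_domain {
 *		long	ed_lo;
 *		long	ed_hi;
 *	};
 *	static struct example_domain example_doms[4];
 *
 *	static void
 *	example_fanout(size_t offset, long value, int ndoms)
 *	{
 *		int i;
 *
 *		for (i = 0; i < ndoms; i++)
 *			*(long *)((uintptr_t)&example_doms[i] + offset) =
 *			    value / ndoms;
 *	}
 *
 * called as example_fanout(offsetof(struct example_domain, ed_hi),
 * value, 4), mirroring how each domain receives value / buf_domains.
 */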
  461 #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
  462     defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
  463 static int
  464 sysctl_bufspace(SYSCTL_HANDLER_ARGS)
  465 {
  466         long lvalue;
  467         int ivalue;
  468         int i;
  469 
  470         lvalue = 0;
  471         for (i = 0; i < buf_domains; i++)
  472                 lvalue += bdomain[i].bd_bufspace;
  473         if (sizeof(int) == sizeof(long) || req->oldlen >= sizeof(long))
  474                 return (sysctl_handle_long(oidp, &lvalue, 0, req));
  475         if (lvalue > INT_MAX)
  476                 /* On overflow, still write out a long to trigger ENOMEM. */
  477                 return (sysctl_handle_long(oidp, &lvalue, 0, req));
  478         ivalue = lvalue;
  479         return (sysctl_handle_int(oidp, &ivalue, 0, req));
  480 }
  481 #else
  482 static int
  483 sysctl_bufspace(SYSCTL_HANDLER_ARGS)
  484 {
  485         long lvalue;
  486         int i;
  487 
  488         lvalue = 0;
  489         for (i = 0; i < buf_domains; i++)
  490                 lvalue += bdomain[i].bd_bufspace;
  491         return (sysctl_handle_long(oidp, &lvalue, 0, req));
  492 }
  493 #endif
  494 
  495 static int
  496 sysctl_numdirtybuffers(SYSCTL_HANDLER_ARGS)
  497 {
  498         int value;
  499         int i;
  500 
  501         value = 0;
  502         for (i = 0; i < buf_domains; i++)
  503                 value += bdomain[i].bd_numdirtybuffers;
  504         return (sysctl_handle_int(oidp, &value, 0, req));
  505 }
  506 
  507 /*
  508  *      bdirtywakeup:
  509  *
  510  *      Wakeup any bwillwrite() waiters.
  511  */
  512 static void
  513 bdirtywakeup(void)
  514 {
  515         mtx_lock(&bdirtylock);
  516         if (bdirtywait) {
  517                 bdirtywait = 0;
  518                 wakeup(&bdirtywait);
  519         }
  520         mtx_unlock(&bdirtylock);
  521 }
  522 
  523 /*
  524  *      bd_clear:
  525  *
  526  *      Clear a domain from the appropriate bitsets when dirtybuffers
  527  *      is decremented.
  528  */
  529 static void
  530 bd_clear(struct bufdomain *bd)
  531 {
  532 
  533         mtx_lock(&bdirtylock);
  534         if (bd->bd_numdirtybuffers <= bd->bd_lodirtybuffers)
  535                 BIT_CLR(BUF_DOMAINS, BD_DOMAIN(bd), &bdlodirty);
  536         if (bd->bd_numdirtybuffers <= bd->bd_hidirtybuffers)
  537                 BIT_CLR(BUF_DOMAINS, BD_DOMAIN(bd), &bdhidirty);
  538         mtx_unlock(&bdirtylock);
  539 }
  540 
  541 /*
  542  *      bd_set:
  543  *
  544  *      Set a domain in the appropriate bitsets when dirtybuffers
  545  *      is incremented.
  546  */
  547 static void
  548 bd_set(struct bufdomain *bd)
  549 {
  550 
  551         mtx_lock(&bdirtylock);
  552         if (bd->bd_numdirtybuffers > bd->bd_lodirtybuffers)
  553                 BIT_SET(BUF_DOMAINS, BD_DOMAIN(bd), &bdlodirty);
  554         if (bd->bd_numdirtybuffers > bd->bd_hidirtybuffers)
  555                 BIT_SET(BUF_DOMAINS, BD_DOMAIN(bd), &bdhidirty);
  556         mtx_unlock(&bdirtylock);
  557 }
  558 
  559 /*
  560  *      bdirtysub:
  561  *
  562  *      Decrement the numdirtybuffers count by one and wakeup any
  563  *      threads blocked in bwillwrite().
  564  */
  565 static void
  566 bdirtysub(struct buf *bp)
  567 {
  568         struct bufdomain *bd;
  569         int num;
  570 
  571         bd = bufdomain(bp);
  572         num = atomic_fetchadd_int(&bd->bd_numdirtybuffers, -1);
  573         if (num == (bd->bd_lodirtybuffers + bd->bd_hidirtybuffers) / 2)
  574                 bdirtywakeup();
  575         if (num == bd->bd_lodirtybuffers || num == bd->bd_hidirtybuffers)
  576                 bd_clear(bd);
  577 }
  578 
  579 /*
  580  *      bdirtyadd:
  581  *
  582  *      Increment the numdirtybuffers count by one and wakeup the buf 
  583  *      daemon if needed.
  584  */
  585 static void
  586 bdirtyadd(struct buf *bp)
  587 {
  588         struct bufdomain *bd;
  589         int num;
  590 
  591         /*
  592          * Only do the wakeup once as we cross the boundary.  The
  593          * buf daemon will keep running until the condition clears.
  594          */
  595         bd = bufdomain(bp);
  596         num = atomic_fetchadd_int(&bd->bd_numdirtybuffers, 1);
  597         if (num == (bd->bd_lodirtybuffers + bd->bd_hidirtybuffers) / 2)
  598                 bd_wakeup();
  599         if (num == bd->bd_lodirtybuffers || num == bd->bd_hidirtybuffers)
  600                 bd_set(bd);
  601 }
  602 
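/*
 * bdirtyadd() and bdirtysub() are edge-triggered: because
 * atomic_fetchadd_int() returns the old value, exactly one concurrent
 * caller observes each threshold crossing, so the wakeup fires once
 * per transition rather than once per dirty buffer.  A minimal C11
 * sketch of the idiom, with hypothetical example_ names:
 *
 *	#include <stdatomic.h>
 *
 *	#define	EXAMPLE_THRESHOLD	128
 *	static atomic_int example_count;
 *	static void example_wakeup(void);	// provided elsewhere
 *
 *	static void
 *	example_add(void)
 *	{
 *		// old == THRESHOLD means this increment crossed it
 *		if (atomic_fetch_add(&example_count, 1) ==
 *		    EXAMPLE_THRESHOLD)
 *			example_wakeup();	// fires exactly once
 *	}
 */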
  603 /*
  604  *      bufspace_daemon_wakeup:
  605  *
  606  *      Wakeup the daemons responsible for freeing clean bufs.
  607  */
  608 static void
  609 bufspace_daemon_wakeup(struct bufdomain *bd)
  610 {
  611 
  612         /*
   613          * Avoid taking the lock if the daemon is already running.
  614          */
  615         if (atomic_fetchadd_int(&bd->bd_running, 1) == 0) {
  616                 BD_RUN_LOCK(bd);
  617                 atomic_store_int(&bd->bd_running, 1);
  618                 wakeup(&bd->bd_running);
  619                 BD_RUN_UNLOCK(bd);
  620         }
  621 }
  622 
  623 /*
  624  *      bufspace_daemon_wait:
  625  *
  626  *      Sleep until the domain falls below a limit or one second passes.
  627  */
  628 static void
  629 bufspace_daemon_wait(struct bufdomain *bd)
  630 {
  631         /*
  632          * Re-check our limits and sleep.  bd_running must be
  633          * cleared prior to checking the limits to avoid missed
  634          * wakeups.  The waker will adjust one of bufspace or
  635          * freebuffers prior to checking bd_running.
  636          */
  637         BD_RUN_LOCK(bd);
  638         atomic_store_int(&bd->bd_running, 0);
  639         if (bd->bd_bufspace < bd->bd_bufspacethresh &&
  640             bd->bd_freebuffers > bd->bd_lofreebuffers) {
  641                 msleep(&bd->bd_running, BD_RUN_LOCKPTR(bd), PRIBIO|PDROP,
  642                     "-", hz);
  643         } else {
  644                 /* Avoid spurious wakeups while running. */
  645                 atomic_store_int(&bd->bd_running, 1);
  646                 BD_RUN_UNLOCK(bd);
  647         }
  648 }
  649 
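/*
 * The bd_running protocol above is a lock-avoiding wakeup handshake:
 * wakers pay for BD_RUN_LOCK only on the 0->1 transition of
 * bd_running, and the daemon clears the flag under the lock before
 * re-checking its limits, so a waker that races with the daemon
 * either exposes work the daemon is about to notice or wins the flag
 * and issues the wakeup.  A rough pthread analogue, assuming the
 * waker publishes its work before checking the flag (all example_
 * names hypothetical):
 *
 *	#include <pthread.h>
 *	#include <stdatomic.h>
 *	#include <stdbool.h>
 *
 *	static pthread_mutex_t example_mtx = PTHREAD_MUTEX_INITIALIZER;
 *	static pthread_cond_t example_cv = PTHREAD_COND_INITIALIZER;
 *	static atomic_int example_running;
 *	static bool example_work_pending(void);	// provided elsewhere
 *
 *	static void
 *	example_wake_daemon(void)
 *	{
 *		if (atomic_fetch_add(&example_running, 1) == 0) {
 *			pthread_mutex_lock(&example_mtx);
 *			atomic_store(&example_running, 1);
 *			pthread_cond_signal(&example_cv);
 *			pthread_mutex_unlock(&example_mtx);
 *		}
 *	}
 *
 *	static void
 *	example_daemon_wait(void)
 *	{
 *		pthread_mutex_lock(&example_mtx);
 *		atomic_store(&example_running, 0);
 *		if (!example_work_pending())	// re-check after clearing
 *			pthread_cond_wait(&example_cv, &example_mtx);
 *		else
 *			atomic_store(&example_running, 1);
 *		pthread_mutex_unlock(&example_mtx);
 *	}
 */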
  650 /*
  651  *      bufspace_adjust:
  652  *
  653  *      Adjust the reported bufspace for a KVA managed buffer, possibly
  654  *      waking any waiters.
  655  */
  656 static void
  657 bufspace_adjust(struct buf *bp, int bufsize)
  658 {
  659         struct bufdomain *bd;
  660         long space;
  661         int diff;
  662 
  663         KASSERT((bp->b_flags & B_MALLOC) == 0,
  664             ("bufspace_adjust: malloc buf %p", bp));
  665         bd = bufdomain(bp);
  666         diff = bufsize - bp->b_bufsize;
  667         if (diff < 0) {
  668                 atomic_subtract_long(&bd->bd_bufspace, -diff);
  669         } else if (diff > 0) {
  670                 space = atomic_fetchadd_long(&bd->bd_bufspace, diff);
  671                 /* Wake up the daemon on the transition. */
  672                 if (space < bd->bd_bufspacethresh &&
  673                     space + diff >= bd->bd_bufspacethresh)
  674                         bufspace_daemon_wakeup(bd);
  675         }
  676         bp->b_bufsize = bufsize;
  677 }
  678 
  679 /*
  680  *      bufspace_reserve:
  681  *
   682  *      Reserve bufspace before calling allocbuf().  Metadata has a
  683  *      different space limit than data.
  684  */
  685 static int
  686 bufspace_reserve(struct bufdomain *bd, int size, bool metadata)
  687 {
  688         long limit, new;
  689         long space;
  690 
  691         if (metadata)
  692                 limit = bd->bd_maxbufspace;
  693         else
  694                 limit = bd->bd_hibufspace;
  695         space = atomic_fetchadd_long(&bd->bd_bufspace, size);
  696         new = space + size;
  697         if (new > limit) {
  698                 atomic_subtract_long(&bd->bd_bufspace, size);
  699                 return (ENOSPC);
  700         }
  701 
  702         /* Wake up the daemon on the transition. */
  703         if (space < bd->bd_bufspacethresh && new >= bd->bd_bufspacethresh)
  704                 bufspace_daemon_wakeup(bd);
  705 
  706         return (0);
  707 }
  708 
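/*
 * bufspace_reserve() is an optimistic reservation: charge the space
 * first with one atomic op, then compare the post-add total against
 * the limit and roll the charge back on failure.  Other threads can
 * briefly observe the overshoot, which is harmless because the limit
 * is advisory.  C11 sketch with hypothetical example_ names:
 *
 *	#include <errno.h>
 *	#include <stdatomic.h>
 *
 *	static atomic_long example_space;
 *
 *	static int
 *	example_reserve(long size, long limit)
 *	{
 *		long prev;
 *
 *		prev = atomic_fetch_add(&example_space, size);
 *		if (prev + size > limit) {
 *			// undo the charge; the caller must retry or fail
 *			atomic_fetch_sub(&example_space, size);
 *			return (ENOSPC);
 *		}
 *		return (0);
 *	}
 */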
  709 /*
  710  *      bufspace_release:
  711  *
  712  *      Release reserved bufspace after bufspace_adjust() has consumed it.
  713  */
  714 static void
  715 bufspace_release(struct bufdomain *bd, int size)
  716 {
  717 
  718         atomic_subtract_long(&bd->bd_bufspace, size);
  719 }
  720 
  721 /*
  722  *      bufspace_wait:
  723  *
  724  *      Wait for bufspace, acting as the buf daemon if a locked vnode is
  725  *      supplied.  bd_wanted must be set prior to polling for space.  The
  726  *      operation must be re-tried on return.
  727  */
  728 static void
  729 bufspace_wait(struct bufdomain *bd, struct vnode *vp, int gbflags,
  730     int slpflag, int slptimeo)
  731 {
  732         struct thread *td;
  733         int error, fl, norunbuf;
  734 
  735         if ((gbflags & GB_NOWAIT_BD) != 0)
  736                 return;
  737 
  738         td = curthread;
  739         BD_LOCK(bd);
  740         while (bd->bd_wanted) {
  741                 if (vp != NULL && vp->v_type != VCHR &&
  742                     (td->td_pflags & TDP_BUFNEED) == 0) {
  743                         BD_UNLOCK(bd);
  744                         /*
   745                          * getblk() is called with the vnode locked, so
   746                          * a majority of the dirty buffers may well
   747                          * belong to that vnode.  Flushing those
   748                          * buffers makes progress that the buf daemon
   749                          * cannot achieve itself, since it is unable
   750                          * to lock the vnode.
  751                          */
  752                         norunbuf = ~(TDP_BUFNEED | TDP_NORUNNINGBUF) |
  753                             (td->td_pflags & TDP_NORUNNINGBUF);
  754 
  755                         /*
  756                          * Play bufdaemon.  The getnewbuf() function
  757                          * may be called while the thread owns lock
  758                          * for another dirty buffer for the same
  759                          * vnode, which makes it impossible to use
  760                          * VOP_FSYNC() there, due to the buffer lock
  761                          * recursion.
  762                          */
  763                         td->td_pflags |= TDP_BUFNEED | TDP_NORUNNINGBUF;
  764                         fl = buf_flush(vp, bd, flushbufqtarget);
  765                         td->td_pflags &= norunbuf;
  766                         BD_LOCK(bd);
  767                         if (fl != 0)
  768                                 continue;
  769                         if (bd->bd_wanted == 0)
  770                                 break;
  771                 }
  772                 error = msleep(&bd->bd_wanted, BD_LOCKPTR(bd),
  773                     (PRIBIO + 4) | slpflag, "newbuf", slptimeo);
  774                 if (error != 0)
  775                         break;
  776         }
  777         BD_UNLOCK(bd);
  778 }
  779 
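/*
 * The norunbuf mask computed in bufspace_wait() clears TDP_BUFNEED
 * unconditionally while restoring TDP_NORUNNINGBUF to its value on
 * entry, all with one AND:
 *
 *	mask = ~(BUFNEED | NORUNNINGBUF) | (pflags & NORUNNINGBUF);
 *	pflags |= BUFNEED | NORUNNINGBUF;	// for the flush
 *	...
 *	pflags &= mask;		// BUFNEED off, NORUNNINGBUF restored
 *
 * Worked example with BUFNEED = 0x1, NORUNNINGBUF = 0x2 and
 * pflags = 0x6 on entry (NORUNNINGBUF already set): mask = ~0x3 | 0x2
 * = ...fe, pflags becomes 0x7 during the flush, and 0x7 & ...fe = 0x6
 * afterwards, so the caller-owned flag survives.  Had it started
 * clear (pflags = 0x4), mask would be ...fc and the AND clears it
 * again.
 */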
  780 
  781 /*
  782  *      bufspace_daemon:
  783  *
  784  *      buffer space management daemon.  Tries to maintain some marginal
  785  *      amount of free buffer space so that requesting processes neither
  786  *      block nor work to reclaim buffers.
  787  */
  788 static void
  789 bufspace_daemon(void *arg)
  790 {
  791         struct bufdomain *bd;
  792 
  793         EVENTHANDLER_REGISTER(shutdown_pre_sync, kthread_shutdown, curthread,
  794             SHUTDOWN_PRI_LAST + 100);
  795 
  796         bd = arg;
  797         for (;;) {
  798                 kthread_suspend_check();
  799 
  800                 /*
  801                  * Free buffers from the clean queue until we meet our
  802                  * targets.
  803                  *
  804                  * Theory of operation:  The buffer cache is most efficient
  805                  * when some free buffer headers and space are always
  806                  * available to getnewbuf().  This daemon attempts to prevent
  807                  * the excessive blocking and synchronization associated
   808                  * with shortfall.  It goes through three phases according
   809                  * to demand:
   810                  *
   811                  * 1)   The daemon wakes up voluntarily once per second
  812                  *      during idle periods when the counters are below
  813                  *      the wakeup thresholds (bufspacethresh, lofreebuffers).
  814                  *
  815                  * 2)   The daemon wakes up as we cross the thresholds
  816                  *      ahead of any potential blocking.  This may bounce
  817                  *      slightly according to the rate of consumption and
  818                  *      release.
  819                  *
  820                  * 3)   The daemon and consumers are starved for working
   821                  *      clean buffers.  This is the 'bufspace' sleep below,
  822                  *      which will inefficiently trade bufs with bqrelse
  823                  *      until we return to condition 2.
  824                  */
  825                 while (bd->bd_bufspace > bd->bd_lobufspace ||
  826                     bd->bd_freebuffers < bd->bd_hifreebuffers) {
  827                         if (buf_recycle(bd, false) != 0) {
  828                                 if (bd_flushall(bd))
  829                                         continue;
  830                                 /*
   831                                  * Speed up dirty flushing if we've run
   832                                  * out of clean buffers.  In particular,
   833                                  * softdep may hold many bufs locked
   834                                  * pending writes to other bufs which are
   835                                  * marked for delayed write, exhausting
   836                                  * clean space until they are written.
  837                                  */
  838                                 bd_speedup();
  839                                 BD_LOCK(bd);
  840                                 if (bd->bd_wanted) {
  841                                         msleep(&bd->bd_wanted, BD_LOCKPTR(bd),
  842                                             PRIBIO|PDROP, "bufspace", hz/10);
  843                                 } else
  844                                         BD_UNLOCK(bd);
  845                         }
  846                         maybe_yield();
  847                 }
  848                 bufspace_daemon_wait(bd);
  849         }
  850 }
  851 
  852 /*
  853  *      bufmallocadjust:
  854  *
  855  *      Adjust the reported bufspace for a malloc managed buffer, possibly
  856  *      waking any waiters.
  857  */
  858 static void
  859 bufmallocadjust(struct buf *bp, int bufsize)
  860 {
  861         int diff;
  862 
  863         KASSERT((bp->b_flags & B_MALLOC) != 0,
  864             ("bufmallocadjust: non-malloc buf %p", bp));
  865         diff = bufsize - bp->b_bufsize;
  866         if (diff < 0)
  867                 atomic_subtract_long(&bufmallocspace, -diff);
  868         else
  869                 atomic_add_long(&bufmallocspace, diff);
  870         bp->b_bufsize = bufsize;
  871 }
  872 
  873 /*
  874  *      runningwakeup:
  875  *
  876  *      Wake up processes that are waiting on asynchronous writes to fall
  877  *      below lorunningspace.
  878  */
  879 static void
  880 runningwakeup(void)
  881 {
  882 
  883         mtx_lock(&rbreqlock);
  884         if (runningbufreq) {
  885                 runningbufreq = 0;
  886                 wakeup(&runningbufreq);
  887         }
  888         mtx_unlock(&rbreqlock);
  889 }
  890 
  891 /*
  892  *      runningbufwakeup:
  893  *
   894  *      Decrement the outstanding write count accordingly.
  895  */
  896 void
  897 runningbufwakeup(struct buf *bp)
  898 {
  899         long space, bspace;
  900 
  901         bspace = bp->b_runningbufspace;
  902         if (bspace == 0)
  903                 return;
  904         space = atomic_fetchadd_long(&runningbufspace, -bspace);
  905         KASSERT(space >= bspace, ("runningbufspace underflow %ld %ld",
  906             space, bspace));
  907         bp->b_runningbufspace = 0;
  908         /*
  909          * Only acquire the lock and wakeup on the transition from exceeding
  910          * the threshold to falling below it.
  911          */
  912         if (space < lorunningspace)
  913                 return;
  914         if (space - bspace > lorunningspace)
  915                 return;
  916         runningwakeup();
  917 }
  918 
  919 /*
  920  *      waitrunningbufspace()
  921  *
  922  *      runningbufspace is a measure of the amount of I/O currently
  923  *      running.  This routine is used in async-write situations to
  924  *      prevent creating huge backups of pending writes to a device.
  925  *      Only asynchronous writes are governed by this function.
  926  *
  927  *      This does NOT turn an async write into a sync write.  It waits  
  928  *      for earlier writes to complete and generally returns before the
  929  *      caller's write has reached the device.
  930  */
  931 void
  932 waitrunningbufspace(void)
  933 {
  934 
  935         mtx_lock(&rbreqlock);
  936         while (runningbufspace > hirunningspace) {
  937                 runningbufreq = 1;
  938                 msleep(&runningbufreq, &rbreqlock, PVM, "wdrain", 0);
  939         }
  940         mtx_unlock(&rbreqlock);
  941 }
  942 
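/*
 * runningbufwakeup() and waitrunningbufspace() together form a
 * high/low watermark throttle on in-flight async writes: writers
 * sleep once runningbufspace exceeds hirunningspace, and completions
 * wake them only on the downward crossing of lorunningspace, giving
 * one wakeup per drain instead of one per completed buffer.  The
 * crossing test, where old is the pre-subtraction total returned by
 * atomic_fetchadd_long():
 *
 *	if (old < lo)			// already below before us
 *		return;
 *	if (old - size > lo)		// still above afterwards
 *		return;
 *	wakeup_waiters();		// we crossed lo just now
 *
 * (wakeup_waiters stands in for runningwakeup() here.)
 */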
  943 
  944 /*
  945  *      vfs_buf_test_cache:
  946  *
  947  *      Called when a buffer is extended.  This function clears the B_CACHE
  948  *      bit if the newly extended portion of the buffer does not contain
  949  *      valid data.
  950  */
  951 static __inline void
  952 vfs_buf_test_cache(struct buf *bp, vm_ooffset_t foff, vm_offset_t off,
  953     vm_offset_t size, vm_page_t m)
  954 {
  955 
  956         VM_OBJECT_ASSERT_LOCKED(m->object);
  957         if (bp->b_flags & B_CACHE) {
  958                 int base = (foff + off) & PAGE_MASK;
  959                 if (vm_page_is_valid(m, base, size) == 0)
  960                         bp->b_flags &= ~B_CACHE;
  961         }
  962 }
  963 
  964 /* Wake up the buffer daemon if necessary */
  965 static void
  966 bd_wakeup(void)
  967 {
  968 
  969         mtx_lock(&bdlock);
  970         if (bd_request == 0) {
  971                 bd_request = 1;
  972                 wakeup(&bd_request);
  973         }
  974         mtx_unlock(&bdlock);
  975 }
  976 
  977 /*
   978  * Adjust the maxbcachebuf tunable.
  979  */
  980 static void
  981 maxbcachebuf_adjust(void)
  982 {
  983         int i;
  984 
  985         /*
  986          * maxbcachebuf must be a power of 2 >= MAXBSIZE.
  987          */
  988         i = 2;
  989         while (i * 2 <= maxbcachebuf)
  990                 i *= 2;
  991         maxbcachebuf = i;
  992         if (maxbcachebuf < MAXBSIZE)
  993                 maxbcachebuf = MAXBSIZE;
  994         if (maxbcachebuf > MAXPHYS)
  995                 maxbcachebuf = MAXPHYS;
  996         if (bootverbose != 0 && maxbcachebuf != MAXBCACHEBUF)
  997                 printf("maxbcachebuf=%d\n", maxbcachebuf);
  998 }
  999 
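/*
 * The loop above rounds maxbcachebuf down to the largest power of two
 * that does not exceed the tunable, and the clamps then force it into
 * [MAXBSIZE, MAXPHYS].  For example, setting the tunable to 98304
 * (96k) walks i through 2, 4, ..., 65536 and stops because
 * 131072 > 98304, leaving maxbcachebuf at 65536.
 */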
 1000 /*
 1001  * bd_speedup - speedup the buffer cache flushing code
 1002  */
 1003 void
 1004 bd_speedup(void)
 1005 {
 1006         int needwake;
 1007 
 1008         mtx_lock(&bdlock);
 1009         needwake = 0;
 1010         if (bd_speedupreq == 0 || bd_request == 0)
 1011                 needwake = 1;
 1012         bd_speedupreq = 1;
 1013         bd_request = 1;
 1014         if (needwake)
 1015                 wakeup(&bd_request);
 1016         mtx_unlock(&bdlock);
 1017 }
 1018 
 1019 #ifdef __i386__
 1020 #define TRANSIENT_DENOM 5
 1021 #else
 1022 #define TRANSIENT_DENOM 10
 1023 #endif
 1024 
 1025 /*
  1026  * Calculate buffer cache scaling values and reserve space for buffer
  1027  * headers.  This is called during low-level kernel initialization and
  1028  * may be called more than once.  We CANNOT write to the memory area
 1029  * being reserved at this time.
 1030  */
 1031 caddr_t
 1032 kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est)
 1033 {
 1034         int tuned_nbuf;
 1035         long maxbuf, maxbuf_sz, buf_sz, biotmap_sz;
 1036 
 1037         /*
 1038          * physmem_est is in pages.  Convert it to kilobytes (assumes
 1039          * PAGE_SIZE is >= 1K)
 1040          */
 1041         physmem_est = physmem_est * (PAGE_SIZE / 1024);
 1042 
 1043         maxbcachebuf_adjust();
 1044         /*
 1045          * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
 1046          * For the first 64MB of ram nominally allocate sufficient buffers to
 1047          * cover 1/4 of our ram.  Beyond the first 64MB allocate additional
 1048          * buffers to cover 1/10 of our ram over 64MB.  When auto-sizing
 1049          * the buffer cache we limit the eventual kva reservation to
 1050          * maxbcache bytes.
 1051          *
 1052          * factor represents the 1/4 x ram conversion.
 1053          */
 1054         if (nbuf == 0) {
 1055                 int factor = 4 * BKVASIZE / 1024;
 1056 
 1057                 nbuf = 50;
 1058                 if (physmem_est > 4096)
 1059                         nbuf += min((physmem_est - 4096) / factor,
 1060                             65536 / factor);
 1061                 if (physmem_est > 65536)
 1062                         nbuf += min((physmem_est - 65536) * 2 / (factor * 5),
 1063                             32 * 1024 * 1024 / (factor * 5));
 1064 
 1065                 if (maxbcache && nbuf > maxbcache / BKVASIZE)
 1066                         nbuf = maxbcache / BKVASIZE;
 1067                 tuned_nbuf = 1;
 1068         } else
 1069                 tuned_nbuf = 0;
 1070 
 1071         /* XXX Avoid unsigned long overflows later on with maxbufspace. */
 1072         maxbuf = (LONG_MAX / 3) / BKVASIZE;
 1073         if (nbuf > maxbuf) {
 1074                 if (!tuned_nbuf)
 1075                         printf("Warning: nbufs lowered from %d to %ld\n", nbuf,
 1076                             maxbuf);
 1077                 nbuf = maxbuf;
 1078         }
 1079 
 1080         /*
 1081          * Ideal allocation size for the transient bio submap is 10%
 1082          * of the maximal space buffer map.  This roughly corresponds
 1083          * to the amount of the buffer mapped for typical UFS load.
 1084          *
 1085          * Clip the buffer map to reserve space for the transient
 1086          * BIOs, if its extent is bigger than 90% (80% on i386) of the
 1087          * maximum buffer map extent on the platform.
 1088          *
  1089  * Falling back to maxbuf when maxbcache is unset avoids
  1090  * trimming the buffer KVA on architectures with ample KVA
  1091  * space.
 1092          */
 1093         if (bio_transient_maxcnt == 0 && unmapped_buf_allowed) {
 1094                 maxbuf_sz = maxbcache != 0 ? maxbcache : maxbuf * BKVASIZE;
 1095                 buf_sz = (long)nbuf * BKVASIZE;
 1096                 if (buf_sz < maxbuf_sz / TRANSIENT_DENOM *
 1097                     (TRANSIENT_DENOM - 1)) {
 1098                         /*
 1099                          * There is more KVA than memory.  Do not
 1100                          * adjust buffer map size, and assign the rest
 1101                          * of maxbuf to transient map.
 1102                          */
 1103                         biotmap_sz = maxbuf_sz - buf_sz;
 1104                 } else {
 1105                         /*
 1106                          * Buffer map spans all KVA we could afford on
 1107                          * this platform.  Give 10% (20% on i386) of
 1108                          * the buffer map to the transient bio map.
 1109                          */
 1110                         biotmap_sz = buf_sz / TRANSIENT_DENOM;
 1111                         buf_sz -= biotmap_sz;
 1112                 }
 1113                 if (biotmap_sz / INT_MAX > MAXPHYS)
 1114                         bio_transient_maxcnt = INT_MAX;
 1115                 else
 1116                         bio_transient_maxcnt = biotmap_sz / MAXPHYS;
 1117                 /*
 1118                  * Artificially limit to 1024 simultaneous in-flight I/Os
 1119                  * using the transient mapping.
 1120                  */
 1121                 if (bio_transient_maxcnt > 1024)
 1122                         bio_transient_maxcnt = 1024;
 1123                 if (tuned_nbuf)
 1124                         nbuf = buf_sz / BKVASIZE;
 1125         }
 1126 
 1127         if (nswbuf == 0) {
 1128                 nswbuf = min(nbuf / 4, 256);
 1129                 if (nswbuf < NSWBUF_MIN)
 1130                         nswbuf = NSWBUF_MIN;
 1131         }
 1132 
 1133         /*
 1134          * Reserve space for the buffer cache buffers
 1135          */
 1136         buf = (void *)v;
 1137         v = (caddr_t)(buf + nbuf);
 1138 
 1139         return(v);
 1140 }
 1141 
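/*
 * Worked example of the nbuf auto-sizing above, assuming
 * BKVASIZE = 16384 so that factor = 4 * 16384 / 1024 = 64: with
 * 512MB of ram, physmem_est = 524288 (kB) and
 *
 *	nbuf = 50
 *	     + min((524288 - 4096) / 64, 65536 / 64)		(+1024)
 *	     + min((524288 - 65536) * 2 / 320, 33554432 / 320)	(+2867)
 *	     = 3941
 *
 * which is the "1/4 of the first 64MB, 1/10 of the rest" policy
 * described above, expressed in BKVASIZE-sized buffers.
 */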
 1142 /* Initialize the buffer subsystem.  Called before use of any buffers. */
 1143 void
 1144 bufinit(void)
 1145 {
 1146         struct buf *bp;
 1147         int i;
 1148 
 1149         KASSERT(maxbcachebuf >= MAXBSIZE,
 1150             ("maxbcachebuf (%d) must be >= MAXBSIZE (%d)\n", maxbcachebuf,
 1151             MAXBSIZE));
 1152         bq_init(&bqempty, QUEUE_EMPTY, -1, "bufq empty lock");
 1153         mtx_init(&rbreqlock, "runningbufspace lock", NULL, MTX_DEF);
 1154         mtx_init(&bdlock, "buffer daemon lock", NULL, MTX_DEF);
 1155         mtx_init(&bdirtylock, "dirty buf lock", NULL, MTX_DEF);
 1156 
 1157         unmapped_buf = (caddr_t)kva_alloc(MAXPHYS);
 1158 
 1159         /* finally, initialize each buffer header and stick on empty q */
 1160         for (i = 0; i < nbuf; i++) {
 1161                 bp = &buf[i];
 1162                 bzero(bp, sizeof *bp);
 1163                 bp->b_flags = B_INVAL;
 1164                 bp->b_rcred = NOCRED;
 1165                 bp->b_wcred = NOCRED;
 1166                 bp->b_qindex = QUEUE_NONE;
 1167                 bp->b_domain = -1;
 1168                 bp->b_subqueue = mp_maxid + 1;
 1169                 bp->b_xflags = 0;
 1170                 bp->b_data = bp->b_kvabase = unmapped_buf;
 1171                 LIST_INIT(&bp->b_dep);
 1172                 BUF_LOCKINIT(bp);
 1173                 bq_insert(&bqempty, bp, false);
 1174         }
 1175 
 1176         /*
 1177          * maxbufspace is the absolute maximum amount of buffer space we are 
 1178          * allowed to reserve in KVM and in real terms.  The absolute maximum
 1179          * is nominally used by metadata.  hibufspace is the nominal maximum
 1180          * used by most other requests.  The differential is required to 
 1181          * ensure that metadata deadlocks don't occur.
 1182          *
  1183  * maxbufspace is based on BKVASIZE.  Allocating buffers larger than
 1184          * this may result in KVM fragmentation which is not handled optimally
 1185          * by the system. XXX This is less true with vmem.  We could use
 1186          * PAGE_SIZE.
 1187          */
 1188         maxbufspace = (long)nbuf * BKVASIZE;
 1189         hibufspace = lmax(3 * maxbufspace / 4, maxbufspace - maxbcachebuf * 10);
 1190         lobufspace = (hibufspace / 20) * 19; /* 95% */
 1191         bufspacethresh = lobufspace + (hibufspace - lobufspace) / 2;
 1192 
 1193         /*
 1194          * Note: The 16 MiB upper limit for hirunningspace was chosen
 1195          * arbitrarily and may need further tuning. It corresponds to
 1196          * 128 outstanding write IO requests (if IO size is 128 KiB),
 1197          * which fits with many RAID controllers' tagged queuing limits.
 1198          * The lower 1 MiB limit is the historical upper limit for
 1199          * hirunningspace.
 1200          */
 1201         hirunningspace = lmax(lmin(roundup(hibufspace / 64, maxbcachebuf),
 1202             16 * 1024 * 1024), 1024 * 1024);
 1203         lorunningspace = roundup((hirunningspace * 2) / 3, maxbcachebuf);
 1204 
 1205         /*
 1206          * Limit the amount of malloc memory since it is wired permanently into
 1207          * the kernel space.  Even though this is accounted for in the buffer
 1208          * allocation, we don't want the malloced region to grow uncontrolled.
 1209          * The malloc scheme improves memory utilization significantly on
 1210          * average (small) directories.
 1211          */
 1212         maxbufmallocspace = hibufspace / 20;
 1213 
 1214         /*
 1215          * Reduce the chance of a deadlock occurring by limiting the number
 1216          * of delayed-write dirty buffers we allow to stack up.
 1217          */
 1218         hidirtybuffers = nbuf / 4 + 20;
 1219         dirtybufthresh = hidirtybuffers * 9 / 10;
 1220         /*
 1221          * To support extreme low-memory systems, make sure hidirtybuffers
 1222          * cannot eat up all available buffer space.  This occurs when our
 1223          * minimum cannot be met.  We try to size hidirtybuffers to 3/4 our
 1224          * buffer space assuming BKVASIZE'd buffers.
 1225          */
 1226         while ((long)hidirtybuffers * BKVASIZE > 3 * hibufspace / 4) {
 1227                 hidirtybuffers >>= 1;
 1228         }
 1229         lodirtybuffers = hidirtybuffers / 2;
 1230 
 1231         /*
 1232          * lofreebuffers should be sufficient to avoid stalling waiting on
 1233          * buf headers under heavy utilization.  The bufs in per-cpu caches
 1234          * are counted as free but will be unavailable to threads executing
 1235          * on other cpus.
 1236          *
 1237          * hifreebuffers is the free target for the bufspace daemon.  This
 1238          * should be set appropriately to limit work per-iteration.
 1239          */
 1240         lofreebuffers = MIN((nbuf / 25) + (20 * mp_ncpus), 128 * mp_ncpus);
 1241         hifreebuffers = (3 * lofreebuffers) / 2;
 1242         numfreebuffers = nbuf;
 1243 
 1244         /* Setup the kva and free list allocators. */
 1245         vmem_set_reclaim(buffer_arena, bufkva_reclaim);
 1246         buf_zone = uma_zcache_create("buf free cache", sizeof(struct buf),
 1247             NULL, NULL, NULL, NULL, buf_import, buf_release, NULL, 0);
 1248 
 1249         /*
 1250          * Size the clean queue according to the amount of buffer space.
  1251  * One queue per 256MB up to the max.  More queues give better
 1252          * concurrency but less accurate LRU.
 1253          */
 1254         buf_domains = MIN(howmany(maxbufspace, 256*1024*1024), BUF_DOMAINS);
 1255         for (i = 0 ; i < buf_domains; i++) {
 1256                 struct bufdomain *bd;
 1257 
 1258                 bd = &bdomain[i];
 1259                 bd_init(bd);
 1260                 bd->bd_freebuffers = nbuf / buf_domains;
 1261                 bd->bd_hifreebuffers = hifreebuffers / buf_domains;
 1262                 bd->bd_lofreebuffers = lofreebuffers / buf_domains;
 1263                 bd->bd_bufspace = 0;
 1264                 bd->bd_maxbufspace = maxbufspace / buf_domains;
 1265                 bd->bd_hibufspace = hibufspace / buf_domains;
 1266                 bd->bd_lobufspace = lobufspace / buf_domains;
 1267                 bd->bd_bufspacethresh = bufspacethresh / buf_domains;
 1268                 bd->bd_numdirtybuffers = 0;
 1269                 bd->bd_hidirtybuffers = hidirtybuffers / buf_domains;
 1270                 bd->bd_lodirtybuffers = lodirtybuffers / buf_domains;
 1271                 bd->bd_dirtybufthresh = dirtybufthresh / buf_domains;
 1272                 /* Don't allow more than 2% of bufs in the per-cpu caches. */
 1273                 bd->bd_lim = nbuf / buf_domains / 50 / mp_ncpus;
 1274         }
 1275         getnewbufcalls = counter_u64_alloc(M_WAITOK);
 1276         getnewbufrestarts = counter_u64_alloc(M_WAITOK);
 1277         mappingrestarts = counter_u64_alloc(M_WAITOK);
 1278         numbufallocfails = counter_u64_alloc(M_WAITOK);
 1279         notbufdflushes = counter_u64_alloc(M_WAITOK);
 1280         buffreekvacnt = counter_u64_alloc(M_WAITOK);
 1281         bufdefragcnt = counter_u64_alloc(M_WAITOK);
 1282         bufkvaspace = counter_u64_alloc(M_WAITOK);
 1283 }
 1284 
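/*
 * Worked example of the global thresholds computed above, assuming
 * BKVASIZE = 16384 and maxbcachebuf = 65536: with nbuf = 65536,
 * maxbufspace = 65536 * 16384 = 1GB, hibufspace =
 * lmax(768MB, 1GB - 640kB) = 1GB - 640kB, lobufspace is 95% of
 * hibufspace, and bufspacethresh sits halfway between the two, so the
 * bufspace daemon begins freeing well before allocations can bump
 * into the hibufspace/maxbufspace ceilings.  Each of the buf_domains
 * then receives a 1/buf_domains share of every limit.
 */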
 1285 #ifdef INVARIANTS
 1286 static inline void
 1287 vfs_buf_check_mapped(struct buf *bp)
 1288 {
 1289 
 1290         KASSERT(bp->b_kvabase != unmapped_buf,
 1291             ("mapped buf: b_kvabase was not updated %p", bp));
 1292         KASSERT(bp->b_data != unmapped_buf,
 1293             ("mapped buf: b_data was not updated %p", bp));
 1294         KASSERT(bp->b_data < unmapped_buf || bp->b_data >= unmapped_buf +
 1295             MAXPHYS, ("b_data + b_offset unmapped %p", bp));
 1296 }
 1297 
 1298 static inline void
 1299 vfs_buf_check_unmapped(struct buf *bp)
 1300 {
 1301 
 1302         KASSERT(bp->b_data == unmapped_buf,
 1303             ("unmapped buf: corrupted b_data %p", bp));
 1304 }
 1305 
 1306 #define BUF_CHECK_MAPPED(bp) vfs_buf_check_mapped(bp)
 1307 #define BUF_CHECK_UNMAPPED(bp) vfs_buf_check_unmapped(bp)
 1308 #else
 1309 #define BUF_CHECK_MAPPED(bp) do {} while (0)
 1310 #define BUF_CHECK_UNMAPPED(bp) do {} while (0)
 1311 #endif
 1312 
 1313 static int
 1314 isbufbusy(struct buf *bp)
 1315 {
 1316         if (((bp->b_flags & B_INVAL) == 0 && BUF_ISLOCKED(bp)) ||
 1317             ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI))
 1318                 return (1);
 1319         return (0);
 1320 }
 1321 
 1322 /*
 1323  * Shutdown the system cleanly to prepare for reboot, halt, or power off.
 1324  */
 1325 void
 1326 bufshutdown(int show_busybufs)
 1327 {
 1328         static int first_buf_printf = 1;
 1329         struct buf *bp;
 1330         int iter, nbusy, pbusy;
 1331 #ifndef PREEMPTION
 1332         int subiter;
 1333 #endif
 1334 
 1335         /*
 1336          * Sync filesystems for shutdown.
 1337          */
 1338         wdog_kern_pat(WD_LASTVAL);
 1339         sys_sync(curthread, NULL);
 1340 
 1341         /*
 1342          * With soft updates, some buffers that are
 1343          * written will be remarked as dirty until other
 1344          * buffers are written.
 1345          */
 1346         for (iter = pbusy = 0; iter < 20; iter++) {
 1347                 nbusy = 0;
 1348                 for (bp = &buf[nbuf]; --bp >= buf; )
 1349                         if (isbufbusy(bp))
 1350                                 nbusy++;
 1351                 if (nbusy == 0) {
 1352                         if (first_buf_printf)
 1353                                 printf("All buffers synced.");
 1354                         break;
 1355                 }
 1356                 if (first_buf_printf) {
 1357                         printf("Syncing disks, buffers remaining... ");
 1358                         first_buf_printf = 0;
 1359                 }
 1360                 printf("%d ", nbusy);
 1361                 if (nbusy < pbusy)
 1362                         iter = 0;
 1363                 pbusy = nbusy;
 1364 
 1365                 wdog_kern_pat(WD_LASTVAL);
 1366                 sys_sync(curthread, NULL);
 1367 
 1368 #ifdef PREEMPTION
 1369                 /*
 1370                  * Spin for a while to allow interrupt threads to run.
 1371                  */
 1372                 DELAY(50000 * iter);
 1373 #else
 1374                 /*
 1375                  * Context switch several times to allow interrupt
 1376                  * threads to run.
 1377                  */
 1378                 for (subiter = 0; subiter < 50 * iter; subiter++) {
 1379                         thread_lock(curthread);
 1380                         mi_switch(SW_VOL, NULL);
 1381                         thread_unlock(curthread);
 1382                         DELAY(1000);
 1383                 }
 1384 #endif
 1385         }
 1386         printf("\n");
 1387         /*
 1388          * Count only busy local buffers to prevent forcing
 1389          * an fsck if we're just a client of a wedged NFS server.
 1390          */
 1391         nbusy = 0;
 1392         for (bp = &buf[nbuf]; --bp >= buf; ) {
 1393                 if (isbufbusy(bp)) {
 1394 #if 0
 1395 /* XXX: This is bogus.  We should probably have a BO_REMOTE flag instead */
 1396                         if (bp->b_dev == NULL) {
 1397                                 TAILQ_REMOVE(&mountlist,
 1398                                     bp->b_vp->v_mount, mnt_list);
 1399                                 continue;
 1400                         }
 1401 #endif
 1402                         nbusy++;
 1403                         if (show_busybufs > 0) {
 1404                                 printf(
 1405             "%d: buf:%p, vnode:%p, flags:%0x, blkno:%jd, lblkno:%jd, buflock:",
 1406                                     nbusy, bp, bp->b_vp, bp->b_flags,
 1407                                     (intmax_t)bp->b_blkno,
 1408                                     (intmax_t)bp->b_lblkno);
 1409                                 BUF_LOCKPRINTINFO(bp);
 1410                                 if (show_busybufs > 1)
 1411                                         vn_printf(bp->b_vp,
 1412                                             "vnode content: ");
 1413                         }
 1414                 }
 1415         }
 1416         if (nbusy) {
 1417                 /*
 1418                  * Failed to sync all blocks. Indicate this and don't
 1419                  * unmount filesystems (thus forcing an fsck on reboot).
 1420                  */
 1421                 printf("Giving up on %d buffers\n", nbusy);
 1422                 DELAY(5000000); /* 5 seconds */
 1423         } else {
 1424                 if (!first_buf_printf)
 1425                         printf("Final sync complete\n");
 1426                 /*
 1427                  * Unmount filesystems
 1428                  */
 1429                 if (panicstr == NULL)
 1430                         vfs_unmountall();
 1431         }
 1432         swapoff_all();
 1433         DELAY(100000);          /* wait for console output to finish */
 1434 }
 1435 
 1436 static void
 1437 bpmap_qenter(struct buf *bp)
 1438 {
 1439 
 1440         BUF_CHECK_MAPPED(bp);
 1441 
 1442         /*
 1443          * bp->b_data is relative to bp->b_offset, but
 1444          * bp->b_offset may be offset into the first page.
 1445          */
 1446         bp->b_data = (caddr_t)trunc_page((vm_offset_t)bp->b_data);
 1447         pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages, bp->b_npages);
 1448         bp->b_data = (caddr_t)((vm_offset_t)bp->b_data |
 1449             (vm_offset_t)(bp->b_offset & PAGE_MASK));
 1450 }
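
      /*
       * Illustrative sketch (hypothetical numbers): with 4KB pages, if the
       * freshly entered mapping starts at kva 0xc0400000 and b_offset is
       * 0x12345678, then (b_offset & PAGE_MASK) is 0x678 and b_data becomes
       * 0xc0400678, so byte 0 of b_data corresponds to b_offset within the
       * first mapped page.
       */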
 1451 
 1452 static inline struct bufdomain *
 1453 bufdomain(struct buf *bp)
 1454 {
 1455 
 1456         return (&bdomain[bp->b_domain]);
 1457 }
 1458 
 1459 static struct bufqueue *
 1460 bufqueue(struct buf *bp)
 1461 {
 1462 
 1463         switch (bp->b_qindex) {
 1464         case QUEUE_NONE:
 1465                 /* FALLTHROUGH */
 1466         case QUEUE_SENTINEL:
 1467                 return (NULL);
 1468         case QUEUE_EMPTY:
 1469                 return (&bqempty);
 1470         case QUEUE_DIRTY:
 1471                 return (&bufdomain(bp)->bd_dirtyq);
 1472         case QUEUE_CLEAN:
 1473                 return (&bufdomain(bp)->bd_subq[bp->b_subqueue]);
 1474         default:
 1475                 break;
 1476         }
 1477         panic("bufqueue(%p): Unhandled type %d\n", bp, bp->b_qindex);
 1478 }
 1479 
 1480 /*
 1481  * Return the locked bufqueue that bp is a member of.
 1482  */
 1483 static struct bufqueue *
 1484 bufqueue_acquire(struct buf *bp)
 1485 {
 1486         struct bufqueue *bq, *nbq;
 1487 
 1488         /*
 1489          * bp can be pushed from a per-cpu queue to the
 1490          * cleanq while we're waiting on the lock.  Retry
 1491          * if the queues don't match.
 1492          */
 1493         bq = bufqueue(bp);
 1494         BQ_LOCK(bq);
 1495         for (;;) {
 1496                 nbq = bufqueue(bp);
 1497                 if (bq == nbq)
 1498                         break;
 1499                 BQ_UNLOCK(bq);
 1500                 BQ_LOCK(nbq);
 1501                 bq = nbq;
 1502         }
 1503         return (bq);
 1504 }
 1505 
 1506 /*
 1507  *      binsfree:
 1508  *
 1509  *      Insert the buffer into the appropriate free list.  Requires a
 1510  *      locked buffer on entry and buffer is unlocked before return.
 1511  */
 1512 static void
 1513 binsfree(struct buf *bp, int qindex)
 1514 {
 1515         struct bufdomain *bd;
 1516         struct bufqueue *bq;
 1517 
 1518         KASSERT(qindex == QUEUE_CLEAN || qindex == QUEUE_DIRTY,
 1519             ("binsfree: Invalid qindex %d", qindex));
 1520         BUF_ASSERT_XLOCKED(bp);
 1521 
 1522         /*
 1523          * Handle delayed bremfree() processing.
 1524          */
 1525         if (bp->b_flags & B_REMFREE) {
 1526                 if (bp->b_qindex == qindex) {
 1527                         bp->b_flags |= B_REUSE;
 1528                         bp->b_flags &= ~B_REMFREE;
 1529                         BUF_UNLOCK(bp);
 1530                         return;
 1531                 }
 1532                 bq = bufqueue_acquire(bp);
 1533                 bq_remove(bq, bp);
 1534                 BQ_UNLOCK(bq);
 1535         }
 1536         bd = bufdomain(bp);
 1537         if (qindex == QUEUE_CLEAN) {
 1538                 if (bd->bd_lim != 0)
 1539                         bq = &bd->bd_subq[PCPU_GET(cpuid)];
 1540                 else
 1541                         bq = bd->bd_cleanq;
 1542         } else
 1543                 bq = &bd->bd_dirtyq;
 1544         bq_insert(bq, bp, true);
 1545 }
 1546 
 1547 /*
 1548  * buf_free:
 1549  *
 1550  *      Free a buffer to the buf zone once it no longer has valid contents.
 1551  */
 1552 static void
 1553 buf_free(struct buf *bp)
 1554 {
 1555 
 1556         if (bp->b_flags & B_REMFREE)
 1557                 bremfreef(bp);
 1558         if (bp->b_vflags & BV_BKGRDINPROG)
 1559                 panic("losing buffer 1");
 1560         if (bp->b_rcred != NOCRED) {
 1561                 crfree(bp->b_rcred);
 1562                 bp->b_rcred = NOCRED;
 1563         }
 1564         if (bp->b_wcred != NOCRED) {
 1565                 crfree(bp->b_wcred);
 1566                 bp->b_wcred = NOCRED;
 1567         }
 1568         if (!LIST_EMPTY(&bp->b_dep))
 1569                 buf_deallocate(bp);
 1570         bufkva_free(bp);
 1571         atomic_add_int(&bufdomain(bp)->bd_freebuffers, 1);
 1572         BUF_UNLOCK(bp);
 1573         uma_zfree(buf_zone, bp);
 1574 }
 1575 
 1576 /*
 1577  * buf_import:
 1578  *
 1579  *      Import bufs into the uma cache from the buf list.  The system still
 1580  *      expects a static array of bufs and much of the synchronization
 1581  *      around bufs assumes type stable storage.  As a result, UMA is used
 1582  *      only as a per-cpu cache of bufs still maintained on a global list.
 1583  */
 1584 static int
 1585 buf_import(void *arg, void **store, int cnt, int domain, int flags)
 1586 {
 1587         struct buf *bp;
 1588         int i;
 1589 
 1590         BQ_LOCK(&bqempty);
 1591         for (i = 0; i < cnt; i++) {
 1592                 bp = TAILQ_FIRST(&bqempty.bq_queue);
 1593                 if (bp == NULL)
 1594                         break;
 1595                 bq_remove(&bqempty, bp);
 1596                 store[i] = bp;
 1597         }
 1598         BQ_UNLOCK(&bqempty);
 1599 
 1600         return (i);
 1601 }
 1602 
 1603 /*
 1604  * buf_release:
 1605  *
 1606  *      Release bufs from the uma cache back to the buffer queues.
 1607  */
 1608 static void
 1609 buf_release(void *arg, void **store, int cnt)
 1610 {
 1611         struct bufqueue *bq;
 1612         struct buf *bp;
 1613         int i;
 1614 
 1615         bq = &bqempty;
 1616         BQ_LOCK(bq);
 1617         for (i = 0; i < cnt; i++) {
 1618                 bp = store[i];
 1619                 /* Inline bq_insert() to batch locking. */
 1620                 TAILQ_INSERT_TAIL(&bq->bq_queue, bp, b_freelist);
 1621                 bp->b_flags &= ~(B_AGE | B_REUSE);
 1622                 bq->bq_len++;
 1623                 bp->b_qindex = bq->bq_index;
 1624         }
 1625         BQ_UNLOCK(bq);
 1626 }
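
      /*
       * Sketch of the import/release contract (hypothetical caller; in
       * practice only UMA calls these through the buf_zone created above):
       * buf_import() fills store[] with up to cnt bufs taken from bqempty
       * and returns how many it got; buf_release() hands them back.
       */
      #if 0
              void *store[4];
              int n;

              n = buf_import(NULL, store, 4, -1, M_NOWAIT);
              /* ... use store[0 .. n-1] as free buf headers ... */
              buf_release(NULL, store, n);
      #endif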
 1627 
 1628 /*
 1629  * buf_alloc:
 1630  *
 1631  *      Allocate an empty buffer header.
 1632  */
 1633 static struct buf *
 1634 buf_alloc(struct bufdomain *bd)
 1635 {
 1636         struct buf *bp;
 1637         int freebufs;
 1638 
 1639         /*
 1640          * We can only run out of bufs in the buf zone if the average buf
 1641          * is less than BKVASIZE.  In this case the actual wait/block will
 1642          * come from buf_recycle() failing to flush one of these small bufs.
 1643          */
 1644         bp = NULL;
 1645         freebufs = atomic_fetchadd_int(&bd->bd_freebuffers, -1);
 1646         if (freebufs > 0)
 1647                 bp = uma_zalloc(buf_zone, M_NOWAIT);
 1648         if (bp == NULL) {
 1649                 atomic_add_int(&bd->bd_freebuffers, 1);
 1650                 bufspace_daemon_wakeup(bd);
 1651                 counter_u64_add(numbufallocfails, 1);
 1652                 return (NULL);
 1653         }
 1654         /*
 1655          * Wake-up the bufspace daemon on transition below threshold.
 1656          */
 1657         if (freebufs == bd->bd_lofreebuffers)
 1658                 bufspace_daemon_wakeup(bd);
 1659 
 1660         if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
 1661                 panic("buf_alloc: Locked buf %p on free queue.", bp);
 1662 
 1663         KASSERT(bp->b_vp == NULL,
 1664             ("bp: %p still has vnode %p.", bp, bp->b_vp));
 1665         KASSERT((bp->b_flags & (B_DELWRI | B_NOREUSE)) == 0,
 1666             ("invalid buffer %p flags %#x", bp, bp->b_flags));
 1667         KASSERT((bp->b_xflags & (BX_VNCLEAN|BX_VNDIRTY)) == 0,
 1668             ("bp: %p still on a buffer list. xflags %X", bp, bp->b_xflags));
 1669         KASSERT(bp->b_npages == 0,
 1670             ("bp: %p still has %d vm pages\n", bp, bp->b_npages));
 1671         KASSERT(bp->b_kvasize == 0, ("bp: %p still has kva\n", bp));
 1672         KASSERT(bp->b_bufsize == 0, ("bp: %p still has bufspace\n", bp));
 1673 
 1674         bp->b_domain = BD_DOMAIN(bd);
 1675         bp->b_flags = 0;
 1676         bp->b_ioflags = 0;
 1677         bp->b_xflags = 0;
 1678         bp->b_vflags = 0;
 1679         bp->b_vp = NULL;
 1680         bp->b_blkno = bp->b_lblkno = 0;
 1681         bp->b_offset = NOOFFSET;
 1682         bp->b_iodone = 0;
 1683         bp->b_error = 0;
 1684         bp->b_resid = 0;
 1685         bp->b_bcount = 0;
 1686         bp->b_npages = 0;
 1687         bp->b_dirtyoff = bp->b_dirtyend = 0;
 1688         bp->b_bufobj = NULL;
 1689         bp->b_data = bp->b_kvabase = unmapped_buf;
 1690         bp->b_fsprivate1 = NULL;
 1691         bp->b_fsprivate2 = NULL;
 1692         bp->b_fsprivate3 = NULL;
 1693         LIST_INIT(&bp->b_dep);
 1694 
 1695         return (bp);
 1696 }
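
      /*
       * The free-count handling above is a reserve-then-rollback pattern;
       * a minimal sketch of the idiom with hypothetical names:
       */
      #if 0
              old = atomic_fetchadd_int(&avail, -1);  /* optimistic reserve */
              if (old <= 0 || (item = try_alloc()) == NULL) {
                      atomic_add_int(&avail, 1);      /* roll the reserve back */
                      return (NULL);
              }
              if (old == low_watermark)       /* wake the replenisher on the */
                      wake_replenisher();     /* downward threshold crossing */
      #endif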
 1697 
 1698 /*
 1699  *      buf_recycle:
 1700  *
 1701  *      Free a buffer from the given bufqueue.  kva controls whether the
 1702  *      freed buf must own some kva resources.  This is used for
 1703  *      defragmenting.
 1704  */
 1705 static int
 1706 buf_recycle(struct bufdomain *bd, bool kva)
 1707 {
 1708         struct bufqueue *bq;
 1709         struct buf *bp, *nbp;
 1710 
 1711         if (kva)
 1712                 counter_u64_add(bufdefragcnt, 1);
 1713         nbp = NULL;
 1714         bq = bd->bd_cleanq;
 1715         BQ_LOCK(bq);
 1716         KASSERT(BQ_LOCKPTR(bq) == BD_LOCKPTR(bd),
 1717             ("buf_recycle: Locks don't match"));
 1718         nbp = TAILQ_FIRST(&bq->bq_queue);
 1719 
 1720         /*
 1721          * Run the scan, possibly freeing data and/or kva mappings on the
 1722          * fly depending on whether a kva reclaim was requested.
 1723          */
 1724         while ((bp = nbp) != NULL) {
 1725                 /*
 1726                  * Calculate next bp (we can only use it if we do not
 1727                  * release the bqlock).
 1728                  */
 1729                 nbp = TAILQ_NEXT(bp, b_freelist);
 1730 
 1731                 /*
 1732                  * If we are defragging then we need a buffer with 
 1733                  * some kva to reclaim.
 1734                  */
 1735                 if (kva && bp->b_kvasize == 0)
 1736                         continue;
 1737 
 1738                 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
 1739                         continue;
 1740 
 1741                 /*
 1742                  * Implement a second chance algorithm for frequently
 1743                  * accessed buffers.
 1744                  */
 1745                 if ((bp->b_flags & B_REUSE) != 0) {
 1746                         TAILQ_REMOVE(&bq->bq_queue, bp, b_freelist);
 1747                         TAILQ_INSERT_TAIL(&bq->bq_queue, bp, b_freelist);
 1748                         bp->b_flags &= ~B_REUSE;
 1749                         BUF_UNLOCK(bp);
 1750                         continue;
 1751                 }
 1752 
 1753                 /*
 1754                  * Skip buffers with background writes in progress.
 1755                  */
 1756                 if ((bp->b_vflags & BV_BKGRDINPROG) != 0) {
 1757                         BUF_UNLOCK(bp);
 1758                         continue;
 1759                 }
 1760 
 1761                 KASSERT(bp->b_qindex == QUEUE_CLEAN,
 1762                     ("buf_recycle: inconsistent queue %d bp %p",
 1763                     bp->b_qindex, bp));
 1764                 KASSERT(bp->b_domain == BD_DOMAIN(bd),
 1765                     ("buf_recycle: queue domain %d doesn't match request %d",
 1766                     bp->b_domain, (int)BD_DOMAIN(bd)));
 1767                 /*
 1768                  * NOTE:  nbp is now entirely invalid.  We can only restart
 1769                  * the scan from this point on.
 1770                  */
 1771                 bq_remove(bq, bp);
 1772                 BQ_UNLOCK(bq);
 1773 
 1774                 /*
 1775                  * Requeue the background write buffer with error and
 1776                  * restart the scan.
 1777                  */
 1778                 if ((bp->b_vflags & BV_BKGRDERR) != 0) {
 1779                         bqrelse(bp);
 1780                         BQ_LOCK(bq);
 1781                         nbp = TAILQ_FIRST(&bq->bq_queue);
 1782                         continue;
 1783                 }
 1784                 bp->b_flags |= B_INVAL;
 1785                 brelse(bp);
 1786                 return (0);
 1787         }
 1788         bd->bd_wanted = 1;
 1789         BQ_UNLOCK(bq);
 1790 
 1791         return (ENOBUFS);
 1792 }
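
      /*
       * The B_REUSE handling above is a classic second-chance (clock)
       * policy; in miniature, with a hypothetical queue q and evict():
       */
      #if 0
              TAILQ_FOREACH_SAFE(bp, &q, b_freelist, nbp) {
                      if (bp->b_flags & B_REUSE) {
                              /* Referenced since queued: give it another lap. */
                              TAILQ_REMOVE(&q, bp, b_freelist);
                              TAILQ_INSERT_TAIL(&q, bp, b_freelist);
                              bp->b_flags &= ~B_REUSE;
                              continue;
                      }
                      evict(bp);      /* first unreferenced buf is the victim */
                      break;
              }
      #endif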
 1793 
 1794 /*
 1795  *      bremfree:
 1796  *
 1797  *      Mark the buffer for removal from the appropriate free list.
 1798  *      
 1799  */
 1800 void
 1801 bremfree(struct buf *bp)
 1802 {
 1803 
 1804         CTR3(KTR_BUF, "bremfree(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
 1805         KASSERT((bp->b_flags & B_REMFREE) == 0,
 1806             ("bremfree: buffer %p already marked for delayed removal.", bp));
 1807         KASSERT(bp->b_qindex != QUEUE_NONE,
 1808             ("bremfree: buffer %p not on a queue.", bp));
 1809         BUF_ASSERT_XLOCKED(bp);
 1810 
 1811         bp->b_flags |= B_REMFREE;
 1812 }
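
      /*
       * Typical deferred-removal pattern (sketch): the caller only marks
       * the buf here and the actual dequeue is folded into the next queue
       * operation, as bufbdflush() below does for a dirty buffer it writes:
       */
      #if 0
              if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) == 0) {
                      bremfree(bp);   /* merely sets B_REMFREE */
                      bawrite(bp);    /* the eventual requeue does the removal */
              }
      #endif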
 1813 
 1814 /*
 1815  *      bremfreef:
 1816  *
 1817  *      Force an immediate removal from a free list.  Used only by NFS,
 1818  *      which abuses the b_freelist pointer.
 1819  */
 1820 void
 1821 bremfreef(struct buf *bp)
 1822 {
 1823         struct bufqueue *bq;
 1824 
 1825         bq = bufqueue_acquire(bp);
 1826         bq_remove(bq, bp);
 1827         BQ_UNLOCK(bq);
 1828 }
 1829 
 1830 static void
 1831 bq_init(struct bufqueue *bq, int qindex, int subqueue, const char *lockname)
 1832 {
 1833 
 1834         mtx_init(&bq->bq_lock, lockname, NULL, MTX_DEF);
 1835         TAILQ_INIT(&bq->bq_queue);
 1836         bq->bq_len = 0;
 1837         bq->bq_index = qindex;
 1838         bq->bq_subqueue = subqueue;
 1839 }
 1840 
 1841 static void
 1842 bd_init(struct bufdomain *bd)
 1843 {
 1844         int i;
 1845 
 1846         bd->bd_cleanq = &bd->bd_subq[mp_maxid + 1];
 1847         bq_init(bd->bd_cleanq, QUEUE_CLEAN, mp_maxid + 1, "bufq clean lock");
 1848         bq_init(&bd->bd_dirtyq, QUEUE_DIRTY, -1, "bufq dirty lock");
 1849         for (i = 0; i <= mp_maxid; i++)
 1850                 bq_init(&bd->bd_subq[i], QUEUE_CLEAN, i,
 1851                     "bufq clean subqueue lock");
 1852         mtx_init(&bd->bd_run_lock, "bufspace daemon run lock", NULL, MTX_DEF);
 1853 }
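
      /*
       * Layout note: bd_subq[0 .. mp_maxid] are the per-cpu clean queues
       * and bd_subq[mp_maxid + 1] doubles as the shared cleanq, so on a
       * system with mp_maxid == 3 a domain carries five clean queues plus
       * the dirty queue.
       */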
 1854 
 1855 /*
 1856  *      bq_remove:
 1857  *
 1858  *      Removes a buffer from the free list, must be called with the
 1859  *      correct qlock held.
 1860  */
 1861 static void
 1862 bq_remove(struct bufqueue *bq, struct buf *bp)
 1863 {
 1864 
 1865         CTR3(KTR_BUF, "bq_remove(%p) vp %p flags %X",
 1866             bp, bp->b_vp, bp->b_flags);
 1867         KASSERT(bp->b_qindex != QUEUE_NONE,
 1868             ("bq_remove: buffer %p not on a queue.", bp));
 1869         KASSERT(bufqueue(bp) == bq,
 1870             ("bq_remove: Remove buffer %p from wrong queue.", bp));
 1871 
 1872         BQ_ASSERT_LOCKED(bq);
 1873         if (bp->b_qindex != QUEUE_EMPTY) {
 1874                 BUF_ASSERT_XLOCKED(bp);
 1875         }
 1876         KASSERT(bq->bq_len >= 1,
 1877             ("queue %d underflow", bp->b_qindex));
 1878         TAILQ_REMOVE(&bq->bq_queue, bp, b_freelist);
 1879         bq->bq_len--;
 1880         bp->b_qindex = QUEUE_NONE;
 1881         bp->b_flags &= ~(B_REMFREE | B_REUSE);
 1882 }
 1883 
 1884 static void
 1885 bd_flush(struct bufdomain *bd, struct bufqueue *bq)
 1886 {
 1887         struct buf *bp;
 1888 
 1889         BQ_ASSERT_LOCKED(bq);
 1890         if (bq != bd->bd_cleanq) {
 1891                 BD_LOCK(bd);
 1892                 while ((bp = TAILQ_FIRST(&bq->bq_queue)) != NULL) {
 1893                         TAILQ_REMOVE(&bq->bq_queue, bp, b_freelist);
 1894                         TAILQ_INSERT_TAIL(&bd->bd_cleanq->bq_queue, bp,
 1895                             b_freelist);
 1896                         bp->b_subqueue = bd->bd_cleanq->bq_subqueue;
 1897                 }
 1898                 bd->bd_cleanq->bq_len += bq->bq_len;
 1899                 bq->bq_len = 0;
 1900         }
 1901         if (bd->bd_wanted) {
 1902                 bd->bd_wanted = 0;
 1903                 wakeup(&bd->bd_wanted);
 1904         }
 1905         if (bq != bd->bd_cleanq)
 1906                 BD_UNLOCK(bd);
 1907 }
 1908 
 1909 static int
 1910 bd_flushall(struct bufdomain *bd)
 1911 {
 1912         struct bufqueue *bq;
 1913         int flushed;
 1914         int i;
 1915 
 1916         if (bd->bd_lim == 0)
 1917                 return (0);
 1918         flushed = 0;
 1919         for (i = 0; i <= mp_maxid; i++) {
 1920                 bq = &bd->bd_subq[i];
 1921                 if (bq->bq_len == 0)
 1922                         continue;
 1923                 BQ_LOCK(bq);
 1924                 bd_flush(bd, bq);
 1925                 BQ_UNLOCK(bq);
 1926                 flushed++;
 1927         }
 1928 
 1929         return (flushed);
 1930 }
 1931 
 1932 static void
 1933 bq_insert(struct bufqueue *bq, struct buf *bp, bool unlock)
 1934 {
 1935         struct bufdomain *bd;
 1936 
 1937         if (bp->b_qindex != QUEUE_NONE)
 1938                 panic("bq_insert: free buffer %p onto another queue?", bp);
 1939 
 1940         bd = bufdomain(bp);
 1941         if (bp->b_flags & B_AGE) {
 1942                 /* Place this buf directly on the real queue. */
 1943                 if (bq->bq_index == QUEUE_CLEAN)
 1944                         bq = bd->bd_cleanq;
 1945                 BQ_LOCK(bq);
 1946                 TAILQ_INSERT_HEAD(&bq->bq_queue, bp, b_freelist);
 1947         } else {
 1948                 BQ_LOCK(bq);
 1949                 TAILQ_INSERT_TAIL(&bq->bq_queue, bp, b_freelist);
 1950         }
 1951         bp->b_flags &= ~(B_AGE | B_REUSE);
 1952         bq->bq_len++;
 1953         bp->b_qindex = bq->bq_index;
 1954         bp->b_subqueue = bq->bq_subqueue;
 1955 
 1956         /*
 1957          * Unlock before we notify so that we don't wakeup a waiter that
 1958          * fails a trylock on the buf and sleeps again.
 1959          */
 1960         if (unlock)
 1961                 BUF_UNLOCK(bp);
 1962 
 1963         if (bp->b_qindex == QUEUE_CLEAN) {
 1964                 /*
 1965                  * Flush the per-cpu queue and notify any waiters.
 1966                  */
 1967                 if (bd->bd_wanted || (bq != bd->bd_cleanq &&
 1968                     bq->bq_len >= bd->bd_lim))
 1969                         bd_flush(bd, bq);
 1970         }
 1971         BQ_UNLOCK(bq);
 1972 }
 1973 
 1974 /*
 1975  *      bufkva_free:
 1976  *
 1977  *      Free the kva allocation for a buffer.
 1978  *
 1979  */
 1980 static void
 1981 bufkva_free(struct buf *bp)
 1982 {
 1983 
 1984 #ifdef INVARIANTS
 1985         if (bp->b_kvasize == 0) {
 1986                 KASSERT(bp->b_kvabase == unmapped_buf &&
 1987                     bp->b_data == unmapped_buf,
 1988                     ("Leaked KVA space on %p", bp));
 1989         } else if (buf_mapped(bp))
 1990                 BUF_CHECK_MAPPED(bp);
 1991         else
 1992                 BUF_CHECK_UNMAPPED(bp);
 1993 #endif
 1994         if (bp->b_kvasize == 0)
 1995                 return;
 1996 
 1997         vmem_free(buffer_arena, (vm_offset_t)bp->b_kvabase, bp->b_kvasize);
 1998         counter_u64_add(bufkvaspace, -bp->b_kvasize);
 1999         counter_u64_add(buffreekvacnt, 1);
 2000         bp->b_data = bp->b_kvabase = unmapped_buf;
 2001         bp->b_kvasize = 0;
 2002 }
 2003 
 2004 /*
 2005  *      bufkva_alloc:
 2006  *
 2007  *      Allocate the buffer KVA and set b_kvasize and b_kvabase.
 2008  */
 2009 static int
 2010 bufkva_alloc(struct buf *bp, int maxsize, int gbflags)
 2011 {
 2012         vm_offset_t addr;
 2013         int error;
 2014 
 2015         KASSERT((gbflags & GB_UNMAPPED) == 0 || (gbflags & GB_KVAALLOC) != 0,
 2016             ("Invalid gbflags 0x%x in %s", gbflags, __func__));
 2017 
 2018         bufkva_free(bp);
 2019 
 2020         addr = 0;
 2021         error = vmem_alloc(buffer_arena, maxsize, M_BESTFIT | M_NOWAIT, &addr);
 2022         if (error != 0) {
 2023                 /*
 2024                  * Buffer map is too fragmented.  Request the caller
 2025                  * to defragment the map.
 2026                  */
 2027                 return (error);
 2028         }
 2029         bp->b_kvabase = (caddr_t)addr;
 2030         bp->b_kvasize = maxsize;
 2031         counter_u64_add(bufkvaspace, bp->b_kvasize);
 2032         if ((gbflags & GB_UNMAPPED) != 0) {
 2033                 bp->b_data = unmapped_buf;
 2034                 BUF_CHECK_UNMAPPED(bp);
 2035         } else {
 2036                 bp->b_data = bp->b_kvabase;
 2037                 BUF_CHECK_MAPPED(bp);
 2038         }
 2039         return (0);
 2040 }
 2041 
 2042 /*
 2043  *      bufkva_reclaim:
 2044  *
 2045  *      Reclaim buffer kva by freeing buffers holding kva.  This is a vmem
 2046  *      reclaim callback that fires before an allocation failure is returned.
 2047  */
 2048 static void
 2049 bufkva_reclaim(vmem_t *vmem, int flags)
 2050 {
 2051         bool done;
 2052         int q;
 2053         int i;
 2054 
 2055         done = false;
 2056         for (i = 0; i < 5; i++) {
 2057                 for (q = 0; q < buf_domains; q++)
 2058                         if (buf_recycle(&bdomain[q], true) != 0)
 2059                                 done = true;
 2060                 if (done)
 2061                         break;
 2062         }
 2063         return;
 2064 }
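
      /*
       * This callback is presumably registered when buffer_arena is set up
       * (outside this section), along the lines of:
       */
      #if 0
              vmem_set_reclaim(buffer_arena, bufkva_reclaim);
      #endif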
 2065 
 2066 /*
 2067  * Attempt to initiate asynchronous I/O on read-ahead blocks.  We must
 2068  * clear BIO_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE is set,
 2069  * the buffer is valid and we do not have to do anything.
 2070  */
 2071 static void
 2072 breada(struct vnode *vp, daddr_t *rablkno, int *rabsize, int cnt,
 2073     struct ucred *cred, int flags, void (*ckhashfunc)(struct buf *))
 2074 {
 2075         struct buf *rabp;
 2076         int i;
 2077 
 2078         for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
 2079                 if (inmem(vp, *rablkno))
 2080                         continue;
 2081                 rabp = getblk(vp, *rablkno, *rabsize, 0, 0, 0);
 2082                 if ((rabp->b_flags & B_CACHE) != 0) {
 2083                         brelse(rabp);
 2084                         continue;
 2085                 }
 2086                 if (!TD_IS_IDLETHREAD(curthread)) {
 2087 #ifdef RACCT
 2088                         if (racct_enable) {
 2089                                 PROC_LOCK(curproc);
 2090                                 racct_add_buf(curproc, rabp, 0);
 2091                                 PROC_UNLOCK(curproc);
 2092                         }
 2093 #endif /* RACCT */
 2094                         curthread->td_ru.ru_inblock++;
 2095                 }
 2096                 rabp->b_flags |= B_ASYNC;
 2097                 rabp->b_flags &= ~B_INVAL;
 2098                 if ((flags & GB_CKHASH) != 0) {
 2099                         rabp->b_flags |= B_CKHASH;
 2100                         rabp->b_ckhashcalc = ckhashfunc;
 2101                 }
 2102                 rabp->b_ioflags &= ~BIO_ERROR;
 2103                 rabp->b_iocmd = BIO_READ;
 2104                 if (rabp->b_rcred == NOCRED && cred != NOCRED)
 2105                         rabp->b_rcred = crhold(cred);
 2106                 vfs_busy_pages(rabp, 0);
 2107                 BUF_KERNPROC(rabp);
 2108                 rabp->b_iooffset = dbtob(rabp->b_blkno);
 2109                 bstrategy(rabp);
 2110         }
 2111 }
 2112 
 2113 /*
 2114  * Entry point for bread() and breadn() via #defines in sys/buf.h.
 2115  *
 2116  * Get a buffer with the specified data.  Look in the cache first.  We
 2117  * must clear BIO_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE
 2118  * is set, the buffer is valid and we do not have to do anything, see
 2119  * getblk(). Also starts asynchronous I/O on read-ahead blocks.
 2120  *
 2121  * Always return a NULL buffer pointer (in bpp) when returning an error.
 2122  */
 2123 int
 2124 breadn_flags(struct vnode *vp, daddr_t blkno, int size, daddr_t *rablkno,
 2125     int *rabsize, int cnt, struct ucred *cred, int flags,
 2126     void (*ckhashfunc)(struct buf *), struct buf **bpp)
 2127 {
 2128         struct buf *bp;
 2129         struct thread *td;
 2130         int error, readwait, rv;
 2131 
 2132         CTR3(KTR_BUF, "breadn(%p, %jd, %d)", vp, blkno, size);
 2133         td = curthread;
 2134         /*
 2135          * Can only return a NULL bp (and an error) if the GB_LOCK_NOWAIT
 2136          * or GB_NOSPARSE flags are specified.
 2137          */
 2138         error = getblkx(vp, blkno, size, 0, 0, flags, &bp);
 2139         if (error != 0) {
 2140                 *bpp = NULL;
 2141                 return (error);
 2142         }
 2143         flags &= ~GB_NOSPARSE;
 2144         *bpp = bp;
 2145 
 2146         /*
 2147          * If not found in cache, do some I/O
 2148          */
 2149         readwait = 0;
 2150         if ((bp->b_flags & B_CACHE) == 0) {
 2151                 if (!TD_IS_IDLETHREAD(td)) {
 2152 #ifdef RACCT
 2153                         if (racct_enable) {
 2154                                 PROC_LOCK(td->td_proc);
 2155                                 racct_add_buf(td->td_proc, bp, 0);
 2156                                 PROC_UNLOCK(td->td_proc);
 2157                         }
 2158 #endif /* RACCT */
 2159                         td->td_ru.ru_inblock++;
 2160                 }
 2161                 bp->b_iocmd = BIO_READ;
 2162                 bp->b_flags &= ~B_INVAL;
 2163                 if ((flags & GB_CKHASH) != 0) {
 2164                         bp->b_flags |= B_CKHASH;
 2165                         bp->b_ckhashcalc = ckhashfunc;
 2166                 }
 2167                 bp->b_ioflags &= ~BIO_ERROR;
 2168                 if (bp->b_rcred == NOCRED && cred != NOCRED)
 2169                         bp->b_rcred = crhold(cred);
 2170                 vfs_busy_pages(bp, 0);
 2171                 bp->b_iooffset = dbtob(bp->b_blkno);
 2172                 bstrategy(bp);
 2173                 ++readwait;
 2174         }
 2175 
 2176         /*
 2177          * Attempt to initiate asynchronous I/O on read-ahead blocks.
 2178          */
 2179         breada(vp, rablkno, rabsize, cnt, cred, flags, ckhashfunc);
 2180 
 2181         rv = 0;
 2182         if (readwait) {
 2183                 rv = bufwait(bp);
 2184                 if (rv != 0) {
 2185                         brelse(bp);
 2186                         *bpp = NULL;
 2187                 }
 2188         }
 2189         return (rv);
 2190 }
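
      /*
       * Typical consumer pattern (sketch; bread() is the wrapper macro from
       * sys/buf.h, and lbn/bsize are hypothetical):
       */
      #if 0
              struct buf *bp;
              int error;

              error = bread(vp, lbn, bsize, NOCRED, &bp);
              if (error != 0)
                      return (error); /* *bpp was already set to NULL */
              /* ... consume bp->b_data ... */
              brelse(bp);
      #endif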
 2191 
 2192 /*
 2193  * Write, release buffer on completion.  (Done by iodone
 2194  * if async).  Do not bother writing anything if the buffer
 2195  * is invalid.
 2196  *
 2197  * Note that we set B_CACHE here, indicating that buffer is
 2198  * fully valid and thus cacheable.  This is true even of NFS
 2199  * now so we set it generally.  This could be set either here 
 2200  * or in biodone() since the I/O is synchronous.  We put it
 2201  * here.
 2202  */
 2203 int
 2204 bufwrite(struct buf *bp)
 2205 {
 2206         int oldflags;
 2207         struct vnode *vp;
 2208         long space;
 2209         int vp_md;
 2210 
 2211         CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
 2212         if ((bp->b_bufobj->bo_flag & BO_DEAD) != 0) {
 2213                 bp->b_flags |= B_INVAL | B_RELBUF;
 2214                 bp->b_flags &= ~B_CACHE;
 2215                 brelse(bp);
 2216                 return (ENXIO);
 2217         }
 2218         if (bp->b_flags & B_INVAL) {
 2219                 brelse(bp);
 2220                 return (0);
 2221         }
 2222 
 2223         if (bp->b_flags & B_BARRIER)
 2224                 atomic_add_long(&barrierwrites, 1);
 2225 
 2226         oldflags = bp->b_flags;
 2227 
 2228         BUF_ASSERT_HELD(bp);
 2229 
 2230         KASSERT(!(bp->b_vflags & BV_BKGRDINPROG),
 2231             ("FFS background buffer should not get here %p", bp));
 2232 
 2233         vp = bp->b_vp;
 2234         if (vp)
 2235                 vp_md = vp->v_vflag & VV_MD;
 2236         else
 2237                 vp_md = 0;
 2238 
 2239         /*
 2240          * Mark the buffer clean.  Increment the bufobj write count
 2241          * before bundirty() call, to prevent other thread from seeing
 2242          * empty dirty list and zero counter for writes in progress,
 2243          * falsely indicating that the bufobj is clean.
 2244          */
 2245         bufobj_wref(bp->b_bufobj);
 2246         bundirty(bp);
 2247 
 2248         bp->b_flags &= ~B_DONE;
 2249         bp->b_ioflags &= ~BIO_ERROR;
 2250         bp->b_flags |= B_CACHE;
 2251         bp->b_iocmd = BIO_WRITE;
 2252 
 2253         vfs_busy_pages(bp, 1);
 2254 
 2255         /*
 2256          * Normal bwrites pipeline writes, so track the space now in flight.
 2257          */
 2258         bp->b_runningbufspace = bp->b_bufsize;
 2259         space = atomic_fetchadd_long(&runningbufspace, bp->b_runningbufspace);
 2260 
 2261         if (!TD_IS_IDLETHREAD(curthread)) {
 2262 #ifdef RACCT
 2263                 if (racct_enable) {
 2264                         PROC_LOCK(curproc);
 2265                         racct_add_buf(curproc, bp, 1);
 2266                         PROC_UNLOCK(curproc);
 2267                 }
 2268 #endif /* RACCT */
 2269                 curthread->td_ru.ru_oublock++;
 2270         }
 2271         if (oldflags & B_ASYNC)
 2272                 BUF_KERNPROC(bp);
 2273         bp->b_iooffset = dbtob(bp->b_blkno);
 2274         buf_track(bp, __func__);
 2275         bstrategy(bp);
 2276 
 2277         if ((oldflags & B_ASYNC) == 0) {
 2278                 int rtval = bufwait(bp);
 2279                 brelse(bp);
 2280                 return (rtval);
 2281         } else if (space > hirunningspace) {
 2282                 /*
 2283                  * don't allow the async write to saturate the I/O
 2284                  * system.  We will not deadlock here because
 2285                  * we are blocking waiting for I/O that is already in-progress
 2286                  * to complete. We do not block here if it is the update
 2287                  * or syncer daemon trying to clean up as that can lead
 2288                  * to deadlock.
 2289                  */
 2290                 if ((curthread->td_pflags & TDP_NORUNNINGBUF) == 0 && !vp_md)
 2291                         waitrunningbufspace();
 2292         }
 2293 
 2294         return (0);
 2295 }
 2296 
 2297 void
 2298 bufbdflush(struct bufobj *bo, struct buf *bp)
 2299 {
 2300         struct buf *nbp;
 2301 
 2302         if (bo->bo_dirty.bv_cnt > dirtybufthresh + 10) {
 2303                 (void) VOP_FSYNC(bp->b_vp, MNT_NOWAIT, curthread);
 2304                 altbufferflushes++;
 2305         } else if (bo->bo_dirty.bv_cnt > dirtybufthresh) {
 2306                 BO_LOCK(bo);
 2307                 /*
 2308                  * Try to find a buffer to flush.
 2309                  */
 2310                 TAILQ_FOREACH(nbp, &bo->bo_dirty.bv_hd, b_bobufs) {
 2311                         if ((nbp->b_vflags & BV_BKGRDINPROG) ||
 2312                             BUF_LOCK(nbp,
 2313                                      LK_EXCLUSIVE | LK_NOWAIT, NULL))
 2314                                 continue;
 2315                         if (bp == nbp)
 2316                                 panic("bdwrite: found ourselves");
 2317                         BO_UNLOCK(bo);
 2318                         /* Don't call buf_countdeps() with the bo lock held. */
 2319                         if (buf_countdeps(nbp, 0)) {
 2320                                 BO_LOCK(bo);
 2321                                 BUF_UNLOCK(nbp);
 2322                                 continue;
 2323                         }
 2324                         if (nbp->b_flags & B_CLUSTEROK) {
 2325                                 vfs_bio_awrite(nbp);
 2326                         } else {
 2327                                 bremfree(nbp);
 2328                                 bawrite(nbp);
 2329                         }
 2330                         dirtybufferflushes++;
 2331                         break;
 2332                 }
 2333                 if (nbp == NULL)
 2334                         BO_UNLOCK(bo);
 2335         }
 2336 }
 2337 
 2338 /*
 2339  * Delayed write. (Buffer is marked dirty).  Do not bother writing
 2340  * anything if the buffer is marked invalid.
 2341  *
 2342  * Note that since the buffer must be completely valid, we can safely
 2343  * set B_CACHE.  In fact, we have to set B_CACHE here rather than in
 2344  * biodone() in order to prevent getblk from writing the buffer
 2345  * out synchronously.
 2346  */
 2347 void
 2348 bdwrite(struct buf *bp)
 2349 {
 2350         struct thread *td = curthread;
 2351         struct vnode *vp;
 2352         struct bufobj *bo;
 2353 
 2354         CTR3(KTR_BUF, "bdwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
 2355         KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
 2356         KASSERT((bp->b_flags & B_BARRIER) == 0,
 2357             ("Barrier request in delayed write %p", bp));
 2358         BUF_ASSERT_HELD(bp);
 2359 
 2360         if (bp->b_flags & B_INVAL) {
 2361                 brelse(bp);
 2362                 return;
 2363         }
 2364 
 2365         /*
 2366          * If we have too many dirty buffers, don't create any more.
 2367          * If we are wildly over our limit, then force a complete
 2368          * cleanup. Otherwise, just keep the situation from getting
 2369          * out of control. Note that we have to avoid a recursive
 2370          * disaster and not try to clean up after our own cleanup!
 2371          */
 2372         vp = bp->b_vp;
 2373         bo = bp->b_bufobj;
 2374         if ((td->td_pflags & (TDP_COWINPROGRESS|TDP_INBDFLUSH)) == 0) {
 2375                 td->td_pflags |= TDP_INBDFLUSH;
 2376                 BO_BDFLUSH(bo, bp);
 2377                 td->td_pflags &= ~TDP_INBDFLUSH;
 2378         } else
 2379                 recursiveflushes++;
 2380 
 2381         bdirty(bp);
 2382         /*
 2383          * Set B_CACHE, indicating that the buffer is fully valid.  This is
 2384          * true even of NFS now.
 2385          */
 2386         bp->b_flags |= B_CACHE;
 2387 
 2388         /*
 2389          * This bmap keeps the system from needing to do the bmap later,
 2390          * perhaps when the system is attempting to do a sync.  Since the
 2391          * indirect block -- or whatever other data structure the filesystem
 2392          * needs -- is likely still in memory now, doing the bmap here is
 2393          * cheap.  Note also that if the pageout daemon is requesting a
 2394          * sync, there might not be enough memory to do the bmap then, so
 2395          * this is important to do.
 2396          */
 2397         if (vp->v_type != VCHR && bp->b_lblkno == bp->b_blkno) {
 2398                 VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
 2399         }
 2400 
 2401         buf_track(bp, __func__);
 2402 
 2403         /*
 2404          * Set the *dirty* buffer range based upon the VM system dirty
 2405          * pages.
 2406          *
 2407          * Mark the buffer pages as clean.  We need to do this here to
 2408          * satisfy the vnode_pager and the pageout daemon, so that it
 2409          * thinks that the pages have been "cleaned".  Note that since
 2410          * the pages are in a delayed write buffer -- the VFS layer
 2411          * "will" see that the pages get written out on the next sync,
 2412          * or perhaps the cluster will be completed.
 2413          */
 2414         vfs_clean_pages_dirty_buf(bp);
 2415         bqrelse(bp);
 2416 
 2417         /*
 2418          * note: we cannot initiate I/O from a bdwrite even if we wanted to,
 2419          * due to the softdep code.
 2420          */
 2421 }
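
      /*
       * Delayed-write pattern (sketch; lbn/bsize hypothetical): read a
       * block, modify it in place, and let the syncer push it out later.
       */
      #if 0
              if (bread(vp, lbn, bsize, NOCRED, &bp) == 0) {
                      /* ... modify bp->b_data ... */
                      bdwrite(bp);    /* marks B_DELWRI and releases bp */
              }
      #endif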
 2422 
 2423 /*
 2424  *      bdirty:
 2425  *
 2426  *      Turn buffer into delayed write request.  We must clear BIO_READ and
 2427  *      B_RELBUF, and we must set B_DELWRI.  We reassign the buffer to 
 2428  *      itself to properly update it in the dirty/clean lists.  We mark it
 2429  *      B_DONE to ensure that any asynchronization of the buffer properly
 2430  *      clears B_DONE ( else a panic will occur later ).  
 2431  *
 2432  *      bdirty() is somewhat like bdwrite() - we have to clear B_INVAL which
 2433  *      might have been set pre-getblk().  Unlike bwrite/bdwrite, bdirty()
 2434  *      should only be called if the buffer is known-good.
 2435  *
 2436  *      Since the buffer is not on a queue, we do not update the numfreebuffers
 2437  *      count.
 2438  *
 2439  *      The buffer must be on QUEUE_NONE.
 2440  */
 2441 void
 2442 bdirty(struct buf *bp)
 2443 {
 2444 
 2445         CTR3(KTR_BUF, "bdirty(%p) vp %p flags %X",
 2446             bp, bp->b_vp, bp->b_flags);
 2447         KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
 2448         KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
 2449             ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
 2450         BUF_ASSERT_HELD(bp);
 2451         bp->b_flags &= ~(B_RELBUF);
 2452         bp->b_iocmd = BIO_WRITE;
 2453 
 2454         if ((bp->b_flags & B_DELWRI) == 0) {
 2455                 bp->b_flags |= /* XXX B_DONE | */ B_DELWRI;
 2456                 reassignbuf(bp);
 2457                 bdirtyadd(bp);
 2458         }
 2459 }
 2460 
 2461 /*
 2462  *      bundirty:
 2463  *
 2464  *      Clear B_DELWRI for buffer.
 2465  *
 2466  *      Since the buffer is not on a queue, we do not update the numfreebuffers
 2467  *      count.
 2468  *      
 2469  *      The buffer must be on QUEUE_NONE.
 2470  */
 2471 
 2472 void
 2473 bundirty(struct buf *bp)
 2474 {
 2475 
 2476         CTR3(KTR_BUF, "bundirty(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
 2477         KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
 2478         KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
 2479             ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex));
 2480         BUF_ASSERT_HELD(bp);
 2481 
 2482         if (bp->b_flags & B_DELWRI) {
 2483                 bp->b_flags &= ~B_DELWRI;
 2484                 reassignbuf(bp);
 2485                 bdirtysub(bp);
 2486         }
 2487         /*
 2488          * Since it is now being written, we can clear its deferred write flag.
 2489          */
 2490         bp->b_flags &= ~B_DEFERRED;
 2491 }
 2492 
 2493 /*
 2494  *      bawrite:
 2495  *
 2496  *      Asynchronous write.  Start output on a buffer, but do not wait for
 2497  *      it to complete.  The buffer is released when the output completes.
 2498  *
 2499  *      bwrite() ( or the VOP routine anyway ) is responsible for handling 
 2500  *      B_INVAL buffers.  Not us.
 2501  */
 2502 void
 2503 bawrite(struct buf *bp)
 2504 {
 2505 
 2506         bp->b_flags |= B_ASYNC;
 2507         (void) bwrite(bp);
 2508 }
 2509 
 2510 /*
 2511  *      babarrierwrite:
 2512  *
 2513  *      Asynchronous barrier write.  Start output on a buffer, but do not
 2514  *      wait for it to complete.  Place a write barrier after this write so
 2515  *      that this buffer and all buffers written before it are committed to
 2516  *      the disk before any buffers written after this write are committed
 2517  *      to the disk.  The buffer is released when the output completes.
 2518  */
 2519 void
 2520 babarrierwrite(struct buf *bp)
 2521 {
 2522 
 2523         bp->b_flags |= B_ASYNC | B_BARRIER;
 2524         (void) bwrite(bp);
 2525 }
 2526 
 2527 /*
 2528  *      bbarrierwrite:
 2529  *
 2530  *      Synchronous barrier write.  Start output on a buffer and wait for
 2531  *      it to complete.  Place a write barrier after this write so that
 2532  *      this buffer and all buffers written before it are committed to 
 2533  *      the disk before any buffers written after this write are committed
 2534  *      to the disk.  The buffer is released when the output completes.
 2535  */
 2536 int
 2537 bbarrierwrite(struct buf *bp)
 2538 {
 2539 
 2540         bp->b_flags |= B_BARRIER;
 2541         return (bwrite(bp));
 2542 }
 2543 
 2544 /*
 2545  *      bwillwrite:
 2546  *
 2547  *      Called prior to the locking of any vnodes when we are expecting to
 2548  *      write.  We do not want to starve the buffer cache with too many
 2549  *      dirty buffers so we block here.  By blocking prior to the locking
 2550  *      of any vnodes we attempt to avoid the situation where a locked vnode
 2551  *      prevents the various system daemons from flushing related buffers.
 2552  */
 2553 void
 2554 bwillwrite(void)
 2555 {
 2556 
 2557         if (buf_dirty_count_severe()) {
 2558                 mtx_lock(&bdirtylock);
 2559                 while (buf_dirty_count_severe()) {
 2560                         bdirtywait = 1;
 2561                         msleep(&bdirtywait, &bdirtylock, (PRIBIO + 4),
 2562                             "flswai", 0);
 2563                 }
 2564                 mtx_unlock(&bdirtylock);
 2565         }
 2566 }
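
      /*
       * Callers are expected to invoke this before taking vnode locks on a
       * write path; a sketch (the surrounding calls are illustrative only):
       */
      #if 0
              bwillwrite();
              vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
              /* ... generate dirty buffers ... */
              VOP_UNLOCK(vp, 0);
      #endif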
 2567 
 2568 /*
 2569  * Return true if we have too many dirty buffers.
 2570  */
 2571 int
 2572 buf_dirty_count_severe(void)
 2573 {
 2574 
 2575         return (!BIT_EMPTY(BUF_DOMAINS, &bdhidirty));
 2576 }
 2577 
 2578 /*
 2579  *      brelse:
 2580  *
 2581  *      Release a busy buffer and, if requested, free its resources.  The
 2582  *      buffer will be stashed in the appropriate bufqueue[] allowing it
 2583  *      to be accessed later as a cache entity or reused for other purposes.
 2584  */
 2585 void
 2586 brelse(struct buf *bp)
 2587 {
 2588         struct mount *v_mnt;
 2589         int qindex;
 2590 
 2591         /*
 2592          * Many functions erroneously call brelse with a NULL bp under rare
 2593          * error conditions. Simply return when called with a NULL bp.
 2594          */
 2595         if (bp == NULL)
 2596                 return;
 2597         CTR3(KTR_BUF, "brelse(%p) vp %p flags %X",
 2598             bp, bp->b_vp, bp->b_flags);
 2599         KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
 2600             ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
 2601         KASSERT((bp->b_flags & B_VMIO) != 0 || (bp->b_flags & B_NOREUSE) == 0,
 2602             ("brelse: non-VMIO buffer marked NOREUSE"));
 2603 
 2604         if (BUF_LOCKRECURSED(bp)) {
 2605                 /*
 2606                  * Do not process, in particular, do not handle the
 2607                  * B_INVAL/B_RELBUF and do not release to free list.
 2608                  */
 2609                 BUF_UNLOCK(bp);
 2610                 return;
 2611         }
 2612 
 2613         if (bp->b_flags & B_MANAGED) {
 2614                 bqrelse(bp);
 2615                 return;
 2616         }
 2617 
 2618         if ((bp->b_vflags & (BV_BKGRDINPROG | BV_BKGRDERR)) == BV_BKGRDERR) {
 2619                 BO_LOCK(bp->b_bufobj);
 2620                 bp->b_vflags &= ~BV_BKGRDERR;
 2621                 BO_UNLOCK(bp->b_bufobj);
 2622                 bdirty(bp);
 2623         }
 2624         if (bp->b_iocmd == BIO_WRITE && (bp->b_ioflags & BIO_ERROR) &&
 2625             (bp->b_error != ENXIO || !LIST_EMPTY(&bp->b_dep)) &&
 2626             !(bp->b_flags & B_INVAL)) {
 2627                 /*
 2628                  * Failed write, redirty.  All errors except ENXIO (which
 2629                  * means the device is gone) are treated as being
 2630                  * transient.
 2631                  *
 2632                  * XXX Treating EIO as transient is not correct; the
 2633                  * contract with the local storage device drivers is that
 2634                  * they will only return EIO once the I/O is no longer
 2635                  * retriable.  Network I/O also respects this through the
 2636                  * guarantees of TCP and/or the internal retries of NFS.
 2637                  * ENOMEM might be transient, but we also have no way of
 2638                  * knowing when its ok to retry/reschedule.  In general,
 2639                  * this entire case should be made obsolete through better
 2640                  * error handling/recovery and resource scheduling.
 2641                  *
 2642                  * Do this also for buffers that failed with ENXIO, but have
 2643                  * non-empty dependencies - the soft updates code might need
 2644                  * to access the buffer to untangle them.
 2645                  *
 2646                  * Must clear BIO_ERROR to prevent pages from being scrapped.
 2647                  */
 2648                 bp->b_ioflags &= ~BIO_ERROR;
 2649                 bdirty(bp);
 2650         } else if ((bp->b_flags & (B_NOCACHE | B_INVAL)) ||
 2651             (bp->b_ioflags & BIO_ERROR) || (bp->b_bufsize <= 0)) {
 2652                 /*
 2653                  * Either a failed read I/O, or we were asked to free or not
 2654                  * cache the buffer, or we failed to write to a device that's
 2655                  * no longer present.
 2656                  */
 2657                 bp->b_flags |= B_INVAL;
 2658                 if (!LIST_EMPTY(&bp->b_dep))
 2659                         buf_deallocate(bp);
 2660                 if (bp->b_flags & B_DELWRI)
 2661                         bdirtysub(bp);
 2662                 bp->b_flags &= ~(B_DELWRI | B_CACHE);
 2663                 if ((bp->b_flags & B_VMIO) == 0) {
 2664                         allocbuf(bp, 0);
 2665                         if (bp->b_vp)
 2666                                 brelvp(bp);
 2667                 }
 2668         }
 2669 
 2670         /*
 2671          * We must clear B_RELBUF if B_DELWRI is set.  If vfs_vmio_truncate() 
 2672          * is called with B_DELWRI set, the underlying pages may wind up
 2673          * getting freed causing a previous write (bdwrite()) to get 'lost'
 2674          * because pages associated with a B_DELWRI bp are marked clean.
 2675          * 
 2676          * We still allow the B_INVAL case to call vfs_vmio_truncate(), even
 2677          * if B_DELWRI is set.
 2678          */
 2679         if (bp->b_flags & B_DELWRI)
 2680                 bp->b_flags &= ~B_RELBUF;
 2681 
 2682         /*
 2683          * VMIO buffer rundown.  It is not very necessary to keep a VMIO buffer
 2684          * constituted, not even NFS buffers now.  Two flags affect this.  If
 2685          * B_INVAL, the struct buf is invalidated but the VM object is kept
 2686          * around ( i.e. so it is trivial to reconstitute the buffer later ).
 2687          *
 2688          * If BIO_ERROR or B_NOCACHE is set, pages in the VM object will be
 2689          * invalidated.  BIO_ERROR cannot be set for a failed write unless the
 2690          * buffer is also B_INVAL because it hits the re-dirtying code above.
 2691          *
 2692          * Normally we can do this whether a buffer is B_DELWRI or not.  If
 2693          * the buffer is an NFS buffer, it is tracking piecemeal writes or
 2694          * the commit state and we cannot afford to lose the buffer. If the
 2695          * buffer has a background write in progress, we need to keep it
 2696          * around to prevent it from being reconstituted and starting a second
 2697          * background write.
 2698          */
 2699 
 2700         v_mnt = bp->b_vp != NULL ? bp->b_vp->v_mount : NULL;
 2701 
 2702         if ((bp->b_flags & B_VMIO) && (bp->b_flags & B_NOCACHE ||
 2703             (bp->b_ioflags & BIO_ERROR && bp->b_iocmd == BIO_READ)) &&
 2704             (v_mnt == NULL || (v_mnt->mnt_vfc->vfc_flags & VFCF_NETWORK) == 0 ||
 2705             vn_isdisk(bp->b_vp, NULL) || (bp->b_flags & B_DELWRI) == 0)) {
 2706                 vfs_vmio_invalidate(bp);
 2707                 allocbuf(bp, 0);
 2708         }
 2709 
 2710         if ((bp->b_flags & (B_INVAL | B_RELBUF)) != 0 ||
 2711             (bp->b_flags & (B_DELWRI | B_NOREUSE)) == B_NOREUSE) {
 2712                 allocbuf(bp, 0);
 2713                 bp->b_flags &= ~B_NOREUSE;
 2714                 if (bp->b_vp != NULL)
 2715                         brelvp(bp);
 2716         }
 2717                         
 2718         /*
 2719          * If the buffer has junk contents, signal it and eventually
 2720          * clean up B_DELWRI and disassociate the vnode so that gbincore()
 2721          * doesn't find it.
 2722          */
 2723         if (bp->b_bufsize == 0 || (bp->b_ioflags & BIO_ERROR) != 0 ||
 2724             (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF)) != 0)
 2725                 bp->b_flags |= B_INVAL;
 2726         if (bp->b_flags & B_INVAL) {
 2727                 if (bp->b_flags & B_DELWRI)
 2728                         bundirty(bp);
 2729                 if (bp->b_vp)
 2730                         brelvp(bp);
 2731         }
 2732 
 2733         buf_track(bp, __func__);
 2734 
 2735         /* buffers with no memory */
 2736         if (bp->b_bufsize == 0) {
 2737                 buf_free(bp);
 2738                 return;
 2739         }
 2740         /* buffers with junk contents */
 2741         if (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF) ||
 2742             (bp->b_ioflags & BIO_ERROR)) {
 2743                 bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA);
 2744                 if (bp->b_vflags & BV_BKGRDINPROG)
 2745                         panic("losing buffer 2");
 2746                 qindex = QUEUE_CLEAN;
 2747                 bp->b_flags |= B_AGE;
 2748         /* remaining buffers */
 2749         } else if (bp->b_flags & B_DELWRI)
 2750                 qindex = QUEUE_DIRTY;
 2751         else
 2752                 qindex = QUEUE_CLEAN;
 2753 
 2754         if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
 2755                 panic("brelse: not dirty");
 2756 
 2757         bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_RELBUF | B_DIRECT);
 2758         /* binsfree unlocks bp. */
 2759         binsfree(bp, qindex);
 2760 }
 2761 
 2762 /*
 2763  * Release a buffer back to the appropriate queue but do not try to free
 2764  * it.  The buffer is expected to be used again soon.
 2765  *
 2766  * bqrelse() is used by bdwrite() to requeue a delayed write, and used by
 2767  * biodone() to requeue an async I/O on completion.  It is also used when
 2768  * known good buffers need to be requeued but we think we may need the data
 2769  * again soon.
 2770  *
 2771  * XXX we should be able to leave the B_RELBUF hint set on completion.
 2772  */
 2773 void
 2774 bqrelse(struct buf *bp)
 2775 {
 2776         int qindex;
 2777 
 2778         CTR3(KTR_BUF, "bqrelse(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
 2779         KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
 2780             ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
 2781 
 2782         qindex = QUEUE_NONE;
 2783         if (BUF_LOCKRECURSED(bp)) {
 2784                 /* do not release to free list */
 2785                 BUF_UNLOCK(bp);
 2786                 return;
 2787         }
 2788         bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
 2789 
 2790         if (bp->b_flags & B_MANAGED) {
 2791                 if (bp->b_flags & B_REMFREE)
 2792                         bremfreef(bp);
 2793                 goto out;
 2794         }
 2795 
 2796         /* buffers with stale but valid contents */
 2797         if ((bp->b_flags & B_DELWRI) != 0 || (bp->b_vflags & (BV_BKGRDINPROG |
 2798             BV_BKGRDERR)) == BV_BKGRDERR) {
 2799                 BO_LOCK(bp->b_bufobj);
 2800                 bp->b_vflags &= ~BV_BKGRDERR;
 2801                 BO_UNLOCK(bp->b_bufobj);
 2802                 qindex = QUEUE_DIRTY;
 2803         } else {
 2804                 if ((bp->b_flags & B_DELWRI) == 0 &&
 2805                     (bp->b_xflags & BX_VNDIRTY))
 2806                         panic("bqrelse: not dirty");
 2807                 if ((bp->b_flags & B_NOREUSE) != 0) {
 2808                         brelse(bp);
 2809                         return;
 2810                 }
 2811                 qindex = QUEUE_CLEAN;
 2812         }
 2813         buf_track(bp, __func__);
 2814         /* binsfree unlocks bp. */
 2815         binsfree(bp, qindex);
 2816         return;
 2817 
 2818 out:
 2819         buf_track(bp, __func__);
 2820         /* B_MANAGED buffer: just unlock, do not requeue. */
 2821         BUF_UNLOCK(bp);
 2822 }
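
/*
 * Illustrative sketch (compiled out): how a hypothetical consumer might
 * choose between brelse() and bqrelse() under the rules above.  The
 * "example_release" function and its reuse_soon parameter are invented
 * for illustration only.
 */
#if 0
static void
example_release(struct buf *bp, bool reuse_soon)
{

        if (reuse_soon && (bp->b_ioflags & BIO_ERROR) == 0) {
                /* Keep the contents cached and requeue for quick reuse. */
                bqrelse(bp);
        } else {
                /* Hint that the buffer and its contents may be discarded. */
                bp->b_flags |= B_RELBUF;
                brelse(bp);
        }
}
#endif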
 2823 
 2824 /*
 2825  * Complete I/O to a VMIO backed page.  Validate the pages as appropriate,
 2826  * restore bogus pages.
 2827  */
 2828 static void
 2829 vfs_vmio_iodone(struct buf *bp)
 2830 {
 2831         vm_ooffset_t foff;
 2832         vm_page_t m;
 2833         vm_object_t obj;
 2834         struct vnode *vp __unused;
 2835         int i, iosize, resid;
 2836         bool bogus;
 2837 
 2838         obj = bp->b_bufobj->bo_object;
 2839         KASSERT(obj->paging_in_progress >= bp->b_npages,
 2840             ("vfs_vmio_iodone: paging in progress(%d) < b_npages(%d)",
 2841             obj->paging_in_progress, bp->b_npages));
 2842 
 2843         vp = bp->b_vp;
 2844         KASSERT(vp->v_holdcnt > 0,
 2845             ("vfs_vmio_iodone: vnode %p has zero hold count", vp));
 2846         KASSERT(vp->v_object != NULL,
 2847             ("vfs_vmio_iodone: vnode %p has no vm_object", vp));
 2848 
 2849         foff = bp->b_offset;
 2850         KASSERT(bp->b_offset != NOOFFSET,
 2851             ("vfs_vmio_iodone: bp %p has no buffer offset", bp));
 2852 
 2853         bogus = false;
 2854         iosize = bp->b_bcount - bp->b_resid;
 2855         VM_OBJECT_WLOCK(obj);
 2856         for (i = 0; i < bp->b_npages; i++) {
 2857                 resid = ((foff + PAGE_SIZE) & ~(off_t)PAGE_MASK) - foff;
 2858                 if (resid > iosize)
 2859                         resid = iosize;
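                /*
                 * Worked example (illustrative): with PAGE_SIZE 4096,
                 * foff 4608 and iosize 6144, the first pass covers
                 * resid = 8192 - 4608 = 3584 bytes up to the page
                 * boundary and the second pass the remaining 2560 bytes.
                 */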
 2860 
 2861                 /*
 2862                  * cleanup bogus pages, restoring the originals
 2863                  */
 2864                 m = bp->b_pages[i];
 2865                 if (m == bogus_page) {
 2866                         bogus = true;
 2867                         m = vm_page_lookup(obj, OFF_TO_IDX(foff));
 2868                         if (m == NULL)
 2869                                 panic("biodone: page disappeared!");
 2870                         bp->b_pages[i] = m;
 2871                 } else if ((bp->b_iocmd == BIO_READ) && resid > 0) {
 2872                         /*
 2873                          * In the write case, the valid and clean bits are
 2874                          * already changed correctly ( see bdwrite() ), so we 
 2875                          * only need to do this here in the read case.
 2876                          */
 2877                         KASSERT((m->dirty & vm_page_bits(foff & PAGE_MASK,
 2878                             resid)) == 0, ("vfs_vmio_iodone: page %p "
 2879                             "has unexpected dirty bits", m));
 2880                         vfs_page_set_valid(bp, foff, m);
 2881                 }
 2882                 KASSERT(OFF_TO_IDX(foff) == m->pindex,
 2883                     ("vfs_vmio_iodone: foff(%jd)/pindex(%ju) mismatch",
 2884                     (intmax_t)foff, (uintmax_t)m->pindex));
 2885 
 2886                 vm_page_sunbusy(m);
 2887                 foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
 2888                 iosize -= resid;
 2889         }
 2890         vm_object_pip_wakeupn(obj, bp->b_npages);
 2891         VM_OBJECT_WUNLOCK(obj);
 2892         if (bogus && buf_mapped(bp)) {
 2893                 BUF_CHECK_MAPPED(bp);
 2894                 pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
 2895                     bp->b_pages, bp->b_npages);
 2896         }
 2897 }
 2898 
 2899 /*
 2900  * Unwire a page held by a buf and either free it or update the page queues to
 2901  * reflect its recent use.
 2902  */
 2903 static void
 2904 vfs_vmio_unwire(struct buf *bp, vm_page_t m)
 2905 {
 2906         bool freed;
 2907 
 2908         vm_page_lock(m);
 2909         if (vm_page_unwire_noq(m)) {
 2910                 if ((bp->b_flags & B_DIRECT) != 0)
 2911                         freed = vm_page_try_to_free(m);
 2912                 else
 2913                         freed = false;
 2914                 if (!freed) {
 2915                         /*
 2916                          * Use a racy check of the valid bits to determine
 2917                          * whether we can accelerate reclamation of the page.
 2918                          * The valid bits will be stable unless the page is
 2919                          * being mapped or is referenced by multiple buffers,
 2920                          * and in those cases we expect races to be rare.  At
 2921                          * worst we will either accelerate reclamation of a
 2922                          * valid page and violate LRU, or unnecessarily defer
 2923                          * reclamation of an invalid page.
 2924                          *
 2925                          * The B_NOREUSE flag marks data that is not expected to
 2926                          * be reused, so accelerate reclamation in that case
 2927                          * too.  Otherwise, maintain LRU.
 2928                          */
 2929                         if (m->valid == 0 || (bp->b_flags & B_NOREUSE) != 0)
 2930                                 vm_page_deactivate_noreuse(m);
 2931                         else if (vm_page_active(m))
 2932                                 vm_page_reference(m);
 2933                         else
 2934                                 vm_page_deactivate(m);
 2935                 }
 2936         }
 2937         vm_page_unlock(m);
 2938 }
 2939 
 2940 /*
 2941  * Perform page invalidation when a buffer is released.  The fully invalid
 2942  * pages will be reclaimed later in vfs_vmio_truncate().
 2943  */
 2944 static void
 2945 vfs_vmio_invalidate(struct buf *bp)
 2946 {
 2947         vm_object_t obj;
 2948         vm_page_t m;
 2949         int i, resid, poffset, presid;
 2950 
 2951         if (buf_mapped(bp)) {
 2952                 BUF_CHECK_MAPPED(bp);
 2953                 pmap_qremove(trunc_page((vm_offset_t)bp->b_data), bp->b_npages);
 2954         } else
 2955                 BUF_CHECK_UNMAPPED(bp);
 2956         /*
 2957          * Get the base offset and length of the buffer.  Note that 
 2958          * in the VMIO case if the buffer block size is not
 2959          * page-aligned, then the b_data pointer may not be page-aligned.
 2960          * But our b_pages[] array *IS* page aligned.
 2961          *
 2962          * block sizes less than DEV_BSIZE (usually 512) are not
 2963          * supported due to the page granularity bits (m->valid,
 2964          * m->dirty, etc...). 
 2965          *
 2966          * See man buf(9) for more information
 2967          */
 2968         obj = bp->b_bufobj->bo_object;
 2969         resid = bp->b_bufsize;
 2970         poffset = bp->b_offset & PAGE_MASK;
 2971         VM_OBJECT_WLOCK(obj);
 2972         for (i = 0; i < bp->b_npages; i++) {
 2973                 m = bp->b_pages[i];
 2974                 if (m == bogus_page)
 2975                         panic("vfs_vmio_invalidate: Unexpected bogus page.");
 2976                 bp->b_pages[i] = NULL;
 2977 
 2978                 presid = resid > (PAGE_SIZE - poffset) ?
 2979                     (PAGE_SIZE - poffset) : resid;
 2980                 KASSERT(presid >= 0, ("brelse: extra page"));
 2981                 while (vm_page_xbusied(m)) {
 2982                         vm_page_lock(m);
 2983                         VM_OBJECT_WUNLOCK(obj);
 2984                         vm_page_busy_sleep(m, "mbncsh", true);
 2985                         VM_OBJECT_WLOCK(obj);
 2986                 }
 2987                 if (pmap_page_wired_mappings(m) == 0)
 2988                         vm_page_set_invalid(m, poffset, presid);
 2989                 vfs_vmio_unwire(bp, m);
 2990                 resid -= presid;
 2991                 poffset = 0;
 2992         }
 2993         VM_OBJECT_WUNLOCK(obj);
 2994         bp->b_npages = 0;
 2995 }
 2996 
 2997 /*
 2998  * Page-granular truncation of an existing VMIO buffer.
 2999  */
 3000 static void
 3001 vfs_vmio_truncate(struct buf *bp, int desiredpages)
 3002 {
 3003         vm_object_t obj;
 3004         vm_page_t m;
 3005         int i;
 3006 
 3007         if (bp->b_npages == desiredpages)
 3008                 return;
 3009 
 3010         if (buf_mapped(bp)) {
 3011                 BUF_CHECK_MAPPED(bp);
 3012                 pmap_qremove((vm_offset_t)trunc_page((vm_offset_t)bp->b_data) +
 3013                     (desiredpages << PAGE_SHIFT), bp->b_npages - desiredpages);
 3014         } else
 3015                 BUF_CHECK_UNMAPPED(bp);
 3016 
 3017         /*
 3018          * The object lock is needed only if we will attempt to free pages.
 3019          */
 3020         obj = (bp->b_flags & B_DIRECT) != 0 ? bp->b_bufobj->bo_object : NULL;
 3021         if (obj != NULL)
 3022                 VM_OBJECT_WLOCK(obj);
 3023         for (i = desiredpages; i < bp->b_npages; i++) {
 3024                 m = bp->b_pages[i];
 3025                 KASSERT(m != bogus_page, ("allocbuf: bogus page found"));
 3026                 bp->b_pages[i] = NULL;
 3027                 vfs_vmio_unwire(bp, m);
 3028         }
 3029         if (obj != NULL)
 3030                 VM_OBJECT_WUNLOCK(obj);
 3031         bp->b_npages = desiredpages;
 3032 }
 3033 
 3034 /*
 3035  * Byte granular extension of VMIO buffers.
 3036  */
 3037 static void
 3038 vfs_vmio_extend(struct buf *bp, int desiredpages, int size)
 3039 {
 3040         /*
 3041          * We are growing the buffer, possibly in a 
 3042          * byte-granular fashion.
 3043          */
 3044         vm_object_t obj;
 3045         vm_offset_t toff;
 3046         vm_offset_t tinc;
 3047         vm_page_t m;
 3048 
 3049         /*
 3050          * Step 1, bring in the VM pages from the object, allocating
 3051          * them if necessary.  We must clear B_CACHE if these pages
 3052          * are not valid for the range covered by the buffer.
 3053          */
 3054         obj = bp->b_bufobj->bo_object;
 3055         VM_OBJECT_WLOCK(obj);
 3056         if (bp->b_npages < desiredpages) {
 3057                 /*
 3058                  * We must allocate system pages since blocking
 3059                  * here could interfere with paging I/O, no
 3060                  * matter which process we are.
 3061                  *
 3062                  * Only exclusive busy can be tested here.
 3063                  * Blocking on shared busy might lead to
 3064                  * deadlocks once allocbuf() is called after
 3065                  * pages are vfs_busy_pages().
 3066                  */
 3067                 (void)vm_page_grab_pages(obj,
 3068                     OFF_TO_IDX(bp->b_offset) + bp->b_npages,
 3069                     VM_ALLOC_SYSTEM | VM_ALLOC_IGN_SBUSY |
 3070                     VM_ALLOC_NOBUSY | VM_ALLOC_WIRED,
 3071                     &bp->b_pages[bp->b_npages], desiredpages - bp->b_npages);
 3072                 bp->b_npages = desiredpages;
 3073         }
 3074 
 3075         /*
 3076          * Step 2.  We've loaded the pages into the buffer,
 3077          * we have to figure out if we can still have B_CACHE
 3078          * set.  Note that B_CACHE is set according to the
 3079          * byte-granular range ( bcount and size ), not the
 3080          * aligned range ( newbsize ).
 3081          *
 3082          * The VM test is against m->valid, which is DEV_BSIZE
 3083          * aligned.  Needless to say, the validity of the data
 3084          * needs to also be DEV_BSIZE aligned.  Note that this
 3085          * fails with NFS if the server or some other client
 3086          * extends the file's EOF.  If our buffer is resized, 
 3087          * B_CACHE may remain set! XXX
 3088          */
 3089         toff = bp->b_bcount;
 3090         tinc = PAGE_SIZE - ((bp->b_offset + toff) & PAGE_MASK);
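        /*
         * Walk the added range page by page: the first chunk runs from
         * b_bcount to the next page boundary, later chunks cover whole
         * pages, and the last chunk is clamped to 'size'.
         */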
 3091         while ((bp->b_flags & B_CACHE) && toff < size) {
 3092                 vm_pindex_t pi;
 3093 
 3094                 if (tinc > (size - toff))
 3095                         tinc = size - toff;
 3096                 pi = ((bp->b_offset & PAGE_MASK) + toff) >> PAGE_SHIFT;
 3097                 m = bp->b_pages[pi];
 3098                 vfs_buf_test_cache(bp, bp->b_offset, toff, tinc, m);
 3099                 toff += tinc;
 3100                 tinc = PAGE_SIZE;
 3101         }
 3102         VM_OBJECT_WUNLOCK(obj);
 3103 
 3104         /*
 3105          * Step 3, fixup the KVA pmap.
 3106          */
 3107         if (buf_mapped(bp))
 3108                 bpmap_qenter(bp);
 3109         else
 3110                 BUF_CHECK_UNMAPPED(bp);
 3111 }
 3112 
 3113 /*
 3114  * Check to see if a block at a particular lbn is available for a clustered
 3115  * write.
 3116  */
 3117 static int
 3118 vfs_bio_clcheck(struct vnode *vp, int size, daddr_t lblkno, daddr_t blkno)
 3119 {
 3120         struct buf *bpa;
 3121         int match;
 3122 
 3123         match = 0;
 3124 
 3125         /* If the buf isn't in core skip it */
 3126         if ((bpa = gbincore(&vp->v_bufobj, lblkno)) == NULL)
 3127                 return (0);
 3128 
 3129         /* If the buf is busy we don't want to wait for it */
 3130         if (BUF_LOCK(bpa, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
 3131                 return (0);
 3132 
 3133         /* Only cluster with valid clusterable delayed write buffers */
 3134         if ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) !=
 3135             (B_DELWRI | B_CLUSTEROK))
 3136                 goto done;
 3137 
 3138         if (bpa->b_bufsize != size)
 3139                 goto done;
 3140 
 3141         /*
 3142          * Check to see if it is in the expected place on disk and that the
 3143          * block has been mapped.
 3144          */
 3145         if ((bpa->b_blkno != bpa->b_lblkno) && (bpa->b_blkno == blkno))
 3146                 match = 1;
 3147 done:
 3148         BUF_UNLOCK(bpa);
 3149         return (match);
 3150 }
 3151 
 3152 /*
 3153  *      vfs_bio_awrite:
 3154  *
 3155  *      Implement clustered async writes for clearing out B_DELWRI buffers.
 3156  *      This is much better than the old way of writing only one buffer at
 3157  *      a time.  Note that we may not be presented with the buffers in the 
 3158  *      correct order, so we search for the cluster in both directions.
 3159  */
 3160 int
 3161 vfs_bio_awrite(struct buf *bp)
 3162 {
 3163         struct bufobj *bo;
 3164         int i;
 3165         int j;
 3166         daddr_t lblkno = bp->b_lblkno;
 3167         struct vnode *vp = bp->b_vp;
 3168         int ncl;
 3169         int nwritten;
 3170         int size;
 3171         int maxcl;
 3172         int gbflags;
 3173 
 3174         bo = &vp->v_bufobj;
 3175         gbflags = (bp->b_data == unmapped_buf) ? GB_UNMAPPED : 0;
 3176         /*
 3177          * right now we support clustered writing only to regular files.  If
 3178          * we find a clusterable block we could be in the middle of a cluster
 3179          * rather than at the beginning.
 3180          */
 3181         if ((vp->v_type == VREG) && 
 3182             (vp->v_mount != NULL) && /* Only on nodes that have the size info */
 3183             (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
 3184 
 3185                 size = vp->v_mount->mnt_stat.f_iosize;
 3186                 maxcl = MAXPHYS / size;
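                /*
                 * Example (illustrative): with the common 128kB MAXPHYS
                 * and a 16kB f_iosize, maxcl is 8, so at most eight
                 * contiguous blocks go out as a single cluster.
                 */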
 3187 
 3188                 BO_RLOCK(bo);
 3189                 for (i = 1; i < maxcl; i++)
 3190                         if (vfs_bio_clcheck(vp, size, lblkno + i,
 3191                             bp->b_blkno + ((i * size) >> DEV_BSHIFT)) == 0)
 3192                                 break;
 3193 
 3194                 for (j = 1; i + j <= maxcl && j <= lblkno; j++) 
 3195                         if (vfs_bio_clcheck(vp, size, lblkno - j,
 3196                             bp->b_blkno - ((j * size) >> DEV_BSHIFT)) == 0)
 3197                                 break;
 3198                 BO_RUNLOCK(bo);
 3199                 --j;
 3200                 ncl = i + j;
 3201                 /*
 3202                  * this is a possible cluster write
 3203                  */
 3204                 if (ncl != 1) {
 3205                         BUF_UNLOCK(bp);
 3206                         nwritten = cluster_wbuild(vp, size, lblkno - j, ncl,
 3207                             gbflags);
 3208                         return (nwritten);
 3209                 }
 3210         }
 3211         bremfree(bp);
 3212         bp->b_flags |= B_ASYNC;
 3213         /*
 3214          * default (old) behavior, writing out only one block
 3215          *
 3216          * XXX returns b_bufsize instead of b_bcount for nwritten?
 3217          */
 3218         nwritten = bp->b_bufsize;
 3219         (void) bwrite(bp);
 3220 
 3221         return (nwritten);
 3222 }
 3223 
 3224 /*
 3225  *      getnewbuf_kva:
 3226  *
 3227  *      Allocate KVA for an empty buf header according to gbflags.
 3228  */
 3229 static int
 3230 getnewbuf_kva(struct buf *bp, int gbflags, int maxsize)
 3231 {
 3232 
 3233         if ((gbflags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_UNMAPPED) {
 3234                 /*
 3235                  * In order to keep fragmentation sane we only allocate kva
 3236                  * in BKVASIZE chunks.  XXX with vmem we can do page size.
 3237                  */
 3238                 maxsize = (maxsize + BKVAMASK) & ~BKVAMASK;
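                /*
                 * Example (illustrative): with the default 16kB BKVASIZE,
                 * a 9000-byte request rounds up to 16384 bytes of KVA.
                 */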
 3239 
 3240                 if (maxsize != bp->b_kvasize &&
 3241                     bufkva_alloc(bp, maxsize, gbflags))
 3242                         return (ENOSPC);
 3243         }
 3244         return (0);
 3245 }
 3246 
 3247 /*
 3248  *      getnewbuf:
 3249  *
 3250  *      Find and initialize a new buffer header, freeing up existing buffers
 3251  *      in the bufqueues as necessary.  The new buffer is returned locked.
 3252  *
 3253  *      We block if:
 3254  *              We have insufficient buffer headers
 3255  *              We have insufficient buffer space
 3256  *              buffer_arena is too fragmented ( space reservation fails )
 3257  *              If we have to flush dirty buffers ( but we try to avoid this )
 3258  *
 3259  *      The caller is responsible for releasing the reserved bufspace after
 3260  *      allocbuf() is called.
 3261  */
 3262 static struct buf *
 3263 getnewbuf(struct vnode *vp, int slpflag, int slptimeo, int maxsize, int gbflags)
 3264 {
 3265         struct bufdomain *bd;
 3266         struct buf *bp;
 3267         bool metadata, reserved;
 3268 
 3269         bp = NULL;
 3270         KASSERT((gbflags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_KVAALLOC,
 3271             ("GB_KVAALLOC only makes sense with GB_UNMAPPED"));
 3272         if (!unmapped_buf_allowed)
 3273                 gbflags &= ~(GB_UNMAPPED | GB_KVAALLOC);
 3274 
 3275         if (vp == NULL || (vp->v_vflag & (VV_MD | VV_SYSTEM)) != 0 ||
 3276             vp->v_type == VCHR)
 3277                 metadata = true;
 3278         else
 3279                 metadata = false;
 3280         if (vp == NULL)
 3281                 bd = &bdomain[0];
 3282         else
 3283                 bd = &bdomain[vp->v_bufobj.bo_domain];
 3284 
 3285         counter_u64_add(getnewbufcalls, 1);
 3286         reserved = false;
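        /*
         * Reserve bufspace, then allocate a buf header and its KVA.  If
         * the reservation or the header allocation fails, recycle an
         * existing buffer and retry; a KVA failure breaks out to the
         * cleanup below.
         */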
 3287         do {
 3288                 if (reserved == false &&
 3289                     bufspace_reserve(bd, maxsize, metadata) != 0) {
 3290                         counter_u64_add(getnewbufrestarts, 1);
 3291                         continue;
 3292                 }
 3293                 reserved = true;
 3294                 if ((bp = buf_alloc(bd)) == NULL) {
 3295                         counter_u64_add(getnewbufrestarts, 1);
 3296                         continue;
 3297                 }
 3298                 if (getnewbuf_kva(bp, gbflags, maxsize) == 0)
 3299                         return (bp);
 3300                 break;
 3301         } while (buf_recycle(bd, false) == 0);
 3302 
 3303         if (reserved)
 3304                 bufspace_release(bd, maxsize);
 3305         if (bp != NULL) {
 3306                 bp->b_flags |= B_INVAL;
 3307                 brelse(bp);
 3308         }
 3309         bufspace_wait(bd, vp, gbflags, slpflag, slptimeo);
 3310 
 3311         return (NULL);
 3312 }
 3313 
 3314 /*
 3315  *      buf_daemon:
 3316  *
 3317  *      buffer flushing daemon.  Buffers are normally flushed by the
 3318  *      update daemon but if it cannot keep up this process starts to
 3319  *      take the load in an attempt to prevent getnewbuf() from blocking.
 3320  */
 3321 static struct kproc_desc buf_kp = {
 3322         "bufdaemon",
 3323         buf_daemon,
 3324         &bufdaemonproc
 3325 };
 3326 SYSINIT(bufdaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, kproc_start, &buf_kp);
 3327 
 3328 static int
 3329 buf_flush(struct vnode *vp, struct bufdomain *bd, int target)
 3330 {
 3331         int flushed;
 3332 
 3333         flushed = flushbufqueues(vp, bd, target, 0);
 3334         if (flushed == 0) {
 3335                 /*
 3336                  * Could not find any buffers without rollback
 3337                  * dependencies, so just write the first one
 3338                  * in the hopes of eventually making progress.
 3339                  */
 3340                 if (vp != NULL && target > 2)
 3341                         target /= 2;
 3342                 flushbufqueues(vp, bd, target, 1);
 3343         }
 3344         return (flushed);
 3345 }
 3346 
 3347 static void
 3348 buf_daemon(void)
 3349 {
 3350         struct bufdomain *bd;
 3351         int speedupreq;
 3352         int lodirty;
 3353         int i;
 3354 
 3355         /*
 3356          * This process needs to be suspended prior to shutdown sync.
 3357          */
 3358         EVENTHANDLER_REGISTER(shutdown_pre_sync, kthread_shutdown, curthread,
 3359             SHUTDOWN_PRI_LAST + 100);
 3360 
 3361         /*
 3362          * Start the buf clean daemons as children threads.
 3363          */
 3364         for (i = 0; i < buf_domains; i++) {
 3365                 int error;
 3366 
 3367                 error = kthread_add((void (*)(void *))bufspace_daemon,
 3368                     &bdomain[i], curproc, NULL, 0, 0, "bufspacedaemon-%d", i);
 3369                 if (error)
 3370                         panic("error %d spawning bufspace daemon", error);
 3371         }
 3372 
 3373         /*
 3374          * This process is allowed to take the buffer cache to the limit
 3375          */
 3376         curthread->td_pflags |= TDP_NORUNNINGBUF | TDP_BUFNEED;
 3377         mtx_lock(&bdlock);
 3378         for (;;) {
 3379                 bd_request = 0;
 3380                 mtx_unlock(&bdlock);
 3381 
 3382                 kthread_suspend_check();
 3383 
 3384                 /*
 3385                  * Save speedupreq for this pass and reset to capture new
 3386                  * requests.
 3387                  */
 3388                 speedupreq = bd_speedupreq;
 3389                 bd_speedupreq = 0;
 3390 
 3391                 /*
 3392                  * Flush each domain sequentially according to its level and
 3393                  * the speedup request.
 3394                  */
 3395                 for (i = 0; i < buf_domains; i++) {
 3396                         bd = &bdomain[i];
 3397                         if (speedupreq)
 3398                                 lodirty = bd->bd_numdirtybuffers / 2;
 3399                         else
 3400                                 lodirty = bd->bd_lodirtybuffers;
 3401                         while (bd->bd_numdirtybuffers > lodirty) {
 3402                                 if (buf_flush(NULL, bd,
 3403                                     bd->bd_numdirtybuffers - lodirty) == 0)
 3404                                         break;
 3405                                 kern_yield(PRI_USER);
 3406                         }
 3407                 }
 3408 
 3409                 /*
 3410                  * Only clear bd_request if we have reached our low water
 3411                  * mark.  The buf_daemon normally waits 1 second and
 3412                  * then incrementally flushes any dirty buffers that have
 3413                  * built up, within reason.
 3414                  *
 3415                  * If we were unable to hit our low water mark and couldn't
 3416                  * find any flushable buffers, we sleep for a short period
 3417                  * to avoid endless loops on unlockable buffers.
 3418                  */
 3419                 mtx_lock(&bdlock);
 3420                 if (!BIT_EMPTY(BUF_DOMAINS, &bdlodirty)) {
 3421                         /*
 3422                          * We reached our low water mark, reset the
 3423                          * request and sleep until we are needed again.
 3424                          * The sleep is just so the suspend code works.
 3425                          */
 3426                         bd_request = 0;
 3427                         /*
 3428                          * Do an extra wakeup in case dirty threshold
 3429                          * changed via sysctl and the explicit transition
 3430                          * out of shortfall was missed.
 3431                          */
 3432                         bdirtywakeup();
 3433                         if (runningbufspace <= lorunningspace)
 3434                                 runningwakeup();
 3435                         msleep(&bd_request, &bdlock, PVM, "psleep", hz);
 3436                 } else {
 3437                         /*
 3438                          * We couldn't find any flushable dirty buffers but
 3439                          * still have too many dirty buffers, we
 3440                          * have to sleep and try again.  (rare)
 3441                          */
 3442                         msleep(&bd_request, &bdlock, PVM, "qsleep", hz / 10);
 3443                 }
 3444         }
 3445 }
 3446 
 3447 /*
 3448  *      flushbufqueues:
 3449  *
 3450  *      Try to flush a buffer in the dirty queue.  We must be careful to
 3451  *      free up B_INVAL buffers instead of writing them, which NFS is
 3452  *      particularly sensitive to.
 3453  */
 3454 static int flushwithdeps = 0;
 3455 SYSCTL_INT(_vfs, OID_AUTO, flushwithdeps, CTLFLAG_RW, &flushwithdeps,
 3456     0, "Number of buffers flushed with dependencies that require rollbacks");
 3457 
 3458 static int
 3459 flushbufqueues(struct vnode *lvp, struct bufdomain *bd, int target,
 3460     int flushdeps)
 3461 {
 3462         struct bufqueue *bq;
 3463         struct buf *sentinel;
 3464         struct vnode *vp;
 3465         struct mount *mp;
 3466         struct buf *bp;
 3467         int hasdeps;
 3468         int flushed;
 3469         int error;
 3470         bool unlock;
 3471 
 3472         flushed = 0;
 3473         bq = &bd->bd_dirtyq;
 3474         bp = NULL;
 3475         sentinel = malloc(sizeof(struct buf), M_TEMP, M_WAITOK | M_ZERO);
 3476         sentinel->b_qindex = QUEUE_SENTINEL;
 3477         BQ_LOCK(bq);
 3478         TAILQ_INSERT_HEAD(&bq->bq_queue, sentinel, b_freelist);
 3479         BQ_UNLOCK(bq);
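        /*
         * The sentinel records our position in the dirty queue across
         * lock drops: it is reinserted after each candidate before that
         * buffer is examined, so the scan resumes correctly even though
         * other threads may reshape the queue in between.
         */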
 3480         while (flushed != target) {
 3481                 maybe_yield();
 3482                 BQ_LOCK(bq);
 3483                 bp = TAILQ_NEXT(sentinel, b_freelist);
 3484                 if (bp != NULL) {
 3485                         TAILQ_REMOVE(&bq->bq_queue, sentinel, b_freelist);
 3486                         TAILQ_INSERT_AFTER(&bq->bq_queue, bp, sentinel,
 3487                             b_freelist);
 3488                 } else {
 3489                         BQ_UNLOCK(bq);
 3490                         break;
 3491                 }
 3492                 /*
 3493          * Skip sentinels inserted by other invocations of
 3494          * flushbufqueues(), taking care not to reorder them.
 3495                  *
 3496                  * Only flush the buffers that belong to the
 3497                  * vnode locked by the curthread.
 3498                  */
 3499                 if (bp->b_qindex == QUEUE_SENTINEL || (lvp != NULL &&
 3500                     bp->b_vp != lvp)) {
 3501                         BQ_UNLOCK(bq);
 3502                         continue;
 3503                 }
 3504                 error = BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL);
 3505                 BQ_UNLOCK(bq);
 3506                 if (error != 0)
 3507                         continue;
 3508 
 3509                 /*
 3510                  * BKGRDINPROG can only be set with the buf and bufobj
 3511                  * locks both held.  We tolerate a race to clear it here.
 3512                  */
 3513                 if ((bp->b_vflags & BV_BKGRDINPROG) != 0 ||
 3514                     (bp->b_flags & B_DELWRI) == 0) {
 3515                         BUF_UNLOCK(bp);
 3516                         continue;
 3517                 }
 3518                 if (bp->b_flags & B_INVAL) {
 3519                         bremfreef(bp);
 3520                         brelse(bp);
 3521                         flushed++;
 3522                         continue;
 3523                 }
 3524 
 3525                 if (!LIST_EMPTY(&bp->b_dep) && buf_countdeps(bp, 0)) {
 3526                         if (flushdeps == 0) {
 3527                                 BUF_UNLOCK(bp);
 3528                                 continue;
 3529                         }
 3530                         hasdeps = 1;
 3531                 } else
 3532                         hasdeps = 0;
 3533                 /*
 3534                  * We must hold the lock on a vnode before writing
 3535                  * one of its buffers. Otherwise we may confuse, or
 3536                  * in the case of a snapshot vnode, deadlock the
 3537                  * system.
 3538                  *
 3539          * The lock order here is the reverse of the normal order
 3540          * of vnode lock followed by buf lock.  This is ok because
 3541                  * the NOWAIT will prevent deadlock.
 3542                  */
 3543                 vp = bp->b_vp;
 3544                 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
 3545                         BUF_UNLOCK(bp);
 3546                         continue;
 3547                 }
 3548                 if (lvp == NULL) {
 3549                         unlock = true;
 3550                         error = vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT);
 3551                 } else {
 3552                         ASSERT_VOP_LOCKED(vp, "getbuf");
 3553                         unlock = false;
 3554                         error = VOP_ISLOCKED(vp) == LK_EXCLUSIVE ? 0 :
 3555                             vn_lock(vp, LK_TRYUPGRADE);
 3556                 }
 3557                 if (error == 0) {
 3558                         CTR3(KTR_BUF, "flushbufqueue(%p) vp %p flags %X",
 3559                             bp, bp->b_vp, bp->b_flags);
 3560                         if (curproc == bufdaemonproc) {
 3561                                 vfs_bio_awrite(bp);
 3562                         } else {
 3563                                 bremfree(bp);
 3564                                 bwrite(bp);
 3565                                 counter_u64_add(notbufdflushes, 1);
 3566                         }
 3567                         vn_finished_write(mp);
 3568                         if (unlock)
 3569                                 VOP_UNLOCK(vp, 0);
 3570                         flushwithdeps += hasdeps;
 3571                         flushed++;
 3572 
 3573                         /*
 3574                          * Sleeping on runningbufspace while holding
 3575                          * vnode lock leads to deadlock.
 3576                          */
 3577                         if (curproc == bufdaemonproc &&
 3578                             runningbufspace > hirunningspace)
 3579                                 waitrunningbufspace();
 3580                         continue;
 3581                 }
 3582                 vn_finished_write(mp);
 3583                 BUF_UNLOCK(bp);
 3584         }
 3585         BQ_LOCK(bq);
 3586         TAILQ_REMOVE(&bq->bq_queue, sentinel, b_freelist);
 3587         BQ_UNLOCK(bq);
 3588         free(sentinel, M_TEMP);
 3589         return (flushed);
 3590 }
 3591 
 3592 /*
 3593  * Check to see if a block is currently memory resident.  The buffer
       * is returned unlocked, so the answer is only a hint.
 3594  */
 3595 struct buf *
 3596 incore(struct bufobj *bo, daddr_t blkno)
 3597 {
 3598         struct buf *bp;
 3599 
 3600         BO_RLOCK(bo);
 3601         bp = gbincore(bo, blkno);
 3602         BO_RUNLOCK(bo);
 3603         return (bp);
 3604 }
 3605 
 3606 /*
 3607  * Returns true if no I/O is needed to access the
 3608  * associated VM object.  This is like incore except
 3609  * it also hunts around in the VM system for the data.
 3610  */
 3611 
 3612 static int
 3613 inmem(struct vnode * vp, daddr_t blkno)
 3614 {
 3615         vm_object_t obj;
 3616         vm_offset_t toff, tinc, size;
 3617         vm_page_t m;
 3618         vm_ooffset_t off;
 3619 
 3620         ASSERT_VOP_LOCKED(vp, "inmem");
 3621 
 3622         if (incore(&vp->v_bufobj, blkno))
 3623                 return (1);
 3624         if (vp->v_mount == NULL)
 3625                 return (0);
 3626         obj = vp->v_object;
 3627         if (obj == NULL)
 3628                 return (0);
 3629 
 3630         size = PAGE_SIZE;
 3631         if (size > vp->v_mount->mnt_stat.f_iosize)
 3632                 size = vp->v_mount->mnt_stat.f_iosize;
 3633         off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;
 3634 
 3635         VM_OBJECT_RLOCK(obj);
 3636         for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
 3637                 m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
 3638                 if (m == NULL)
 3639                         goto notinmem;
 3640                 tinc = size;
 3641                 if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK))
 3642                         tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK);
 3643                 if (vm_page_is_valid(m,
 3644                     (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
 3645                         goto notinmem;
 3646         }
 3647         VM_OBJECT_RUNLOCK(obj);
 3648         return (1);
 3649 
 3650 notinmem:
 3651         VM_OBJECT_RUNLOCK(obj);
 3652         return (0);
 3653 }
 3654 
 3655 /*
 3656  * Set the dirty range for a buffer based on the status of the dirty
 3657  * bits in the pages comprising the buffer.  The range is limited
 3658  * to the size of the buffer.
 3659  *
 3660  * Tell the VM system that the pages associated with this buffer
 3661  * are clean.  This is used for delayed writes where the data is
 3662  * going to go to disk eventually without additional VM intervention.
 3663  *
 3664  * Note that while we only really need to clean through to b_bcount, we
 3665  * just go ahead and clean through to b_bufsize.
 3666  */
 3667 static void
 3668 vfs_clean_pages_dirty_buf(struct buf *bp)
 3669 {
 3670         vm_ooffset_t foff, noff, eoff;
 3671         vm_page_t m;
 3672         int i;
 3673 
 3674         if ((bp->b_flags & B_VMIO) == 0 || bp->b_bufsize == 0)
 3675                 return;
 3676 
 3677         foff = bp->b_offset;
 3678         KASSERT(bp->b_offset != NOOFFSET,
 3679             ("vfs_clean_pages_dirty_buf: no buffer offset"));
 3680 
 3681         VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
 3682         vfs_drain_busy_pages(bp);
 3683         vfs_setdirty_locked_object(bp);
 3684         for (i = 0; i < bp->b_npages; i++) {
 3685                 noff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
 3686                 eoff = noff;
 3687                 if (eoff > bp->b_offset + bp->b_bufsize)
 3688                         eoff = bp->b_offset + bp->b_bufsize;
 3689                 m = bp->b_pages[i];
 3690                 vfs_page_set_validclean(bp, foff, m);
 3691                 /* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */
 3692                 foff = noff;
 3693         }
 3694         VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
 3695 }
 3696 
 3697 static void
 3698 vfs_setdirty_locked_object(struct buf *bp)
 3699 {
 3700         vm_object_t object;
 3701         int i;
 3702 
 3703         object = bp->b_bufobj->bo_object;
 3704         VM_OBJECT_ASSERT_WLOCKED(object);
 3705 
 3706         /*
 3707          * Only bother scanning for modified pages if the object is
 3708          * flagged as possibly containing them (OBJ_MIGHTBEDIRTY).
 3709          */
 3710         if ((object->flags & OBJ_MIGHTBEDIRTY) != 0) {
 3711                 vm_offset_t boffset;
 3712                 vm_offset_t eoffset;
 3713 
 3714                 /*
 3715                  * test the pages to see if they have been modified directly
 3716                  * by users through the VM system.
 3717                  */
 3718                 for (i = 0; i < bp->b_npages; i++)
 3719                         vm_page_test_dirty(bp->b_pages[i]);
 3720 
 3721                 /*
 3722                  * Calculate the encompassing dirty range, boffset and eoffset,
 3723                  * (eoffset - boffset) bytes.
 3724                  */
 3725 
 3726                 for (i = 0; i < bp->b_npages; i++) {
 3727                         if (bp->b_pages[i]->dirty)
 3728                                 break;
 3729                 }
 3730                 boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
 3731 
 3732                 for (i = bp->b_npages - 1; i >= 0; --i) {
 3733                         if (bp->b_pages[i]->dirty) {
 3734                                 break;
 3735                         }
 3736                 }
 3737                 eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
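                /*
                 * Example (illustrative): for a page-aligned four-page
                 * buffer whose pages 1 and 2 are dirty, the scans above
                 * yield boffset = 4096 and eoffset = 12288.
                 */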
 3738 
 3739                 /*
 3740                  * Fit it to the buffer.
 3741                  */
 3742 
 3743                 if (eoffset > bp->b_bcount)
 3744                         eoffset = bp->b_bcount;
 3745 
 3746                 /*
 3747                  * If we have a good dirty range, merge with the existing
 3748                  * dirty range.
 3749                  */
 3750 
 3751                 if (boffset < eoffset) {
 3752                         if (bp->b_dirtyoff > boffset)
 3753                                 bp->b_dirtyoff = boffset;
 3754                         if (bp->b_dirtyend < eoffset)
 3755                                 bp->b_dirtyend = eoffset;
 3756                 }
 3757         }
 3758 }
 3759 
 3760 /*
 3761  * Allocate the KVA mapping for an existing buffer.
 3762  * If an unmapped buffer is provided but a mapped buffer is requested, take
 3763  * also care to properly setup mappings between pages and KVA.
 3764  */
 3765 static void
 3766 bp_unmapped_get_kva(struct buf *bp, daddr_t blkno, int size, int gbflags)
 3767 {
 3768         int bsize, maxsize, need_mapping, need_kva;
 3769         off_t offset;
 3770 
 3771         need_mapping = bp->b_data == unmapped_buf &&
 3772             (gbflags & GB_UNMAPPED) == 0;
 3773         need_kva = bp->b_kvabase == unmapped_buf &&
 3774             bp->b_data == unmapped_buf &&
 3775             (gbflags & GB_KVAALLOC) != 0;
 3776         if (!need_mapping && !need_kva)
 3777                 return;
 3778 
 3779         BUF_CHECK_UNMAPPED(bp);
 3780 
 3781         if (need_mapping && bp->b_kvabase != unmapped_buf) {
 3782                 /*
 3783                  * Buffer is not mapped, but the KVA was already
 3784                  * reserved at the time of the instantiation.  Use the
 3785                  * allocated space.
 3786                  */
 3787                 goto has_addr;
 3788         }
 3789 
 3790         /*
 3791          * Calculate the amount of the address space we would reserve
 3792          * if the buffer was mapped.
 3793          */
 3794         bsize = vn_isdisk(bp->b_vp, NULL) ? DEV_BSIZE : bp->b_bufobj->bo_bsize;
 3795         KASSERT(bsize != 0, ("bsize == 0, check bo->bo_bsize"));
 3796         offset = blkno * bsize;
 3797         maxsize = size + (offset & PAGE_MASK);
 3798         maxsize = imax(maxsize, bsize);
 3799 
 3800         while (bufkva_alloc(bp, maxsize, gbflags) != 0) {
 3801                 if ((gbflags & GB_NOWAIT_BD) != 0) {
 3802                         /*
 3803                          * XXXKIB: defragmentation cannot
 3804                          * succeed, not sure what else to do.
 3805                          */
 3806                         panic("GB_NOWAIT_BD and GB_UNMAPPED %p", bp);
 3807                 }
 3808                 counter_u64_add(mappingrestarts, 1);
 3809                 bufspace_wait(bufdomain(bp), bp->b_vp, gbflags, 0, 0);
 3810         }
 3811 has_addr:
 3812         if (need_mapping) {
 3813                 /* b_offset is handled by bpmap_qenter. */
 3814                 bp->b_data = bp->b_kvabase;
 3815                 BUF_CHECK_MAPPED(bp);
 3816                 bpmap_qenter(bp);
 3817         }
 3818 }
 3819 
 3820 struct buf *
 3821 getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo,
 3822     int flags)
 3823 {
 3824         struct buf *bp;
 3825         int error;
 3826 
 3827         error = getblkx(vp, blkno, size, slpflag, slptimeo, flags, &bp);
 3828         if (error != 0)
 3829                 return (NULL);
 3830         return (bp);
 3831 }
 3832 
 3833 /*
 3834  *      getblkx:
 3835  *
 3836  *      Get a block given a specified block and offset into a file/device.
 3837  *      The buffers B_DONE bit will be cleared on return, making it almost
 3838  *      The buffer's B_DONE bit will be cleared on return, making it almost
 3839  *      return.  The caller should clear B_INVAL prior to initiating a
 3840  *      READ.
 3841  *
 3842  *      For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for
 3843  *      an existing buffer.
 3844  *
 3845  *      For a VMIO buffer, B_CACHE is modified according to the backing VM.
 3846  *      If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set
 3847  *      and then cleared based on the backing VM.  If the previous buffer is
 3848  *      non-0-sized but invalid, B_CACHE will be cleared.
 3849  *
 3850  *      If getblk() must create a new buffer, the new buffer is returned with
 3851  *      both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which
 3852  *      case it is returned with B_INVAL clear and B_CACHE set based on the
 3853  *      backing VM.
 3854  *
 3855  *      getblk() also forces a bwrite() for any B_DELWRI buffer whose
 3856  *      B_CACHE bit is clear.
 3857  *      
 3858  *      What this means, basically, is that the caller should use B_CACHE to
 3859  *      determine whether the buffer is fully valid or not and should clear
 3860  *      B_INVAL prior to issuing a read.  If the caller intends to validate
 3861  *      the buffer by loading its data area with something, the caller needs
 3862  *      to clear B_INVAL.  If the caller does this without issuing an I/O, 
 3863  *      the caller should set B_CACHE ( as an optimization ), else the caller
 3864  *      should issue the I/O and biodone() will set B_CACHE if the I/O was
 3865  *      a write attempt or if it was a successful read.  If the caller 
 3866  *      intends to issue a READ, the caller must clear B_INVAL and BIO_ERROR
 3867  *      prior to issuing the READ.  biodone() will *not* clear B_INVAL.
 3868  */
 3869 int
 3870 getblkx(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo,
 3871     int flags, struct buf **bpp)
 3872 {
 3873         struct buf *bp;
 3874         struct bufobj *bo;
 3875         daddr_t d_blkno;
 3876         int bsize, error, maxsize, vmio;
 3877         off_t offset;
 3878 
 3879         CTR3(KTR_BUF, "getblk(%p, %ld, %d)", vp, (long)blkno, size);
 3880         KASSERT((flags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_KVAALLOC,
 3881             ("GB_KVAALLOC only makes sense with GB_UNMAPPED"));
 3882         ASSERT_VOP_LOCKED(vp, "getblk");
 3883         if (size > maxbcachebuf)
 3884                 panic("getblk: size(%d) > maxbcachebuf(%d)\n", size,
 3885                     maxbcachebuf);
 3886         if (!unmapped_buf_allowed)
 3887                 flags &= ~(GB_UNMAPPED | GB_KVAALLOC);
 3888 
 3889         bo = &vp->v_bufobj;
 3890         d_blkno = blkno;
 3891 loop:
 3892         BO_RLOCK(bo);
 3893         bp = gbincore(bo, blkno);
 3894         if (bp != NULL) {
 3895                 int lockflags;
 3896                 /*
 3897                  * Buffer is in-core.  If the buffer is neither busy nor managed,
 3898                  * it must be on a queue.
 3899                  */
 3900                 lockflags = LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK;
 3901 
 3902                 if ((flags & GB_LOCK_NOWAIT) != 0)
 3903                         lockflags |= LK_NOWAIT;
 3904 
 3905                 error = BUF_TIMELOCK(bp, lockflags,
 3906                     BO_LOCKPTR(bo), "getblk", slpflag, slptimeo);
 3907 
 3908                 /*
 3909                  * If we slept and got the lock we have to restart in case
 3910                  * the buffer changed identities.
 3911                  */
 3912                 if (error == ENOLCK)
 3913                         goto loop;
 3914                 /* We timed out or were interrupted. */
 3915                 else if (error != 0)
 3916                         return (error);
 3917                 /* If recursed, assume caller knows the rules. */
 3918                 else if (BUF_LOCKRECURSED(bp))
 3919                         goto end;
 3920 
 3921                 /*
 3922                  * The buffer is locked.  B_CACHE is cleared if the buffer is 
 3923                  * invalid.  Otherwise, for a non-VMIO buffer, B_CACHE is set
 3924                  * and for a VMIO buffer B_CACHE is adjusted according to the
 3925                  * backing VM cache.
 3926                  */
 3927                 if (bp->b_flags & B_INVAL)
 3928                         bp->b_flags &= ~B_CACHE;
 3929                 else if ((bp->b_flags & (B_VMIO | B_INVAL)) == 0)
 3930                         bp->b_flags |= B_CACHE;
 3931                 if (bp->b_flags & B_MANAGED)
 3932                         MPASS(bp->b_qindex == QUEUE_NONE);
 3933                 else
 3934                         bremfree(bp);
 3935 
 3936                 /*
 3937                  * check for size inconsistencies for the non-VMIO case.
 3938                  */
 3939                 if (bp->b_bcount != size) {
 3940                         if ((bp->b_flags & B_VMIO) == 0 ||
 3941                             (size > bp->b_kvasize)) {
 3942                                 if (bp->b_flags & B_DELWRI) {
 3943                                         bp->b_flags |= B_NOCACHE;
 3944                                         bwrite(bp);
 3945                                 } else {
 3946                                         if (LIST_EMPTY(&bp->b_dep)) {
 3947                                                 bp->b_flags |= B_RELBUF;
 3948                                                 brelse(bp);
 3949                                         } else {
 3950                                                 bp->b_flags |= B_NOCACHE;
 3951                                                 bwrite(bp);
 3952                                         }
 3953                                 }
 3954                                 goto loop;
 3955                         }
 3956                 }
 3957 
 3958                 /*
 3959                  * Handle the case of unmapped buffer which should
 3960                  * become mapped, or the buffer for which KVA
 3961                  * reservation is requested.
 3962                  */
 3963                 bp_unmapped_get_kva(bp, blkno, size, flags);
 3964 
 3965                 /*
 3966                  * If the size is inconsistent in the VMIO case, we can resize
 3967                  * the buffer.  This might lead to B_CACHE getting set or
 3968                  * cleared.  If the size has not changed, B_CACHE remains
 3969                  * unchanged from its previous state.
 3970                  */
 3971                 allocbuf(bp, size);
 3972 
 3973                 KASSERT(bp->b_offset != NOOFFSET, 
 3974                     ("getblk: no buffer offset"));
 3975 
 3976                 /*
 3977                  * A buffer with B_DELWRI set and B_CACHE clear must
 3978                  * be committed before we can return the buffer in
 3979                  * order to prevent the caller from issuing a read
 3980                  * ( due to B_CACHE not being set ) and overwriting
 3981                  * it.
 3982                  *
 3983                  * Most callers, including NFS and FFS, need this to
 3984                  * operate properly either because they assume they
 3985                  * can issue a read if B_CACHE is not set, or because
 3986                  * ( for example ) an uncached B_DELWRI might loop due 
 3987                  * to softupdates re-dirtying the buffer.  In the latter
 3988                  * case, B_CACHE is set after the first write completes,
 3989                  * preventing further loops.
 3990                  * NOTE!  b*write() sets B_CACHE.  If we cleared B_CACHE
 3991                  * above while extending the buffer, we cannot allow the
 3992                  * buffer to remain with B_CACHE set after the write
 3993                  * completes or it will represent a corrupt state.  To
 3994                  * deal with this we set B_NOCACHE to scrap the buffer
 3995                  * after the write.
 3996                  *
 3997                  * We might be able to do something fancy, like setting
 3998                  * B_CACHE in bwrite() except if B_DELWRI is already set,
 3999                  * so the below call doesn't set B_CACHE, but that gets real
 4000                  * confusing.  This is much easier.
 4001                  */
 4002 
 4003                 if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) {
 4004                         bp->b_flags |= B_NOCACHE;
 4005                         bwrite(bp);
 4006                         goto loop;
 4007                 }
 4008                 bp->b_flags &= ~B_DONE;
 4009         } else {
 4010                 /*
 4011                  * Buffer is not in-core, create new buffer.  The buffer
 4012                  * returned by getnewbuf() is locked.  Note that the returned
 4013                  * buffer is also considered valid (not marked B_INVAL).
 4014                  */
 4015                 BO_RUNLOCK(bo);
 4016                 /*
 4017                  * If the user does not want us to create the buffer, bail out
 4018                  * here.
 4019                  */
 4020                 if (flags & GB_NOCREAT)
 4021                         return (EEXIST);
 4022                 if (bdomain[bo->bo_domain].bd_freebuffers == 0 &&
 4023                     TD_IS_IDLETHREAD(curthread))
 4024                         return (EBUSY);
 4025 
 4026                 bsize = vn_isdisk(vp, NULL) ? DEV_BSIZE : bo->bo_bsize;
 4027                 KASSERT(bsize != 0, ("bsize == 0, check bo->bo_bsize"));
 4028                 offset = blkno * bsize;
 4029                 vmio = vp->v_object != NULL;
 4030                 if (vmio) {
 4031                         maxsize = size + (offset & PAGE_MASK);
 4032                 } else {
 4033                         maxsize = size;
 4034                         /* Do not allow non-VMIO unmapped buffers. */
 4035                         flags &= ~(GB_UNMAPPED | GB_KVAALLOC);
 4036                 }
 4037                 maxsize = imax(maxsize, bsize);
 4038                 if ((flags & GB_NOSPARSE) != 0 && vmio &&
 4039                     !vn_isdisk(vp, NULL)) {
 4040                         error = VOP_BMAP(vp, blkno, NULL, &d_blkno, 0, 0);
 4041                         KASSERT(error != EOPNOTSUPP,
 4042                             ("GB_NOSPARSE from fs not supporting bmap, vp %p",
 4043                             vp));
 4044                         if (error != 0)
 4045                                 return (error);
 4046                         if (d_blkno == -1)
 4047                                 return (EJUSTRETURN);
 4048                 }
 4049 
 4050                 bp = getnewbuf(vp, slpflag, slptimeo, maxsize, flags);
 4051                 if (bp == NULL) {
 4052                         if (slpflag || slptimeo)
 4053                                 return (ETIMEDOUT);
 4054                         /*
 4055                          * XXX This is here until the sleep path is diagnosed
 4056                          * enough to work under very low memory conditions.
 4057                          *
 4058                          * There's an issue on low memory, 4BSD+non-preempt
 4059                          * systems (eg MIPS routers with 32MB RAM) where buffer
 4060                          * exhaustion occurs without sleeping for buffer
 4061                          * reclamation.  This just sticks in a loop and
 4062                          * constantly attempts to allocate a buffer, which
 4063                          * hits exhaustion and tries to wakeup bufdaemon.
 4064                          * This never happens because we never yield.
 4065                          *
 4066                          * The real solution is to identify and fix these cases
 4067                          * so we aren't effectively busy-waiting in a loop
 4068                          * until the reclamation path has cycles to run.
 4069                          */
 4070                         kern_yield(PRI_USER);
 4071                         goto loop;
 4072                 }
 4073 
 4074                 /*
 4075                  * This code is used to make sure that a buffer is not
 4076                  * created while the getnewbuf routine is blocked.
 4077                  * This can be a problem whether the vnode is locked or not.
 4078                  * If the buffer is created out from under us, we have to
 4079                  * throw away the one we just created.
 4080                  *
 4081                  * Note: this must occur before we associate the buffer
 4082                  * with the vp especially considering limitations in
 4083                  * the splay tree implementation when dealing with duplicate
 4084                  * lblkno's.
 4085                  */
 4086                 BO_LOCK(bo);
 4087                 if (gbincore(bo, blkno)) {
 4088                         BO_UNLOCK(bo);
 4089                         bp->b_flags |= B_INVAL;
 4090                         bufspace_release(bufdomain(bp), maxsize);
 4091                         brelse(bp);
 4092                         goto loop;
 4093                 }
 4094 
 4095                 /*
 4096                  * Insert the buffer into the hash, so that it can
 4097                  * be found by incore.
 4098                  */
 4099                 bp->b_lblkno = blkno;
 4100                 bp->b_blkno = d_blkno;
 4101                 bp->b_offset = offset;
 4102                 bgetvp(vp, bp);
 4103                 BO_UNLOCK(bo);
 4104 
 4105                 /*
 4106                  * set B_VMIO bit.  allocbuf() the buffer bigger.  Since the
 4107                  * buffer size starts out as 0, B_CACHE will be set by
 4108                  * allocbuf() for the VMIO case prior to it testing the
 4109                  * backing store for validity.
 4110                  */
 4111 
 4112                 if (vmio) {
 4113                         bp->b_flags |= B_VMIO;
 4114                         KASSERT(vp->v_object == bp->b_bufobj->bo_object,
 4115                             ("ARGH! different b_bufobj->bo_object %p %p %p\n",
 4116                             bp, vp->v_object, bp->b_bufobj->bo_object));
 4117                 } else {
 4118                         bp->b_flags &= ~B_VMIO;
 4119                         KASSERT(bp->b_bufobj->bo_object == NULL,
 4120                             ("ARGH! has b_bufobj->bo_object %p %p\n",
 4121                             bp, bp->b_bufobj->bo_object));
 4122                         BUF_CHECK_MAPPED(bp);
 4123                 }
 4124 
 4125                 allocbuf(bp, size);
 4126                 bufspace_release(bufdomain(bp), maxsize);
 4127                 bp->b_flags &= ~B_DONE;
 4128         }
 4129         CTR4(KTR_BUF, "getblk(%p, %ld, %d) = %p", vp, (long)blkno, size, bp);
 4130         BUF_ASSERT_HELD(bp);
 4131 end:
 4132         buf_track(bp, __func__);
 4133         KASSERT(bp->b_bufobj == bo,
 4134             ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
 4135         *bpp = bp;
 4136         return (0);
 4137 }
 4138 
 4139 /*
 4140  * Get an empty, disassociated buffer of given size.  The buffer is initially
 4141  * set to B_INVAL.
 4142  */
 4143 struct buf *
 4144 geteblk(int size, int flags)
 4145 {
 4146         struct buf *bp;
 4147         int maxsize;
 4148 
 4149         maxsize = (size + BKVAMASK) & ~BKVAMASK;
 4150         while ((bp = getnewbuf(NULL, 0, 0, maxsize, flags)) == NULL) {
 4151                 if ((flags & GB_NOWAIT_BD) &&
 4152                     (curthread->td_pflags & TDP_BUFNEED) != 0)
 4153                         return (NULL);
 4154         }
 4155         allocbuf(bp, size);
 4156         bufspace_release(bufdomain(bp), maxsize);
 4157         bp->b_flags |= B_INVAL; /* b_dep cleared by getnewbuf() */
 4158         BUF_ASSERT_HELD(bp);
 4159         return (bp);
 4160 }
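
/*
 * Editor's usage sketch (not part of the original file): a caller that
 * needs a transient, vnode-independent scratch buffer.  With flags == 0,
 * geteblk() sleeps until a buffer is available and does not return NULL;
 * only GB_NOWAIT_BD callers must check for NULL.  The size below is a
 * placeholder.
 */
#if 0	/* illustrative only */
	struct buf *bp;

	bp = geteblk(PAGE_SIZE, 0);
	bzero(bp->b_data, bp->b_bufsize);
	/* ... use bp->b_data as wired scratch memory ... */
	brelse(bp);		/* B_INVAL is set, so nothing is cached */
#endif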
 4161 
 4162 /*
 4163  * Truncate the backing store for a non-vmio buffer.
 4164  */
 4165 static void
 4166 vfs_nonvmio_truncate(struct buf *bp, int newbsize)
 4167 {
 4168 
 4169         if (bp->b_flags & B_MALLOC) {
 4170                 /*
 4171                  * malloced buffers are not shrunk
 4172                  */
 4173                 if (newbsize == 0) {
 4174                         bufmallocadjust(bp, 0);
 4175                         free(bp->b_data, M_BIOBUF);
 4176                         bp->b_data = bp->b_kvabase;
 4177                         bp->b_flags &= ~B_MALLOC;
 4178                 }
 4179                 return;
 4180         }
 4181         vm_hold_free_pages(bp, newbsize);
 4182         bufspace_adjust(bp, newbsize);
 4183 }
 4184 
 4185 /*
 4186  * Extend the backing for a non-VMIO buffer.
 4187  */
 4188 static void
 4189 vfs_nonvmio_extend(struct buf *bp, int newbsize)
 4190 {
 4191         caddr_t origbuf;
 4192         int origbufsize;
 4193 
 4194         /*
 4195          * We only use malloced memory on the first allocation,
 4196          * and revert to page-allocated memory when the buffer
 4197          * grows.
 4198          *
 4199          * There is a potential SMP race here that could lead
 4200          * to bufmallocspace slightly passing the max.  It
 4201          * is probably extremely rare and not worth worrying
 4202          * over.
 4203          */
 4204         if (bp->b_bufsize == 0 && newbsize <= PAGE_SIZE/2 &&
 4205             bufmallocspace < maxbufmallocspace) {
 4206                 bp->b_data = malloc(newbsize, M_BIOBUF, M_WAITOK);
 4207                 bp->b_flags |= B_MALLOC;
 4208                 bufmallocadjust(bp, newbsize);
 4209                 return;
 4210         }
 4211 
 4212         /*
 4213          * If the buffer is growing on its other-than-first
 4214          * allocation then we revert to the page-allocation
 4215          * scheme.
 4216          */
 4217         origbuf = NULL;
 4218         origbufsize = 0;
 4219         if (bp->b_flags & B_MALLOC) {
 4220                 origbuf = bp->b_data;
 4221                 origbufsize = bp->b_bufsize;
 4222                 bp->b_data = bp->b_kvabase;
 4223                 bufmallocadjust(bp, 0);
 4224                 bp->b_flags &= ~B_MALLOC;
 4225                 newbsize = round_page(newbsize);
 4226         }
 4227         vm_hold_load_pages(bp, (vm_offset_t) bp->b_data + bp->b_bufsize,
 4228             (vm_offset_t) bp->b_data + newbsize);
 4229         if (origbuf != NULL) {
 4230                 bcopy(origbuf, bp->b_data, origbufsize);
 4231                 free(origbuf, M_BIOBUF);
 4232         }
 4233         bufspace_adjust(bp, newbsize);
 4234 }
 4235 
 4236 /*
 4237  * This code allocates the buffer memory from either anonymous system
 4238  * memory (in the case of non-VMIO operations) or from an associated
 4239  * VM object (in the case of VMIO operations), and it is able to
 4240  * resize a buffer up or down.
 4241  *
 4242  * Note that this code is tricky, and has many complications to resolve
 4243  * deadlock or inconsistent data situations.  Tread lightly!!! 
 4244  * There are B_CACHE and B_DELWRI interactions that must be dealt with by 
 4245  * the caller.  Calling this code willy nilly can result in the loss of data.
 4246  *
 4247  * allocbuf() only adjusts B_CACHE for VMIO buffers.  getblk() deals with
 4248  * B_CACHE for the non-VMIO case.
 4249  */
 4250 int
 4251 allocbuf(struct buf *bp, int size)
 4252 {
 4253         int newbsize;
 4254 
 4255         BUF_ASSERT_HELD(bp);
 4256 
 4257         if (bp->b_bcount == size)
 4258                 return (1);
 4259 
 4260         if (bp->b_kvasize != 0 && bp->b_kvasize < size)
 4261                 panic("allocbuf: buffer too small");
 4262 
 4263         newbsize = roundup2(size, DEV_BSIZE);
 4264         if ((bp->b_flags & B_VMIO) == 0) {
 4265                 if ((bp->b_flags & B_MALLOC) == 0)
 4266                         newbsize = round_page(newbsize);
 4267                 /*
 4268                  * Just get anonymous memory from the kernel.  Don't
 4269                  * mess with B_CACHE.
 4270                  */
 4271                 if (newbsize < bp->b_bufsize)
 4272                         vfs_nonvmio_truncate(bp, newbsize);
 4273                 else if (newbsize > bp->b_bufsize)
 4274                         vfs_nonvmio_extend(bp, newbsize);
 4275         } else {
 4276                 int desiredpages;
 4277 
 4278                 desiredpages = (size == 0) ? 0 :
 4279                     num_pages((bp->b_offset & PAGE_MASK) + newbsize);
 4280 
 4281                 if (bp->b_flags & B_MALLOC)
 4282                         panic("allocbuf: VMIO buffer can't be malloced");
 4283                 /*
 4284                  * Set B_CACHE initially if buffer is 0 length or will become
 4285                  * 0-length.
 4286                  */
 4287                 if (size == 0 || bp->b_bufsize == 0)
 4288                         bp->b_flags |= B_CACHE;
 4289 
 4290                 if (newbsize < bp->b_bufsize)
 4291                         vfs_vmio_truncate(bp, desiredpages);
 4292                 /* XXX This looks as if it should be newbsize > b_bufsize */
 4293                 else if (size > bp->b_bcount)
 4294                         vfs_vmio_extend(bp, desiredpages, size);
 4295                 bufspace_adjust(bp, newbsize);
 4296         }
 4297         bp->b_bcount = size;            /* requested buffer size. */
 4298         return (1);
 4299 }
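
/*
 * Editor's usage sketch: allocbuf() may only be called on a locked
 * buffer (e.g., one returned by getblk() or geteblk()), and it moves
 * both the backing store and b_bcount to the requested size.  The sizes
 * here are placeholders.
 */
#if 0	/* illustrative only */
	BUF_ASSERT_HELD(bp);
	allocbuf(bp, 4 * DEV_BSIZE);	/* grow the backing store */
	/* ... fill the new space; mind the B_CACHE rules above ... */
	allocbuf(bp, 2 * DEV_BSIZE);	/* shrink it again */
#endif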
 4300 
 4301 extern int inflight_transient_maps;
 4302 
 4303 static struct bio_queue nondump_bios;
 4304 
 4305 void
 4306 biodone(struct bio *bp)
 4307 {
 4308         struct mtx *mtxp;
 4309         void (*done)(struct bio *);
 4310         vm_offset_t start, end;
 4311 
 4312         biotrack(bp, __func__);
 4313 
 4314         /*
 4315          * Avoid completing I/O when dumping after a panic since that may
 4316          * result in a deadlock in the filesystem or pager code.  Note that
 4317          * this doesn't affect dumps that were started manually since we aim
 4318          * to keep the system usable after it has been resumed.
 4319          */
 4320         if (__predict_false(dumping && SCHEDULER_STOPPED())) {
 4321                 TAILQ_INSERT_HEAD(&nondump_bios, bp, bio_queue);
 4322                 return;
 4323         }
 4324         if ((bp->bio_flags & BIO_TRANSIENT_MAPPING) != 0) {
 4325                 bp->bio_flags &= ~BIO_TRANSIENT_MAPPING;
 4326                 bp->bio_flags |= BIO_UNMAPPED;
 4327                 start = trunc_page((vm_offset_t)bp->bio_data);
 4328                 end = round_page((vm_offset_t)bp->bio_data + bp->bio_length);
 4329                 bp->bio_data = unmapped_buf;
 4330                 pmap_qremove(start, atop(end - start));
 4331                 vmem_free(transient_arena, start, end - start);
 4332                 atomic_add_int(&inflight_transient_maps, -1);
 4333         }
 4334         done = bp->bio_done;
 4335         if (done == NULL) {
 4336                 mtxp = mtx_pool_find(mtxpool_sleep, bp);
 4337                 mtx_lock(mtxp);
 4338                 bp->bio_flags |= BIO_DONE;
 4339                 wakeup(bp);
 4340                 mtx_unlock(mtxp);
 4341         } else
 4342                 done(bp);
 4343 }
 4344 
 4345 /*
 4346  * Wait for a BIO to finish.
 4347  */
 4348 int
 4349 biowait(struct bio *bp, const char *wchan)
 4350 {
 4351         struct mtx *mtxp;
 4352 
 4353         mtxp = mtx_pool_find(mtxpool_sleep, bp);
 4354         mtx_lock(mtxp);
 4355         while ((bp->bio_flags & BIO_DONE) == 0)
 4356                 msleep(bp, mtxp, PRIBIO, wchan, 0);
 4357         mtx_unlock(mtxp);
 4358         if (bp->bio_error != 0)
 4359                 return (bp->bio_error);
 4360         if (!(bp->bio_flags & BIO_ERROR))
 4361                 return (0);
 4362         return (EIO);
 4363 }
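
/*
 * Editor's usage sketch: a synchronous bio against a GEOM consumer.
 * Leaving bio_done NULL makes biodone() set BIO_DONE and wakeup(), which
 * is exactly the handshake biowait() sleeps on.  cp, offset, length and
 * buffer are placeholders.
 */
#if 0	/* illustrative only */
	struct bio *bip;
	int error;

	bip = g_alloc_bio();
	bip->bio_cmd = BIO_READ;
	bip->bio_offset = offset;
	bip->bio_length = length;
	bip->bio_data = buffer;
	bip->bio_done = NULL;		/* wait synchronously via biowait() */
	g_io_request(bip, cp);
	error = biowait(bip, "exbio");
	g_destroy_bio(bip);
#endif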
 4364 
 4365 void
 4366 biofinish(struct bio *bp, struct devstat *stat, int error)
 4367 {
 4368         
 4369         if (error) {
 4370                 bp->bio_error = error;
 4371                 bp->bio_flags |= BIO_ERROR;
 4372         }
 4373         if (stat != NULL)
 4374                 devstat_end_transaction_bio(stat, bp);
 4375         biodone(bp);
 4376 }
 4377 
 4378 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
 4379 void
 4380 biotrack_buf(struct bio *bp, const char *location)
 4381 {
 4382 
 4383         buf_track(bp->bio_track_bp, location);
 4384 }
 4385 #endif
 4386 
 4387 /*
 4388  *      bufwait:
 4389  *
 4390  *      Wait for buffer I/O completion, returning error status.  The buffer
 4391  *      is left locked and B_DONE on return.  B_EINTR is converted into an EINTR
 4392  *      error and cleared.
 4393  */
 4394 int
 4395 bufwait(struct buf *bp)
 4396 {
 4397         if (bp->b_iocmd == BIO_READ)
 4398                 bwait(bp, PRIBIO, "biord");
 4399         else
 4400                 bwait(bp, PRIBIO, "biowr");
 4401         if (bp->b_flags & B_EINTR) {
 4402                 bp->b_flags &= ~B_EINTR;
 4403                 return (EINTR);
 4404         }
 4405         if (bp->b_ioflags & BIO_ERROR) {
 4406                 return (bp->b_error ? bp->b_error : EIO);
 4407         } else {
 4408                 return (0);
 4409         }
 4410 }
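
/*
 * Editor's usage sketch: the raw read pattern that bread() wraps, for a
 * caller driving the I/O itself.  bp is assumed to come from getblk()
 * with B_CACHE clear; error handling is reduced to the essentials.
 */
#if 0	/* illustrative only */
	bp->b_iocmd = BIO_READ;
	bp->b_flags &= ~B_INVAL;
	bp->b_ioflags &= ~BIO_ERROR;
	vfs_busy_pages(bp, 0);		/* sbusy pages, bogus_page fixup */
	bp->b_iooffset = dbtob(bp->b_blkno);
	bstrategy(bp);			/* hand off to the bufobj strategy */
	error = bufwait(bp);		/* sleeps in bwait(); B_EINTR -> EINTR */
	if (error != 0)
		brelse(bp);
#endif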
 4411 
 4412 /*
 4413  *      bufdone:
 4414  *
 4415  *      Finish I/O on a buffer, optionally calling a completion function.
 4416  *      This is usually called from an interrupt so process blocking is
 4417  *      not allowed.
 4418  *
 4419  *      bufdone() is also responsible for setting B_CACHE in a B_VMIO bp.
 4420  *      In a non-VMIO bp, B_CACHE will be set on the next getblk()
 4421  *      assuming B_INVAL is clear.
 4422  *
 4423  *      For the VMIO case, we set B_CACHE if the op was a read and no
 4424  *      read error occurred, or if the op was a write.  B_CACHE is never
 4425  *      set if the buffer is invalid or otherwise uncacheable.
 4426  *
 4427  *      bufdone() does not mess with B_INVAL, allowing the I/O routine or
 4428  *      the initiator to leave B_INVAL set to brelse the buffer out of
 4429  *      existence in the completion routine.
 4430  */
 4431 void
 4432 bufdone(struct buf *bp)
 4433 {
 4434         struct bufobj *dropobj;
 4435         void    (*biodone)(struct buf *);
 4436 
 4437         buf_track(bp, __func__);
 4438         CTR3(KTR_BUF, "bufdone(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
 4439         dropobj = NULL;
 4440 
 4441         KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp));
 4442         BUF_ASSERT_HELD(bp);
 4443 
 4444         runningbufwakeup(bp);
 4445         if (bp->b_iocmd == BIO_WRITE)
 4446                 dropobj = bp->b_bufobj;
 4447         /* call optional completion function if requested */
 4448         if (bp->b_iodone != NULL) {
 4449                 biodone = bp->b_iodone;
 4450                 bp->b_iodone = NULL;
 4451                 (*biodone) (bp);
 4452                 if (dropobj)
 4453                         bufobj_wdrop(dropobj);
 4454                 return;
 4455         }
 4456         if (bp->b_flags & B_VMIO) {
 4457                 /*
 4458                  * Set B_CACHE if the op was a normal read and no error
 4459                  * occurred.  B_CACHE is set for writes in the b*write()
 4460                  * routines.
 4461                  */
 4462                 if (bp->b_iocmd == BIO_READ &&
 4463                     !(bp->b_flags & (B_INVAL|B_NOCACHE)) &&
 4464                     !(bp->b_ioflags & BIO_ERROR))
 4465                         bp->b_flags |= B_CACHE;
 4466                 vfs_vmio_iodone(bp);
 4467         }
 4468         if (!LIST_EMPTY(&bp->b_dep))
 4469                 buf_complete(bp);
 4470         if ((bp->b_flags & B_CKHASH) != 0) {
 4471                 KASSERT(bp->b_iocmd == BIO_READ,
 4472                     ("bufdone: b_iocmd %d not BIO_READ", bp->b_iocmd));
 4473                 KASSERT(buf_mapped(bp), ("bufdone: bp %p not mapped", bp));
 4474                 (*bp->b_ckhashcalc)(bp);
 4475         }
 4476         /*
 4477          * For asynchronous completions, release the buffer now. The brelse
 4478          * will do a wakeup there if necessary - so no need to do a wakeup
 4479          * here in the async case. The sync case always needs to do a wakeup.
 4480          */
 4481         if (bp->b_flags & B_ASYNC) {
 4482                 if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_RELBUF)) ||
 4483                     (bp->b_ioflags & BIO_ERROR))
 4484                         brelse(bp);
 4485                 else
 4486                         bqrelse(bp);
 4487         } else
 4488                 bdone(bp);
 4489         if (dropobj)
 4490                 bufobj_wdrop(dropobj);
 4491 }
 4492 
 4493 /*
 4494  * This routine is called in lieu of iodone in the case of
 4495  * incomplete I/O.  This keeps the busy status for pages
 4496  * consistent.
 4497  */
 4498 void
 4499 vfs_unbusy_pages(struct buf *bp)
 4500 {
 4501         int i;
 4502         vm_object_t obj;
 4503         vm_page_t m;
 4504 
 4505         runningbufwakeup(bp);
 4506         if (!(bp->b_flags & B_VMIO))
 4507                 return;
 4508 
 4509         obj = bp->b_bufobj->bo_object;
 4510         VM_OBJECT_WLOCK(obj);
 4511         for (i = 0; i < bp->b_npages; i++) {
 4512                 m = bp->b_pages[i];
 4513                 if (m == bogus_page) {
 4514                         m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i);
 4515                         if (m == NULL)
 4516                                 panic("vfs_unbusy_pages: page missing");
 4517                         bp->b_pages[i] = m;
 4518                         if (buf_mapped(bp)) {
 4519                                 BUF_CHECK_MAPPED(bp);
 4520                                 pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
 4521                                     bp->b_pages, bp->b_npages);
 4522                         } else
 4523                                 BUF_CHECK_UNMAPPED(bp);
 4524                 }
 4525                 vm_page_sunbusy(m);
 4526         }
 4527         vm_object_pip_wakeupn(obj, bp->b_npages);
 4528         VM_OBJECT_WUNLOCK(obj);
 4529 }
 4530 
 4531 /*
 4532  * vfs_page_set_valid:
 4533  *
 4534  *      Set the valid bits in a page based on the supplied offset.   The
 4535  *      range is restricted to the buffer's size.
 4536  *
 4537  *      This routine is typically called after a read completes.
 4538  */
 4539 static void
 4540 vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m)
 4541 {
 4542         vm_ooffset_t eoff;
 4543 
 4544         /*
 4545          * Compute the end offset, eoff, such that [off, eoff) does not span a
 4546          * page boundary and eoff is not greater than the end of the buffer.
 4547          * The end of the buffer, in this case, is our file EOF, not the
 4548          * allocation size of the buffer.
 4549          */
 4550         eoff = (off + PAGE_SIZE) & ~(vm_ooffset_t)PAGE_MASK;
 4551         if (eoff > bp->b_offset + bp->b_bcount)
 4552                 eoff = bp->b_offset + bp->b_bcount;
 4553 
 4554         /*
 4555          * Set valid range.  This is typically the entire buffer and thus the
 4556          * entire page.
 4557          */
 4558         if (eoff > off)
 4559                 vm_page_set_valid_range(m, off & PAGE_MASK, eoff - off);
 4560 }
 4561 
 4562 /*
 4563  * vfs_page_set_validclean:
 4564  *
 4565  *      Set the valid bits and clear the dirty bits in a page based on the
 4566  *      supplied offset.   The range is restricted to the buffer's size.
 4567  */
 4568 static void
 4569 vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off, vm_page_t m)
 4570 {
 4571         vm_ooffset_t soff, eoff;
 4572 
 4573         /*
 4574          * Start and end offsets in buffer.  eoff - soff may not cross a
 4575          * page boundary or cross the end of the buffer.  The end of the
 4576          * buffer, in this case, is our file EOF, not the allocation size
 4577          * of the buffer.
 4578          */
 4579         soff = off;
 4580         eoff = (off + PAGE_SIZE) & ~(off_t)PAGE_MASK;
 4581         if (eoff > bp->b_offset + bp->b_bcount)
 4582                 eoff = bp->b_offset + bp->b_bcount;
 4583 
 4584         /*
 4585          * Set valid range.  This is typically the entire buffer and thus the
 4586          * entire page.
 4587          */
 4588         if (eoff > soff) {
 4589                 vm_page_set_validclean(
 4590                     m,
 4591                    (vm_offset_t) (soff & PAGE_MASK),
 4592                    (vm_offset_t) (eoff - soff)
 4593                 );
 4594         }
 4595 }
 4596 
 4597 /*
 4598  * Ensure that all buffer pages are not exclusive busied.  If any page is
 4599  * exclusive busy, drain it.
 4600  */
 4601 void
 4602 vfs_drain_busy_pages(struct buf *bp)
 4603 {
 4604         vm_page_t m;
 4605         int i, last_busied;
 4606 
 4607         VM_OBJECT_ASSERT_WLOCKED(bp->b_bufobj->bo_object);
 4608         last_busied = 0;
 4609         for (i = 0; i < bp->b_npages; i++) {
 4610                 m = bp->b_pages[i];
 4611                 if (vm_page_xbusied(m)) {
 4612                         for (; last_busied < i; last_busied++)
 4613                                 vm_page_sbusy(bp->b_pages[last_busied]);
 4614                         while (vm_page_xbusied(m)) {
 4615                                 vm_page_lock(m);
 4616                                 VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
 4617                                 vm_page_busy_sleep(m, "vbpage", true);
 4618                                 VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
 4619                         }
 4620                 }
 4621         }
 4622         for (i = 0; i < last_busied; i++)
 4623                 vm_page_sunbusy(bp->b_pages[i]);
 4624 }
 4625 
 4626 /*
 4627  * This routine is called before a device strategy routine.
 4628  * It is used to tell the VM system that paging I/O is in
 4629  * progress, and to treat the pages associated with the buffer
 4630  * almost as if they were exclusive busy.  The object's
 4631  * paging_in_progress count is also adjusted to make sure that the
 4632  * object doesn't become inconsistent.
 4633  *
 4634  * Since I/O has not been initiated yet, certain buffer flags
 4635  * such as BIO_ERROR or B_INVAL may be in an inconsistent state
 4636  * and should be ignored.
 4637  */
 4638 void
 4639 vfs_busy_pages(struct buf *bp, int clear_modify)
 4640 {
 4641         vm_object_t obj;
 4642         vm_ooffset_t foff;
 4643         vm_page_t m;
 4644         int i;
 4645         bool bogus;
 4646 
 4647         if (!(bp->b_flags & B_VMIO))
 4648                 return;
 4649 
 4650         obj = bp->b_bufobj->bo_object;
 4651         foff = bp->b_offset;
 4652         KASSERT(bp->b_offset != NOOFFSET,
 4653             ("vfs_busy_pages: no buffer offset"));
 4654         VM_OBJECT_WLOCK(obj);
 4655         vfs_drain_busy_pages(bp);
 4656         if (bp->b_bufsize != 0)
 4657                 vfs_setdirty_locked_object(bp);
 4658         bogus = false;
 4659         for (i = 0; i < bp->b_npages; i++) {
 4660                 m = bp->b_pages[i];
 4661 
 4662                 if ((bp->b_flags & B_CLUSTER) == 0) {
 4663                         vm_object_pip_add(obj, 1);
 4664                         vm_page_sbusy(m);
 4665                 }
 4666                 /*
 4667                  * When readying a buffer for a read (i.e.,
 4668                  * clear_modify == 0), it is important to do
 4669                  * bogus_page replacement for valid pages in
 4670                  * partially instantiated buffers.  Partially
 4671                  * instantiated buffers can, in turn, occur when
 4672                  * reconstituting a buffer from its VM backing store
 4673                  * base.  We only have to do this if B_CACHE is
 4674                  * clear (which causes the I/O to occur in the
 4675                  * first place).  The replacement prevents the read
 4676                  * I/O from overwriting potentially dirty VM-backed
 4677                  * pages.  XXX bogus page replacement is, uh, bogus.
 4678                  * It may not work properly with small-block devices.
 4679                  * We need to find a better way.
 4680                  */
 4681                 if (clear_modify) {
 4682                         pmap_remove_write(m);
 4683                         vfs_page_set_validclean(bp, foff, m);
 4684                 } else if (m->valid == VM_PAGE_BITS_ALL &&
 4685                     (bp->b_flags & B_CACHE) == 0) {
 4686                         bp->b_pages[i] = bogus_page;
 4687                         bogus = true;
 4688                 }
 4689                 foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
 4690         }
 4691         VM_OBJECT_WUNLOCK(obj);
 4692         if (bogus && buf_mapped(bp)) {
 4693                 BUF_CHECK_MAPPED(bp);
 4694                 pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
 4695                     bp->b_pages, bp->b_npages);
 4696         }
 4697 }
 4698 
 4699 /*
 4700  *      vfs_bio_set_valid:
 4701  *
 4702  *      Set the range within the buffer to valid.  The range is
 4703  *      relative to the beginning of the buffer, b_offset.  Note that
 4704  *      b_offset itself may be offset from the beginning of the first
 4705  *      page.
 4706  */
 4707 void   
 4708 vfs_bio_set_valid(struct buf *bp, int base, int size)
 4709 {
 4710         int i, n;
 4711         vm_page_t m;
 4712 
 4713         if (!(bp->b_flags & B_VMIO))
 4714                 return;
 4715 
 4716         /*
 4717          * Fixup base to be relative to beginning of first page.
 4718          * Set initial n to be the maximum number of bytes in the
 4719          * first page that can be validated.
 4720          */
 4721         base += (bp->b_offset & PAGE_MASK);
 4722         n = PAGE_SIZE - (base & PAGE_MASK);
 4723 
 4724         VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
 4725         for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
 4726                 m = bp->b_pages[i];
 4727                 if (n > size)
 4728                         n = size;
 4729                 vm_page_set_valid_range(m, base & PAGE_MASK, n);
 4730                 base += n;
 4731                 size -= n;
 4732                 n = PAGE_SIZE;
 4733         }
 4734         VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
 4735 }
 4736 
 4737 /*
 4738  *      vfs_bio_clrbuf:
 4739  *
 4740  *      If the specified buffer is a non-VMIO buffer, clear the entire
 4741  *      buffer.  If the specified buffer is a VMIO buffer, clear and
 4742  *      validate only the previously invalid portions of the buffer.
 4743  *      This routine essentially fakes an I/O, so we need to clear
 4744  *      BIO_ERROR and B_INVAL.
 4745  *
 4746  *      Note that while we only theoretically need to clear through b_bcount,
 4747  *      we go ahead and clear through b_bufsize.
 4748  */
 4749 void
 4750 vfs_bio_clrbuf(struct buf *bp) 
 4751 {
 4752         int i, j, mask, sa, ea, slide;
 4753 
 4754         if ((bp->b_flags & (B_VMIO | B_MALLOC)) != B_VMIO) {
 4755                 clrbuf(bp);
 4756                 return;
 4757         }
 4758         bp->b_flags &= ~B_INVAL;
 4759         bp->b_ioflags &= ~BIO_ERROR;
 4760         VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
 4761         if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
 4762             (bp->b_offset & PAGE_MASK) == 0) {
 4763                 if (bp->b_pages[0] == bogus_page)
 4764                         goto unlock;
 4765                 mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
 4766                 VM_OBJECT_ASSERT_WLOCKED(bp->b_pages[0]->object);
 4767                 if ((bp->b_pages[0]->valid & mask) == mask)
 4768                         goto unlock;
 4769                 if ((bp->b_pages[0]->valid & mask) == 0) {
 4770                         pmap_zero_page_area(bp->b_pages[0], 0, bp->b_bufsize);
 4771                         bp->b_pages[0]->valid |= mask;
 4772                         goto unlock;
 4773                 }
 4774         }
 4775         sa = bp->b_offset & PAGE_MASK;
 4776         slide = 0;
 4777         for (i = 0; i < bp->b_npages; i++, sa = 0) {
 4778                 slide = imin(slide + PAGE_SIZE, bp->b_offset + bp->b_bufsize);
 4779                 ea = slide & PAGE_MASK;
 4780                 if (ea == 0)
 4781                         ea = PAGE_SIZE;
 4782                 if (bp->b_pages[i] == bogus_page)
 4783                         continue;
 4784                 j = sa / DEV_BSIZE;
 4785                 mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j;
 4786                 VM_OBJECT_ASSERT_WLOCKED(bp->b_pages[i]->object);
 4787                 if ((bp->b_pages[i]->valid & mask) == mask)
 4788                         continue;
 4789                 if ((bp->b_pages[i]->valid & mask) == 0)
 4790                         pmap_zero_page_area(bp->b_pages[i], sa, ea - sa);
 4791                 else {
 4792                         for (; sa < ea; sa += DEV_BSIZE, j++) {
 4793                                 if ((bp->b_pages[i]->valid & (1 << j)) == 0) {
 4794                                         pmap_zero_page_area(bp->b_pages[i],
 4795                                             sa, DEV_BSIZE);
 4796                                 }
 4797                         }
 4798                 }
 4799                 bp->b_pages[i]->valid |= mask;
 4800         }
 4801 unlock:
 4802         VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
 4803         bp->b_resid = 0;
 4804 }
 4805 
 4806 void
 4807 vfs_bio_bzero_buf(struct buf *bp, int base, int size)
 4808 {
 4809         vm_page_t m;
 4810         int i, n;
 4811 
 4812         if (buf_mapped(bp)) {
 4813                 BUF_CHECK_MAPPED(bp);
 4814                 bzero(bp->b_data + base, size);
 4815         } else {
 4816                 BUF_CHECK_UNMAPPED(bp);
 4817                 n = PAGE_SIZE - (base & PAGE_MASK);
 4818                 for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
 4819                         m = bp->b_pages[i];
 4820                         if (n > size)
 4821                                 n = size;
 4822                         pmap_zero_page_area(m, base & PAGE_MASK, n);
 4823                         base += n;
 4824                         size -= n;
 4825                         n = PAGE_SIZE;
 4826                 }
 4827         }
 4828 }
 4829 
 4830 /*
 4831  * Update buffer flags based on I/O request parameters, optionally releasing the
 4832  * buffer.  If it's VMIO or direct I/O, the buffer pages are released to the VM,
 4833  * where they may be placed on a page queue (VMIO) or freed immediately (direct
 4834  * I/O).  Otherwise the buffer is released to the cache.
 4835  */
 4836 static void
 4837 b_io_dismiss(struct buf *bp, int ioflag, bool release)
 4838 {
 4839 
 4840         KASSERT((ioflag & IO_NOREUSE) == 0 || (ioflag & IO_VMIO) != 0,
 4841             ("buf %p non-VMIO noreuse", bp));
 4842 
 4843         if ((ioflag & IO_DIRECT) != 0)
 4844                 bp->b_flags |= B_DIRECT;
 4845         if ((ioflag & IO_EXT) != 0)
 4846                 bp->b_xflags |= BX_ALTDATA;
 4847         if ((ioflag & (IO_VMIO | IO_DIRECT)) != 0 && LIST_EMPTY(&bp->b_dep)) {
 4848                 bp->b_flags |= B_RELBUF;
 4849                 if ((ioflag & IO_NOREUSE) != 0)
 4850                         bp->b_flags |= B_NOREUSE;
 4851                 if (release)
 4852                         brelse(bp);
 4853         } else if (release)
 4854                 bqrelse(bp);
 4855 }
 4856 
 4857 void
 4858 vfs_bio_brelse(struct buf *bp, int ioflag)
 4859 {
 4860 
 4861         b_io_dismiss(bp, ioflag, true);
 4862 }
 4863 
 4864 void
 4865 vfs_bio_set_flags(struct buf *bp, int ioflag)
 4866 {
 4867 
 4868         b_io_dismiss(bp, ioflag, false);
 4869 }
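
/*
 * Editor's usage sketch: a filesystem read path in the style of
 * ffs_read(), where vfs_bio_brelse() applies the IO_DIRECT/IO_NOREUSE
 * policy and releases the buffer in one step.  lbn, bsize, blkoff, n,
 * uio and ioflag are placeholders.
 */
#if 0	/* illustrative only */
	struct buf *bp;
	int error;

	error = bread(vp, lbn, bsize, NOCRED, &bp);
	if (error == 0) {
		error = uiomove((char *)bp->b_data + blkoff, n, uio);
		vfs_bio_brelse(bp, ioflag);
	}
#endif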
 4870 
 4871 /*
 4872  * vm_hold_load_pages and vm_hold_free_pages get pages into
 4873  * a buffer's address space.  The pages are anonymous and are
 4874  * not associated with a file object.
 4875  */
 4876 static void
 4877 vm_hold_load_pages(struct buf *bp, vm_offset_t from, vm_offset_t to)
 4878 {
 4879         vm_offset_t pg;
 4880         vm_page_t p;
 4881         int index;
 4882 
 4883         BUF_CHECK_MAPPED(bp);
 4884 
 4885         to = round_page(to);
 4886         from = round_page(from);
 4887         index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
 4888 
 4889         for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
 4890                 /*
 4891                  * note: must allocate system pages since blocking here
 4892                  * could interfere with paging I/O, no matter which
 4893                  * process we are.
 4894                  */
 4895                 p = vm_page_alloc(NULL, 0, VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ |
 4896                     VM_ALLOC_WIRED | VM_ALLOC_COUNT((to - pg) >> PAGE_SHIFT) |
 4897                     VM_ALLOC_WAITOK);
 4898                 pmap_qenter(pg, &p, 1);
 4899                 bp->b_pages[index] = p;
 4900         }
 4901         bp->b_npages = index;
 4902 }
 4903 
 4904 /* Return pages associated with this buf to the vm system */
 4905 static void
 4906 vm_hold_free_pages(struct buf *bp, int newbsize)
 4907 {
 4908         vm_offset_t from;
 4909         vm_page_t p;
 4910         int index, newnpages;
 4911 
 4912         BUF_CHECK_MAPPED(bp);
 4913 
 4914         from = round_page((vm_offset_t)bp->b_data + newbsize);
 4915         newnpages = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
 4916         if (bp->b_npages > newnpages)
 4917                 pmap_qremove(from, bp->b_npages - newnpages);
 4918         for (index = newnpages; index < bp->b_npages; index++) {
 4919                 p = bp->b_pages[index];
 4920                 bp->b_pages[index] = NULL;
 4921                 p->wire_count--;
 4922                 vm_page_free(p);
 4923         }
 4924         vm_wire_sub(bp->b_npages - newnpages);
 4925         bp->b_npages = newnpages;
 4926 }
 4927 
 4928 /*
 4929  * Map an IO request into kernel virtual address space.
 4930  *
 4931  * All requests are (re)mapped into kernel VA space.
 4932  * Notice that we use b_bufsize for the size of the buffer
 4933  * to be mapped.  b_bcount might be modified by the driver.
 4934  *
 4935  * Note that even if the caller determines that the address space should
 4936  * be valid, a race or a smaller file mapped into a larger space may
 4937  * actually cause vmapbuf() to fail, so all callers of vmapbuf() MUST
 4938  * check the return value.
 4939  *
 4940  * This function only works with pager buffers.
 4941  */
 4942 int
 4943 vmapbuf(struct buf *bp, int mapbuf)
 4944 {
 4945         vm_prot_t prot;
 4946         int pidx;
 4947 
 4948         if (bp->b_bufsize < 0)
 4949                 return (-1);
 4950         prot = VM_PROT_READ;
 4951         if (bp->b_iocmd == BIO_READ)
 4952                 prot |= VM_PROT_WRITE;  /* Less backwards than it looks */
 4953         if ((pidx = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
 4954             (vm_offset_t)bp->b_data, bp->b_bufsize, prot, bp->b_pages,
 4955             btoc(MAXPHYS))) < 0)
 4956                 return (-1);
 4957         bp->b_npages = pidx;
 4958         bp->b_offset = ((vm_offset_t)bp->b_data) & PAGE_MASK;
 4959         if (mapbuf || !unmapped_buf_allowed) {
 4960                 pmap_qenter((vm_offset_t)bp->b_kvabase, bp->b_pages, pidx);
 4961                 bp->b_data = bp->b_kvabase + bp->b_offset;
 4962         } else
 4963                 bp->b_data = unmapped_buf;
 4964         return (0);
 4965 }
 4966 
 4967 /*
 4968  * Free the io map PTEs associated with this IO operation.
 4969  * We also invalidate the TLB entries and restore the original b_addr.
 4970  *
 4971  * This function only works with pager buffers.
 4972  */
 4973 void
 4974 vunmapbuf(struct buf *bp)
 4975 {
 4976         int npages;
 4977 
 4978         npages = bp->b_npages;
 4979         if (buf_mapped(bp))
 4980                 pmap_qremove(trunc_page((vm_offset_t)bp->b_data), npages);
 4981         vm_page_unhold_pages(bp->b_pages, npages);
 4982 
 4983         bp->b_data = unmapped_buf;
 4984 }
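
/*
 * Editor's usage sketch: the vmapbuf()/vunmapbuf() life cycle for a
 * pager buffer carrying a user address, similar in spirit to physio().
 * The actual I/O dispatch is elided; b_data and b_bufsize are assumed
 * to describe the user buffer.
 */
#if 0	/* illustrative only */
	if (vmapbuf(bp, 1) < 0)		/* wire and map the user pages */
		return (EFAULT);
	/* ... issue the I/O and wait for completion ... */
	vunmapbuf(bp);			/* unmap and unwire the pages */
#endif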
 4985 
 4986 void
 4987 bdone(struct buf *bp)
 4988 {
 4989         struct mtx *mtxp;
 4990 
 4991         mtxp = mtx_pool_find(mtxpool_sleep, bp);
 4992         mtx_lock(mtxp);
 4993         bp->b_flags |= B_DONE;
 4994         wakeup(bp);
 4995         mtx_unlock(mtxp);
 4996 }
 4997 
 4998 void
 4999 bwait(struct buf *bp, u_char pri, const char *wchan)
 5000 {
 5001         struct mtx *mtxp;
 5002 
 5003         mtxp = mtx_pool_find(mtxpool_sleep, bp);
 5004         mtx_lock(mtxp);
 5005         while ((bp->b_flags & B_DONE) == 0)
 5006                 msleep(bp, mtxp, pri, wchan, 0);
 5007         mtx_unlock(mtxp);
 5008 }
 5009 
 5010 int
 5011 bufsync(struct bufobj *bo, int waitfor)
 5012 {
 5013 
 5014         return (VOP_FSYNC(bo2vnode(bo), waitfor, curthread));
 5015 }
 5016 
 5017 void
 5018 bufstrategy(struct bufobj *bo, struct buf *bp)
 5019 {
 5020         int i __unused;
 5021         struct vnode *vp;
 5022 
 5023         vp = bp->b_vp;
 5024         KASSERT(vp == bo->bo_private, ("Inconsistent vnode bufstrategy"));
 5025         KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
 5026             ("Wrong vnode in bufstrategy(bp=%p, vp=%p)", bp, vp));
 5027         i = VOP_STRATEGY(vp, bp);
 5028         KASSERT(i == 0, ("VOP_STRATEGY failed bp=%p vp=%p", bp, bp->b_vp));
 5029 }
 5030 
 5031 /*
 5032  * Initialize a struct bufobj before use.  Memory is assumed zero filled.
 5033  */
 5034 void
 5035 bufobj_init(struct bufobj *bo, void *private)
 5036 {
 5037         static volatile int bufobj_cleanq;
 5038 
 5039         bo->bo_domain =
 5040             atomic_fetchadd_int(&bufobj_cleanq, 1) % buf_domains;
 5041         rw_init(BO_LOCKPTR(bo), "bufobj interlock");
 5042         bo->bo_private = private;
 5043         TAILQ_INIT(&bo->bo_clean.bv_hd);
 5044         TAILQ_INIT(&bo->bo_dirty.bv_hd);
 5045 }
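
/*
 * Editor's usage sketch: a vnode embeds its bufobj, which is set up on
 * zeroed memory roughly like this during vnode construction.  The block
 * size is a placeholder; buf_ops_bio is the default operations vector.
 */
#if 0	/* illustrative only */
	struct bufobj *bo;

	bo = &vp->v_bufobj;
	bufobj_init(bo, vp);		/* also picks a buffer domain */
	bo->bo_ops = &buf_ops_bio;
	bo->bo_bsize = DEV_BSIZE;	/* placeholder block size */
#endif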
 5046 
 5047 void
 5048 bufobj_wrefl(struct bufobj *bo)
 5049 {
 5050 
 5051         KASSERT(bo != NULL, ("NULL bo in bufobj_wref"));
 5052         ASSERT_BO_WLOCKED(bo);
 5053         bo->bo_numoutput++;
 5054 }
 5055 
 5056 void
 5057 bufobj_wref(struct bufobj *bo)
 5058 {
 5059 
 5060         KASSERT(bo != NULL, ("NULL bo in bufobj_wref"));
 5061         BO_LOCK(bo);
 5062         bo->bo_numoutput++;
 5063         BO_UNLOCK(bo);
 5064 }
 5065 
 5066 void
 5067 bufobj_wdrop(struct bufobj *bo)
 5068 {
 5069 
 5070         KASSERT(bo != NULL, ("NULL bo in bufobj_wdrop"));
 5071         BO_LOCK(bo);
 5072         KASSERT(bo->bo_numoutput > 0, ("bufobj_wdrop non-positive count"));
 5073         if ((--bo->bo_numoutput == 0) && (bo->bo_flag & BO_WWAIT)) {
 5074                 bo->bo_flag &= ~BO_WWAIT;
 5075                 wakeup(&bo->bo_numoutput);
 5076         }
 5077         BO_UNLOCK(bo);
 5078 }
 5079 
 5080 int
 5081 bufobj_wwait(struct bufobj *bo, int slpflag, int timeo)
 5082 {
 5083         int error;
 5084 
 5085         KASSERT(bo != NULL, ("NULL bo in bufobj_wwait"));
 5086         ASSERT_BO_WLOCKED(bo);
 5087         error = 0;
 5088         while (bo->bo_numoutput) {
 5089                 bo->bo_flag |= BO_WWAIT;
 5090                 error = msleep(&bo->bo_numoutput, BO_LOCKPTR(bo),
 5091                     slpflag | (PRIBIO + 1), "bo_wwait", timeo);
 5092                 if (error)
 5093                         break;
 5094         }
 5095         return (error);
 5096 }
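
/*
 * Editor's usage sketch: draining the writes in flight on a buffer
 * object before invalidating or truncating it.  bufobj_wwait() must be
 * called with the bufobj interlock held; it loops internally until
 * bo_numoutput reaches zero or msleep() fails.
 */
#if 0	/* illustrative only */
	BO_LOCK(bo);
	error = bufobj_wwait(bo, 0, 0);	/* no slpflag, no timeout */
	BO_UNLOCK(bo);
#endif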
 5097 
 5098 /*
 5099  * Set bio_data or bio_ma for struct bio from the struct buf.
 5100  */
 5101 void
 5102 bdata2bio(struct buf *bp, struct bio *bip)
 5103 {
 5104 
 5105         if (!buf_mapped(bp)) {
 5106                 KASSERT(unmapped_buf_allowed, ("unmapped"));
 5107                 bip->bio_ma = bp->b_pages;
 5108                 bip->bio_ma_n = bp->b_npages;
 5109                 bip->bio_data = unmapped_buf;
 5110                 bip->bio_ma_offset = (vm_offset_t)bp->b_offset & PAGE_MASK;
 5111                 bip->bio_flags |= BIO_UNMAPPED;
 5112                 KASSERT(round_page(bip->bio_ma_offset + bip->bio_length) /
 5113                     PAGE_SIZE == bp->b_npages,
 5114                     ("Buffer %p too short: %d %lld %d", bp, bip->bio_ma_offset,
 5115                     (long long)bip->bio_length, bip->bio_ma_n));
 5116         } else {
 5117                 bip->bio_data = bp->b_data;
 5118                 bip->bio_ma = NULL;
 5119         }
 5120 }
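
/*
 * Editor's usage sketch: a strategy routine translating a struct buf
 * into a struct bio for GEOM, in the spirit of geom_vfs.  bio_length
 * must be set before bdata2bio() so its unmapped-buffer assertion can
 * check the page count.  my_bio_done and cp are hypothetical.
 */
#if 0	/* illustrative only */
	struct bio *bip;

	bip = g_alloc_bio();
	bip->bio_cmd = bp->b_iocmd;
	bip->bio_offset = bp->b_iooffset;
	bip->bio_length = bp->b_bcount;
	bdata2bio(bp, bip);		/* mapped or unmapped representation */
	bip->bio_done = my_bio_done;	/* hypothetical completion handler */
	bip->bio_caller2 = bp;
	g_io_request(bip, cp);
#endif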
 5121 
 5122 /*
 5123  * The MIPS pmap code currently doesn't handle aliased pages.
 5124  * The VIPT caches may not handle page aliasing themselves, leading
 5125  * to data corruption.
 5126  *
 5127  * As such, this aliasing makes a system extremely unhappy if said
 5128  * system doesn't support unaliasing the above situation in hardware.
 5129  * Some "recent" systems (e.g., some mips24k/mips74k cores) don't enable
 5130  * this feature at build time, so it has to be handled in software.
 5131  *
 5132  * Once the MIPS pmap/cache code grows to support this function on
 5133  * earlier chips, it should be flipped back off.
 5134  */
 5135 #ifdef  __mips__
 5136 static int buf_pager_relbuf = 1;
 5137 #else
 5138 static int buf_pager_relbuf = 0;
 5139 #endif
 5140 SYSCTL_INT(_vfs, OID_AUTO, buf_pager_relbuf, CTLFLAG_RWTUN,
 5141     &buf_pager_relbuf, 0,
 5142     "Make buffer pager release buffers after reading");
 5143 
 5144 /*
 5145  * The buffer pager.  It uses buffer reads to validate pages.
 5146  *
 5147  * In contrast to the generic local pager from vm/vnode_pager.c, this
 5148  * pager correctly and easily handles volumes where the underlying
 5149  * device block size is greater than the machine page size.  The
 5150  * buffer cache transparently extends the requested page run to be
 5151  * aligned at the block boundary, and does the necessary bogus page
 5152  * replacements in the addends to avoid obliterating already valid
 5153  * pages.
 5154  *
 5155  * The only non-trivial issue is that the exclusive busy state for
 5156  * pages, which is assumed by the vm_pager_getpages() interface, is
 5157  * incompatible with the VMIO buffer cache's desire to share-busy the
 5158  * pages.  This function performs a trivial downgrade of the pages'
 5159  * state before reading buffers, and a less trivial upgrade from the
 5160  * shared-busy to excl-busy state after the read.
 5161  */
 5162 int
 5163 vfs_bio_getpages(struct vnode *vp, vm_page_t *ma, int count,
 5164     int *rbehind, int *rahead, vbg_get_lblkno_t get_lblkno,
 5165     vbg_get_blksize_t get_blksize)
 5166 {
 5167         vm_page_t m;
 5168         vm_object_t object;
 5169         struct buf *bp;
 5170         struct mount *mp;
 5171         daddr_t lbn, lbnp;
 5172         vm_ooffset_t la, lb, poff, poffe;
 5173         long bsize;
 5174         int bo_bs, br_flags, error, i, pgsin, pgsin_a, pgsin_b;
 5175         bool redo, lpart;
 5176 
 5177         object = vp->v_object;
 5178         mp = vp->v_mount;
 5179         error = 0;
 5180         la = IDX_TO_OFF(ma[count - 1]->pindex);
 5181         if (la >= object->un_pager.vnp.vnp_size)
 5182                 return (VM_PAGER_BAD);
 5183 
 5184         /*
 5185          * Change the meaning of la from where the last requested page starts
 5186          * to where it ends, because that's the end of the requested region
 5187          * and the start of the potential read-ahead region.
 5188          */
 5189         la += PAGE_SIZE;
 5190         lpart = la > object->un_pager.vnp.vnp_size;
 5191         bo_bs = get_blksize(vp, get_lblkno(vp, IDX_TO_OFF(ma[0]->pindex)));
 5192 
 5193         /*
 5194          * Calculate read-ahead, behind and total pages.
 5195          */
 5196         pgsin = count;
 5197         lb = IDX_TO_OFF(ma[0]->pindex);
 5198         pgsin_b = OFF_TO_IDX(lb - rounddown2(lb, bo_bs));
 5199         pgsin += pgsin_b;
 5200         if (rbehind != NULL)
 5201                 *rbehind = pgsin_b;
 5202         pgsin_a = OFF_TO_IDX(roundup2(la, bo_bs) - la);
 5203         if (la + IDX_TO_OFF(pgsin_a) >= object->un_pager.vnp.vnp_size)
 5204                 pgsin_a = OFF_TO_IDX(roundup2(object->un_pager.vnp.vnp_size,
 5205                     PAGE_SIZE) - la);
 5206         pgsin += pgsin_a;
 5207         if (rahead != NULL)
 5208                 *rahead = pgsin_a;
 5209         VM_CNT_INC(v_vnodein);
 5210         VM_CNT_ADD(v_vnodepgsin, pgsin);
 5211 
 5212         br_flags = (mp != NULL && (mp->mnt_kern_flag & MNTK_UNMAPPED_BUFS)
 5213             != 0) ? GB_UNMAPPED : 0;
 5214         VM_OBJECT_WLOCK(object);
 5215 again:
 5216         for (i = 0; i < count; i++)
 5217                 vm_page_busy_downgrade(ma[i]);
 5218         VM_OBJECT_WUNLOCK(object);
 5219 
 5220         lbnp = -1;
 5221         for (i = 0; i < count; i++) {
 5222                 m = ma[i];
 5223 
 5224                 /*
 5225                  * Pages are shared busy and the object lock is not
 5226                  * owned, which together allow for the pages'
 5227                  * invalidation.  The racy test for validity avoids
 5228                  * useless creation of the buffer for the most typical
 5229                  * case when invalidation is not used in redo or for
 5230                  * parallel read.  The shared->excl upgrade loop at
 5231                  * the end of the function catches the race in a
 5232                  * reliable way (protected by the object lock).
 5233                  */
 5234                 if (m->valid == VM_PAGE_BITS_ALL)
 5235                         continue;
 5236 
 5237                 poff = IDX_TO_OFF(m->pindex);
 5238                 poffe = MIN(poff + PAGE_SIZE, object->un_pager.vnp.vnp_size);
 5239                 for (; poff < poffe; poff += bsize) {
 5240                         lbn = get_lblkno(vp, poff);
 5241                         if (lbn == lbnp)
 5242                                 goto next_page;
 5243                         lbnp = lbn;
 5244 
 5245                         bsize = get_blksize(vp, lbn);
 5246                         error = bread_gb(vp, lbn, bsize, curthread->td_ucred,
 5247                             br_flags, &bp);
 5248                         if (error != 0)
 5249                                 goto end_pages;
 5250                         if (LIST_EMPTY(&bp->b_dep)) {
 5251                                 /*
 5252                                  * Invalidation clears m->valid, but
 5253                                  * may leave B_CACHE flag if the
 5254                                  * buffer existed at the invalidation
 5255                                  * time.  In this case, recycle the
 5256                                  * buffer to do real read on next
 5257                                  * bread() after redo.
 5258                                  *
 5259                                  * Otherwise B_RELBUF is not strictly
 5260                                  * necessary, enable to reduce buf
 5261                                  * cache pressure.
 5262                                  */
 5263                                 if (buf_pager_relbuf ||
 5264                                     m->valid != VM_PAGE_BITS_ALL)
 5265                                         bp->b_flags |= B_RELBUF;
 5266 
 5267                                 bp->b_flags &= ~B_NOCACHE;
 5268                                 brelse(bp);
 5269                         } else {
 5270                                 bqrelse(bp);
 5271                         }
 5272                 }
 5273                 KASSERT(1 /* racy, enable for debugging */ ||
 5274                     m->valid == VM_PAGE_BITS_ALL || i == count - 1,
 5275                     ("buf %d %p invalid", i, m));
 5276                 if (i == count - 1 && lpart) {
 5277                         VM_OBJECT_WLOCK(object);
 5278                         if (m->valid != 0 &&
 5279                             m->valid != VM_PAGE_BITS_ALL)
 5280                                 vm_page_zero_invalid(m, TRUE);
 5281                         VM_OBJECT_WUNLOCK(object);
 5282                 }
 5283 next_page:;
 5284         }
 5285 end_pages:
 5286 
 5287         VM_OBJECT_WLOCK(object);
 5288         redo = false;
 5289         for (i = 0; i < count; i++) {
 5290                 vm_page_sunbusy(ma[i]);
 5291                 ma[i] = vm_page_grab(object, ma[i]->pindex, VM_ALLOC_NORMAL);
 5292 
 5293                 /*
 5294                  * Since the pages were only sbusy while neither the
 5295                  * buffer nor the object lock was held by us, or
 5296                  * reallocated while vm_page_grab() slept for busy
 5297                  * relinquish, they could have been invalidated.
 5298                  * Recheck the valid bits and re-read as needed.
 5299                  *
 5300                  * Note that the last page is made fully valid in the
 5301                  * read loop, and partial validity for the page at
 5302                  * index count - 1 could mean that the page was
 5303                  * invalidated or removed, so we must restart for
 5304                  * safety as well.
 5305                  */
 5306                 if (ma[i]->valid != VM_PAGE_BITS_ALL)
 5307                         redo = true;
 5308         }
 5309         if (redo && error == 0)
 5310                 goto again;
 5311         VM_OBJECT_WUNLOCK(object);
 5312         return (error != 0 ? VM_PAGER_ERROR : VM_PAGER_OK);
 5313 }
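
/*
 * Editor's usage sketch: a filesystem routing VOP_GETPAGES through the
 * buffer pager.  The callbacks translate a byte offset to a logical
 * block and report that block's size; every myfs_* name and MYFS_BSIZE
 * are hypothetical.
 */
#if 0	/* illustrative only */
static daddr_t
myfs_gbp_getblkno(struct vnode *vp, vm_ooffset_t off)
{

	return (off / MYFS_BSIZE);
}

static int
myfs_gbp_getblksz(struct vnode *vp, daddr_t lbn)
{

	return (MYFS_BSIZE);
}

static int
myfs_getpages(struct vop_getpages_args *ap)
{

	return (vfs_bio_getpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_rbehind, ap->a_rahead, myfs_gbp_getblkno,
	    myfs_gbp_getblksz));
}
#endif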
 5314 
 5315 #include "opt_ddb.h"
 5316 #ifdef DDB
 5317 #include <ddb/ddb.h>
 5318 
 5319 /* DDB command to show buffer data */
 5320 DB_SHOW_COMMAND(buffer, db_show_buffer)
 5321 {
 5322         /* get args */
 5323         struct buf *bp = (struct buf *)addr;
 5324 #ifdef FULL_BUF_TRACKING
 5325         uint32_t i, j;
 5326 #endif
 5327 
 5328         if (!have_addr) {
 5329                 db_printf("usage: show buffer <addr>\n");
 5330                 return;
 5331         }
 5332 
 5333         db_printf("buf at %p\n", bp);
 5334         db_printf("b_flags = 0x%b, b_xflags=0x%b\n",
 5335             (u_int)bp->b_flags, PRINT_BUF_FLAGS,
 5336             (u_int)bp->b_xflags, PRINT_BUF_XFLAGS);
 5337         db_printf("b_vflags=0x%b, b_ioflags=0x%b\n",
 5338             (u_int)bp->b_vflags, PRINT_BUF_VFLAGS,
 5339             (u_int)bp->b_ioflags, PRINT_BIO_FLAGS);
 5340         db_printf(
 5341             "b_error = %d, b_bufsize = %ld, b_bcount = %ld, b_resid = %ld\n"
 5342             "b_bufobj = (%p), b_data = %p, b_blkno = %jd, b_lblkno = %jd,\n"
 5343             "b_vp = %p, b_dep = %p\n",
 5344             bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
 5345             bp->b_bufobj, bp->b_data, (intmax_t)bp->b_blkno,
 5346             (intmax_t)bp->b_lblkno, bp->b_vp, bp->b_dep.lh_first);
 5347         db_printf("b_kvabase = %p, b_kvasize = %d\n",
 5348             bp->b_kvabase, bp->b_kvasize);
 5349         if (bp->b_npages) {
 5350                 int i;
 5351                 db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
 5352                 for (i = 0; i < bp->b_npages; i++) {
 5353                         vm_page_t m;
 5354                         m = bp->b_pages[i];
 5355                         if (m != NULL)
 5356                                 db_printf("(%p, 0x%lx, 0x%lx)", m->object,
 5357                                     (u_long)m->pindex,
 5358                                     (u_long)VM_PAGE_TO_PHYS(m));
 5359                         else
 5360                                 db_printf("( ??? )");
 5361                         if ((i + 1) < bp->b_npages)
 5362                                 db_printf(",");
 5363                 }
 5364                 db_printf("\n");
 5365         }
 5366         BUF_LOCKPRINTINFO(bp);
 5367 #if defined(FULL_BUF_TRACKING)
 5368         db_printf("b_io_tracking: b_io_tcnt = %u\n", bp->b_io_tcnt);
 5369 
 5370         i = bp->b_io_tcnt % BUF_TRACKING_SIZE;
 5371         for (j = 1; j <= BUF_TRACKING_SIZE; j++) {
 5372                 if (bp->b_io_tracking[BUF_TRACKING_ENTRY(i - j)] == NULL)
 5373                         continue;
 5374                 db_printf(" %2u: %s\n", j,
 5375                     bp->b_io_tracking[BUF_TRACKING_ENTRY(i - j)]);
 5376         }
 5377 #elif defined(BUF_TRACKING)
 5378         db_printf("b_io_tracking: %s\n", bp->b_io_tracking);
 5379 #endif
 5380         db_printf(" ");
 5381 }
 5382 
 5383 DB_SHOW_COMMAND(bufqueues, bufqueues)
 5384 {
 5385         struct bufdomain *bd;
 5386         struct buf *bp;
 5387         long total;
 5388         int i, j, cnt;
 5389 
 5390         db_printf("bqempty: %d\n", bqempty.bq_len);
 5391 
 5392         for (i = 0; i < buf_domains; i++) {
 5393                 bd = &bdomain[i];
 5394                 db_printf("Buf domain %d\n", i);
 5395                 db_printf("\tfreebufs\t%d\n", bd->bd_freebuffers);
 5396                 db_printf("\tlofreebufs\t%d\n", bd->bd_lofreebuffers);
 5397                 db_printf("\thifreebufs\t%d\n", bd->bd_hifreebuffers);
 5398                 db_printf("\n");
 5399                 db_printf("\tbufspace\t%ld\n", bd->bd_bufspace);
 5400                 db_printf("\tmaxbufspace\t%ld\n", bd->bd_maxbufspace);
 5401                 db_printf("\thibufspace\t%ld\n", bd->bd_hibufspace);
 5402                 db_printf("\tlobufspace\t%ld\n", bd->bd_lobufspace);
 5403                 db_printf("\tbufspacethresh\t%ld\n", bd->bd_bufspacethresh);
 5404                 db_printf("\n");
 5405                 db_printf("\tnumdirtybuffers\t%d\n", bd->bd_numdirtybuffers);
 5406                 db_printf("\tlodirtybuffers\t%d\n", bd->bd_lodirtybuffers);
 5407                 db_printf("\thidirtybuffers\t%d\n", bd->bd_hidirtybuffers);
 5408                 db_printf("\tdirtybufthresh\t%d\n", bd->bd_dirtybufthresh);
 5409                 db_printf("\n");
 5410                 total = 0;
 5411                 TAILQ_FOREACH(bp, &bd->bd_cleanq->bq_queue, b_freelist)
 5412                         total += bp->b_bufsize;
 5413                 db_printf("\tcleanq count\t%d (%ld)\n",
 5414                     bd->bd_cleanq->bq_len, total);
 5415                 total = 0;
 5416                 TAILQ_FOREACH(bp, &bd->bd_dirtyq.bq_queue, b_freelist)
 5417                         total += bp->b_bufsize;
 5418                 db_printf("\tdirtyq count\t%d (%ld)\n",
 5419                     bd->bd_dirtyq.bq_len, total);
 5420                 db_printf("\twakeup\t\t%d\n", bd->bd_wanted);
 5421                 db_printf("\tlim\t\t%d\n", bd->bd_lim);
 5422                 db_printf("\tCPU ");
 5423                 for (j = 0; j <= mp_maxid; j++)
 5424                         db_printf("%d, ", bd->bd_subq[j].bq_len);
 5425                 db_printf("\n");
 5426                 cnt = 0;
 5427                 total = 0;
 5428                 for (j = 0; j < nbuf; j++)
 5429                         if (buf[j].b_domain == i && BUF_ISLOCKED(&buf[j])) {
 5430                                 cnt++;
 5431                                 total += buf[j].b_bufsize;
 5432                         }
 5433                 db_printf("\tLocked buffers: %d space %ld\n", cnt, total);
 5434                 cnt = 0;
 5435                 total = 0;
 5436                 for (j = 0; j < nbuf; j++)
 5437                         if (buf[j].b_domain == i) {
 5438                                 cnt++;
 5439                                 total += buf[j].b_bufsize;
 5440                         }
 5441                 db_printf("\tTotal buffers: %d space %ld\n", cnt, total);
 5442         }
 5443 }
 5444 
 5445 DB_SHOW_COMMAND(lockedbufs, lockedbufs)
 5446 {
 5447         struct buf *bp;
 5448         int i;
 5449 
 5450         for (i = 0; i < nbuf; i++) {
 5451                 bp = &buf[i];
 5452                 if (BUF_ISLOCKED(bp)) {
 5453                         db_show_buffer((uintptr_t)bp, 1, 0, NULL);
 5454                         db_printf("\n");
 5455                         if (db_pager_quit)
 5456                                 break;
 5457                 }
 5458         }
 5459 }
 5460 
 5461 DB_SHOW_COMMAND(vnodebufs, db_show_vnodebufs)
 5462 {
 5463         struct vnode *vp;
 5464         struct buf *bp;
 5465 
 5466         if (!have_addr) {
 5467                 db_printf("usage: show vnodebufs <addr>\n");
 5468                 return;
 5469         }
 5470         vp = (struct vnode *)addr;
 5471         db_printf("Clean buffers:\n");
 5472         TAILQ_FOREACH(bp, &vp->v_bufobj.bo_clean.bv_hd, b_bobufs) {
 5473                 db_show_buffer((uintptr_t)bp, 1, 0, NULL);
 5474                 db_printf("\n");
 5475         }
 5476         db_printf("Dirty buffers:\n");
 5477         TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs) {
 5478                 db_show_buffer((uintptr_t)bp, 1, 0, NULL);
 5479                 db_printf("\n");
 5480         }
 5481 }
 5482 
 5483 DB_COMMAND(countfreebufs, db_countfreebufs)
 5484 {
 5485         struct buf *bp;
 5486         int i, used = 0, nfree = 0;
 5487 
 5488         if (have_addr) {
 5489                 db_printf("usage: countfreebufs\n");
 5490                 return;
 5491         }
 5492 
 5493         for (i = 0; i < nbuf; i++) {
 5494                 bp = &buf[i];
 5495                 if (bp->b_qindex == QUEUE_EMPTY)
 5496                         nfree++;
 5497                 else
 5498                         used++;
 5499         }
 5500 
 5501         db_printf("Counted %d free, %d used (%d tot)\n", nfree, used,
 5502             nfree + used);
 5503         db_printf("numfreebuffers is %d\n", numfreebuffers);
 5504 }
 5505 #endif /* DDB */
