FreeBSD/Linux Kernel Cross Reference
sys/kern/vfs_bio.c
1 /*-
2 * Copyright (c) 2004 Poul-Henning Kamp
3 * Copyright (c) 1994,1997 John S. Dyson
4 * Copyright (c) 2013 The FreeBSD Foundation
5 * All rights reserved.
6 *
7 * Portions of this software were developed by Konstantin Belousov
8 * under sponsorship from the FreeBSD Foundation.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32 /*
33 * This file contains a new buffer I/O scheme implementing a coherent
34 * VM object and buffer cache scheme. Pains have been taken to make
35 * sure that the performance degradation associated with schemes such
36 * as this is not realized.
37 *
38 * Author: John S. Dyson
39 * Significant help during the development and debugging phases
40 * was provided by David Greenman, also of the FreeBSD core team.
41 *
42 * See buf(9) for more information.
43 */
44
45 #include <sys/cdefs.h>
46 __FBSDID("$FreeBSD$");
47
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/bio.h>
51 #include <sys/conf.h>
52 #include <sys/buf.h>
53 #include <sys/devicestat.h>
54 #include <sys/eventhandler.h>
55 #include <sys/fail.h>
56 #include <sys/limits.h>
57 #include <sys/lock.h>
58 #include <sys/malloc.h>
59 #include <sys/mount.h>
60 #include <sys/mutex.h>
61 #include <sys/kernel.h>
62 #include <sys/kthread.h>
63 #include <sys/proc.h>
64 #include <sys/resourcevar.h>
65 #include <sys/sysctl.h>
66 #include <sys/vmmeter.h>
67 #include <sys/vnode.h>
68 #include <geom/geom.h>
69 #include <vm/vm.h>
70 #include <vm/vm_param.h>
71 #include <vm/vm_kern.h>
72 #include <vm/vm_pageout.h>
73 #include <vm/vm_page.h>
74 #include <vm/vm_object.h>
75 #include <vm/vm_extern.h>
76 #include <vm/vm_map.h>
77 #include "opt_compat.h"
78 #include "opt_directio.h"
79 #include "opt_swap.h"
80
81 static MALLOC_DEFINE(M_BIOBUF, "biobuf", "BIO buffer");
82
83 struct bio_ops bioops; /* I/O operation notification */
84
85 struct buf_ops buf_ops_bio = {
86 .bop_name = "buf_ops_bio",
87 .bop_write = bufwrite,
88 .bop_strategy = bufstrategy,
89 .bop_sync = bufsync,
90 .bop_bdflush = bufbdflush,
91 };
92
93 /*
94 * XXX buf is global because kern_shutdown.c and ffs_checkoverlap have
95 * carnal knowledge of buffers. This knowledge should be moved to vfs_bio.c.
96 */
97 struct buf *buf; /* buffer header pool */
98 caddr_t unmapped_buf;
99
100 static struct proc *bufdaemonproc;
101
102 static int inmem(struct vnode *vp, daddr_t blkno);
103 static void vm_hold_free_pages(struct buf *bp, int newbsize);
104 static void vm_hold_load_pages(struct buf *bp, vm_offset_t from,
105 vm_offset_t to);
106 static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m);
107 static void vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off,
108 vm_page_t m);
109 static void vfs_drain_busy_pages(struct buf *bp);
110 static void vfs_clean_pages_dirty_buf(struct buf *bp);
111 static void vfs_setdirty_locked_object(struct buf *bp);
112 static void vfs_vmio_release(struct buf *bp);
113 static int vfs_bio_clcheck(struct vnode *vp, int size,
114 daddr_t lblkno, daddr_t blkno);
115 static int buf_do_flush(struct vnode *vp);
116 static int flushbufqueues(struct vnode *, int, int);
117 static void buf_daemon(void);
118 static void bremfreel(struct buf *bp);
119 #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
120 defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
121 static int sysctl_bufspace(SYSCTL_HANDLER_ARGS);
122 #endif
123
124 int vmiodirenable = TRUE;
125 SYSCTL_INT(_vfs, OID_AUTO, vmiodirenable, CTLFLAG_RW, &vmiodirenable, 0,
126 "Use the VM system for directory writes");
127 long runningbufspace;
128 SYSCTL_LONG(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD, &runningbufspace, 0,
129 "Amount of presently outstanding async buffer io");
130 static long bufspace;
131 #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
132 defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
133 SYSCTL_PROC(_vfs, OID_AUTO, bufspace, CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RD,
134 &bufspace, 0, sysctl_bufspace, "L", "Virtual memory used for buffers");
135 #else
136 SYSCTL_LONG(_vfs, OID_AUTO, bufspace, CTLFLAG_RD, &bufspace, 0,
137 "Virtual memory used for buffers");
138 #endif
139 static long unmapped_bufspace;
140 SYSCTL_LONG(_vfs, OID_AUTO, unmapped_bufspace, CTLFLAG_RD,
141 &unmapped_bufspace, 0,
142 "Amount of unmapped buffers, inclusive in the bufspace");
143 static long maxbufspace;
144 SYSCTL_LONG(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RD, &maxbufspace, 0,
145 "Maximum allowed value of bufspace (including buf_daemon)");
146 static long bufmallocspace;
147 SYSCTL_LONG(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD, &bufmallocspace, 0,
148 "Amount of malloced memory for buffers");
149 static long maxbufmallocspace;
150 SYSCTL_LONG(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW, &maxbufmallocspace, 0,
151 "Maximum amount of malloced memory for buffers");
152 static long lobufspace;
153 SYSCTL_LONG(_vfs, OID_AUTO, lobufspace, CTLFLAG_RD, &lobufspace, 0,
154 "Minimum amount of buffers we want to have");
155 long hibufspace;
156 SYSCTL_LONG(_vfs, OID_AUTO, hibufspace, CTLFLAG_RD, &hibufspace, 0,
157 "Maximum allowed value of bufspace (excluding buf_daemon)");
158 static int bufreusecnt;
159 SYSCTL_INT(_vfs, OID_AUTO, bufreusecnt, CTLFLAG_RW, &bufreusecnt, 0,
160 "Number of times we have reused a buffer");
161 static int buffreekvacnt;
162 SYSCTL_INT(_vfs, OID_AUTO, buffreekvacnt, CTLFLAG_RW, &buffreekvacnt, 0,
163 "Number of times we have freed the KVA space from some buffer");
164 static int bufdefragcnt;
165 SYSCTL_INT(_vfs, OID_AUTO, bufdefragcnt, CTLFLAG_RW, &bufdefragcnt, 0,
166 "Number of times we have had to repeat buffer allocation to defragment");
167 static long lorunningspace;
168 SYSCTL_LONG(_vfs, OID_AUTO, lorunningspace, CTLFLAG_RW, &lorunningspace, 0,
169 "Minimum preferred space used for in-progress I/O");
170 static long hirunningspace;
171 SYSCTL_LONG(_vfs, OID_AUTO, hirunningspace, CTLFLAG_RW, &hirunningspace, 0,
172 "Maximum amount of space to use for in-progress I/O");
173 int dirtybufferflushes;
174 SYSCTL_INT(_vfs, OID_AUTO, dirtybufferflushes, CTLFLAG_RW, &dirtybufferflushes,
175 0, "Number of bdwrite to bawrite conversions to limit dirty buffers");
176 int bdwriteskip;
177 SYSCTL_INT(_vfs, OID_AUTO, bdwriteskip, CTLFLAG_RW, &bdwriteskip,
178 0, "Number of buffers supplied to bdwrite with snapshot deadlock risk");
179 int altbufferflushes;
180 SYSCTL_INT(_vfs, OID_AUTO, altbufferflushes, CTLFLAG_RW, &altbufferflushes,
181 0, "Number of fsync flushes to limit dirty buffers");
182 static int recursiveflushes;
183 SYSCTL_INT(_vfs, OID_AUTO, recursiveflushes, CTLFLAG_RW, &recursiveflushes,
184 0, "Number of flushes skipped due to being recursive");
185 static int numdirtybuffers;
186 SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD, &numdirtybuffers, 0,
187 "Number of buffers that are dirty (has unwritten changes) at the moment");
188 static int lodirtybuffers;
189 SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW, &lodirtybuffers, 0,
190 "How many buffers we want to have free before bufdaemon can sleep");
191 static int hidirtybuffers;
192 SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW, &hidirtybuffers, 0,
193 "When the number of dirty buffers is considered severe");
194 int dirtybufthresh;
195 SYSCTL_INT(_vfs, OID_AUTO, dirtybufthresh, CTLFLAG_RW, &dirtybufthresh,
196 0, "Number of bdwrite to bawrite conversions to clear dirty buffers");
197 static int numfreebuffers;
198 SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD, &numfreebuffers, 0,
199 "Number of free buffers");
200 static int lofreebuffers;
201 SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW, &lofreebuffers, 0,
202 "XXX Unused");
203 static int hifreebuffers;
204 SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW, &hifreebuffers, 0,
205 "XXX Complicatedly unused");
206 static int getnewbufcalls;
207 SYSCTL_INT(_vfs, OID_AUTO, getnewbufcalls, CTLFLAG_RW, &getnewbufcalls, 0,
208 "Number of calls to getnewbuf");
209 static int getnewbufrestarts;
210 SYSCTL_INT(_vfs, OID_AUTO, getnewbufrestarts, CTLFLAG_RW, &getnewbufrestarts, 0,
211 "Number of times getnewbuf has had to restart a buffer aquisition");
212 static int mappingrestarts;
213 SYSCTL_INT(_vfs, OID_AUTO, mappingrestarts, CTLFLAG_RW, &mappingrestarts, 0,
214 "Number of times getblk has had to restart a buffer mapping for "
215 "unmapped buffer");
216 static int flushbufqtarget = 100;
217 SYSCTL_INT(_vfs, OID_AUTO, flushbufqtarget, CTLFLAG_RW, &flushbufqtarget, 0,
218 "Amount of work to do in flushbufqueues when helping bufdaemon");
219 static long notbufdflashes;
220 SYSCTL_LONG(_vfs, OID_AUTO, notbufdflashes, CTLFLAG_RD, &notbufdflashes, 0,
221 "Number of dirty buffer flushes done by the bufdaemon helpers");
222 static long barrierwrites;
223 SYSCTL_LONG(_vfs, OID_AUTO, barrierwrites, CTLFLAG_RW, &barrierwrites, 0,
224 "Number of barrier writes");
225 SYSCTL_INT(_vfs, OID_AUTO, unmapped_buf_allowed, CTLFLAG_RD,
226 &unmapped_buf_allowed, 0,
227 "Permit the use of the unmapped i/o");
228
229 /*
230 * Wakeup point for bufdaemon, as well as indicator of whether it is already
231 * active. Set to 1 when the bufdaemon is already "on" the queue, 0 when it
232 * is idling.
233 */
234 static int bd_request;
235
236 /*
237 * Request for the buf daemon to write more buffers than is indicated by
238 * lodirtybuffers. This may be necessary to push out excess dependencies or
239 * defragment the address space where a simple count of the number of dirty
240 * buffers is insufficient to characterize the demand for flushing them.
241 */
242 static int bd_speedupreq;
243
244 /*
245 * This lock synchronizes access to bd_request.
246 */
247 static struct mtx bdlock;
248
249 /*
250 * bogus page -- for I/O to/from partially complete buffers
251 * This is a temporary solution to the problem, but it is not
252 * really that bad. It would be better to split the buffer
253 * for input in the case of buffers partially already in memory,
254 * but the code is intricate enough already.
255 */
256 vm_page_t bogus_page;
257
258 /*
259 * Synchronization (sleep/wakeup) variable for active buffer space requests.
260 * Set when wait starts, cleared prior to wakeup().
261 * Used in runningbufwakeup() and waitrunningbufspace().
262 */
263 static int runningbufreq;
264
265 /*
266 * This lock protects runningbufreq and synchronizes runningbufwakeup() and
267 * waitrunningbufspace().
268 */
269 static struct mtx rbreqlock;
270
271 /*
272 * Synchronization (sleep/wakeup) variable for buffer requests.
273 * Can contain the VFS_BIO_NEED flags defined below; flags are set and
274 * cleared with AND/OR operations.
275 * Used in numdirtywakeup(), bufspacewakeup(), bufcountwakeup(), bwillwrite(),
276 * getnewbuf(), and getblk().
277 */
278 static int needsbuffer;
279
280 /*
281 * Lock that protects needsbuffer and the sleeps/wakeups surrounding it.
282 */
283 static struct mtx nblock;
284
285 /*
286 * Definitions for the buffer free lists.
287 */
288 #define BUFFER_QUEUES 6 /* number of free buffer queues */
289
290 #define QUEUE_NONE 0 /* on no queue */
291 #define QUEUE_CLEAN 1 /* non-B_DELWRI buffers */
292 #define QUEUE_DIRTY 2 /* B_DELWRI buffers */
293 #define QUEUE_DIRTY_GIANT 3 /* B_DELWRI buffers that need giant */
294 #define QUEUE_EMPTYKVA 4 /* empty buffer headers w/KVA assignment */
295 #define QUEUE_EMPTY 5 /* empty buffer headers */
296 #define QUEUE_SENTINEL 1024 /* not a queue index, but a sentinel marker */
297
298 /* Queues for free buffers with various properties */
299 static TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES] = { { 0 } };
300 #ifdef INVARIANTS
301 static int bq_len[BUFFER_QUEUES];
302 #endif
303
304 /* Lock for the bufqueues */
305 static struct mtx bqlock;
306
307 /*
308 * Single global constant for BUF_WMESG, to avoid getting multiple references.
309 * buf_wmesg is referenced from macros.
310 */
311 const char *buf_wmesg = BUF_WMESG;
312
313 #define VFS_BIO_NEED_ANY 0x01 /* any freeable buffer */
314 #define VFS_BIO_NEED_DIRTYFLUSH 0x02 /* waiting for dirty buffer flush */
315 #define VFS_BIO_NEED_FREE 0x04 /* wait for free bufs, hi hysteresis */
316 #define VFS_BIO_NEED_BUFSPACE 0x08 /* wait for buf space, lo hysteresis */
317
318 #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
319 defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
320 static int
321 sysctl_bufspace(SYSCTL_HANDLER_ARGS)
322 {
323 long lvalue;
324 int ivalue;
325
326 if (sizeof(int) == sizeof(long) || req->oldlen >= sizeof(long))
327 return (sysctl_handle_long(oidp, arg1, arg2, req));
328 lvalue = *(long *)arg1;
329 if (lvalue > INT_MAX)
330 /* On overflow, still write out a long to trigger ENOMEM. */
331 return (sysctl_handle_long(oidp, &lvalue, 0, req));
332 ivalue = lvalue;
333 return (sysctl_handle_int(oidp, &ivalue, 0, req));
334 }
335 #endif
336
337 #ifdef DIRECTIO
338 extern void ffs_rawread_setup(void);
339 #endif /* DIRECTIO */
340 /*
341 * numdirtywakeup:
342 *
343 * If someone is blocked due to there being too many dirty buffers,
344 * and numdirtybuffers is now reasonable, wake them up.
345 */
346
347 static __inline void
348 numdirtywakeup(int level)
349 {
350
351 if (numdirtybuffers <= level) {
352 mtx_lock(&nblock);
353 if (needsbuffer & VFS_BIO_NEED_DIRTYFLUSH) {
354 needsbuffer &= ~VFS_BIO_NEED_DIRTYFLUSH;
355 wakeup(&needsbuffer);
356 }
357 mtx_unlock(&nblock);
358 }
359 }
360
361 /*
362 * bufspacewakeup:
363 *
364 * Called when buffer space is potentially available for recovery.
365 * getnewbuf() will block on this flag when it is unable to free
366 * sufficient buffer space. Buffer space becomes recoverable when
367 * bp's get placed back in the queues.
368 */
369
370 static __inline void
371 bufspacewakeup(void)
372 {
373
374 /*
375 * If someone is waiting for BUF space, wake them up. Even
376 * though we haven't freed the kva space yet, the waiting
377 * process will be able to now.
378 */
379 mtx_lock(&nblock);
380 if (needsbuffer & VFS_BIO_NEED_BUFSPACE) {
381 needsbuffer &= ~VFS_BIO_NEED_BUFSPACE;
382 wakeup(&needsbuffer);
383 }
384 mtx_unlock(&nblock);
385 }
386
387 /*
388 * runningbufwakeup() - in-progress I/O accounting.
389 *
390 */
391 void
392 runningbufwakeup(struct buf *bp)
393 {
394
395 if (bp->b_runningbufspace) {
396 atomic_subtract_long(&runningbufspace, bp->b_runningbufspace);
397 bp->b_runningbufspace = 0;
398 mtx_lock(&rbreqlock);
399 if (runningbufreq && runningbufspace <= lorunningspace) {
400 runningbufreq = 0;
401 wakeup(&runningbufreq);
402 }
403 mtx_unlock(&rbreqlock);
404 }
405 }
406
407 /*
408 * bufcountwakeup:
409 *
410 * Called when a buffer has been added to one of the free queues to
411 * account for the buffer and to wakeup anyone waiting for free buffers.
412 * This typically occurs when large amounts of metadata are being handled
413 * by the buffer cache ( else buffer space runs out first, usually ).
414 */
415
416 static __inline void
417 bufcountwakeup(struct buf *bp)
418 {
419 int old;
420
421 KASSERT((bp->b_vflags & BV_INFREECNT) == 0,
422 ("buf %p already counted as free", bp));
423 if (bp->b_bufobj != NULL)
424 mtx_assert(BO_MTX(bp->b_bufobj), MA_OWNED);
425 bp->b_vflags |= BV_INFREECNT;
426 old = atomic_fetchadd_int(&numfreebuffers, 1);
427 KASSERT(old >= 0 && old < nbuf,
428 ("numfreebuffers climbed to %d", old + 1));
429 mtx_lock(&nblock);
430 if (needsbuffer) {
431 needsbuffer &= ~VFS_BIO_NEED_ANY;
432 if (numfreebuffers >= hifreebuffers)
433 needsbuffer &= ~VFS_BIO_NEED_FREE;
434 wakeup(&needsbuffer);
435 }
436 mtx_unlock(&nblock);
437 }
438
439 /*
440 * waitrunningbufspace()
441 *
442 * runningbufspace is a measure of the amount of I/O currently
443 * running. This routine is used in async-write situations to
444 * prevent creating huge backups of pending writes to a device.
445 * Only asynchronous writes are governed by this function.
446 *
447 * Reads will adjust runningbufspace, but will not block based on it.
448 * The read load has a side effect of reducing the allowed write load.
449 *
450 * This does NOT turn an async write into a sync write. It waits
451 * for earlier writes to complete and generally returns before the
452 * caller's write has reached the device.
453 */
454 void
455 waitrunningbufspace(void)
456 {
457
458 mtx_lock(&rbreqlock);
459 while (runningbufspace > hirunningspace) {
460 ++runningbufreq;
461 msleep(&runningbufreq, &rbreqlock, PVM, "wdrain", 0);
462 }
463 mtx_unlock(&rbreqlock);
464 }
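/*
 * Illustrative pairing (a sketch, not a new interface): an async write
 * path charges the buffer before issuing it and then throttles itself,
 * while the completion path calls runningbufwakeup():
 *
 *	bp->b_runningbufspace = bp->b_bufsize;
 *	atomic_add_long(&runningbufspace, bp->b_runningbufspace);
 *	bstrategy(bp);
 *	waitrunningbufspace();
 *
 * The waiter is only woken once runningbufspace has drained below
 * lorunningspace, giving hysteresis between the two watermarks.
 */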
465
466
467 /*
468 * vfs_buf_test_cache:
469 *
470 * Called when a buffer is extended. This function clears the B_CACHE
471 * bit if the newly extended portion of the buffer does not contain
472 * valid data.
473 */
474 static __inline
475 void
476 vfs_buf_test_cache(struct buf *bp,
477 vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
478 vm_page_t m)
479 {
480
481 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
482 if (bp->b_flags & B_CACHE) {
483 int base = (foff + off) & PAGE_MASK;
484 if (vm_page_is_valid(m, base, size) == 0)
485 bp->b_flags &= ~B_CACHE;
486 }
487 }
488
489 /* Wake up the buffer daemon if necessary */
490 static __inline
491 void
492 bd_wakeup(int dirtybuflevel)
493 {
494
495 mtx_lock(&bdlock);
496 if (bd_request == 0 && numdirtybuffers >= dirtybuflevel) {
497 bd_request = 1;
498 wakeup(&bd_request);
499 }
500 mtx_unlock(&bdlock);
501 }
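/*
 * Example from later in this file: bdwrite() kicks the daemon once the
 * dirty count passes the midpoint between the recovery and stall marks:
 *
 *	bd_wakeup((lodirtybuffers + hidirtybuffers) / 2);
 */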
502
503 /*
504 * bd_speedup - speedup the buffer cache flushing code
505 */
506
507 void
508 bd_speedup(void)
509 {
510 int needwake;
511
512 mtx_lock(&bdlock);
513 needwake = 0;
514 if (bd_speedupreq == 0 || bd_request == 0)
515 needwake = 1;
516 bd_speedupreq = 1;
517 bd_request = 1;
518 if (needwake)
519 wakeup(&bd_request);
520 mtx_unlock(&bdlock);
521 }
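/*
 * Note on the needwake test above: a wakeup is needed only when at
 * least one of bd_speedupreq and bd_request makes a 0 -> 1 transition.
 * If both were already set, the daemon has been signalled and will
 * observe bd_speedupreq on its next pass.
 */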
522
523 #ifndef NSWBUF_MIN
524 #define NSWBUF_MIN 16
525 #endif
526
527 #ifdef __i386__
528 #define TRANSIENT_DENOM 5
529 #else
530 #define TRANSIENT_DENOM 10
531 #endif
532
533 /*
534 * Calculate buffer cache scaling values and reserve space for buffer
535 * headers. This is called during low level kernel initialization and
536 * may be called more than once. We CANNOT write to the memory area
537 * being reserved at this time.
538 */
539 caddr_t
540 kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est)
541 {
542 int tuned_nbuf;
543 long maxbuf, maxbuf_sz, buf_sz, biotmap_sz;
544
545 /*
546 * physmem_est is in pages. Convert it to kilobytes (assumes
547 * PAGE_SIZE is >= 1K)
548 */
549 physmem_est = physmem_est * (PAGE_SIZE / 1024);
550
551 /*
552 * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
553 * For the first 64MB of ram nominally allocate sufficient buffers to
554 * cover 1/4 of our ram. Beyond the first 64MB allocate additional
555 * buffers to cover 1/10 of our ram over 64MB. When auto-sizing
556 * the buffer cache we limit the eventual kva reservation to
557 * maxbcache bytes.
558 *
559 * factor represents the 1/4 x ram conversion.
560 */
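/*
 * Worked example (illustrative only; assumes BKVASIZE is 16384):
 * factor = 4 * 16384 / 1024 = 64. With 128MB of RAM, physmem_est is
 * 131072 (KB), so
 *
 *	nbuf = 50
 *	     + min((131072 - 4096) / 64, 65536 / 64)		(+1024)
 *	     + min((131072 - 65536) * 2 / 320, 33554432 / 320)	(+409)
 *	     = 1483
 *
 * i.e. roughly 23MB of buffer KVA before any maxbcache clamping.
 */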
561 if (nbuf == 0) {
562 int factor = 4 * BKVASIZE / 1024;
563
564 nbuf = 50;
565 if (physmem_est > 4096)
566 nbuf += min((physmem_est - 4096) / factor,
567 65536 / factor);
568 if (physmem_est > 65536)
569 nbuf += min((physmem_est - 65536) * 2 / (factor * 5),
570 32 * 1024 * 1024 / (factor * 5));
571
572 if (maxbcache && nbuf > maxbcache / BKVASIZE)
573 nbuf = maxbcache / BKVASIZE;
574 tuned_nbuf = 1;
575 } else
576 tuned_nbuf = 0;
577
578 /* XXX Avoid unsigned long overflows later on with maxbufspace. */
579 maxbuf = (LONG_MAX / 3) / BKVASIZE;
580 if (nbuf > maxbuf) {
581 if (!tuned_nbuf)
582 printf("Warning: nbufs lowered from %d to %ld\n", nbuf,
583 maxbuf);
584 nbuf = maxbuf;
585 }
586
587 /*
588 * The ideal allocation size for the transient bio submap is 10%
589 * of the maximal space buffer map. This roughly corresponds
590 * to the amount of the buffer mapped for typical UFS load.
591 *
592 * Clip the buffer map to reserve space for the transient
593 * BIOs, if its extent is bigger than 90% (80% on i386) of the
594 * maximum buffer map extent on the platform.
595 *
596 * Falling back to maxbuf when maxbcache is unset allows us
597 * to leave the buffer KVA untrimmed on architectures with
598 * ample KVA space.
599 */
600 if (bio_transient_maxcnt == 0) {
601 maxbuf_sz = maxbcache != 0 ? maxbcache : maxbuf * BKVASIZE;
602 buf_sz = (long)nbuf * BKVASIZE;
603 if (buf_sz < maxbuf_sz / TRANSIENT_DENOM *
604 (TRANSIENT_DENOM - 1)) {
605 /*
606 * There is more KVA than memory. Do not
607 * adjust buffer map size, and assign the rest
608 * of maxbuf to transient map.
609 */
610 biotmap_sz = maxbuf_sz - buf_sz;
611 } else {
612 /*
613 * Buffer map spans all KVA we could afford on
614 * this platform. Give 10% (20% on i386) of
615 * the buffer map to the transient bio map.
616 */
617 biotmap_sz = buf_sz / TRANSIENT_DENOM;
618 buf_sz -= biotmap_sz;
619 }
620 if (biotmap_sz / INT_MAX > MAXPHYS)
621 bio_transient_maxcnt = INT_MAX;
622 else
623 bio_transient_maxcnt = biotmap_sz / MAXPHYS;
624 /*
625 * Artificially limit to 1024 simultaneous in-flight I/Os
626 * using the transient mapping.
627 */
628 if (bio_transient_maxcnt > 1024)
629 bio_transient_maxcnt = 1024;
630 if (tuned_nbuf)
631 nbuf = buf_sz / BKVASIZE;
632 }
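/*
 * Worked example (illustrative; assumes MAXPHYS is 128KB): on a
 * non-i386 machine whose buffer map spans all affordable KVA, say
 * maxbuf_sz = buf_sz = 200MB, the else branch above gives
 * biotmap_sz = 200MB / 10 = 20MB, buf_sz shrinks to 180MB, and
 * bio_transient_maxcnt = 20MB / 128KB = 160 in-flight unmapped BIOs,
 * well under the 1024 cap.
 */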
633
634 /*
635 * swbufs are used as temporary holders for I/O, such as paging I/O.
636 * We have no fewer than 16 and no more than 256.
637 */
638 nswbuf = min(nbuf / 4, 256);
639 TUNABLE_INT_FETCH("kern.nswbuf", &nswbuf);
640 if (nswbuf < NSWBUF_MIN)
641 nswbuf = NSWBUF_MIN;
642 #ifdef DIRECTIO
643 ffs_rawread_setup();
644 #endif
645
646 /*
647 * Reserve space for the buffer cache buffers
648 */
649 swbuf = (void *)v;
650 v = (caddr_t)(swbuf + nswbuf);
651 buf = (void *)v;
652 v = (caddr_t)(buf + nbuf);
653
654 return(v);
655 }
656
657 /* Initialize the buffer subsystem. Called before use of any buffers. */
658 void
659 bufinit(void)
660 {
661 struct buf *bp;
662 int i;
663
664 mtx_init(&bqlock, "buf queue lock", NULL, MTX_DEF);
665 mtx_init(&rbreqlock, "runningbufspace lock", NULL, MTX_DEF);
666 mtx_init(&nblock, "needsbuffer lock", NULL, MTX_DEF);
667 mtx_init(&bdlock, "buffer daemon lock", NULL, MTX_DEF);
668
669 /* next, make a null set of free lists */
670 for (i = 0; i < BUFFER_QUEUES; i++)
671 TAILQ_INIT(&bufqueues[i]);
672
673 /* finally, initialize each buffer header and stick on empty q */
674 for (i = 0; i < nbuf; i++) {
675 bp = &buf[i];
676 bzero(bp, sizeof *bp);
677 bp->b_flags = B_INVAL; /* we're just an empty header */
678 bp->b_rcred = NOCRED;
679 bp->b_wcred = NOCRED;
680 bp->b_qindex = QUEUE_EMPTY;
681 bp->b_vflags = BV_INFREECNT; /* buf is counted as free */
682 bp->b_xflags = 0;
683 LIST_INIT(&bp->b_dep);
684 BUF_LOCKINIT(bp);
685 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
686 #ifdef INVARIANTS
687 bq_len[QUEUE_EMPTY]++;
688 #endif
689 }
690
691 /*
692 * maxbufspace is the absolute maximum amount of buffer space we are
693 * allowed to reserve in KVM and in real terms. The absolute maximum
694 * is nominally used by buf_daemon. hibufspace is the nominal maximum
695 * used by most other processes. The differential is required to
696 * ensure that buf_daemon is able to run when other processes might
697 * be blocked waiting for buffer space.
698 *
699 * maxbufspace is based on BKVASIZE. Allocating buffers larger than
700 * this may result in KVM fragmentation which is not handled optimally
701 * by the system.
702 */
703 maxbufspace = (long)nbuf * BKVASIZE;
704 hibufspace = lmax(3 * maxbufspace / 4, maxbufspace - MAXBSIZE * 10);
705 lobufspace = hibufspace - MAXBSIZE;
706
707 /*
708 * Note: The 16 MiB upper limit for hirunningspace was chosen
709 * arbitrarily and may need further tuning. It corresponds to
710 * 128 outstanding write IO requests (if IO size is 128 KiB),
711 * which fits with many RAID controllers' tagged queuing limits.
712 * The lower 1 MiB limit is the historical upper limit for
713 * hirunningspace.
714 */
715 hirunningspace = lmax(lmin(roundup(hibufspace / 64, MAXBSIZE),
716 16 * 1024 * 1024), 1024 * 1024);
717 lorunningspace = roundup((hirunningspace * 2) / 3, MAXBSIZE);
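/*
 * Continuing the illustrative 128MB example (nbuf = 1483, BKVASIZE =
 * 16384, MAXBSIZE = 65536): maxbufspace is about 23.2MB and hibufspace
 * about 22.5MB; hibufspace / 64 rounds up to 384KB, which the 1MB
 * floor then raises to hirunningspace = 1MB, and lorunningspace
 * becomes roundup(2/3 of that, MAXBSIZE) = 704KB.
 */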
718
719 /*
720 * Limit the amount of malloc memory since it is wired permanently into
721 * the kernel space. Even though this is accounted for in the buffer
722 * allocation, we don't want the malloced region to grow uncontrolled.
723 * The malloc scheme improves memory utilization significantly on average
724 * (small) directories.
725 */
726 maxbufmallocspace = hibufspace / 20;
727
728 /*
729 * Reduce the chance of a deadlock occurring by limiting the number
730 * of delayed-write dirty buffers we allow to stack up.
731 */
732 hidirtybuffers = nbuf / 4 + 20;
733 dirtybufthresh = hidirtybuffers * 9 / 10;
734 numdirtybuffers = 0;
735 /*
736 * To support extreme low-memory systems, make sure hidirtybuffers cannot
737 * eat up all available buffer space. This occurs when our minimum cannot
738 * be met. We try to size hidirtybuffers to 3/4 our buffer space assuming
739 * BKVASIZE'd buffers.
740 */
741 while ((long)hidirtybuffers * BKVASIZE > 3 * hibufspace / 4) {
742 hidirtybuffers >>= 1;
743 }
744 lodirtybuffers = hidirtybuffers / 2;
745
746 /*
747 * Try to keep the number of free buffers in the specified range,
748 * and give special processes (e.g., buf_daemon) access to an
749 * emergency reserve.
750 */
751 lofreebuffers = nbuf / 18 + 5;
752 hifreebuffers = 2 * lofreebuffers;
753 numfreebuffers = nbuf;
754
755 bogus_page = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ |
756 VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
757 unmapped_buf = (caddr_t)kmem_alloc_nofault(kernel_map, MAXPHYS);
758 }
759
760 #ifdef INVARIANTS
761 static inline void
762 vfs_buf_check_mapped(struct buf *bp)
763 {
764
765 KASSERT((bp->b_flags & B_UNMAPPED) == 0,
766 ("mapped buf %p %x", bp, bp->b_flags));
767 KASSERT(bp->b_kvabase != unmapped_buf,
768 ("mapped buf: b_kvabase was not updated %p", bp));
769 KASSERT(bp->b_data != unmapped_buf,
770 ("mapped buf: b_data was not updated %p", bp));
771 }
772
773 static inline void
774 vfs_buf_check_unmapped(struct buf *bp)
775 {
776
777 KASSERT((bp->b_flags & B_UNMAPPED) == B_UNMAPPED,
778 ("unmapped buf %p %x", bp, bp->b_flags));
779 KASSERT(bp->b_kvabase == unmapped_buf,
780 ("unmapped buf: corrupted b_kvabase %p", bp));
781 KASSERT(bp->b_data == unmapped_buf,
782 ("unmapped buf: corrupted b_data %p", bp));
783 }
784
785 #define BUF_CHECK_MAPPED(bp) vfs_buf_check_mapped(bp)
786 #define BUF_CHECK_UNMAPPED(bp) vfs_buf_check_unmapped(bp)
787 #else
788 #define BUF_CHECK_MAPPED(bp) do {} while (0)
789 #define BUF_CHECK_UNMAPPED(bp) do {} while (0)
790 #endif
791
792 static void
793 bpmap_qenter(struct buf *bp)
794 {
795
796 BUF_CHECK_MAPPED(bp);
797
798 /*
799 * bp->b_data is relative to bp->b_offset, but
800 * bp->b_offset may be offset into the first page.
801 */
802 bp->b_data = (caddr_t)trunc_page((vm_offset_t)bp->b_data);
803 pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages, bp->b_npages);
804 bp->b_data = (caddr_t)((vm_offset_t)bp->b_data |
805 (vm_offset_t)(bp->b_offset & PAGE_MASK));
806 }
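/*
 * For illustration (4KB pages): if b_offset is 0x12345, the pages are
 * entered at trunc_page(b_data) and the low bits are then OR'ed back,
 * so b_data ends up pointing 0x345 bytes into the first mapped page.
 */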
807
808 /*
809 * bfreekva() - free the kva allocation for a buffer.
810 *
811 * Since this call frees up buffer space, we call bufspacewakeup().
812 */
813 static void
814 bfreekva(struct buf *bp)
815 {
816
817 if (bp->b_kvasize == 0)
818 return;
819
820 atomic_add_int(&buffreekvacnt, 1);
821 atomic_subtract_long(&bufspace, bp->b_kvasize);
822 if ((bp->b_flags & B_UNMAPPED) == 0) {
823 BUF_CHECK_MAPPED(bp);
824 vm_map_remove(buffer_map, (vm_offset_t)bp->b_kvabase,
825 (vm_offset_t)bp->b_kvabase + bp->b_kvasize);
826 } else {
827 BUF_CHECK_UNMAPPED(bp);
828 if ((bp->b_flags & B_KVAALLOC) != 0) {
829 vm_map_remove(buffer_map, (vm_offset_t)bp->b_kvaalloc,
830 (vm_offset_t)bp->b_kvaalloc + bp->b_kvasize);
831 }
832 atomic_subtract_long(&unmapped_bufspace, bp->b_kvasize);
833 bp->b_flags &= ~(B_UNMAPPED | B_KVAALLOC);
834 }
835 bp->b_kvasize = 0;
836 bufspacewakeup();
837 }
838
839 /*
840 * bremfree:
841 *
842 * Mark the buffer for removal from the appropriate free list in brelse.
843 *
844 */
845 void
846 bremfree(struct buf *bp)
847 {
848 int old;
849
850 CTR3(KTR_BUF, "bremfree(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
851 KASSERT((bp->b_flags & B_REMFREE) == 0,
852 ("bremfree: buffer %p already marked for delayed removal.", bp));
853 KASSERT(bp->b_qindex != QUEUE_NONE,
854 ("bremfree: buffer %p not on a queue.", bp));
855 BUF_ASSERT_HELD(bp);
856
857 bp->b_flags |= B_REMFREE;
858 /* Fixup numfreebuffers count. */
859 if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0) {
860 KASSERT((bp->b_vflags & BV_INFREECNT) != 0,
861 ("buf %p not counted in numfreebuffers", bp));
862 if (bp->b_bufobj != NULL)
863 mtx_assert(BO_MTX(bp->b_bufobj), MA_OWNED);
864 bp->b_vflags &= ~BV_INFREECNT;
865 old = atomic_fetchadd_int(&numfreebuffers, -1);
866 KASSERT(old > 0, ("numfreebuffers dropped to %d", old - 1));
867 }
868 }
869
870 /*
871 * bremfreef:
872 *
873 * Force an immediate removal from a free list. Used only by NFS when
874 * it abuses the b_freelist pointer.
875 */
876 void
877 bremfreef(struct buf *bp)
878 {
879 mtx_lock(&bqlock);
880 bremfreel(bp);
881 mtx_unlock(&bqlock);
882 }
883
884 /*
885 * bremfreel:
886 *
887 * Removes a buffer from the free list, must be called with the
888 * bqlock held.
889 */
890 static void
891 bremfreel(struct buf *bp)
892 {
893 int old;
894
895 CTR3(KTR_BUF, "bremfreel(%p) vp %p flags %X",
896 bp, bp->b_vp, bp->b_flags);
897 KASSERT(bp->b_qindex != QUEUE_NONE,
898 ("bremfreel: buffer %p not on a queue.", bp));
899 BUF_ASSERT_HELD(bp);
900 mtx_assert(&bqlock, MA_OWNED);
901
902 TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
903 #ifdef INVARIANTS
904 KASSERT(bq_len[bp->b_qindex] >= 1, ("queue %d underflow",
905 bp->b_qindex));
906 bq_len[bp->b_qindex]--;
907 #endif
908 bp->b_qindex = QUEUE_NONE;
909 /*
910 * If this was a delayed bremfree() we only need to remove the buffer
911 * from the queue and return; the stats are already done.
912 */
913 if (bp->b_flags & B_REMFREE) {
914 bp->b_flags &= ~B_REMFREE;
915 return;
916 }
917 /*
918 * Fixup numfreebuffers count. If the buffer is invalid or not
919 * delayed-write, the buffer was free and we must decrement
920 * numfreebuffers.
921 */
922 if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0) {
923 KASSERT((bp->b_vflags & BV_INFREECNT) != 0,
924 ("buf %p not counted in numfreebuffers", bp));
925 if (bp->b_bufobj != NULL)
926 mtx_assert(BO_MTX(bp->b_bufobj), MA_OWNED);
927 bp->b_vflags &= ~BV_INFREECNT;
928 old = atomic_fetchadd_int(&numfreebuffers, -1);
929 KASSERT(old > 0, ("numfreebuffers dropped to %d", old - 1));
930 }
931 }
932
933 /*
934 * Get a buffer with the specified data.
935 */
936 int
937 bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
938 struct buf **bpp)
939 {
940
941 return (breadn_flags(vp, blkno, size, 0, 0, 0, cred, 0, bpp));
942 }
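/*
 * Hypothetical usage sketch (lblkno and bsize are placeholders): a
 * filesystem read path typically does
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lblkno, bsize, NOCRED, &bp);
 *	if (error != 0) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	(copy out of bp->b_data)
 *	bqrelse(bp);
 *
 * bqrelse() keeps the data cached; brelse() would allow the buffer to
 * be reclaimed or invalidated as described below.
 */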
943
944 /*
945 * Attempt to initiate asynchronous I/O on read-ahead blocks. We must
946 * clear BIO_ERROR and B_INVAL prior to initiating I/O. If B_CACHE is set,
947 * the buffer is valid and we do not have to do anything.
948 */
949 void
950 breada(struct vnode * vp, daddr_t * rablkno, int * rabsize,
951 int cnt, struct ucred * cred)
952 {
953 struct buf *rabp;
954 int i;
955
956 for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
957 if (inmem(vp, *rablkno))
958 continue;
959 rabp = getblk(vp, *rablkno, *rabsize, 0, 0, 0);
960
961 if ((rabp->b_flags & B_CACHE) == 0) {
962 if (!TD_IS_IDLETHREAD(curthread))
963 curthread->td_ru.ru_inblock++;
964 rabp->b_flags |= B_ASYNC;
965 rabp->b_flags &= ~B_INVAL;
966 rabp->b_ioflags &= ~BIO_ERROR;
967 rabp->b_iocmd = BIO_READ;
968 if (rabp->b_rcred == NOCRED && cred != NOCRED)
969 rabp->b_rcred = crhold(cred);
970 vfs_busy_pages(rabp, 0);
971 BUF_KERNPROC(rabp);
972 rabp->b_iooffset = dbtob(rabp->b_blkno);
973 bstrategy(rabp);
974 } else {
975 brelse(rabp);
976 }
977 }
978 }
979
980 /*
981 * Operates like bread, but with getblk flags.
982 */
983 int
984 bread_gb(struct vnode * vp, daddr_t blkno, int cnt, struct ucred * cred,
985 int gbflags, struct buf **bpp)
986 {
987
988 return (breadn_flags(vp, blkno, cnt, NULL, NULL, 0,
989 cred, gbflags, bpp));
990 }
991
992 /*
993 * Operates like bread, but also starts asynchronous I/O on
994 * read-ahead blocks.
995 */
996 int
997 breadn(struct vnode * vp, daddr_t blkno, int size,
998 daddr_t * rablkno, int *rabsize,
999 int cnt, struct ucred * cred, struct buf **bpp)
1000 {
1001
1002 return (breadn_flags(vp, blkno, size, rablkno, rabsize, cnt,
1003 cred, 0, bpp));
1004 }
1005
1006 /*
1007 * Entry point for bread() and breadn().
1008 *
1009 * Get a buffer with the specified data. Look in the cache first. We
1010 * must clear BIO_ERROR and B_INVAL prior to initiating I/O. If B_CACHE
1011 * is set, the buffer is valid and we do not have to do anything, see
1012 * getblk(). Also starts asynchronous I/O on read-ahead blocks.
1013 */
1014 int
1015 breadn_flags(struct vnode *vp, daddr_t blkno, int size, daddr_t *rablkno,
1016 int *rabsize, int cnt, struct ucred *cred, int flags, struct buf **bpp)
1017 {
1018 struct buf *bp;
1019 int rv = 0, readwait = 0;
1020
1021 CTR3(KTR_BUF, "breadn(%p, %jd, %d)", vp, blkno, size);
1022 /*
1023 * Can only return NULL if GB_LOCK_NOWAIT flag is specified.
1024 */
1025 *bpp = bp = getblk(vp, blkno, size, 0, 0, flags);
1026 if (bp == NULL)
1027 return (EBUSY);
1028
1029 /* if not found in cache, do some I/O */
1030 if ((bp->b_flags & B_CACHE) == 0) {
1031 if (!TD_IS_IDLETHREAD(curthread))
1032 curthread->td_ru.ru_inblock++;
1033 bp->b_iocmd = BIO_READ;
1034 bp->b_flags &= ~B_INVAL;
1035 bp->b_ioflags &= ~BIO_ERROR;
1036 if (bp->b_rcred == NOCRED && cred != NOCRED)
1037 bp->b_rcred = crhold(cred);
1038 vfs_busy_pages(bp, 0);
1039 bp->b_iooffset = dbtob(bp->b_blkno);
1040 bstrategy(bp);
1041 ++readwait;
1042 }
1043
1044 breada(vp, rablkno, rabsize, cnt, cred);
1045
1046 if (readwait) {
1047 rv = bufwait(bp);
1048 }
1049 return (rv);
1050 }
1051
1052 /*
1053 * Write, release buffer on completion. (Done by iodone
1054 * if async). Do not bother writing anything if the buffer
1055 * is invalid.
1056 *
1057 * Note that we set B_CACHE here, indicating that buffer is
1058 * fully valid and thus cacheable. This is true even of NFS
1059 * now so we set it generally. This could be set either here
1060 * or in biodone() since the I/O is synchronous. We put it
1061 * here.
1062 */
1063 int
1064 bufwrite(struct buf *bp)
1065 {
1066 int oldflags;
1067 struct vnode *vp;
1068 int vp_md;
1069
1070 CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
1071 if (bp->b_flags & B_INVAL) {
1072 brelse(bp);
1073 return (0);
1074 }
1075
1076 if (bp->b_flags & B_BARRIER)
1077 barrierwrites++;
1078
1079 oldflags = bp->b_flags;
1080
1081 BUF_ASSERT_HELD(bp);
1082
1083 if (bp->b_pin_count > 0)
1084 bunpin_wait(bp);
1085
1086 KASSERT(!(bp->b_vflags & BV_BKGRDINPROG),
1087 ("FFS background buffer should not get here %p", bp));
1088
1089 vp = bp->b_vp;
1090 if (vp)
1091 vp_md = vp->v_vflag & VV_MD;
1092 else
1093 vp_md = 0;
1094
1095 /*
1096 * Mark the buffer clean. Increment the bufobj write count
1097 * before bundirty() call, to prevent other thread from seeing
1098 * empty dirty list and zero counter for writes in progress,
1099 * falsely indicating that the bufobj is clean.
1100 */
1101 bufobj_wref(bp->b_bufobj);
1102 bundirty(bp);
1103
1104 bp->b_flags &= ~B_DONE;
1105 bp->b_ioflags &= ~BIO_ERROR;
1106 bp->b_flags |= B_CACHE;
1107 bp->b_iocmd = BIO_WRITE;
1108
1109 vfs_busy_pages(bp, 1);
1110
1111 /*
1112 * Normal bwrites pipeline writes
1113 */
1114 bp->b_runningbufspace = bp->b_bufsize;
1115 atomic_add_long(&runningbufspace, bp->b_runningbufspace);
1116
1117 if (!TD_IS_IDLETHREAD(curthread))
1118 curthread->td_ru.ru_oublock++;
1119 if (oldflags & B_ASYNC)
1120 BUF_KERNPROC(bp);
1121 bp->b_iooffset = dbtob(bp->b_blkno);
1122 bstrategy(bp);
1123
1124 if ((oldflags & B_ASYNC) == 0) {
1125 int rtval = bufwait(bp);
1126 brelse(bp);
1127 return (rtval);
1128 } else {
1129 /*
1130 * don't allow the async write to saturate the I/O
1131 * system. We will not deadlock here because
1132 * we are blocking waiting for I/O that is already in-progress
1133 * to complete. We do not block here if it is the update
1134 * or syncer daemon trying to clean up as that can lead
1135 * to deadlock.
1136 */
1137 if ((curthread->td_pflags & TDP_NORUNNINGBUF) == 0 && !vp_md)
1138 waitrunningbufspace();
1139 }
1140
1141 return (0);
1142 }
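/*
 * Relationship sketch: bufwrite() above is the synchronous form unless
 * B_ASYNC is preset; bawrite() below sets B_ASYNC, so bufwrite() returns
 * after bstrategy() and the buffer is released by the completion path;
 * bdwrite() does no I/O at all, it just marks the buffer B_DELWRI and
 * requeues it for the buf daemon to flush later.
 */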
1143
1144 void
1145 bufbdflush(struct bufobj *bo, struct buf *bp)
1146 {
1147 struct buf *nbp;
1148
1149 if (bo->bo_dirty.bv_cnt > dirtybufthresh + 10) {
1150 (void) VOP_FSYNC(bp->b_vp, MNT_NOWAIT, curthread);
1151 altbufferflushes++;
1152 } else if (bo->bo_dirty.bv_cnt > dirtybufthresh) {
1153 BO_LOCK(bo);
1154 /*
1155 * Try to find a buffer to flush.
1156 */
1157 TAILQ_FOREACH(nbp, &bo->bo_dirty.bv_hd, b_bobufs) {
1158 if ((nbp->b_vflags & BV_BKGRDINPROG) ||
1159 BUF_LOCK(nbp,
1160 LK_EXCLUSIVE | LK_NOWAIT, NULL))
1161 continue;
1162 if (bp == nbp)
1163 panic("bdwrite: found ourselves");
1164 BO_UNLOCK(bo);
1165 /* Don't call buf_countdeps() with the bo lock held. */
1166 if (buf_countdeps(nbp, 0)) {
1167 BO_LOCK(bo);
1168 BUF_UNLOCK(nbp);
1169 continue;
1170 }
1171 if (nbp->b_flags & B_CLUSTEROK) {
1172 vfs_bio_awrite(nbp);
1173 } else {
1174 bremfree(nbp);
1175 bawrite(nbp);
1176 }
1177 dirtybufferflushes++;
1178 break;
1179 }
1180 if (nbp == NULL)
1181 BO_UNLOCK(bo);
1182 }
1183 }
1184
1185 /*
1186 * Delayed write. (Buffer is marked dirty). Do not bother writing
1187 * anything if the buffer is marked invalid.
1188 *
1189 * Note that since the buffer must be completely valid, we can safely
1190 * set B_CACHE. In fact, we have to set B_CACHE here rather than in
1191 * biodone() in order to prevent getblk from writing the buffer
1192 * out synchronously.
1193 */
1194 void
1195 bdwrite(struct buf *bp)
1196 {
1197 struct thread *td = curthread;
1198 struct vnode *vp;
1199 struct bufobj *bo;
1200
1201 CTR3(KTR_BUF, "bdwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
1202 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
1203 KASSERT((bp->b_flags & B_BARRIER) == 0,
1204 ("Barrier request in delayed write %p", bp));
1205 BUF_ASSERT_HELD(bp);
1206
1207 if (bp->b_flags & B_INVAL) {
1208 brelse(bp);
1209 return;
1210 }
1211
1212 /*
1213 * If we have too many dirty buffers, don't create any more.
1214 * If we are wildly over our limit, then force a complete
1215 * cleanup. Otherwise, just keep the situation from getting
1216 * out of control. Note that we have to avoid a recursive
1217 * disaster and not try to clean up after our own cleanup!
1218 */
1219 vp = bp->b_vp;
1220 bo = bp->b_bufobj;
1221 if ((td->td_pflags & (TDP_COWINPROGRESS|TDP_INBDFLUSH)) == 0) {
1222 td->td_pflags |= TDP_INBDFLUSH;
1223 BO_BDFLUSH(bo, bp);
1224 td->td_pflags &= ~TDP_INBDFLUSH;
1225 } else
1226 recursiveflushes++;
1227
1228 bdirty(bp);
1229 /*
1230 * Set B_CACHE, indicating that the buffer is fully valid. This is
1231 * true even of NFS now.
1232 */
1233 bp->b_flags |= B_CACHE;
1234
1235 /*
1236 * This bmap keeps the system from needing to do the bmap later,
1237 * perhaps when the system is attempting to do a sync. Since the
1238 * indirect block -- or whatever other data structure the filesystem
1239 * needs -- is likely still in memory now, it is a good thing
1240 * to do this. Note also that if the pageout daemon is
1241 * requesting a sync -- there might not be enough memory to do
1242 * the bmap then... So, this is important to do.
1243 */
1244 if (vp->v_type != VCHR && bp->b_lblkno == bp->b_blkno) {
1245 VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
1246 }
1247
1248 /*
1249 * Set the *dirty* buffer range based upon the VM system dirty
1250 * pages.
1251 *
1252 * Mark the buffer pages as clean. We need to do this here to
1253 * satisfy the vnode_pager and the pageout daemon, so that it
1254 * thinks that the pages have been "cleaned". Note that since
1255 * the pages are in a delayed write buffer -- the VFS layer
1256 * "will" see that the pages get written out on the next sync,
1257 * or perhaps the cluster will be completed.
1258 */
1259 vfs_clean_pages_dirty_buf(bp);
1260 bqrelse(bp);
1261
1262 /*
1263 * Wakeup the buffer flushing daemon if we have a lot of dirty
1264 * buffers (midpoint between our recovery point and our stall
1265 * point).
1266 */
1267 bd_wakeup((lodirtybuffers + hidirtybuffers) / 2);
1268
1269 /*
1270 * note: we cannot initiate I/O from a bdwrite even if we wanted to,
1271 * due to the softdep code.
1272 */
1273 }
1274
1275 /*
1276 * bdirty:
1277 *
1278 * Turn buffer into delayed write request. We must clear BIO_READ and
1279 * B_RELBUF, and we must set B_DELWRI. We reassign the buffer to
1280 * itself to properly update it in the dirty/clean lists. We mark it
1281 * B_DONE to ensure that any asynchronization of the buffer properly
1282 * clears B_DONE ( else a panic will occur later ).
1283 *
1284 * bdirty() is kinda like bdwrite() - we have to clear B_INVAL which
1285 * might have been set pre-getblk(). Unlike bwrite/bdwrite, bdirty()
1286 * should only be called if the buffer is known-good.
1287 *
1288 * Since the buffer is not on a queue, we do not update the numfreebuffers
1289 * count.
1290 *
1291 * The buffer must be on QUEUE_NONE.
1292 */
1293 void
1294 bdirty(struct buf *bp)
1295 {
1296
1297 CTR3(KTR_BUF, "bdirty(%p) vp %p flags %X",
1298 bp, bp->b_vp, bp->b_flags);
1299 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
1300 KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
1301 ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
1302 BUF_ASSERT_HELD(bp);
1303 bp->b_flags &= ~(B_RELBUF);
1304 bp->b_iocmd = BIO_WRITE;
1305
1306 if ((bp->b_flags & B_DELWRI) == 0) {
1307 bp->b_flags |= /* XXX B_DONE | */ B_DELWRI;
1308 reassignbuf(bp);
1309 atomic_add_int(&numdirtybuffers, 1);
1310 bd_wakeup((lodirtybuffers + hidirtybuffers) / 2);
1311 }
1312 }
1313
1314 /*
1315 * bundirty:
1316 *
1317 * Clear B_DELWRI for buffer.
1318 *
1319 * Since the buffer is not on a queue, we do not update the numfreebuffers
1320 * count.
1321 *
1322 * The buffer must be on QUEUE_NONE.
1323 */
1324
1325 void
1326 bundirty(struct buf *bp)
1327 {
1328
1329 CTR3(KTR_BUF, "bundirty(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
1330 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
1331 KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
1332 ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex));
1333 BUF_ASSERT_HELD(bp);
1334
1335 if (bp->b_flags & B_DELWRI) {
1336 bp->b_flags &= ~B_DELWRI;
1337 reassignbuf(bp);
1338 atomic_subtract_int(&numdirtybuffers, 1);
1339 numdirtywakeup(lodirtybuffers);
1340 }
1341 /*
1342 * Since it is now being written, we can clear its deferred write flag.
1343 */
1344 bp->b_flags &= ~B_DEFERRED;
1345 }
1346
1347 /*
1348 * bawrite:
1349 *
1350 * Asynchronous write. Start output on a buffer, but do not wait for
1351 * it to complete. The buffer is released when the output completes.
1352 *
1353 * bwrite() ( or the VOP routine anyway ) is responsible for handling
1354 * B_INVAL buffers. Not us.
1355 */
1356 void
1357 bawrite(struct buf *bp)
1358 {
1359
1360 bp->b_flags |= B_ASYNC;
1361 (void) bwrite(bp);
1362 }
1363
1364 /*
1365 * babarrierwrite:
1366 *
1367 * Asynchronous barrier write. Start output on a buffer, but do not
1368 * wait for it to complete. Place a write barrier after this write so
1369 * that this buffer and all buffers written before it are committed to
1370 * the disk before any buffers written after this write are committed
1371 * to the disk. The buffer is released when the output completes.
1372 */
1373 void
1374 babarrierwrite(struct buf *bp)
1375 {
1376
1377 bp->b_flags |= B_ASYNC | B_BARRIER;
1378 (void) bwrite(bp);
1379 }
1380
1381 /*
1382 * bbarrierwrite:
1383 *
1384 * Synchronous barrier write. Start output on a buffer and wait for
1385 * it to complete. Place a write barrier after this write so that
1386 * this buffer and all buffers written before it are committed to
1387 * the disk before any buffers written after this write are committed
1388 * to the disk. The buffer is released when the output completes.
1389 */
1390 int
1391 bbarrierwrite(struct buf *bp)
1392 {
1393
1394 bp->b_flags |= B_BARRIER;
1395 return (bwrite(bp));
1396 }
1397
1398 /*
1399 * bwillwrite:
1400 *
1401 * Called prior to the locking of any vnodes when we are expecting to
1402 * write. We do not want to starve the buffer cache with too many
1403 * dirty buffers so we block here. By blocking prior to the locking
1404 * of any vnodes we attempt to avoid the situation where a locked vnode
1405 * prevents the various system daemons from flushing related buffers.
1406 */
1407
1408 void
1409 bwillwrite(void)
1410 {
1411
1412 if (numdirtybuffers >= hidirtybuffers) {
1413 mtx_lock(&nblock);
1414 while (numdirtybuffers >= hidirtybuffers) {
1415 bd_wakeup(1);
1416 needsbuffer |= VFS_BIO_NEED_DIRTYFLUSH;
1417 msleep(&needsbuffer, &nblock,
1418 (PRIBIO + 4), "flswai", 0);
1419 }
1420 mtx_unlock(&nblock);
1421 }
1422 }
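/*
 * Usage sketch (illustrative): a write(2)-style path calls this before
 * acquiring any vnode locks, e.g.
 *
 *	bwillwrite();
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	error = VOP_WRITE(vp, uio, ioflag, cred);
 *	VOP_UNLOCK(vp, 0);
 *
 * so that a thread never sleeps for the dirty-buffer flush while
 * holding a lock the flushing daemons might need.
 */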
1423
1424 /*
1425 * Return true if we have too many dirty buffers.
1426 */
1427 int
1428 buf_dirty_count_severe(void)
1429 {
1430
1431 return(numdirtybuffers >= hidirtybuffers);
1432 }
1433
1434 static __noinline int
1435 buf_vm_page_count_severe(void)
1436 {
1437
1438 KFAIL_POINT_CODE(DEBUG_FP, buf_pressure, return 1);
1439
1440 return vm_page_count_severe();
1441 }
1442
1443 /*
1444 * brelse:
1445 *
1446 * Release a busy buffer and, if requested, free its resources. The
1447 * buffer will be stashed in the appropriate bufqueue[] allowing it
1448 * to be accessed later as a cache entity or reused for other purposes.
1449 */
1450 void
1451 brelse(struct buf *bp)
1452 {
1453 CTR3(KTR_BUF, "brelse(%p) vp %p flags %X",
1454 bp, bp->b_vp, bp->b_flags);
1455 KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
1456 ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
1457
1458 if (BUF_LOCKRECURSED(bp)) {
1459 /*
1460 * Do not process; in particular, do not handle
1461 * B_INVAL/B_RELBUF and do not release to the free list.
1462 */
1463 BUF_UNLOCK(bp);
1464 return;
1465 }
1466
1467 if (bp->b_flags & B_MANAGED) {
1468 bqrelse(bp);
1469 return;
1470 }
1471
1472 if (bp->b_iocmd == BIO_WRITE && (bp->b_ioflags & BIO_ERROR) &&
1473 bp->b_error == EIO && !(bp->b_flags & B_INVAL)) {
1474 /*
1475 * Failed write, redirty. Must clear BIO_ERROR to prevent
1476 * pages from being scrapped. If the error is anything
1477 * other than an I/O error (EIO), assume that retrying
1478 * is futile.
1479 */
1480 bp->b_ioflags &= ~BIO_ERROR;
1481 bdirty(bp);
1482 } else if ((bp->b_flags & (B_NOCACHE | B_INVAL)) ||
1483 (bp->b_ioflags & BIO_ERROR) || (bp->b_bufsize <= 0)) {
1484 /*
1485 * Either a failed I/O or we were asked to free or not
1486 * cache the buffer.
1487 */
1488 bp->b_flags |= B_INVAL;
1489 if (!LIST_EMPTY(&bp->b_dep))
1490 buf_deallocate(bp);
1491 if (bp->b_flags & B_DELWRI) {
1492 atomic_subtract_int(&numdirtybuffers, 1);
1493 numdirtywakeup(lodirtybuffers);
1494 }
1495 bp->b_flags &= ~(B_DELWRI | B_CACHE);
1496 if ((bp->b_flags & B_VMIO) == 0) {
1497 if (bp->b_bufsize)
1498 allocbuf(bp, 0);
1499 if (bp->b_vp)
1500 brelvp(bp);
1501 }
1502 }
1503
1504 /*
1505 * We must clear B_RELBUF if B_DELWRI is set. If vfs_vmio_release()
1506 * is called with B_DELWRI set, the underlying pages may wind up
1507 * getting freed causing a previous write (bdwrite()) to get 'lost'
1508 * because pages associated with a B_DELWRI bp are marked clean.
1509 *
1510 * We still allow the B_INVAL case to call vfs_vmio_release(), even
1511 * if B_DELWRI is set.
1512 *
1513 * If B_DELWRI is not set we may have to set B_RELBUF if we are low
1514 * on pages to return pages to the VM page queues.
1515 */
1516 if (bp->b_flags & B_DELWRI)
1517 bp->b_flags &= ~B_RELBUF;
1518 else if (buf_vm_page_count_severe()) {
1519 /*
1520 * The locking of the BO_LOCK is not necessary since
1521 * BKGRDINPROG cannot be set while we hold the buf
1522 * lock; it can only be cleared if it is already
1523 * pending.
1524 */
1525 if (bp->b_vp) {
1526 if (!(bp->b_vflags & BV_BKGRDINPROG))
1527 bp->b_flags |= B_RELBUF;
1528 } else
1529 bp->b_flags |= B_RELBUF;
1530 }
1531
1532 /*
1533 * VMIO buffer rundown. It is no longer necessary to keep a VMIO buffer
1534 * constituted, not even for NFS buffers. Two flags affect this. If
1535 * B_INVAL, the struct buf is invalidated but the VM object is kept
1536 * around ( i.e. so it is trivial to reconstitute the buffer later ).
1537 *
1538 * If BIO_ERROR or B_NOCACHE is set, pages in the VM object will be
1539 * invalidated. BIO_ERROR cannot be set for a failed write unless the
1540 * buffer is also B_INVAL because it hits the re-dirtying code above.
1541 *
1542 * Normally we can do this whether a buffer is B_DELWRI or not. If
1543 * the buffer is an NFS buffer, it is tracking piecemeal writes or
1544 * the commit state and we cannot afford to lose the buffer. If the
1545 * buffer has a background write in progress, we need to keep it
1546 * around to prevent it from being reconstituted and starting a second
1547 * background write.
1548 */
1549 if ((bp->b_flags & B_VMIO)
1550 && !(bp->b_vp->v_mount != NULL &&
1551 (bp->b_vp->v_mount->mnt_vfc->vfc_flags & VFCF_NETWORK) != 0 &&
1552 !vn_isdisk(bp->b_vp, NULL) &&
1553 (bp->b_flags & B_DELWRI))
1554 ) {
1555
1556 int i, j, resid;
1557 vm_page_t m;
1558 off_t foff;
1559 vm_pindex_t poff;
1560 vm_object_t obj;
1561
1562 obj = bp->b_bufobj->bo_object;
1563
1564 /*
1565 * Get the base offset and length of the buffer. Note that
1566 * in the VMIO case, if the buffer block size is not
1567 * page-aligned, then the b_data pointer may not be page-aligned,
1568 * but our b_pages[] array *IS* page-aligned.
1569 *
1570 * Block sizes less than DEV_BSIZE (usually 512) are not
1571 * supported due to the page granularity bits (m->valid,
1572 * m->dirty, etc...).
1573 *
1574 * See buf(9) for more information.
1575 */
1576 resid = bp->b_bufsize;
1577 foff = bp->b_offset;
1578 VM_OBJECT_LOCK(obj);
1579 for (i = 0; i < bp->b_npages; i++) {
1580 int had_bogus = 0;
1581
1582 m = bp->b_pages[i];
1583
1584 /*
1585 * If we hit a bogus page, fixup *all* the bogus pages
1586 * now.
1587 */
1588 if (m == bogus_page) {
1589 poff = OFF_TO_IDX(bp->b_offset);
1590 had_bogus = 1;
1591
1592 for (j = i; j < bp->b_npages; j++) {
1593 vm_page_t mtmp;
1594 mtmp = bp->b_pages[j];
1595 if (mtmp == bogus_page) {
1596 mtmp = vm_page_lookup(obj, poff + j);
1597 if (!mtmp) {
1598 panic("brelse: page missing\n");
1599 }
1600 bp->b_pages[j] = mtmp;
1601 }
1602 }
1603
1604 if ((bp->b_flags & (B_INVAL | B_UNMAPPED)) == 0) {
1605 BUF_CHECK_MAPPED(bp);
1606 pmap_qenter(
1607 trunc_page((vm_offset_t)bp->b_data),
1608 bp->b_pages, bp->b_npages);
1609 }
1610 m = bp->b_pages[i];
1611 }
1612 if ((bp->b_flags & B_NOCACHE) ||
1613 (bp->b_ioflags & BIO_ERROR &&
1614 bp->b_iocmd == BIO_READ)) {
1615 int poffset = foff & PAGE_MASK;
1616 int presid = resid > (PAGE_SIZE - poffset) ?
1617 (PAGE_SIZE - poffset) : resid;
1618
1619 KASSERT(presid >= 0, ("brelse: extra page"));
1620 if (pmap_page_wired_mappings(m) == 0)
1621 vm_page_set_invalid(m, poffset, presid);
1622 if (had_bogus)
1623 printf("avoided corruption bug in bogus_page/brelse code\n");
1624 }
1625 resid -= PAGE_SIZE - (foff & PAGE_MASK);
1626 foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
1627 }
1628 VM_OBJECT_UNLOCK(obj);
1629 if (bp->b_flags & (B_INVAL | B_RELBUF))
1630 vfs_vmio_release(bp);
1631
1632 } else if (bp->b_flags & B_VMIO) {
1633
1634 if (bp->b_flags & (B_INVAL | B_RELBUF)) {
1635 vfs_vmio_release(bp);
1636 }
1637
1638 } else if ((bp->b_flags & (B_INVAL | B_RELBUF)) != 0) {
1639 if (bp->b_bufsize != 0)
1640 allocbuf(bp, 0);
1641 if (bp->b_vp != NULL)
1642 brelvp(bp);
1643 }
1644
1645 /* enqueue */
1646 mtx_lock(&bqlock);
1647 /* Handle delayed bremfree() processing. */
1648 if (bp->b_flags & B_REMFREE) {
1649 struct bufobj *bo;
1650
1651 bo = bp->b_bufobj;
1652 if (bo != NULL)
1653 BO_LOCK(bo);
1654 bremfreel(bp);
1655 if (bo != NULL)
1656 BO_UNLOCK(bo);
1657 }
1658 if (bp->b_qindex != QUEUE_NONE)
1659 panic("brelse: free buffer onto another queue???");
1660
1661 /*
1662 * If the buffer has junk contents, signal it and eventually
1663 * clean up B_DELWRI and disassociate the vnode so that gbincore()
1664 * doesn't find it.
1665 */
1666 if (bp->b_bufsize == 0 || (bp->b_ioflags & BIO_ERROR) != 0 ||
1667 (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF)) != 0)
1668 bp->b_flags |= B_INVAL;
1669 if (bp->b_flags & B_INVAL) {
1670 if (bp->b_flags & B_DELWRI)
1671 bundirty(bp);
1672 if (bp->b_vp)
1673 brelvp(bp);
1674 }
1675
1676 /* buffers with no memory */
1677 if (bp->b_bufsize == 0) {
1678 bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA);
1679 if (bp->b_vflags & BV_BKGRDINPROG)
1680 panic("losing buffer 1");
1681 if (bp->b_kvasize) {
1682 bp->b_qindex = QUEUE_EMPTYKVA;
1683 } else {
1684 bp->b_qindex = QUEUE_EMPTY;
1685 }
1686 TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist);
1687 /* buffers with junk contents */
1688 } else if (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF) ||
1689 (bp->b_ioflags & BIO_ERROR)) {
1690 bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA);
1691 if (bp->b_vflags & BV_BKGRDINPROG)
1692 panic("losing buffer 2");
1693 bp->b_qindex = QUEUE_CLEAN;
1694 TAILQ_INSERT_HEAD(&bufqueues[QUEUE_CLEAN], bp, b_freelist);
1695 /* remaining buffers */
1696 } else {
1697 if ((bp->b_flags & (B_DELWRI|B_NEEDSGIANT)) ==
1698 (B_DELWRI|B_NEEDSGIANT))
1699 bp->b_qindex = QUEUE_DIRTY_GIANT;
1700 else if (bp->b_flags & B_DELWRI)
1701 bp->b_qindex = QUEUE_DIRTY;
1702 else
1703 bp->b_qindex = QUEUE_CLEAN;
1704 if (bp->b_flags & B_AGE) {
1705 TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp,
1706 b_freelist);
1707 } else {
1708 TAILQ_INSERT_TAIL(&bufqueues[bp->b_qindex], bp,
1709 b_freelist);
1710 }
1711 }
1712 #ifdef INVARIANTS
1713 bq_len[bp->b_qindex]++;
1714 #endif
1715 mtx_unlock(&bqlock);
1716
1717 /*
1718 * Fixup numfreebuffers count. The bp is on an appropriate queue
1719 * unless locked. We then bump numfreebuffers if it is not B_DELWRI.
1720 * We've already handled the B_INVAL case ( B_DELWRI will be clear
1721 * if B_INVAL is set ).
1722 */
1723
1724 if (!(bp->b_flags & B_DELWRI)) {
1725 struct bufobj *bo;
1726
1727 bo = bp->b_bufobj;
1728 if (bo != NULL)
1729 BO_LOCK(bo);
1730 bufcountwakeup(bp);
1731 if (bo != NULL)
1732 BO_UNLOCK(bo);
1733 }
1734
1735 /*
1736 * Something we can maybe free or reuse
1737 */
1738 if (bp->b_bufsize || bp->b_kvasize)
1739 bufspacewakeup();
1740
1741 bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF | B_DIRECT);
1742 if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
1743 panic("brelse: not dirty");
1744 /* unlock */
1745 BUF_UNLOCK(bp);
1746 }
1747
1748 /*
1749 * Release a buffer back to the appropriate queue but do not try to free
1750 * it. The buffer is expected to be used again soon.
1751 *
1752 * bqrelse() is used by bdwrite() to requeue a delayed write, and used by
1753 * biodone() to requeue an async I/O on completion. It is also used when
1754 * known good buffers need to be requeued but we think we may need the data
1755 * again soon.
1756 *
1757 * XXX we should be able to leave the B_RELBUF hint set on completion.
1758 */
1759 void
1760 bqrelse(struct buf *bp)
1761 {
1762 struct bufobj *bo;
1763
1764 CTR3(KTR_BUF, "bqrelse(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
1765 KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
1766 ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
1767
1768 if (BUF_LOCKRECURSED(bp)) {
1769 /* do not release to free list */
1770 BUF_UNLOCK(bp);
1771 return;
1772 }
1773
1774 bo = bp->b_bufobj;
1775 if (bp->b_flags & B_MANAGED) {
1776 if (bp->b_flags & B_REMFREE) {
1777 mtx_lock(&bqlock);
1778 if (bo != NULL)
1779 BO_LOCK(bo);
1780 bremfreel(bp);
1781 if (bo != NULL)
1782 BO_UNLOCK(bo);
1783 mtx_unlock(&bqlock);
1784 }
1785 bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
1786 BUF_UNLOCK(bp);
1787 return;
1788 }
1789
1790 mtx_lock(&bqlock);
1791 /* Handle delayed bremfree() processing. */
1792 if (bp->b_flags & B_REMFREE) {
1793 if (bo != NULL)
1794 BO_LOCK(bo);
1795 bremfreel(bp);
1796 if (bo != NULL)
1797 BO_UNLOCK(bo);
1798 }
1799 if (bp->b_qindex != QUEUE_NONE)
1800 panic("bqrelse: free buffer onto another queue???");
1801 /* buffers with stale but valid contents */
1802 if (bp->b_flags & B_DELWRI) {
1803 if (bp->b_flags & B_NEEDSGIANT)
1804 bp->b_qindex = QUEUE_DIRTY_GIANT;
1805 else
1806 bp->b_qindex = QUEUE_DIRTY;
1807 TAILQ_INSERT_TAIL(&bufqueues[bp->b_qindex], bp, b_freelist);
1808 #ifdef INVARIANTS
1809 bq_len[bp->b_qindex]++;
1810 #endif
1811 } else {
1812 /*
1813		 * Taking the BO_LOCK to check BV_BKGRDINPROG is
1814		 * not necessary since BV_BKGRDINPROG cannot be set
1815		 * while we hold the buf lock; it can only be
1816		 * cleared if it is already
1817		 * pending.
1818 */
1819 if (!buf_vm_page_count_severe() || (bp->b_vflags & BV_BKGRDINPROG)) {
1820 bp->b_qindex = QUEUE_CLEAN;
1821 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_CLEAN], bp,
1822 b_freelist);
1823 #ifdef INVARIANTS
1824 bq_len[QUEUE_CLEAN]++;
1825 #endif
1826 } else {
1827 /*
1828 * We are too low on memory, we have to try to free
1829 * the buffer (most importantly: the wired pages
1830 * making up its backing store) *now*.
1831 */
1832 mtx_unlock(&bqlock);
1833 brelse(bp);
1834 return;
1835 }
1836 }
1837 mtx_unlock(&bqlock);
1838
1839 if ((bp->b_flags & B_INVAL) || !(bp->b_flags & B_DELWRI)) {
1840 if (bo != NULL)
1841 BO_LOCK(bo);
1842 bufcountwakeup(bp);
1843 if (bo != NULL)
1844 BO_UNLOCK(bo);
1845 }
1846
1847 /*
1848 * Something we can maybe free or reuse.
1849 */
1850 if (bp->b_bufsize && !(bp->b_flags & B_DELWRI))
1851 bufspacewakeup();
1852
1853 bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
1854 if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
1855 panic("bqrelse: not dirty");
1856 /* unlock */
1857 BUF_UNLOCK(bp);
1858 }
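
/*
 * Usage sketch (illustrative, not part of this file): a caller that has
 * finished with a locked buffer picks the release path based on whether
 * the cached contents are worth keeping:
 *
 *	if (error != 0) {
 *		bp->b_flags |= B_INVAL;		(contents are junk)
 *		brelse(bp);
 *	} else
 *		bqrelse(bp);			(expect to reuse the data)
 */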
1859
1860 /* Give pages used by the bp back to the VM system (where possible) */
1861 static void
1862 vfs_vmio_release(struct buf *bp)
1863 {
1864 int i;
1865 vm_page_t m;
1866
1867 if ((bp->b_flags & B_UNMAPPED) == 0) {
1868 BUF_CHECK_MAPPED(bp);
1869 pmap_qremove(trunc_page((vm_offset_t)bp->b_data), bp->b_npages);
1870 } else
1871 BUF_CHECK_UNMAPPED(bp);
1872 VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
1873 for (i = 0; i < bp->b_npages; i++) {
1874 m = bp->b_pages[i];
1875 bp->b_pages[i] = NULL;
1876 /*
1877 * In order to keep page LRU ordering consistent, put
1878 * everything on the inactive queue.
1879 */
1880 vm_page_lock(m);
1881 vm_page_unwire(m, 0);
1882 /*
1883 * We don't mess with busy pages, it is
1884 * the responsibility of the process that
1885 * busied the pages to deal with them.
1886 */
1887 if ((m->oflags & VPO_BUSY) == 0 && m->busy == 0 &&
1888 m->wire_count == 0) {
1889 /*
1890 * Might as well free the page if we can and it has
1891 * no valid data. We also free the page if the
1892			 * buffer was used for direct I/O.
1893 */
1894 if ((bp->b_flags & B_ASYNC) == 0 && !m->valid) {
1895 vm_page_free(m);
1896 } else if (bp->b_flags & B_DIRECT) {
1897 vm_page_try_to_free(m);
1898 } else if (buf_vm_page_count_severe()) {
1899 vm_page_try_to_cache(m);
1900 }
1901 }
1902 vm_page_unlock(m);
1903 }
1904 VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
1905
1906 if (bp->b_bufsize) {
1907 bufspacewakeup();
1908 bp->b_bufsize = 0;
1909 }
1910 bp->b_npages = 0;
1911 bp->b_flags &= ~B_VMIO;
1912 if (bp->b_vp)
1913 brelvp(bp);
1914 }
1915
1916 /*
1917 * Check to see if a block at a particular lbn is available for a clustered
1918 * write.
1919 */
1920 static int
1921 vfs_bio_clcheck(struct vnode *vp, int size, daddr_t lblkno, daddr_t blkno)
1922 {
1923 struct buf *bpa;
1924 int match;
1925
1926 match = 0;
1927
1928 /* If the buf isn't in core skip it */
1929 if ((bpa = gbincore(&vp->v_bufobj, lblkno)) == NULL)
1930 return (0);
1931
1932 /* If the buf is busy we don't want to wait for it */
1933 if (BUF_LOCK(bpa, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
1934 return (0);
1935
1936 /* Only cluster with valid clusterable delayed write buffers */
1937 if ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) !=
1938 (B_DELWRI | B_CLUSTEROK))
1939 goto done;
1940
1941 if (bpa->b_bufsize != size)
1942 goto done;
1943
1944 /*
1945 * Check to see if it is in the expected place on disk and that the
1946 * block has been mapped.
1947 */
1948 if ((bpa->b_blkno != bpa->b_lblkno) && (bpa->b_blkno == blkno))
1949 match = 1;
1950 done:
1951 BUF_UNLOCK(bpa);
1952 return (match);
1953 }
1954
1955 /*
1956 * vfs_bio_awrite:
1957 *
1958 * Implement clustered async writes for clearing out B_DELWRI buffers.
1959 *	This is much better than the old way of writing only one buffer at
1960 * a time. Note that we may not be presented with the buffers in the
1961 * correct order, so we search for the cluster in both directions.
1962 */
1963 int
1964 vfs_bio_awrite(struct buf *bp)
1965 {
1966 struct bufobj *bo;
1967 int i;
1968 int j;
1969 daddr_t lblkno = bp->b_lblkno;
1970 struct vnode *vp = bp->b_vp;
1971 int ncl;
1972 int nwritten;
1973 int size;
1974 int maxcl;
1975 int gbflags;
1976
1977 bo = &vp->v_bufobj;
1978 gbflags = (bp->b_flags & B_UNMAPPED) != 0 ? GB_UNMAPPED : 0;
1979 /*
1980 * right now we support clustered writing only to regular files. If
1981 * we find a clusterable block we could be in the middle of a cluster
1982	 * rather than at the beginning.
1983 */
1984 if ((vp->v_type == VREG) &&
1985 (vp->v_mount != 0) && /* Only on nodes that have the size info */
1986 (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
1987
1988 size = vp->v_mount->mnt_stat.f_iosize;
1989 maxcl = MAXPHYS / size;
1990
1991 BO_LOCK(bo);
1992 for (i = 1; i < maxcl; i++)
1993 if (vfs_bio_clcheck(vp, size, lblkno + i,
1994 bp->b_blkno + ((i * size) >> DEV_BSHIFT)) == 0)
1995 break;
1996
1997 for (j = 1; i + j <= maxcl && j <= lblkno; j++)
1998 if (vfs_bio_clcheck(vp, size, lblkno - j,
1999 bp->b_blkno - ((j * size) >> DEV_BSHIFT)) == 0)
2000 break;
2001 BO_UNLOCK(bo);
2002 --j;
2003 ncl = i + j;
2004 /*
2005 * this is a possible cluster write
2006 */
2007 if (ncl != 1) {
2008 BUF_UNLOCK(bp);
2009 nwritten = cluster_wbuild_gb(vp, size, lblkno - j,
2010 ncl, gbflags);
2011 return (nwritten);
2012 }
2013 }
2014 bremfree(bp);
2015 bp->b_flags |= B_ASYNC;
2016 /*
2017 * default (old) behavior, writing out only one block
2018 *
2019 * XXX returns b_bufsize instead of b_bcount for nwritten?
2020 */
2021 nwritten = bp->b_bufsize;
2022 (void) bwrite(bp);
2023
2024 return (nwritten);
2025 }
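
/*
 * Worked example (assumed values, for illustration only): with
 * f_iosize = 16K and MAXPHYS = 128K, maxcl is 8, so the two directional
 * scans above can coalesce bp with up to seven adjacent clusterable
 * delayed-write buffers into a single cluster_wbuild_gb() call covering
 * ncl = i + j blocks.
 */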
2026
2027 static void
2028 setbufkva(struct buf *bp, vm_offset_t addr, int maxsize, int gbflags)
2029 {
2030
2031 KASSERT((bp->b_flags & (B_UNMAPPED | B_KVAALLOC)) == 0 &&
2032 bp->b_kvasize == 0, ("call bfreekva(%p)", bp));
2033 if ((gbflags & GB_UNMAPPED) == 0) {
2034 bp->b_kvabase = (caddr_t)addr;
2035 } else if ((gbflags & GB_KVAALLOC) != 0) {
2036 KASSERT((gbflags & GB_UNMAPPED) != 0,
2037 ("GB_KVAALLOC without GB_UNMAPPED"));
2038 bp->b_kvaalloc = (caddr_t)addr;
2039 bp->b_flags |= B_UNMAPPED | B_KVAALLOC;
2040 atomic_add_long(&unmapped_bufspace, bp->b_kvasize);
2041 }
2042 bp->b_kvasize = maxsize;
2043 }
2044
2045 /*
2046 * Allocate the buffer KVA and set b_kvasize. Also set b_kvabase if
2047 * needed.
2048 */
2049 static int
2050 allocbufkva(struct buf *bp, int maxsize, int gbflags)
2051 {
2052 vm_offset_t addr;
2053 int rv;
2054
2055 bfreekva(bp);
2056 addr = 0;
2057
2058 vm_map_lock(buffer_map);
2059 if (vm_map_findspace(buffer_map, vm_map_min(buffer_map), maxsize,
2060 &addr)) {
2061 vm_map_unlock(buffer_map);
2062 /*
2063 * Buffer map is too fragmented. Request the caller
2064 * to defragment the map.
2065 */
2066 atomic_add_int(&bufdefragcnt, 1);
2067 return (1);
2068 }
2069 rv = vm_map_insert(buffer_map, NULL, 0, addr, addr + maxsize,
2070 VM_PROT_RW, VM_PROT_RW, MAP_NOFAULT);
2071 KASSERT(rv == KERN_SUCCESS, ("vm_map_insert(buffer_map) rv %d", rv));
2072 vm_map_unlock(buffer_map);
2073 setbufkva(bp, addr, maxsize, gbflags);
2074 atomic_add_long(&bufspace, bp->b_kvasize);
2075 return (0);
2076 }
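
/*
 * Caller-contract sketch (mirrors the getnewbuf() code below): a
 * non-zero return means buffer_map is too fragmented, and the caller is
 * expected to free the buffer and retry in defragmentation mode:
 *
 *	if (allocbufkva(bp, maxsize, gbflags)) {
 *		defrag = 1;
 *		bp->b_flags |= B_INVAL;
 *		brelse(bp);
 *		goto restart;
 *	}
 */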
2077
2078 /*
2079 * Ask the bufdaemon for help, or act as bufdaemon itself, when a
2080 * locked vnode is supplied.
2081 */
2082 static void
2083 getnewbuf_bufd_help(struct vnode *vp, int gbflags, int slpflag, int slptimeo,
2084 int defrag)
2085 {
2086 struct thread *td;
2087 char *waitmsg;
2088 int fl, flags, norunbuf;
2089
2090 mtx_assert(&bqlock, MA_OWNED);
2091
2092 if (defrag) {
2093 flags = VFS_BIO_NEED_BUFSPACE;
2094 waitmsg = "nbufkv";
2095 } else if (bufspace >= hibufspace) {
2096 waitmsg = "nbufbs";
2097 flags = VFS_BIO_NEED_BUFSPACE;
2098 } else {
2099 waitmsg = "newbuf";
2100 flags = VFS_BIO_NEED_ANY;
2101 }
2102 mtx_lock(&nblock);
2103 needsbuffer |= flags;
2104 mtx_unlock(&nblock);
2105 mtx_unlock(&bqlock);
2106
2107 bd_speedup(); /* heeeelp */
2108 if ((gbflags & GB_NOWAIT_BD) != 0)
2109 return;
2110
2111 td = curthread;
2112 mtx_lock(&nblock);
2113 while (needsbuffer & flags) {
2114 if (vp != NULL && vp->v_type != VCHR &&
2115 (td->td_pflags & TDP_BUFNEED) == 0) {
2116 mtx_unlock(&nblock);
2117 /*
2118 * getblk() is called with a vnode locked, and
2119 * some majority of the dirty buffers may as
2120 * well belong to the vnode. Flushing the
2121 * buffers there would make a progress that
2122 * cannot be achieved by the buf_daemon, that
2123 * cannot lock the vnode.
2124 */
2125 norunbuf = ~(TDP_BUFNEED | TDP_NORUNNINGBUF) |
2126 (td->td_pflags & TDP_NORUNNINGBUF);
2127 /* play bufdaemon */
2128 td->td_pflags |= TDP_BUFNEED | TDP_NORUNNINGBUF;
2129 fl = buf_do_flush(vp);
2130 td->td_pflags &= norunbuf;
2131 mtx_lock(&nblock);
2132 if (fl != 0)
2133 continue;
2134 if ((needsbuffer & flags) == 0)
2135 break;
2136 }
2137 if (msleep(&needsbuffer, &nblock, (PRIBIO + 4) | slpflag,
2138 waitmsg, slptimeo))
2139 break;
2140 }
2141 mtx_unlock(&nblock);
2142 }
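
/*
 * Note on the flag dance above (summary, no new behavior): norunbuf
 * masks off TDP_BUFNEED unconditionally and masks off TDP_NORUNNINGBUF
 * only if it was clear on entry, so the final
 * "td->td_pflags &= norunbuf" restores the thread's original
 * TDP_NORUNNINGBUF state after it has played bufdaemon.
 */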
2143
2144 static void
2145 getnewbuf_reuse_bp(struct buf *bp, int qindex)
2146 {
2147
2148 CTR6(KTR_BUF, "getnewbuf(%p) vp %p flags %X kvasize %d bufsize %d "
2149 "queue %d (recycling)", bp, bp->b_vp, bp->b_flags,
2150 bp->b_kvasize, bp->b_bufsize, qindex);
2151 mtx_assert(&bqlock, MA_NOTOWNED);
2152
2153 /*
2154 * Note: we no longer distinguish between VMIO and non-VMIO
2155 * buffers.
2156 */
2157 KASSERT((bp->b_flags & B_DELWRI) == 0,
2158 ("delwri buffer %p found in queue %d", bp, qindex));
2159
2160 if (qindex == QUEUE_CLEAN) {
2161 if (bp->b_flags & B_VMIO) {
2162 bp->b_flags &= ~B_ASYNC;
2163 vfs_vmio_release(bp);
2164 }
2165 if (bp->b_vp != NULL)
2166 brelvp(bp);
2167 }
2168
2169 /*
2170 * Get the rest of the buffer freed up. b_kva* is still valid
2171 * after this operation.
2172 */
2173
2174 if (bp->b_rcred != NOCRED) {
2175 crfree(bp->b_rcred);
2176 bp->b_rcred = NOCRED;
2177 }
2178 if (bp->b_wcred != NOCRED) {
2179 crfree(bp->b_wcred);
2180 bp->b_wcred = NOCRED;
2181 }
2182 if (!LIST_EMPTY(&bp->b_dep))
2183 buf_deallocate(bp);
2184 if (bp->b_vflags & BV_BKGRDINPROG)
2185 panic("losing buffer 3");
2186 KASSERT(bp->b_vp == NULL, ("bp: %p still has vnode %p. qindex: %d",
2187 bp, bp->b_vp, qindex));
2188 KASSERT((bp->b_xflags & (BX_VNCLEAN|BX_VNDIRTY)) == 0,
2189 ("bp: %p still on a buffer list. xflags %X", bp, bp->b_xflags));
2190
2191 if (bp->b_bufsize)
2192 allocbuf(bp, 0);
2193
2194 bp->b_flags &= B_UNMAPPED | B_KVAALLOC;
2195 bp->b_ioflags = 0;
2196 bp->b_xflags = 0;
2197 KASSERT((bp->b_vflags & BV_INFREECNT) == 0,
2198 ("buf %p still counted as free?", bp));
2199 bp->b_vflags = 0;
2200 bp->b_vp = NULL;
2201 bp->b_blkno = bp->b_lblkno = 0;
2202 bp->b_offset = NOOFFSET;
2203 bp->b_iodone = 0;
2204 bp->b_error = 0;
2205 bp->b_resid = 0;
2206 bp->b_bcount = 0;
2207 bp->b_npages = 0;
2208 bp->b_dirtyoff = bp->b_dirtyend = 0;
2209 bp->b_bufobj = NULL;
2210 bp->b_pin_count = 0;
2211 bp->b_fsprivate1 = NULL;
2212 bp->b_fsprivate2 = NULL;
2213 bp->b_fsprivate3 = NULL;
2214
2215 LIST_INIT(&bp->b_dep);
2216 }
2217
2218 static int flushingbufs;
2219
2220 static struct buf *
2221 getnewbuf_scan(int maxsize, int defrag, int unmapped, int metadata)
2222 {
2223 struct buf *bp, *nbp;
2224 int nqindex, qindex, pass;
2225
2226 KASSERT(!unmapped || !defrag, ("both unmapped and defrag"));
2227
2228 pass = 1;
2229 restart:
2230 atomic_add_int(&getnewbufrestarts, 1);
2231
2232 /*
2233	 * Set up for the scan.  If we do not have enough free buffers,
2234	 * we set up a degenerate case that immediately fails.  Note
2235	 * that if we are a specially marked process, we are allowed to
2236 * dip into our reserves.
2237 *
2238 * The scanning sequence is nominally: EMPTY->EMPTYKVA->CLEAN
2239 * for the allocation of the mapped buffer. For unmapped, the
2240 * easiest is to start with EMPTY outright.
2241 *
2242 * We start with EMPTYKVA. If the list is empty we backup to EMPTY.
2243 * However, there are a number of cases (defragging, reusing, ...)
2244 * where we cannot backup.
2245 */
2246 nbp = NULL;
2247 mtx_lock(&bqlock);
2248 if (!defrag && unmapped) {
2249 nqindex = QUEUE_EMPTY;
2250 nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]);
2251 }
2252 if (nbp == NULL) {
2253 nqindex = QUEUE_EMPTYKVA;
2254 nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTYKVA]);
2255 }
2256
2257 /*
2258 * If no EMPTYKVA buffers and we are either defragging or
2259 * reusing, locate a CLEAN buffer to free or reuse. If
2260	 * bufspace usage is low, skip this step so we can allocate a
2261 * new buffer.
2262 */
2263 if (nbp == NULL && (defrag || bufspace >= lobufspace)) {
2264 nqindex = QUEUE_CLEAN;
2265 nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN]);
2266 }
2267
2268 /*
2269 * If we could not find or were not allowed to reuse a CLEAN
2270 * buffer, check to see if it is ok to use an EMPTY buffer.
2271 * We can only use an EMPTY buffer if allocating its KVA would
2272 * not otherwise run us out of buffer space. No KVA is needed
2273 * for the unmapped allocation.
2274 */
2275 if (nbp == NULL && defrag == 0 && (bufspace + maxsize < hibufspace ||
2276 metadata)) {
2277 nqindex = QUEUE_EMPTY;
2278 nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]);
2279 }
2280
2281 /*
2282 * All available buffers might be clean, retry ignoring the
2283	 * lobufspace limit as a last resort.
2284 */
2285 if (nbp == NULL && !TAILQ_EMPTY(&bufqueues[QUEUE_CLEAN])) {
2286 nqindex = QUEUE_CLEAN;
2287 nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN]);
2288 }
2289
2290 /*
2291 * Run scan, possibly freeing data and/or kva mappings on the fly
2292 * depending.
2293 */
2294 while ((bp = nbp) != NULL) {
2295 qindex = nqindex;
2296
2297 /*
2298 * Calculate next bp (we can only use it if we do not
2299 * block or do other fancy things).
2300 */
2301 if ((nbp = TAILQ_NEXT(bp, b_freelist)) == NULL) {
2302 switch (qindex) {
2303 case QUEUE_EMPTY:
2304 nqindex = QUEUE_EMPTYKVA;
2305 nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTYKVA]);
2306 if (nbp != NULL)
2307 break;
2308 /* FALLTHROUGH */
2309 case QUEUE_EMPTYKVA:
2310 nqindex = QUEUE_CLEAN;
2311 nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN]);
2312 if (nbp != NULL)
2313 break;
2314 /* FALLTHROUGH */
2315 case QUEUE_CLEAN:
2316 if (metadata && pass == 1) {
2317 pass = 2;
2318 nqindex = QUEUE_EMPTY;
2319 nbp = TAILQ_FIRST(
2320 &bufqueues[QUEUE_EMPTY]);
2321 }
2322 /*
2323 * nbp is NULL.
2324 */
2325 break;
2326 }
2327 }
2328 /*
2329 * If we are defragging then we need a buffer with
2330 * b_kvasize != 0. XXX this situation should no longer
2331 * occur, if defrag is non-zero the buffer's b_kvasize
2332 * should also be non-zero at this point. XXX
2333 */
2334 if (defrag && bp->b_kvasize == 0) {
2335 printf("Warning: defrag empty buffer %p\n", bp);
2336 continue;
2337 }
2338
2339 /*
2340 * Start freeing the bp. This is somewhat involved. nbp
2341 * remains valid only for QUEUE_EMPTY[KVA] bp's.
2342 */
2343 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
2344 continue;
2345 if (bp->b_vp) {
2346 BO_LOCK(bp->b_bufobj);
2347 if (bp->b_vflags & BV_BKGRDINPROG) {
2348 BO_UNLOCK(bp->b_bufobj);
2349 BUF_UNLOCK(bp);
2350 continue;
2351 }
2352 BO_UNLOCK(bp->b_bufobj);
2353 }
2354
2355 KASSERT(bp->b_qindex == qindex,
2356 ("getnewbuf: inconsistent queue %d bp %p", qindex, bp));
2357
2358 if (bp->b_bufobj != NULL)
2359 BO_LOCK(bp->b_bufobj);
2360 bremfreel(bp);
2361 if (bp->b_bufobj != NULL)
2362 BO_UNLOCK(bp->b_bufobj);
2363 mtx_unlock(&bqlock);
2364 /*
2365 * NOTE: nbp is now entirely invalid. We can only restart
2366 * the scan from this point on.
2367 */
2368
2369 getnewbuf_reuse_bp(bp, qindex);
2370 mtx_assert(&bqlock, MA_NOTOWNED);
2371
2372 /*
2373 * If we are defragging then free the buffer.
2374 */
2375 if (defrag) {
2376 bp->b_flags |= B_INVAL;
2377 bfreekva(bp);
2378 brelse(bp);
2379 defrag = 0;
2380 goto restart;
2381 }
2382
2383 /*
2384 * Notify any waiters for the buffer lock about
2385 * identity change by freeing the buffer.
2386 */
2387 if (qindex == QUEUE_CLEAN && BUF_LOCKWAITERS(bp)) {
2388 bp->b_flags |= B_INVAL;
2389 bfreekva(bp);
2390 brelse(bp);
2391 goto restart;
2392 }
2393
2394 if (metadata)
2395 break;
2396
2397 /*
2398		 * If we are overcommitted then recover the buffer and its
2399 * KVM space. This occurs in rare situations when multiple
2400 * processes are blocked in getnewbuf() or allocbuf().
2401 */
2402 if (bufspace >= hibufspace)
2403 flushingbufs = 1;
2404 if (flushingbufs && bp->b_kvasize != 0) {
2405 bp->b_flags |= B_INVAL;
2406 bfreekva(bp);
2407 brelse(bp);
2408 goto restart;
2409 }
2410 if (bufspace < lobufspace)
2411 flushingbufs = 0;
2412 break;
2413 }
2414 return (bp);
2415 }
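
/*
 * Scan-order summary (describes the logic above, no new behavior): a
 * mapped request starts at QUEUE_EMPTYKVA and falls through to
 * QUEUE_CLEAN and then QUEUE_EMPTY; an unmapped request starts at
 * QUEUE_EMPTY outright; a metadata request gets a second pass over
 * QUEUE_EMPTY when the first pass through QUEUE_CLEAN finds nothing.
 */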
2416
2417 /*
2418 * getnewbuf:
2419 *
2420 * Find and initialize a new buffer header, freeing up existing buffers
2421 * in the bufqueues as necessary. The new buffer is returned locked.
2422 *
2423 * Important: B_INVAL is not set. If the caller wishes to throw the
2424 * buffer away, the caller must set B_INVAL prior to calling brelse().
2425 *
2426 * We block if:
2427 * We have insufficient buffer headers
2428 * We have insufficient buffer space
2429 * buffer_map is too fragmented ( space reservation fails )
2430 * If we have to flush dirty buffers ( but we try to avoid this )
2431 *
2432 * To avoid VFS layer recursion we do not flush dirty buffers ourselves.
2433 * Instead we ask the buf daemon to do it for us. We attempt to
2434 * avoid piecemeal wakeups of the pageout daemon.
2435 */
2436 static struct buf *
2437 getnewbuf(struct vnode *vp, int slpflag, int slptimeo, int size, int maxsize,
2438 int gbflags)
2439 {
2440 struct buf *bp;
2441 int defrag, metadata;
2442
2443 KASSERT((gbflags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_KVAALLOC,
2444 ("GB_KVAALLOC only makes sense with GB_UNMAPPED"));
2445 if (!unmapped_buf_allowed)
2446 gbflags &= ~(GB_UNMAPPED | GB_KVAALLOC);
2447
2448 defrag = 0;
2449 if (vp == NULL || (vp->v_vflag & (VV_MD | VV_SYSTEM)) != 0 ||
2450 vp->v_type == VCHR)
2451 metadata = 1;
2452 else
2453 metadata = 0;
2454 /*
2455 * We can't afford to block since we might be holding a vnode lock,
2456 * which may prevent system daemons from running. We deal with
2457 * low-memory situations by proactively returning memory and running
2458	 * async I/O rather than sync I/O.
2459 */
2460 atomic_add_int(&getnewbufcalls, 1);
2461 atomic_subtract_int(&getnewbufrestarts, 1);
2462 restart:
2463 bp = getnewbuf_scan(maxsize, defrag, (gbflags & (GB_UNMAPPED |
2464 GB_KVAALLOC)) == GB_UNMAPPED, metadata);
2465 if (bp != NULL)
2466 defrag = 0;
2467
2468 /*
2469 * If we exhausted our list, sleep as appropriate. We may have to
2470 * wakeup various daemons and write out some dirty buffers.
2471 *
2472 * Generally we are sleeping due to insufficient buffer space.
2473 */
2474 if (bp == NULL) {
2475 mtx_assert(&bqlock, MA_OWNED);
2476 getnewbuf_bufd_help(vp, gbflags, slpflag, slptimeo, defrag);
2477 mtx_assert(&bqlock, MA_NOTOWNED);
2478 } else if ((gbflags & (GB_UNMAPPED | GB_KVAALLOC)) == GB_UNMAPPED) {
2479 mtx_assert(&bqlock, MA_NOTOWNED);
2480
2481 bfreekva(bp);
2482 bp->b_flags |= B_UNMAPPED;
2483 bp->b_kvabase = bp->b_data = unmapped_buf;
2484 bp->b_kvasize = maxsize;
2485 atomic_add_long(&bufspace, bp->b_kvasize);
2486 atomic_add_long(&unmapped_bufspace, bp->b_kvasize);
2487 atomic_add_int(&bufreusecnt, 1);
2488 } else {
2489 mtx_assert(&bqlock, MA_NOTOWNED);
2490
2491 /*
2492 * We finally have a valid bp. We aren't quite out of the
2493 * woods, we still have to reserve kva space. In order
2494 * to keep fragmentation sane we only allocate kva in
2495 * BKVASIZE chunks.
2496 */
2497 maxsize = (maxsize + BKVAMASK) & ~BKVAMASK;
2498
2499 if (maxsize != bp->b_kvasize || (bp->b_flags & (B_UNMAPPED |
2500 B_KVAALLOC)) == B_UNMAPPED) {
2501 if (allocbufkva(bp, maxsize, gbflags)) {
2502 defrag = 1;
2503 bp->b_flags |= B_INVAL;
2504 brelse(bp);
2505 goto restart;
2506 }
2507 atomic_add_int(&bufreusecnt, 1);
2508 } else if ((bp->b_flags & B_KVAALLOC) != 0 &&
2509 (gbflags & (GB_UNMAPPED | GB_KVAALLOC)) == 0) {
2510 /*
2511 * If the reused buffer has KVA allocated,
2512 * reassign b_kvaalloc to b_kvabase.
2513 */
2514 bp->b_kvabase = bp->b_kvaalloc;
2515 bp->b_flags &= ~B_KVAALLOC;
2516 atomic_subtract_long(&unmapped_bufspace,
2517 bp->b_kvasize);
2518 atomic_add_int(&bufreusecnt, 1);
2519 } else if ((bp->b_flags & (B_UNMAPPED | B_KVAALLOC)) == 0 &&
2520 (gbflags & (GB_UNMAPPED | GB_KVAALLOC)) == (GB_UNMAPPED |
2521 GB_KVAALLOC)) {
2522 /*
2523			 * The reused buffer already has its KVA
2524			 * mapped, but the request is for an unmapped
2525			 * buffer with KVA allocated.
2526 */
2527 bp->b_kvaalloc = bp->b_kvabase;
2528 bp->b_data = bp->b_kvabase = unmapped_buf;
2529 bp->b_flags |= B_UNMAPPED | B_KVAALLOC;
2530 atomic_add_long(&unmapped_bufspace,
2531 bp->b_kvasize);
2532 atomic_add_int(&bufreusecnt, 1);
2533 }
2534 if ((gbflags & GB_UNMAPPED) == 0) {
2535 bp->b_saveaddr = bp->b_kvabase;
2536 bp->b_data = bp->b_saveaddr;
2537 bp->b_flags &= ~B_UNMAPPED;
2538 BUF_CHECK_MAPPED(bp);
2539 }
2540 }
2541 return (bp);
2542 }
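
/*
 * Usage sketch (illustrative, patterned on getblk() below): callers
 * round the KVA request up to cover any page offset and retry when the
 * allocation transiently fails:
 *
 *	maxsize = imax(size + (offset & PAGE_MASK), bsize);
 *	bp = getnewbuf(vp, slpflag, slptimeo, size, maxsize, flags);
 *	if (bp == NULL)
 *		goto loop;
 */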
2543
2544 /*
2545 * buf_daemon:
2546 *
2547 * buffer flushing daemon. Buffers are normally flushed by the
2548 * update daemon but if it cannot keep up this process starts to
2549 * take the load in an attempt to prevent getnewbuf() from blocking.
2550 */
2551
2552 static struct kproc_desc buf_kp = {
2553 "bufdaemon",
2554 buf_daemon,
2555 &bufdaemonproc
2556 };
2557 SYSINIT(bufdaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, kproc_start, &buf_kp);
2558
2559 static int
2560 buf_do_flush(struct vnode *vp)
2561 {
2562 int flushed;
2563
2564 flushed = flushbufqueues(vp, QUEUE_DIRTY, 0);
2565 /* The list empty check here is slightly racy */
2566 if (!TAILQ_EMPTY(&bufqueues[QUEUE_DIRTY_GIANT])) {
2567 mtx_lock(&Giant);
2568 flushed += flushbufqueues(vp, QUEUE_DIRTY_GIANT, 0);
2569 mtx_unlock(&Giant);
2570 }
2571 if (flushed == 0) {
2572 /*
2573 * Could not find any buffers without rollback
2574 * dependencies, so just write the first one
2575 * in the hopes of eventually making progress.
2576 */
2577 flushbufqueues(vp, QUEUE_DIRTY, 1);
2578 if (!TAILQ_EMPTY(
2579 &bufqueues[QUEUE_DIRTY_GIANT])) {
2580 mtx_lock(&Giant);
2581 flushbufqueues(vp, QUEUE_DIRTY_GIANT, 1);
2582 mtx_unlock(&Giant);
2583 }
2584 }
2585 return (flushed);
2586 }
2587
2588 static void
2589 buf_daemon(void)
2590 {
2591 int lodirtysave;
2592
2593 /*
2594 * This process needs to be suspended prior to shutdown sync.
2595 */
2596 EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, bufdaemonproc,
2597 SHUTDOWN_PRI_LAST);
2598
2599 /*
2600 * This process is allowed to take the buffer cache to the limit
2601 */
2602 curthread->td_pflags |= TDP_NORUNNINGBUF | TDP_BUFNEED;
2603 mtx_lock(&bdlock);
2604 for (;;) {
2605 bd_request = 0;
2606 mtx_unlock(&bdlock);
2607
2608 kproc_suspend_check(bufdaemonproc);
2609 lodirtysave = lodirtybuffers;
2610 if (bd_speedupreq) {
2611 lodirtybuffers = numdirtybuffers / 2;
2612 bd_speedupreq = 0;
2613 }
2614 /*
2615 * Do the flush. Limit the amount of in-transit I/O we
2616 * allow to build up, otherwise we would completely saturate
2617 * the I/O system. Wakeup any waiting processes before we
2618 * normally would so they can run in parallel with our drain.
2619 */
2620 while (numdirtybuffers > lodirtybuffers) {
2621 if (buf_do_flush(NULL) == 0)
2622 break;
2623 kern_yield(PRI_UNCHANGED);
2624 }
2625 lodirtybuffers = lodirtysave;
2626
2627 /*
2628 * Only clear bd_request if we have reached our low water
2629 * mark. The buf_daemon normally waits 1 second and
2630 * then incrementally flushes any dirty buffers that have
2631 * built up, within reason.
2632 *
2633 * If we were unable to hit our low water mark and couldn't
2634 * find any flushable buffers, we sleep half a second.
2635 * Otherwise we loop immediately.
2636 */
2637 mtx_lock(&bdlock);
2638 if (numdirtybuffers <= lodirtybuffers) {
2639 /*
2640 * We reached our low water mark, reset the
2641 * request and sleep until we are needed again.
2642 * The sleep is just so the suspend code works.
2643 */
2644 bd_request = 0;
2645 msleep(&bd_request, &bdlock, PVM, "psleep", hz);
2646 } else {
2647 /*
2648 * We couldn't find any flushable dirty buffers but
2649 * still have too many dirty buffers, we
2650 * have to sleep and try again. (rare)
2651 */
2652 msleep(&bd_request, &bdlock, PVM, "qsleep", hz / 10);
2653 }
2654 }
2655 }
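
/*
 * Interaction note (summary of the loop above): bd_speedup() sets
 * bd_speedupreq and typically wakes the daemon, which then temporarily
 * lowers its low water mark to numdirtybuffers / 2 for one flush round
 * before restoring the saved lodirtybuffers value.
 */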
2656
2657 /*
2658 * flushbufqueues:
2659 *
2660 * Try to flush a buffer in the dirty queue. We must be careful to
2661 *	free up B_INVAL buffers instead of writing them, which NFS is
2662 * particularly sensitive to.
2663 */
2664 static int flushwithdeps = 0;
2665 SYSCTL_INT(_vfs, OID_AUTO, flushwithdeps, CTLFLAG_RW, &flushwithdeps,
2666 0, "Number of buffers flushed with dependecies that require rollbacks");
2667
2668 static int
2669 flushbufqueues(struct vnode *lvp, int queue, int flushdeps)
2670 {
2671 struct buf *sentinel;
2672 struct vnode *vp;
2673 struct mount *mp;
2674 struct buf *bp;
2675 int hasdeps;
2676 int flushed;
2677 int target;
2678 int error;
2679 bool unlock;
2680
2681 if (lvp == NULL) {
2682 target = numdirtybuffers - lodirtybuffers;
2683 if (flushdeps && target > 2)
2684 target /= 2;
2685 } else
2686 target = flushbufqtarget;
2687 flushed = 0;
2688 bp = NULL;
2689 sentinel = malloc(sizeof(struct buf), M_TEMP, M_WAITOK | M_ZERO);
2690 sentinel->b_qindex = QUEUE_SENTINEL;
2691 mtx_lock(&bqlock);
2692 TAILQ_INSERT_HEAD(&bufqueues[queue], sentinel, b_freelist);
2693 while (flushed != target) {
2694 bp = TAILQ_NEXT(sentinel, b_freelist);
2695 if (bp != NULL) {
2696 TAILQ_REMOVE(&bufqueues[queue], sentinel, b_freelist);
2697 TAILQ_INSERT_AFTER(&bufqueues[queue], bp, sentinel,
2698 b_freelist);
2699 } else
2700 break;
2701 /*
2702		 * Skip sentinels inserted by other invocations of
2703		 * flushbufqueues(), taking care not to reorder them.
2704 */
2705 if (bp->b_qindex == QUEUE_SENTINEL)
2706 continue;
2707 /*
2708 * Only flush the buffers that belong to the
2709 * vnode locked by the curthread.
2710 */
2711 if (lvp != NULL && bp->b_vp != lvp)
2712 continue;
2713 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
2714 continue;
2715 if (bp->b_pin_count > 0) {
2716 BUF_UNLOCK(bp);
2717 continue;
2718 }
2719 BO_LOCK(bp->b_bufobj);
2720 if ((bp->b_vflags & BV_BKGRDINPROG) != 0 ||
2721 (bp->b_flags & B_DELWRI) == 0) {
2722 BO_UNLOCK(bp->b_bufobj);
2723 BUF_UNLOCK(bp);
2724 continue;
2725 }
2726 BO_UNLOCK(bp->b_bufobj);
2727 if (bp->b_flags & B_INVAL) {
2728 bremfreel(bp);
2729 mtx_unlock(&bqlock);
2730 brelse(bp);
2731 flushed++;
2732 numdirtywakeup((lodirtybuffers + hidirtybuffers) / 2);
2733 mtx_lock(&bqlock);
2734 continue;
2735 }
2736
2737 if (!LIST_EMPTY(&bp->b_dep) && buf_countdeps(bp, 0)) {
2738 if (flushdeps == 0) {
2739 BUF_UNLOCK(bp);
2740 continue;
2741 }
2742 hasdeps = 1;
2743 } else
2744 hasdeps = 0;
2745 /*
2746 * We must hold the lock on a vnode before writing
2747		 * one of its buffers.  Otherwise we may cause
2748		 * confusion or, in the case of a snapshot vnode,
2749		 * deadlock the system.
2750 *
2751 * The lock order here is the reverse of the normal
2752		 * order of vnode lock followed by buf lock.  This is ok because
2753 * the NOWAIT will prevent deadlock.
2754 */
2755 vp = bp->b_vp;
2756 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
2757 BUF_UNLOCK(bp);
2758 continue;
2759 }
2760 if (lvp == NULL) {
2761 unlock = true;
2762 error = vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT);
2763 } else {
2764 ASSERT_VOP_LOCKED(vp, "getbuf");
2765 unlock = false;
2766 error = VOP_ISLOCKED(vp) == LK_EXCLUSIVE ? 0 :
2767 vn_lock(vp, LK_TRYUPGRADE);
2768 }
2769 if (error == 0) {
2770 mtx_unlock(&bqlock);
2771 CTR3(KTR_BUF, "flushbufqueue(%p) vp %p flags %X",
2772 bp, bp->b_vp, bp->b_flags);
2773 if (curproc == bufdaemonproc)
2774 vfs_bio_awrite(bp);
2775 else {
2776 bremfree(bp);
2777 bwrite(bp);
2778 notbufdflashes++;
2779 }
2780 vn_finished_write(mp);
2781 if (unlock)
2782 VOP_UNLOCK(vp, 0);
2783 flushwithdeps += hasdeps;
2784 flushed++;
2785
2786 /*
2787 * Sleeping on runningbufspace while holding
2788 * vnode lock leads to deadlock.
2789 */
2790 if (curproc == bufdaemonproc)
2791 waitrunningbufspace();
2792 numdirtywakeup((lodirtybuffers + hidirtybuffers) / 2);
2793 mtx_lock(&bqlock);
2794 continue;
2795 }
2796 vn_finished_write(mp);
2797 BUF_UNLOCK(bp);
2798 }
2799 TAILQ_REMOVE(&bufqueues[queue], sentinel, b_freelist);
2800 mtx_unlock(&bqlock);
2801 free(sentinel, M_TEMP);
2802 return (flushed);
2803 }
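
/*
 * Traversal note (summary of the sentinel trick above): because bqlock
 * is dropped around each flush, the per-call sentinel is re-inserted
 * after every visited buffer, so the scan can always resume at
 * TAILQ_NEXT(sentinel, b_freelist) without holding any buffer
 * reference across the unlock.
 */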
2804
2805 /*
2806 * Check to see if a block is currently memory resident.
2807 */
2808 struct buf *
2809 incore(struct bufobj *bo, daddr_t blkno)
2810 {
2811 struct buf *bp;
2812
2813 BO_LOCK(bo);
2814 bp = gbincore(bo, blkno);
2815 BO_UNLOCK(bo);
2816 return (bp);
2817 }
2818
2819 /*
2820 * Returns true if no I/O is needed to access the
2821 * associated VM object. This is like incore except
2822 * it also hunts around in the VM system for the data.
2823 */
2824
2825 static int
2826 inmem(struct vnode * vp, daddr_t blkno)
2827 {
2828 vm_object_t obj;
2829 vm_offset_t toff, tinc, size;
2830 vm_page_t m;
2831 vm_ooffset_t off;
2832
2833 ASSERT_VOP_LOCKED(vp, "inmem");
2834
2835 if (incore(&vp->v_bufobj, blkno))
2836 return 1;
2837 if (vp->v_mount == NULL)
2838 return 0;
2839 obj = vp->v_object;
2840 if (obj == NULL)
2841 return (0);
2842
2843 size = PAGE_SIZE;
2844 if (size > vp->v_mount->mnt_stat.f_iosize)
2845 size = vp->v_mount->mnt_stat.f_iosize;
2846 off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;
2847
2848 VM_OBJECT_LOCK(obj);
2849 for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
2850 m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
2851 if (!m)
2852 goto notinmem;
2853 tinc = size;
2854 if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK))
2855 tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK);
2856 if (vm_page_is_valid(m,
2857 (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
2858 goto notinmem;
2859 }
2860 VM_OBJECT_UNLOCK(obj);
2861 return 1;
2862
2863 notinmem:
2864 VM_OBJECT_UNLOCK(obj);
2865 return (0);
2866 }
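
/*
 * Worked example (assumed values, for illustration only): with
 * f_iosize = 8K and PAGE_SIZE = 4K, off = blkno * 8K is page aligned,
 * so the loop above makes two iterations with tinc = 4K, requiring both
 * backing pages to be fully valid before the block is declared memory
 * resident.
 */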
2867
2868 /*
2869 * Set the dirty range for a buffer based on the status of the dirty
2870 * bits in the pages comprising the buffer. The range is limited
2871 * to the size of the buffer.
2872 *
2873 * Tell the VM system that the pages associated with this buffer
2874 * are clean. This is used for delayed writes where the data is
2875 *	going to go to disk eventually without additional VM intervention.
2876 *
2877 * Note that while we only really need to clean through to b_bcount, we
2878 * just go ahead and clean through to b_bufsize.
2879 */
2880 static void
2881 vfs_clean_pages_dirty_buf(struct buf *bp)
2882 {
2883 vm_ooffset_t foff, noff, eoff;
2884 vm_page_t m;
2885 int i;
2886
2887 if ((bp->b_flags & B_VMIO) == 0 || bp->b_bufsize == 0)
2888 return;
2889
2890 foff = bp->b_offset;
2891 KASSERT(bp->b_offset != NOOFFSET,
2892 ("vfs_clean_pages_dirty_buf: no buffer offset"));
2893
2894 VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
2895 vfs_drain_busy_pages(bp);
2896 vfs_setdirty_locked_object(bp);
2897 for (i = 0; i < bp->b_npages; i++) {
2898 noff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
2899 eoff = noff;
2900 if (eoff > bp->b_offset + bp->b_bufsize)
2901 eoff = bp->b_offset + bp->b_bufsize;
2902 m = bp->b_pages[i];
2903 vfs_page_set_validclean(bp, foff, m);
2904 /* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */
2905 foff = noff;
2906 }
2907 VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
2908 }
2909
2910 static void
2911 vfs_setdirty_locked_object(struct buf *bp)
2912 {
2913 vm_object_t object;
2914 int i;
2915
2916 object = bp->b_bufobj->bo_object;
2917 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
2918
2919 /*
2920 * We qualify the scan for modified pages on whether the
2921 * object has been flushed yet.
2922 */
2923 if ((object->flags & OBJ_MIGHTBEDIRTY) != 0) {
2924 vm_offset_t boffset;
2925 vm_offset_t eoffset;
2926
2927 /*
2928 * test the pages to see if they have been modified directly
2929 * by users through the VM system.
2930 */
2931 for (i = 0; i < bp->b_npages; i++)
2932 vm_page_test_dirty(bp->b_pages[i]);
2933
2934 /*
2935 * Calculate the encompassing dirty range, boffset and eoffset,
2936 * (eoffset - boffset) bytes.
2937 */
2938
2939 for (i = 0; i < bp->b_npages; i++) {
2940 if (bp->b_pages[i]->dirty)
2941 break;
2942 }
2943 boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
2944
2945 for (i = bp->b_npages - 1; i >= 0; --i) {
2946 if (bp->b_pages[i]->dirty) {
2947 break;
2948 }
2949 }
2950 eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
2951
2952 /*
2953 * Fit it to the buffer.
2954 */
2955
2956 if (eoffset > bp->b_bcount)
2957 eoffset = bp->b_bcount;
2958
2959 /*
2960 * If we have a good dirty range, merge with the existing
2961 * dirty range.
2962 */
2963
2964 if (boffset < eoffset) {
2965 if (bp->b_dirtyoff > boffset)
2966 bp->b_dirtyoff = boffset;
2967 if (bp->b_dirtyend < eoffset)
2968 bp->b_dirtyend = eoffset;
2969 }
2970 }
2971 }
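
/*
 * Worked example (assumed values, for illustration only): for a buffer
 * with a page-aligned b_offset, b_npages = 4 and only page 2 dirty, the
 * two scans above yield boffset = 2 * PAGE_SIZE and
 * eoffset = 3 * PAGE_SIZE; eoffset is then clipped to b_bcount and the
 * result is merged into [b_dirtyoff, b_dirtyend).
 */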
2972
2973 /*
2974 * Allocate the KVA mapping for an existing buffer. It handles the
2975 * cases of both a B_UNMAPPED buffer and a buffer with
2976 * preallocated but unmapped KVA (B_KVAALLOC).
2977 */
2978 static void
2979 bp_unmapped_get_kva(struct buf *bp, daddr_t blkno, int size, int gbflags)
2980 {
2981 struct buf *scratch_bp;
2982 int bsize, maxsize, need_mapping, need_kva;
2983 off_t offset;
2984
2985 need_mapping = (bp->b_flags & B_UNMAPPED) != 0 &&
2986 (gbflags & GB_UNMAPPED) == 0;
2987 need_kva = (bp->b_flags & (B_KVAALLOC | B_UNMAPPED)) == B_UNMAPPED &&
2988 (gbflags & GB_KVAALLOC) != 0;
2989 if (!need_mapping && !need_kva)
2990 return;
2991
2992 BUF_CHECK_UNMAPPED(bp);
2993
2994 if (need_mapping && (bp->b_flags & B_KVAALLOC) != 0) {
2995 /*
2996 * Buffer is not mapped, but the KVA was already
2997 * reserved at the time of the instantiation. Use the
2998 * allocated space.
2999 */
3000 bp->b_flags &= ~B_KVAALLOC;
3001 KASSERT(bp->b_kvaalloc != 0, ("kvaalloc == 0"));
3002 bp->b_kvabase = bp->b_kvaalloc;
3003 atomic_subtract_long(&unmapped_bufspace, bp->b_kvasize);
3004 goto has_addr;
3005 }
3006
3007 /*
3008 * Calculate the amount of the address space we would reserve
3009 * if the buffer was mapped.
3010 */
3011 bsize = vn_isdisk(bp->b_vp, NULL) ? DEV_BSIZE : bp->b_bufobj->bo_bsize;
3012 offset = blkno * bsize;
3013 maxsize = size + (offset & PAGE_MASK);
3014 maxsize = imax(maxsize, bsize);
3015
3016 mapping_loop:
3017 if (allocbufkva(bp, maxsize, gbflags)) {
3018 /*
3019 * Request defragmentation. getnewbuf() returns us the
3020 * allocated space by the scratch buffer KVA.
3021 */
3022 scratch_bp = getnewbuf(bp->b_vp, 0, 0, size, maxsize, gbflags |
3023 (GB_UNMAPPED | GB_KVAALLOC));
3024 if (scratch_bp == NULL) {
3025 if ((gbflags & GB_NOWAIT_BD) != 0) {
3026 /*
3027 * XXXKIB: defragmentation cannot
3028 * succeed, not sure what else to do.
3029 */
3030 panic("GB_NOWAIT_BD and B_UNMAPPED %p", bp);
3031 }
3032 atomic_add_int(&mappingrestarts, 1);
3033 goto mapping_loop;
3034 }
3035 KASSERT((scratch_bp->b_flags & B_KVAALLOC) != 0,
3036 ("scratch bp !B_KVAALLOC %p", scratch_bp));
3037 setbufkva(bp, (vm_offset_t)scratch_bp->b_kvaalloc,
3038 scratch_bp->b_kvasize, gbflags);
3039
3040 /* Get rid of the scratch buffer. */
3041 scratch_bp->b_kvasize = 0;
3042 scratch_bp->b_flags |= B_INVAL;
3043 scratch_bp->b_flags &= ~(B_UNMAPPED | B_KVAALLOC);
3044 brelse(scratch_bp);
3045 }
3046 if (!need_mapping)
3047 return;
3048
3049 has_addr:
3050 bp->b_saveaddr = bp->b_kvabase;
3051 bp->b_data = bp->b_saveaddr; /* b_offset is handled by bpmap_qenter */
3052 bp->b_flags &= ~B_UNMAPPED;
3053 BUF_CHECK_MAPPED(bp);
3054 bpmap_qenter(bp);
3055 }
3056
3057 /*
3058 * getblk:
3059 *
3060 * Get a block given a specified block and offset into a file/device.
3061 *	The buffer's B_DONE bit will be cleared on return, making it almost
3062 * ready for an I/O initiation. B_INVAL may or may not be set on
3063 * return. The caller should clear B_INVAL prior to initiating a
3064 * READ.
3065 *
3066 * For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for
3067 * an existing buffer.
3068 *
3069 * For a VMIO buffer, B_CACHE is modified according to the backing VM.
3070 * If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set
3071 * and then cleared based on the backing VM. If the previous buffer is
3072 * non-0-sized but invalid, B_CACHE will be cleared.
3073 *
3074 * If getblk() must create a new buffer, the new buffer is returned with
3075 * both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which
3076 * case it is returned with B_INVAL clear and B_CACHE set based on the
3077 * backing VM.
3078 *
3079 *	getblk() also forces a bwrite() for any B_DELWRI buffer whose
3080 * B_CACHE bit is clear.
3081 *
3082 * What this means, basically, is that the caller should use B_CACHE to
3083 * determine whether the buffer is fully valid or not and should clear
3084 * B_INVAL prior to issuing a read. If the caller intends to validate
3085 * the buffer by loading its data area with something, the caller needs
3086 * to clear B_INVAL. If the caller does this without issuing an I/O,
3087 * the caller should set B_CACHE ( as an optimization ), else the caller
3088 * should issue the I/O and biodone() will set B_CACHE if the I/O was
3089 *	a write attempt or if it was a successful read.  If the caller
3090 * intends to issue a READ, the caller must clear B_INVAL and BIO_ERROR
3091 * prior to issuing the READ. biodone() will *not* clear B_INVAL.
3092 */
3093 struct buf *
3094 getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo,
3095 int flags)
3096 {
3097 struct buf *bp;
3098 struct bufobj *bo;
3099 int bsize, error, maxsize, vmio;
3100 off_t offset;
3101
3102 CTR3(KTR_BUF, "getblk(%p, %ld, %d)", vp, (long)blkno, size);
3103 KASSERT((flags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_KVAALLOC,
3104 ("GB_KVAALLOC only makes sense with GB_UNMAPPED"));
3105 ASSERT_VOP_LOCKED(vp, "getblk");
3106 if (size > MAXBSIZE)
3107 panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE);
3108 if (!unmapped_buf_allowed)
3109 flags &= ~(GB_UNMAPPED | GB_KVAALLOC);
3110
3111 bo = &vp->v_bufobj;
3112 loop:
3113 /*
3114 * Block if we are low on buffers. Certain processes are allowed
3115 * to completely exhaust the buffer cache.
3116 *
3117 * If this check ever becomes a bottleneck it may be better to
3118 * move it into the else, when gbincore() fails. At the moment
3119 * it isn't a problem.
3120 */
3121 if (numfreebuffers == 0) {
3122 if (TD_IS_IDLETHREAD(curthread))
3123 return NULL;
3124 mtx_lock(&nblock);
3125 needsbuffer |= VFS_BIO_NEED_ANY;
3126 mtx_unlock(&nblock);
3127 }
3128
3129 BO_LOCK(bo);
3130 bp = gbincore(bo, blkno);
3131 if (bp != NULL) {
3132 int lockflags;
3133 /*
3134 * Buffer is in-core. If the buffer is not busy, it must
3135 * be on a queue.
3136 */
3137 lockflags = LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK;
3138
3139 if (flags & GB_LOCK_NOWAIT)
3140 lockflags |= LK_NOWAIT;
3141
3142 error = BUF_TIMELOCK(bp, lockflags,
3143 BO_MTX(bo), "getblk", slpflag, slptimeo);
3144
3145 /*
3146 * If we slept and got the lock we have to restart in case
3147 * the buffer changed identities.
3148 */
3149 if (error == ENOLCK)
3150 goto loop;
3151 /* We timed out or were interrupted. */
3152 else if (error)
3153 return (NULL);
3154 /* If recursed, assume caller knows the rules. */
3155 else if (BUF_LOCKRECURSED(bp))
3156 goto end;
3157
3158 /*
3159 * The buffer is locked. B_CACHE is cleared if the buffer is
3160 * invalid. Otherwise, for a non-VMIO buffer, B_CACHE is set
3161 * and for a VMIO buffer B_CACHE is adjusted according to the
3162 * backing VM cache.
3163 */
3164 if (bp->b_flags & B_INVAL)
3165 bp->b_flags &= ~B_CACHE;
3166 else if ((bp->b_flags & (B_VMIO | B_INVAL)) == 0)
3167 bp->b_flags |= B_CACHE;
3168 BO_LOCK(bo);
3169 bremfree(bp);
3170 BO_UNLOCK(bo);
3171
3172 /*
3173 * check for size inconsistencies for non-VMIO case.
3174 */
3175 if (bp->b_bcount != size) {
3176 if ((bp->b_flags & B_VMIO) == 0 ||
3177 (size > bp->b_kvasize)) {
3178 if (bp->b_flags & B_DELWRI) {
3179 /*
3180 * If buffer is pinned and caller does
3181					 * not want to sleep waiting for it to be
3182					 * unpinned, bail out.
3183					 */
3184 if (bp->b_pin_count > 0) {
3185 if (flags & GB_LOCK_NOWAIT) {
3186 bqrelse(bp);
3187 return (NULL);
3188 } else {
3189 bunpin_wait(bp);
3190 }
3191 }
3192 bp->b_flags |= B_NOCACHE;
3193 bwrite(bp);
3194 } else {
3195 if (LIST_EMPTY(&bp->b_dep)) {
3196 bp->b_flags |= B_RELBUF;
3197 brelse(bp);
3198 } else {
3199 bp->b_flags |= B_NOCACHE;
3200 bwrite(bp);
3201 }
3202 }
3203 goto loop;
3204 }
3205 }
3206
3207 /*
3208 * Handle the case of unmapped buffer which should
3209 * become mapped, or the buffer for which KVA
3210 * reservation is requested.
3211 */
3212 bp_unmapped_get_kva(bp, blkno, size, flags);
3213
3214 /*
3215		 * If the size is inconsistent in the VMIO case, we can resize
3216 * the buffer. This might lead to B_CACHE getting set or
3217 * cleared. If the size has not changed, B_CACHE remains
3218 * unchanged from its previous state.
3219 */
3220 if (bp->b_bcount != size)
3221 allocbuf(bp, size);
3222
3223 KASSERT(bp->b_offset != NOOFFSET,
3224 ("getblk: no buffer offset"));
3225
3226 /*
3227 * A buffer with B_DELWRI set and B_CACHE clear must
3228 * be committed before we can return the buffer in
3229 * order to prevent the caller from issuing a read
3230 * ( due to B_CACHE not being set ) and overwriting
3231 * it.
3232 *
3233 * Most callers, including NFS and FFS, need this to
3234 * operate properly either because they assume they
3235 * can issue a read if B_CACHE is not set, or because
3236 * ( for example ) an uncached B_DELWRI might loop due
3237 * to softupdates re-dirtying the buffer. In the latter
3238 * case, B_CACHE is set after the first write completes,
3239 * preventing further loops.
3240 * NOTE! b*write() sets B_CACHE. If we cleared B_CACHE
3241 * above while extending the buffer, we cannot allow the
3242 * buffer to remain with B_CACHE set after the write
3243 * completes or it will represent a corrupt state. To
3244 * deal with this we set B_NOCACHE to scrap the buffer
3245 * after the write.
3246 *
3247 * We might be able to do something fancy, like setting
3248 * B_CACHE in bwrite() except if B_DELWRI is already set,
3249 * so the below call doesn't set B_CACHE, but that gets real
3250 * confusing. This is much easier.
3251 */
3252
3253 if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) {
3254 bp->b_flags |= B_NOCACHE;
3255 bwrite(bp);
3256 goto loop;
3257 }
3258 bp->b_flags &= ~B_DONE;
3259 } else {
3260 /*
3261 * Buffer is not in-core, create new buffer. The buffer
3262 * returned by getnewbuf() is locked. Note that the returned
3263 * buffer is also considered valid (not marked B_INVAL).
3264 */
3265 BO_UNLOCK(bo);
3266 /*
3267 * If the user does not want us to create the buffer, bail out
3268 * here.
3269 */
3270 if (flags & GB_NOCREAT)
3271 return NULL;
3272 bsize = vn_isdisk(vp, NULL) ? DEV_BSIZE : bo->bo_bsize;
3273 offset = blkno * bsize;
3274 vmio = vp->v_object != NULL;
3275 if (vmio) {
3276 maxsize = size + (offset & PAGE_MASK);
3277 } else {
3278 maxsize = size;
3279			/* Do not allow non-VMIO unmapped buffers. */
3280 flags &= ~GB_UNMAPPED;
3281 }
3282 maxsize = imax(maxsize, bsize);
3283
3284 bp = getnewbuf(vp, slpflag, slptimeo, size, maxsize, flags);
3285 if (bp == NULL) {
3286 if (slpflag || slptimeo)
3287 return NULL;
3288 goto loop;
3289 }
3290
3291 /*
3292 * This code is used to make sure that a buffer is not
3293 * created while the getnewbuf routine is blocked.
3294 * This can be a problem whether the vnode is locked or not.
3295 * If the buffer is created out from under us, we have to
3296 * throw away the one we just created.
3297 *
3298 * Note: this must occur before we associate the buffer
3299 * with the vp especially considering limitations in
3300 * the splay tree implementation when dealing with duplicate
3301 * lblkno's.
3302 */
3303 BO_LOCK(bo);
3304 if (gbincore(bo, blkno)) {
3305 BO_UNLOCK(bo);
3306 bp->b_flags |= B_INVAL;
3307 brelse(bp);
3308 goto loop;
3309 }
3310
3311 /*
3312 * Insert the buffer into the hash, so that it can
3313 * be found by incore.
3314 */
3315 bp->b_blkno = bp->b_lblkno = blkno;
3316 bp->b_offset = offset;
3317 bgetvp(vp, bp);
3318 BO_UNLOCK(bo);
3319
3320 /*
3321 * set B_VMIO bit. allocbuf() the buffer bigger. Since the
3322 * buffer size starts out as 0, B_CACHE will be set by
3323 * allocbuf() for the VMIO case prior to it testing the
3324 * backing store for validity.
3325 */
3326
3327 if (vmio) {
3328 bp->b_flags |= B_VMIO;
3329 KASSERT(vp->v_object == bp->b_bufobj->bo_object,
3330 ("ARGH! different b_bufobj->bo_object %p %p %p\n",
3331 bp, vp->v_object, bp->b_bufobj->bo_object));
3332 } else {
3333 bp->b_flags &= ~B_VMIO;
3334 KASSERT(bp->b_bufobj->bo_object == NULL,
3335 ("ARGH! has b_bufobj->bo_object %p %p\n",
3336 bp, bp->b_bufobj->bo_object));
3337 BUF_CHECK_MAPPED(bp);
3338 }
3339
3340 allocbuf(bp, size);
3341 bp->b_flags &= ~B_DONE;
3342 }
3343 CTR4(KTR_BUF, "getblk(%p, %ld, %d) = %p", vp, (long)blkno, size, bp);
3344 BUF_ASSERT_HELD(bp);
3345 end:
3346 KASSERT(bp->b_bufobj == bo,
3347 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
3348 return (bp);
3349 }
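
/*
 * Usage sketch (illustrative, simplified bread()-style read path;
 * error handling and credentials omitted):
 *
 *	bp = getblk(vp, lblkno, size, 0, 0, 0);
 *	if ((bp->b_flags & B_CACHE) == 0) {
 *		bp->b_iocmd = BIO_READ;
 *		bp->b_flags &= ~B_INVAL;
 *		bp->b_ioflags &= ~BIO_ERROR;
 *		vfs_busy_pages(bp, 0);
 *		bp->b_iooffset = dbtob(bp->b_blkno);
 *		bstrategy(bp);
 *		error = bufwait(bp);
 *	}
 */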
3350
3351 /*
3352 * Get an empty, disassociated buffer of given size. The buffer is initially
3353 * set to B_INVAL.
3354 */
3355 struct buf *
3356 geteblk(int size, int flags)
3357 {
3358 struct buf *bp;
3359 int maxsize;
3360
3361 maxsize = (size + BKVAMASK) & ~BKVAMASK;
3362 while ((bp = getnewbuf(NULL, 0, 0, size, maxsize, flags)) == NULL) {
3363 if ((flags & GB_NOWAIT_BD) &&
3364 (curthread->td_pflags & TDP_BUFNEED) != 0)
3365 return (NULL);
3366 }
3367 allocbuf(bp, size);
3368 bp->b_flags |= B_INVAL; /* b_dep cleared by getnewbuf() */
3369 BUF_ASSERT_HELD(bp);
3370 return (bp);
3371 }
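
/*
 * Usage sketch (illustrative): a transient scratch buffer is taken,
 * used, and destroyed rather than cached; B_INVAL is already set, so
 * brelse() tears it down:
 *
 *	bp = geteblk(size, 0);
 *	... fill and use bp->b_data ...
 *	brelse(bp);
 */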
3372
3373
3374 /*
3375 * This code constructs the buffer memory from either anonymous system
3376 * memory (in the case of non-VMIO operations) or from an associated
3377 * VM object (in the case of VMIO operations). This code is able to
3378 * resize a buffer up or down.
3379 *
3380 * Note that this code is tricky, and has many complications to resolve
3381 * deadlock or inconsistent data situations.  Tread lightly!!!
3382 * There are B_CACHE and B_DELWRI interactions that must be dealt with by
3383 * the caller.  Calling this code willy-nilly can result in the loss of data.
3384 *
3385 * allocbuf() only adjusts B_CACHE for VMIO buffers. getblk() deals with
3386 * B_CACHE for the non-VMIO case.
3387 */
3388
3389 int
3390 allocbuf(struct buf *bp, int size)
3391 {
3392 int newbsize, mbsize;
3393 int i;
3394
3395 BUF_ASSERT_HELD(bp);
3396
3397 if (bp->b_kvasize < size)
3398 panic("allocbuf: buffer too small");
3399
3400 if ((bp->b_flags & B_VMIO) == 0) {
3401 caddr_t origbuf;
3402 int origbufsize;
3403 /*
3404 * Just get anonymous memory from the kernel. Don't
3405 * mess with B_CACHE.
3406 */
3407 mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
3408 if (bp->b_flags & B_MALLOC)
3409 newbsize = mbsize;
3410 else
3411 newbsize = round_page(size);
3412
3413 if (newbsize < bp->b_bufsize) {
3414 /*
3415 * malloced buffers are not shrunk
3416 */
3417 if (bp->b_flags & B_MALLOC) {
3418 if (newbsize) {
3419 bp->b_bcount = size;
3420 } else {
3421 free(bp->b_data, M_BIOBUF);
3422 if (bp->b_bufsize) {
3423 atomic_subtract_long(
3424 &bufmallocspace,
3425 bp->b_bufsize);
3426 bufspacewakeup();
3427 bp->b_bufsize = 0;
3428 }
3429 bp->b_saveaddr = bp->b_kvabase;
3430 bp->b_data = bp->b_saveaddr;
3431 bp->b_bcount = 0;
3432 bp->b_flags &= ~B_MALLOC;
3433 }
3434 return 1;
3435 }
3436 vm_hold_free_pages(bp, newbsize);
3437 } else if (newbsize > bp->b_bufsize) {
3438 /*
3439 * We only use malloced memory on the first allocation.
3440 * and revert to page-allocated memory when the buffer
3441 * grows.
3442 */
3443 /*
3444 * There is a potential smp race here that could lead
3445 * to bufmallocspace slightly passing the max. It
3446 * is probably extremely rare and not worth worrying
3447 * over.
3448 */
3449 if ( (bufmallocspace < maxbufmallocspace) &&
3450 (bp->b_bufsize == 0) &&
3451 (mbsize <= PAGE_SIZE/2)) {
3452
3453 bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
3454 bp->b_bufsize = mbsize;
3455 bp->b_bcount = size;
3456 bp->b_flags |= B_MALLOC;
3457 atomic_add_long(&bufmallocspace, mbsize);
3458 return 1;
3459 }
3460 origbuf = NULL;
3461 origbufsize = 0;
3462 /*
3463 * If the buffer is growing on its other-than-first allocation,
3464 * then we revert to the page-allocation scheme.
3465 */
3466 if (bp->b_flags & B_MALLOC) {
3467 origbuf = bp->b_data;
3468 origbufsize = bp->b_bufsize;
3469 bp->b_data = bp->b_kvabase;
3470 if (bp->b_bufsize) {
3471 atomic_subtract_long(&bufmallocspace,
3472 bp->b_bufsize);
3473 bufspacewakeup();
3474 bp->b_bufsize = 0;
3475 }
3476 bp->b_flags &= ~B_MALLOC;
3477 newbsize = round_page(newbsize);
3478 }
3479 vm_hold_load_pages(
3480 bp,
3481 (vm_offset_t) bp->b_data + bp->b_bufsize,
3482 (vm_offset_t) bp->b_data + newbsize);
3483 if (origbuf) {
3484 bcopy(origbuf, bp->b_data, origbufsize);
3485 free(origbuf, M_BIOBUF);
3486 }
3487 }
3488 } else {
3489 int desiredpages;
3490
3491 newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
3492 desiredpages = (size == 0) ? 0 :
3493 num_pages((bp->b_offset & PAGE_MASK) + newbsize);
3494
3495 if (bp->b_flags & B_MALLOC)
3496 panic("allocbuf: VMIO buffer can't be malloced");
3497 /*
3498 * Set B_CACHE initially if buffer is 0 length or will become
3499 * 0-length.
3500 */
3501 if (size == 0 || bp->b_bufsize == 0)
3502 bp->b_flags |= B_CACHE;
3503
3504 if (newbsize < bp->b_bufsize) {
3505 /*
3506			 * DEV_BSIZE aligned new buffer size is less than the
3507 * DEV_BSIZE aligned existing buffer size. Figure out
3508 * if we have to remove any pages.
3509 */
3510 if (desiredpages < bp->b_npages) {
3511 vm_page_t m;
3512
3513 if ((bp->b_flags & B_UNMAPPED) == 0) {
3514 BUF_CHECK_MAPPED(bp);
3515 pmap_qremove((vm_offset_t)trunc_page(
3516 (vm_offset_t)bp->b_data) +
3517 (desiredpages << PAGE_SHIFT),
3518 (bp->b_npages - desiredpages));
3519 } else
3520 BUF_CHECK_UNMAPPED(bp);
3521 VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
3522 for (i = desiredpages; i < bp->b_npages; i++) {
3523 /*
3524 * the page is not freed here -- it
3525 * is the responsibility of
3526 * vnode_pager_setsize
3527 */
3528 m = bp->b_pages[i];
3529 KASSERT(m != bogus_page,
3530 ("allocbuf: bogus page found"));
3531 while (vm_page_sleep_if_busy(m, TRUE,
3532 "biodep"))
3533 continue;
3534
3535 bp->b_pages[i] = NULL;
3536 vm_page_lock(m);
3537 vm_page_unwire(m, 0);
3538 vm_page_unlock(m);
3539 }
3540 VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
3541 bp->b_npages = desiredpages;
3542 }
3543 } else if (size > bp->b_bcount) {
3544 /*
3545 * We are growing the buffer, possibly in a
3546 * byte-granular fashion.
3547 */
3548 vm_object_t obj;
3549 vm_offset_t toff;
3550 vm_offset_t tinc;
3551
3552 /*
3553 * Step 1, bring in the VM pages from the object,
3554 * allocating them if necessary. We must clear
3555 * B_CACHE if these pages are not valid for the
3556 * range covered by the buffer.
3557 */
3558
3559 obj = bp->b_bufobj->bo_object;
3560
3561 VM_OBJECT_LOCK(obj);
3562 while (bp->b_npages < desiredpages) {
3563 vm_page_t m;
3564
3565 /*
3566 * We must allocate system pages since blocking
3567 * here could interfere with paging I/O, no
3568 * matter which process we are.
3569 *
3570 * We can only test VPO_BUSY here. Blocking on
3571 * m->busy might lead to a deadlock:
3572 * vm_fault->getpages->cluster_read->allocbuf
3573 * Thus, we specify VM_ALLOC_IGN_SBUSY.
3574 */
3575 m = vm_page_grab(obj, OFF_TO_IDX(bp->b_offset) +
3576 bp->b_npages, VM_ALLOC_NOBUSY |
3577 VM_ALLOC_SYSTEM | VM_ALLOC_WIRED |
3578 VM_ALLOC_RETRY | VM_ALLOC_IGN_SBUSY |
3579 VM_ALLOC_COUNT(desiredpages - bp->b_npages));
3580 if (m->valid == 0)
3581 bp->b_flags &= ~B_CACHE;
3582 bp->b_pages[bp->b_npages] = m;
3583 ++bp->b_npages;
3584 }
3585
3586 /*
3587 * Step 2. We've loaded the pages into the buffer,
3588 * we have to figure out if we can still have B_CACHE
3589 * set. Note that B_CACHE is set according to the
3590			 * byte-granular range ( bcount and size ), not the
3591 * aligned range ( newbsize ).
3592 *
3593 * The VM test is against m->valid, which is DEV_BSIZE
3594 * aligned. Needless to say, the validity of the data
3595 * needs to also be DEV_BSIZE aligned. Note that this
3596 * fails with NFS if the server or some other client
3597 * extends the file's EOF. If our buffer is resized,
3598 * B_CACHE may remain set! XXX
3599 */
3600
3601 toff = bp->b_bcount;
3602 tinc = PAGE_SIZE - ((bp->b_offset + toff) & PAGE_MASK);
3603
3604 while ((bp->b_flags & B_CACHE) && toff < size) {
3605 vm_pindex_t pi;
3606
3607 if (tinc > (size - toff))
3608 tinc = size - toff;
3609
3610 pi = ((bp->b_offset & PAGE_MASK) + toff) >>
3611 PAGE_SHIFT;
3612
3613 vfs_buf_test_cache(
3614 bp,
3615 bp->b_offset,
3616 toff,
3617 tinc,
3618 bp->b_pages[pi]
3619 );
3620 toff += tinc;
3621 tinc = PAGE_SIZE;
3622 }
3623 VM_OBJECT_UNLOCK(obj);
3624
3625 /*
3626			 * Step 3, fix up the KVM pmap.
3627 */
3628 if ((bp->b_flags & B_UNMAPPED) == 0)
3629 bpmap_qenter(bp);
3630 else
3631 BUF_CHECK_UNMAPPED(bp);
3632 }
3633 }
3634 if (newbsize < bp->b_bufsize)
3635 bufspacewakeup();
3636 bp->b_bufsize = newbsize; /* actual buffer allocation */
3637 bp->b_bcount = size; /* requested buffer size */
3638 return 1;
3639 }
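
/*
 * Worked example (assumed values, for illustration only): growing a
 * VMIO buffer to size = 6K with PAGE_SIZE = 4K and
 * (b_offset & PAGE_MASK) = 2K gives newbsize = 6K and desiredpages =
 * num_pages(2K + 6K) = 2, so the grow path above grabs and wires pages
 * until b_npages reaches 2, then re-enters them into the pmap when the
 * buffer is mapped.
 */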
3640
3641 extern int inflight_transient_maps;
3642
3643 void
3644 biodone(struct bio *bp)
3645 {
3646 struct mtx *mtxp;
3647 void (*done)(struct bio *);
3648 vm_offset_t start, end;
3649 int transient;
3650
3651 mtxp = mtx_pool_find(mtxpool_sleep, bp);
3652 mtx_lock(mtxp);
3653 bp->bio_flags |= BIO_DONE;
3654 if ((bp->bio_flags & BIO_TRANSIENT_MAPPING) != 0) {
3655 start = trunc_page((vm_offset_t)bp->bio_data);
3656 end = round_page((vm_offset_t)bp->bio_data + bp->bio_length);
3657 transient = 1;
3658 } else {
3659 transient = 0;
3660 start = end = 0;
3661 }
3662 done = bp->bio_done;
3663 if (done == NULL)
3664 wakeup(bp);
3665 mtx_unlock(mtxp);
3666 if (done != NULL)
3667 done(bp);
3668 if (transient) {
3669 pmap_qremove(start, OFF_TO_IDX(end - start));
3670 vm_map_remove(bio_transient_map, start, end);
3671 atomic_add_int(&inflight_transient_maps, -1);
3672 }
3673 }
3674
3675 /*
3676 * Wait for a BIO to finish.
3677 *
3678 * XXX: resort to a timeout for now. The optimal locking (if any) for this
3679 * case is not yet clear.
3680 */
3681 int
3682 biowait(struct bio *bp, const char *wchan)
3683 {
3684 struct mtx *mtxp;
3685
3686 mtxp = mtx_pool_find(mtxpool_sleep, bp);
3687 mtx_lock(mtxp);
3688 while ((bp->bio_flags & BIO_DONE) == 0)
3689 msleep(bp, mtxp, PRIBIO, wchan, hz / 10);
3690 mtx_unlock(mtxp);
3691 if (bp->bio_error != 0)
3692 return (bp->bio_error);
3693 if (!(bp->bio_flags & BIO_ERROR))
3694 return (0);
3695 return (EIO);
3696 }
3697
3698 void
3699 biofinish(struct bio *bp, struct devstat *stat, int error)
3700 {
3701
3702 if (error) {
3703 bp->bio_error = error;
3704 bp->bio_flags |= BIO_ERROR;
3705 }
3706 if (stat != NULL)
3707 devstat_end_transaction_bio(stat, bp);
3708 biodone(bp);
3709 }
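
/*
 * Usage sketch (illustrative): a driver completing a struct bio with
 * devstat accounting collapses the error and completion bookkeeping
 * into one call (sc->sc_devstat is a hypothetical per-device devstat
 * pointer):
 *
 *	biofinish(bp, sc->sc_devstat, error);
 */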
3710
3711 /*
3712 * bufwait:
3713 *
3714 * Wait for buffer I/O completion, returning error status. The buffer
3715 * is left locked and B_DONE on return. B_EINTR is converted into an EINTR
3716 * error and cleared.
3717 */
3718 int
3719 bufwait(struct buf *bp)
3720 {
3721 if (bp->b_iocmd == BIO_READ)
3722 bwait(bp, PRIBIO, "biord");
3723 else
3724 bwait(bp, PRIBIO, "biowr");
3725 if (bp->b_flags & B_EINTR) {
3726 bp->b_flags &= ~B_EINTR;
3727 return (EINTR);
3728 }
3729 if (bp->b_ioflags & BIO_ERROR) {
3730 return (bp->b_error ? bp->b_error : EIO);
3731 } else {
3732 return (0);
3733 }
3734 }
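/*
 * Illustrative sketch (assumption): the classic synchronous read
 * pairs a strategy call with bufwait() on the same locked buffer,
 * which is roughly what bread() does internally:
 *
 *	bp->b_iocmd = BIO_READ;
 *	bp->b_iooffset = dbtob(bp->b_blkno);
 *	bstrategy(bp);
 *	error = bufwait(bp);
 */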
3735
3736 /*
3737 * Callback function from struct bio back up to struct buf.
3738 */
3739 static void
3740 bufdonebio(struct bio *bip)
3741 {
3742 struct buf *bp;
3743
3744 bp = bip->bio_caller2;
3745 bp->b_resid = bp->b_bcount - bip->bio_completed;
3746 bp->b_resid = bip->bio_resid; /* XXX: remove */
3747 bp->b_ioflags = bip->bio_flags;
3748 bp->b_error = bip->bio_error;
3749 if (bp->b_error)
3750 bp->b_ioflags |= BIO_ERROR;
3751 bufdone(bp);
3752 g_destroy_bio(bip);
3753 }
3754
3755 void
3756 dev_strategy(struct cdev *dev, struct buf *bp)
3757 {
3758 struct cdevsw *csw;
3759 int ref;
3760
3761 KASSERT(dev->si_refcount > 0,
3762 ("dev_strategy on un-referenced struct cdev *(%s) %p",
3763 devtoname(dev), dev));
3764
3765 csw = dev_refthread(dev, &ref);
3766 dev_strategy_csw(dev, csw, bp);
3767 dev_relthread(dev, ref);
3768 }
3769
3770 void
3771 dev_strategy_csw(struct cdev *dev, struct cdevsw *csw, struct buf *bp)
3772 {
3773 struct bio *bip;
3774
3775 KASSERT(bp->b_iocmd == BIO_READ || bp->b_iocmd == BIO_WRITE,
3776 ("b_iocmd botch"));
3777 KASSERT(((dev->si_flags & SI_ETERNAL) != 0 && csw != NULL) ||
3778 dev->si_threadcount > 0,
3779 ("dev_strategy_csw threadcount cdev *(%s) %p", devtoname(dev),
3780 dev));
3781 if (csw == NULL) {
3782 bp->b_error = ENXIO;
3783 bp->b_ioflags = BIO_ERROR;
3784 bufdone(bp);
3785 return;
3786 }
3787 for (;;) {
3788 bip = g_new_bio();
3789 if (bip != NULL)
3790 break;
3791 /* Try again later */
3792 tsleep(&bp, PRIBIO, "dev_strat", hz/10);
3793 }
3794 bip->bio_cmd = bp->b_iocmd;
3795 bip->bio_offset = bp->b_iooffset;
3796 bip->bio_length = bp->b_bcount;
3797 bip->bio_bcount = bp->b_bcount; /* XXX: remove */
3798 bdata2bio(bp, bip);
3799 bip->bio_done = bufdonebio;
3800 bip->bio_caller2 = bp;
3801 bip->bio_dev = dev;
3802 (*csw->d_strategy)(bip);
3803 }
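/*
 * Illustrative sketch: dev_strategy() above is equivalent to doing
 * the reference dance by hand, which a caller issuing several
 * requests could exploit to amortize dev_refthread():
 *
 *	csw = dev_refthread(dev, &ref);
 *	dev_strategy_csw(dev, csw, bp);
 *	dev_relthread(dev, ref);
 *
 * dev_strategy_csw() copes with csw == NULL by failing the buffer
 * with ENXIO, so no NULL check is needed between the calls.
 */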
3804
3805 /*
3806 * bufdone:
3807 *
3808 * Finish I/O on a buffer, optionally calling a completion function.
3809 * This is usually called from an interrupt so process blocking is
3810 * not allowed.
3811 *
3812 * bufdone is also responsible for setting B_CACHE in a B_VMIO bp.
3813 * In a non-VMIO bp, B_CACHE will be set on the next getblk()
3814 * assuming B_INVAL is clear.
3815 *
3816 * For the VMIO case, we set B_CACHE if the op was a read and no
3817 * read error occurred, or if the op was a write. B_CACHE is never
3818 * set if the buffer is invalid or otherwise uncacheable.
3819 *
3820 * bufdone does not mess with B_INVAL, allowing the I/O routine or the
3821 * initiator to leave B_INVAL set to brelse the buffer out of existence
3822 * in the biodone routine.
3823 */
3824 void
3825 bufdone(struct buf *bp)
3826 {
3827 struct bufobj *dropobj;
3828 void (*biodone)(struct buf *);
3829
3830 CTR3(KTR_BUF, "bufdone(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
3831 dropobj = NULL;
3832
3833 KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp));
3834 BUF_ASSERT_HELD(bp);
3835
3836 runningbufwakeup(bp);
3837 if (bp->b_iocmd == BIO_WRITE)
3838 dropobj = bp->b_bufobj;
3839 /* call optional completion function if requested */
3840 if (bp->b_iodone != NULL) {
3841 biodone = bp->b_iodone;
3842 bp->b_iodone = NULL;
3843 (*biodone) (bp);
3844 if (dropobj)
3845 bufobj_wdrop(dropobj);
3846 return;
3847 }
3848
3849 bufdone_finish(bp);
3850
3851 if (dropobj)
3852 bufobj_wdrop(dropobj);
3853 }
3854
3855 void
3856 bufdone_finish(struct buf *bp)
3857 {
3858 BUF_ASSERT_HELD(bp);
3859
3860 if (!LIST_EMPTY(&bp->b_dep))
3861 buf_complete(bp);
3862
3863 if (bp->b_flags & B_VMIO) {
3864 vm_ooffset_t foff;
3865 vm_page_t m;
3866 vm_object_t obj;
3867 struct vnode *vp;
3868 int bogus, i, iosize;
3869
3870 obj = bp->b_bufobj->bo_object;
3871 KASSERT(obj->paging_in_progress >= bp->b_npages,
3872 ("biodone_finish: paging in progress(%d) < b_npages(%d)",
3873 obj->paging_in_progress, bp->b_npages));
3874
3875 vp = bp->b_vp;
3876 KASSERT(vp->v_holdcnt > 0,
3877 ("biodone_finish: vnode %p has zero hold count", vp));
3878 KASSERT(vp->v_object != NULL,
3879 ("biodone_finish: vnode %p has no vm_object", vp));
3880
3881 foff = bp->b_offset;
3882 KASSERT(bp->b_offset != NOOFFSET,
3883 ("biodone_finish: bp %p has no buffer offset", bp));
3884
3885 /*
3886 * Set B_CACHE if the op was a normal read and no error
3887 * occurred. B_CACHE is set for writes in the b*write()
3888 * routines.
3889 */
3890 iosize = bp->b_bcount - bp->b_resid;
3891 if (bp->b_iocmd == BIO_READ &&
3892 !(bp->b_flags & (B_INVAL|B_NOCACHE)) &&
3893 !(bp->b_ioflags & BIO_ERROR)) {
3894 bp->b_flags |= B_CACHE;
3895 }
3896 bogus = 0;
3897 VM_OBJECT_LOCK(obj);
3898 for (i = 0; i < bp->b_npages; i++) {
3899 int bogusflag = 0;
3900 int resid;
3901
3902 resid = ((foff + PAGE_SIZE) & ~(off_t)PAGE_MASK) - foff;
3903 if (resid > iosize)
3904 resid = iosize;
3905
3906 /*
3907 * cleanup bogus pages, restoring the originals
3908 */
3909 m = bp->b_pages[i];
3910 if (m == bogus_page) {
3911 bogus = bogusflag = 1;
3912 m = vm_page_lookup(obj, OFF_TO_IDX(foff));
3913 if (m == NULL)
3914 panic("biodone: page disappeared!");
3915 bp->b_pages[i] = m;
3916 }
3917 KASSERT(OFF_TO_IDX(foff) == m->pindex,
3918 ("biodone_finish: foff(%jd)/pindex(%ju) mismatch",
3919 (intmax_t)foff, (uintmax_t)m->pindex));
3920
3921 /*
3922 * In the write case, the valid and clean bits are
3923 * already changed correctly ( see bdwrite() ), so we
3924 * only need to do this here in the read case.
3925 */
3926 if ((bp->b_iocmd == BIO_READ) && !bogusflag && resid > 0) {
3927 KASSERT((m->dirty & vm_page_bits(foff &
3928 PAGE_MASK, resid)) == 0, ("bufdone_finish:"
3929 " page %p has unexpected dirty bits", m));
3930 vfs_page_set_valid(bp, foff, m);
3931 }
3932
3933 vm_page_io_finish(m);
3934 vm_object_pip_subtract(obj, 1);
3935 foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
3936 iosize -= resid;
3937 }
3938 vm_object_pip_wakeupn(obj, 0);
3939 VM_OBJECT_UNLOCK(obj);
3940 if (bogus && (bp->b_flags & B_UNMAPPED) == 0) {
3941 BUF_CHECK_MAPPED(bp);
3942 pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
3943 bp->b_pages, bp->b_npages);
3944 }
3945 }
3946
3947 /*
3948 * For asynchronous completions, release the buffer now. The brelse
3949 * will do a wakeup there if necessary - so no need to do a wakeup
3950 * here in the async case. The sync case always needs to do a wakeup.
3951 */
3952
3953 if (bp->b_flags & B_ASYNC) {
3954 if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_RELBUF)) || (bp->b_ioflags & BIO_ERROR))
3955 brelse(bp);
3956 else
3957 bqrelse(bp);
3958 } else
3959 bdone(bp);
3960 }
3961
3962 /*
3963 * This routine is called in lieu of iodone in the case of
3964 * incomplete I/O. This keeps the busy status for pages
3965 * consistent.
3966 */
3967 void
3968 vfs_unbusy_pages(struct buf *bp)
3969 {
3970 int i;
3971 vm_object_t obj;
3972 vm_page_t m;
3973
3974 runningbufwakeup(bp);
3975 if (!(bp->b_flags & B_VMIO))
3976 return;
3977
3978 obj = bp->b_bufobj->bo_object;
3979 VM_OBJECT_LOCK(obj);
3980 for (i = 0; i < bp->b_npages; i++) {
3981 m = bp->b_pages[i];
3982 if (m == bogus_page) {
3983 m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i);
3984 if (!m)
3985 panic("vfs_unbusy_pages: page missing\n");
3986 bp->b_pages[i] = m;
3987 if ((bp->b_flags & B_UNMAPPED) == 0) {
3988 BUF_CHECK_MAPPED(bp);
3989 pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
3990 bp->b_pages, bp->b_npages);
3991 } else
3992 BUF_CHECK_UNMAPPED(bp);
3993 }
3994 vm_object_pip_subtract(obj, 1);
3995 vm_page_io_finish(m);
3996 }
3997 vm_object_pip_wakeupn(obj, 0);
3998 VM_OBJECT_UNLOCK(obj);
3999 }
4000
4001 /*
4002 * vfs_page_set_valid:
4003 *
4004 * Set the valid bits in a page based on the supplied offset. The
4005 * range is restricted to the buffer's size.
4006 *
4007 * This routine is typically called after a read completes.
4008 */
4009 static void
4010 vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m)
4011 {
4012 vm_ooffset_t eoff;
4013
4014 /*
4015 * Compute the end offset, eoff, such that [off, eoff) does not span a
4016 * page boundary and eoff is not greater than the end of the buffer.
4017 * The end of the buffer, in this case, is our file EOF, not the
4018 * allocation size of the buffer.
4019 */
4020 eoff = (off + PAGE_SIZE) & ~(vm_ooffset_t)PAGE_MASK;
4021 if (eoff > bp->b_offset + bp->b_bcount)
4022 eoff = bp->b_offset + bp->b_bcount;
4023
4024 /*
4025 * Set valid range. This is typically the entire buffer and thus the
4026 * entire page.
4027 */
4028 if (eoff > off)
4029 vm_page_set_valid(m, off & PAGE_MASK, eoff - off);
4030 }
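/*
 * Worked example (assumed numbers): with PAGE_SIZE 4096, a buffer at
 * b_offset 0x3000 with b_bcount 0x0800 and off == 0x3000 computes
 * eoff = (0x3000 + 0x1000) & ~0xfff = 0x4000, clipped to
 * 0x3000 + 0x0800 = 0x3800, so [0x000, 0x800) within the page is
 * marked valid: the first four DEV_BSIZE (512-byte) blocks.
 */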
4031
4032 /*
4033 * vfs_page_set_validclean:
4034 *
4035 * Set the valid bits and clear the dirty bits in a page based on the
4036 * supplied offset. The range is restricted to the buffer's size.
4037 */
4038 static void
4039 vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off, vm_page_t m)
4040 {
4041 vm_ooffset_t soff, eoff;
4042
4043 /*
4044 * Start and end offsets in buffer. eoff - soff may not cross a
4045 * page boundary or cross the end of the buffer. The end of the
4046 * buffer, in this case, is our file EOF, not the allocation size
4047 * of the buffer.
4048 */
4049 soff = off;
4050 eoff = (off + PAGE_SIZE) & ~(off_t)PAGE_MASK;
4051 if (eoff > bp->b_offset + bp->b_bcount)
4052 eoff = bp->b_offset + bp->b_bcount;
4053
4054 /*
4055 * Set valid range. This is typically the entire buffer and thus the
4056 * entire page.
4057 */
4058 if (eoff > soff) {
4059 vm_page_set_validclean(
4060 m,
4061 (vm_offset_t) (soff & PAGE_MASK),
4062 (vm_offset_t) (eoff - soff)
4063 );
4064 }
4065 }
4066
4067 /*
4068 * Ensure that no buffer pages are busied by the VPO_BUSY flag. If
4069 * any page is busy, drain the flag.
4070 */
4071 static void
4072 vfs_drain_busy_pages(struct buf *bp)
4073 {
4074 vm_page_t m;
4075 int i, last_busied;
4076
4077 VM_OBJECT_LOCK_ASSERT(bp->b_bufobj->bo_object, MA_OWNED);
4078 last_busied = 0;
4079 for (i = 0; i < bp->b_npages; i++) {
4080 m = bp->b_pages[i];
4081 if ((m->oflags & VPO_BUSY) != 0) {
4082 for (; last_busied < i; last_busied++)
4083 vm_page_busy(bp->b_pages[last_busied]);
4084 while ((m->oflags & VPO_BUSY) != 0)
4085 vm_page_sleep(m, "vbpage");
4086 }
4087 }
4088 for (i = 0; i < last_busied; i++)
4089 vm_page_wakeup(bp->b_pages[i]);
4090 }
4091
4092 /*
4093 * This routine is called before a device strategy routine.
4094 * It is used to tell the VM system that paging I/O is in
4095 * progress, and treat the pages associated with the buffer
4096 * almost as being VPO_BUSY. Also the object paging_in_progress
4097 * flag is handled to make sure that the object doesn't become
4098 * inconsistent.
4099 *
4100 * Since I/O has not been initiated yet, certain buffer flags
4101 * such as BIO_ERROR or B_INVAL may be in an inconsistent state
4102 * and should be ignored.
4103 */
4104 void
4105 vfs_busy_pages(struct buf *bp, int clear_modify)
4106 {
4107 int i, bogus;
4108 vm_object_t obj;
4109 vm_ooffset_t foff;
4110 vm_page_t m;
4111
4112 if (!(bp->b_flags & B_VMIO))
4113 return;
4114
4115 obj = bp->b_bufobj->bo_object;
4116 foff = bp->b_offset;
4117 KASSERT(bp->b_offset != NOOFFSET,
4118 ("vfs_busy_pages: no buffer offset"));
4119 VM_OBJECT_LOCK(obj);
4120 vfs_drain_busy_pages(bp);
4121 if (bp->b_bufsize != 0)
4122 vfs_setdirty_locked_object(bp);
4123 bogus = 0;
4124 for (i = 0; i < bp->b_npages; i++) {
4125 m = bp->b_pages[i];
4126
4127 if ((bp->b_flags & B_CLUSTER) == 0) {
4128 vm_object_pip_add(obj, 1);
4129 vm_page_io_start(m);
4130 }
4131 /*
4132 * When readying a buffer for a read ( i.e.
4133 * clear_modify == 0 ), it is important to do
4134 * bogus_page replacement for valid pages in
4135 * partially instantiated buffers. Partially
4136 * instantiated buffers can, in turn, occur when
4137 * reconstituting a buffer from its VM backing store
4138 * base. We only have to do this if B_CACHE is
4139 * clear ( which causes the I/O to occur in the
4140 * first place ). The replacement prevents the read
4141 * I/O from overwriting potentially dirty VM-backed
4142 * pages. XXX bogus page replacement is, uh, bogus.
4143 * It may not work properly with small-block devices.
4144 * We need to find a better way.
4145 */
4146 if (clear_modify) {
4147 pmap_remove_write(m);
4148 vfs_page_set_validclean(bp, foff, m);
4149 } else if (m->valid == VM_PAGE_BITS_ALL &&
4150 (bp->b_flags & B_CACHE) == 0) {
4151 bp->b_pages[i] = bogus_page;
4152 bogus++;
4153 }
4154 foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
4155 }
4156 VM_OBJECT_UNLOCK(obj);
4157 if (bogus && (bp->b_flags & B_UNMAPPED) == 0) {
4158 BUF_CHECK_MAPPED(bp);
4159 pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
4160 bp->b_pages, bp->b_npages);
4161 }
4162 }
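/*
 * Illustrative sketch (assumption): vfs_busy_pages() brackets the
 * strategy call; bufdone_finish() undoes it on completion, while an
 * initiator that aborts before starting the transfer undoes it with
 * vfs_unbusy_pages():
 *
 *	vfs_busy_pages(bp, 0);
 *	bp->b_iocmd = BIO_READ;
 *	bstrategy(bp);
 */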
4163
4164 /*
4165 * vfs_bio_set_valid:
4166 *
4167 * Set the range within the buffer to valid. The range is
4168 * relative to the beginning of the buffer, b_offset. Note that
4169 * b_offset itself may be offset from the beginning of the first
4170 * page.
4171 */
4172 void
4173 vfs_bio_set_valid(struct buf *bp, int base, int size)
4174 {
4175 int i, n;
4176 vm_page_t m;
4177
4178 if (!(bp->b_flags & B_VMIO))
4179 return;
4180
4181 /*
4182 * Fixup base to be relative to beginning of first page.
4183 * Set initial n to be the maximum number of bytes in the
4184 * first page that can be validated.
4185 */
4186 base += (bp->b_offset & PAGE_MASK);
4187 n = PAGE_SIZE - (base & PAGE_MASK);
4188
4189 VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
4190 for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
4191 m = bp->b_pages[i];
4192 if (n > size)
4193 n = size;
4194 vm_page_set_valid(m, base & PAGE_MASK, n);
4195 base += n;
4196 size -= n;
4197 n = PAGE_SIZE;
4198 }
4199 VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
4200 }
4201
4202 /*
4203 * vfs_bio_clrbuf:
4204 *
4205 * If the specified buffer is a non-VMIO buffer, clear the entire
4206 * buffer. If the specified buffer is a VMIO buffer, clear and
4207 * validate only the previously invalid portions of the buffer.
4208 * This routine essentially fakes an I/O, so we need to clear
4209 * BIO_ERROR and B_INVAL.
4210 *
4211 * Note that while we only theoretically need to clear through b_bcount,
4212 * we go ahead and clear through b_bufsize.
4213 */
4214 void
4215 vfs_bio_clrbuf(struct buf *bp)
4216 {
4217 int i, j, mask, sa, ea, slide;
4218
4219 if ((bp->b_flags & (B_VMIO | B_MALLOC)) != B_VMIO) {
4220 clrbuf(bp);
4221 return;
4222 }
4223 bp->b_flags &= ~B_INVAL;
4224 bp->b_ioflags &= ~BIO_ERROR;
4225 VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
4226 if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
4227 (bp->b_offset & PAGE_MASK) == 0) {
4228 if (bp->b_pages[0] == bogus_page)
4229 goto unlock;
4230 mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
4231 VM_OBJECT_LOCK_ASSERT(bp->b_pages[0]->object, MA_OWNED);
4232 if ((bp->b_pages[0]->valid & mask) == mask)
4233 goto unlock;
4234 if ((bp->b_pages[0]->valid & mask) == 0) {
4235 pmap_zero_page_area(bp->b_pages[0], 0, bp->b_bufsize);
4236 bp->b_pages[0]->valid |= mask;
4237 goto unlock;
4238 }
4239 }
4240 sa = bp->b_offset & PAGE_MASK;
4241 slide = 0;
4242 for (i = 0; i < bp->b_npages; i++, sa = 0) {
4243 slide = imin(slide + PAGE_SIZE, bp->b_offset + bp->b_bufsize);
4244 ea = slide & PAGE_MASK;
4245 if (ea == 0)
4246 ea = PAGE_SIZE;
4247 if (bp->b_pages[i] == bogus_page)
4248 continue;
4249 j = sa / DEV_BSIZE;
4250 mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j;
4251 VM_OBJECT_LOCK_ASSERT(bp->b_pages[i]->object, MA_OWNED);
4252 if ((bp->b_pages[i]->valid & mask) == mask)
4253 continue;
4254 if ((bp->b_pages[i]->valid & mask) == 0)
4255 pmap_zero_page_area(bp->b_pages[i], sa, ea - sa);
4256 else {
4257 for (; sa < ea; sa += DEV_BSIZE, j++) {
4258 if ((bp->b_pages[i]->valid & (1 << j)) == 0) {
4259 pmap_zero_page_area(bp->b_pages[i],
4260 sa, DEV_BSIZE);
4261 }
4262 }
4263 }
4264 bp->b_pages[i]->valid |= mask;
4265 }
4266 unlock:
4267 VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
4268 bp->b_resid = 0;
4269 }
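/*
 * Illustrative sketch (assumption): block allocators call this on a
 * freshly allocated block so it reads back as zeroes without a disk
 * transfer; ffs_balloc() follows this pattern for BA_CLRBUF:
 *
 *	bp = getblk(vp, lbn, bsize, 0, 0, 0);
 *	vfs_bio_clrbuf(bp);
 */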
4270
4271 void
4272 vfs_bio_bzero_buf(struct buf *bp, int base, int size)
4273 {
4274 vm_page_t m;
4275 int i, n;
4276
4277 if ((bp->b_flags & B_UNMAPPED) == 0) {
4278 BUF_CHECK_MAPPED(bp);
4279 bzero(bp->b_data + base, size);
4280 } else {
4281 BUF_CHECK_UNMAPPED(bp);
4282 n = PAGE_SIZE - (base & PAGE_MASK);
4283 VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
4284 for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
4285 m = bp->b_pages[i];
4286 if (n > size)
4287 n = size;
4288 pmap_zero_page_area(m, base & PAGE_MASK, n);
4289 base += n;
4290 size -= n;
4291 n = PAGE_SIZE;
4292 }
4293 VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
4294 }
4295 }
4296
4297 /*
4298 * vm_hold_load_pages and vm_hold_free_pages get pages into
4299 * a buffer's address space. The pages are anonymous and are
4300 * not associated with a file object.
4301 */
4302 static void
4303 vm_hold_load_pages(struct buf *bp, vm_offset_t from, vm_offset_t to)
4304 {
4305 vm_offset_t pg;
4306 vm_page_t p;
4307 int index;
4308
4309 BUF_CHECK_MAPPED(bp);
4310
4311 to = round_page(to);
4312 from = round_page(from);
4313 index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
4314
4315 for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
4316 tryagain:
4317 /*
4318 * note: must allocate system pages since blocking here
4319 * could interfere with paging I/O, no matter which
4320 * process we are.
4321 */
4322 p = vm_page_alloc(NULL, 0, VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ |
4323 VM_ALLOC_WIRED | VM_ALLOC_COUNT((to - pg) >> PAGE_SHIFT));
4324 if (p == NULL) {
4325 VM_WAIT;
4326 goto tryagain;
4327 }
4328 pmap_qenter(pg, &p, 1);
4329 bp->b_pages[index] = p;
4330 }
4331 bp->b_npages = index;
4332 }
4333
4334 /* Return pages associated with this buf to the vm system */
4335 static void
4336 vm_hold_free_pages(struct buf *bp, int newbsize)
4337 {
4338 vm_offset_t from;
4339 vm_page_t p;
4340 int index, newnpages;
4341
4342 BUF_CHECK_MAPPED(bp);
4343
4344 from = round_page((vm_offset_t)bp->b_data + newbsize);
4345 newnpages = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
4346 if (bp->b_npages > newnpages)
4347 pmap_qremove(from, bp->b_npages - newnpages);
4348 for (index = newnpages; index < bp->b_npages; index++) {
4349 p = bp->b_pages[index];
4350 bp->b_pages[index] = NULL;
4351 if (p->busy != 0)
4352 printf("vm_hold_free_pages: blkno: %jd, lblkno: %jd\n",
4353 (intmax_t)bp->b_blkno, (intmax_t)bp->b_lblkno);
4354 p->wire_count--;
4355 vm_page_free(p);
4356 atomic_subtract_int(&cnt.v_wire_count, 1);
4357 }
4358 bp->b_npages = newnpages;
4359 }
4360
4361 /*
4362 * Map an IO request into kernel virtual address space.
4363 *
4364 * All requests are (re)mapped into kernel VA space.
4365 * Notice that we use b_bufsize for the size of the buffer
4366 * to be mapped. b_bcount might be modified by the driver.
4367 *
4368 * Note that even if the caller determines that the address space should
4369 * be valid, a race or a smaller-file mapped into a larger space may
4370 * actually cause vmapbuf() to fail, so all callers of vmapbuf() MUST
4371 * check the return value.
4372 */
4373 int
4374 vmapbuf(struct buf *bp, int mapbuf)
4375 {
4376 caddr_t kva;
4377 vm_prot_t prot;
4378 int pidx;
4379
4380 if (bp->b_bufsize < 0)
4381 return (-1);
4382 prot = VM_PROT_READ;
4383 if (bp->b_iocmd == BIO_READ)
4384 prot |= VM_PROT_WRITE; /* Less backwards than it looks */
4385 if ((pidx = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
4386 (vm_offset_t)bp->b_data, bp->b_bufsize, prot, bp->b_pages,
4387 btoc(MAXPHYS))) < 0)
4388 return (-1);
4389 bp->b_npages = pidx;
4390 if (mapbuf || !unmapped_buf_allowed) {
4391 pmap_qenter((vm_offset_t)bp->b_saveaddr, bp->b_pages, pidx);
4392 kva = bp->b_saveaddr;
4393 bp->b_saveaddr = bp->b_data;
4394 bp->b_data = kva + (((vm_offset_t)bp->b_data) & PAGE_MASK);
4395 bp->b_flags &= ~B_UNMAPPED;
4396 } else {
4397 bp->b_flags |= B_UNMAPPED;
4398 bp->b_offset = ((vm_offset_t)bp->b_data) & PAGE_MASK;
4399 bp->b_saveaddr = bp->b_data;
4400 bp->b_data = unmapped_buf;
4401 }
4402 return(0);
4403 }
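/*
 * Illustrative sketch (assumption): a physio-style caller wires the
 * user pages, runs the transfer, and always unmaps afterwards:
 *
 *	if (vmapbuf(bp, mapped) < 0)
 *		return (EFAULT);
 *	dev_strategy(dev, bp);
 *	(void)bufwait(bp);
 *	vunmapbuf(bp);
 */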
4404
4405 /*
4406 * Free the io map PTEs associated with this IO operation.
4407 * We also invalidate the TLB entries and restore the original b_addr.
4408 */
4409 void
4410 vunmapbuf(struct buf *bp)
4411 {
4412 int npages;
4413
4414 npages = bp->b_npages;
4415 if (bp->b_flags & B_UNMAPPED)
4416 bp->b_flags &= ~B_UNMAPPED;
4417 else
4418 pmap_qremove(trunc_page((vm_offset_t)bp->b_data), npages);
4419 vm_page_unhold_pages(bp->b_pages, npages);
4420
4421 bp->b_data = bp->b_saveaddr;
4422 }
4423
4424 void
4425 bdone(struct buf *bp)
4426 {
4427 struct mtx *mtxp;
4428
4429 mtxp = mtx_pool_find(mtxpool_sleep, bp);
4430 mtx_lock(mtxp);
4431 bp->b_flags |= B_DONE;
4432 wakeup(bp);
4433 mtx_unlock(mtxp);
4434 }
4435
4436 void
4437 bwait(struct buf *bp, u_char pri, const char *wchan)
4438 {
4439 struct mtx *mtxp;
4440
4441 mtxp = mtx_pool_find(mtxpool_sleep, bp);
4442 mtx_lock(mtxp);
4443 while ((bp->b_flags & B_DONE) == 0)
4444 msleep(bp, mtxp, pri, wchan, 0);
4445 mtx_unlock(mtxp);
4446 }
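/*
 * Illustrative sketch: bdone()/bwait() form a minimal completion
 * handshake on B_DONE, serialized by the buffer's pool mutex.  An
 * initiator clears B_DONE, starts the transfer (start_io below is
 * hypothetical), and sleeps; the completion side calls bdone():
 *
 *	bp->b_flags &= ~B_DONE;
 *	start_io(bp);
 *	bwait(bp, PRIBIO, "exwait");
 *
 * bufwait() above layers B_EINTR and BIO_ERROR decoding on this.
 */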
4447
4448 int
4449 bufsync(struct bufobj *bo, int waitfor)
4450 {
4451
4452 return (VOP_FSYNC(bo->__bo_vnode, waitfor, curthread));
4453 }
4454
4455 void
4456 bufstrategy(struct bufobj *bo, struct buf *bp)
4457 {
4458 int i = 0;
4459 struct vnode *vp;
4460
4461 vp = bp->b_vp;
4462 KASSERT(vp == bo->bo_private, ("Inconsistent vnode bufstrategy"));
4463 KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
4464 ("Wrong vnode in bufstrategy(bp=%p, vp=%p)", bp, vp));
4465 i = VOP_STRATEGY(vp, bp);
4466 KASSERT(i == 0, ("VOP_STRATEGY failed bp=%p vp=%p", bp, bp->b_vp));
4467 }
4468
4469 void
4470 bufobj_wrefl(struct bufobj *bo)
4471 {
4472
4473 KASSERT(bo != NULL, ("NULL bo in bufobj_wref"));
4474 ASSERT_BO_LOCKED(bo);
4475 bo->bo_numoutput++;
4476 }
4477
4478 void
4479 bufobj_wref(struct bufobj *bo)
4480 {
4481
4482 KASSERT(bo != NULL, ("NULL bo in bufobj_wref"));
4483 BO_LOCK(bo);
4484 bo->bo_numoutput++;
4485 BO_UNLOCK(bo);
4486 }
4487
4488 void
4489 bufobj_wdrop(struct bufobj *bo)
4490 {
4491
4492 KASSERT(bo != NULL, ("NULL bo in bufobj_wdrop"));
4493 BO_LOCK(bo);
4494 KASSERT(bo->bo_numoutput > 0, ("bufobj_wdrop non-positive count"));
4495 if ((--bo->bo_numoutput == 0) && (bo->bo_flag & BO_WWAIT)) {
4496 bo->bo_flag &= ~BO_WWAIT;
4497 wakeup(&bo->bo_numoutput);
4498 }
4499 BO_UNLOCK(bo);
4500 }
4501
4502 int
4503 bufobj_wwait(struct bufobj *bo, int slpflag, int timeo)
4504 {
4505 int error;
4506
4507 KASSERT(bo != NULL, ("NULL bo in bufobj_wwait"));
4508 ASSERT_BO_LOCKED(bo);
4509 error = 0;
4510 while (bo->bo_numoutput) {
4511 bo->bo_flag |= BO_WWAIT;
4512 error = msleep(&bo->bo_numoutput, BO_MTX(bo),
4513 slpflag | (PRIBIO + 1), "bo_wwait", timeo);
4514 if (error)
4515 break;
4516 }
4517 return (error);
4518 }
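/*
 * Illustrative sketch (assumption): bo_numoutput counts in-flight
 * writes.  bufdone() drops the reference taken at issue time, and a
 * sync path drains the count while holding the bufobj lock:
 *
 *	BO_LOCK(bo);
 *	error = bufobj_wwait(bo, 0, 0);
 *	BO_UNLOCK(bo);
 */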
4519
4520 void
4521 bpin(struct buf *bp)
4522 {
4523 struct mtx *mtxp;
4524
4525 mtxp = mtx_pool_find(mtxpool_sleep, bp);
4526 mtx_lock(mtxp);
4527 bp->b_pin_count++;
4528 mtx_unlock(mtxp);
4529 }
4530
4531 void
4532 bunpin(struct buf *bp)
4533 {
4534 struct mtx *mtxp;
4535
4536 mtxp = mtx_pool_find(mtxpool_sleep, bp);
4537 mtx_lock(mtxp);
4538 if (--bp->b_pin_count == 0)
4539 wakeup(bp);
4540 mtx_unlock(mtxp);
4541 }
4542
4543 void
4544 bunpin_wait(struct buf *bp)
4545 {
4546 struct mtx *mtxp;
4547
4548 mtxp = mtx_pool_find(mtxpool_sleep, bp);
4549 mtx_lock(mtxp);
4550 while (bp->b_pin_count > 0)
4551 msleep(bp, mtxp, PRIBIO, "bwunpin", 0);
4552 mtx_unlock(mtxp);
4553 }
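/*
 * Illustrative sketch (assumption): pinning keeps a buffer stable
 * while a background copy is in flight, as in the ffs background
 * write path.  The producer pins before queueing, the completion
 * handler unpins, and a foreground user blocks until unpinned:
 *
 *	bpin(bp);		(before starting the background write)
 *	bunpin(bp);		(from the b_iodone handler)
 *	bunpin_wait(bp);	(foreground, before reusing the buffer)
 */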
4554
4555 /*
4556 * Set bio_data or bio_ma for struct bio from the struct buf.
4557 */
4558 void
4559 bdata2bio(struct buf *bp, struct bio *bip)
4560 {
4561
4562 if ((bp->b_flags & B_UNMAPPED) != 0) {
4563 KASSERT(unmapped_buf_allowed, ("unmapped"));
4564 bip->bio_ma = bp->b_pages;
4565 bip->bio_ma_n = bp->b_npages;
4566 bip->bio_data = unmapped_buf;
4567 bip->bio_ma_offset = (vm_offset_t)bp->b_offset & PAGE_MASK;
4568 bip->bio_flags |= BIO_UNMAPPED;
4569 KASSERT(round_page(bip->bio_ma_offset + bip->bio_length) /
4570 PAGE_SIZE == bp->b_npages,
4571 ("Buffer %p too short: %d %jd %d", bp, bip->bio_ma_offset,
4572 (uintmax_t)bip->bio_length, bip->bio_ma_n));
4573 } else {
4574 bip->bio_data = bp->b_data;
4575 bip->bio_ma = NULL;
4576 }
4577 }
4578
4579 #include "opt_ddb.h"
4580 #ifdef DDB
4581 #include <ddb/ddb.h>
4582
4583 /* DDB command to show buffer data */
4584 DB_SHOW_COMMAND(buffer, db_show_buffer)
4585 {
4586 /* get args */
4587 struct buf *bp = (struct buf *)addr;
4588
4589 if (!have_addr) {
4590 db_printf("usage: show buffer <addr>\n");
4591 return;
4592 }
4593
4594 db_printf("buf at %p\n", bp);
4595 db_printf("b_flags = 0x%b, b_xflags=0x%b, b_vflags=0x%b\n",
4596 (u_int)bp->b_flags, PRINT_BUF_FLAGS, (u_int)bp->b_xflags,
4597 PRINT_BUF_XFLAGS, (u_int)bp->b_vflags, PRINT_BUF_VFLAGS);
4598 db_printf(
4599 "b_error = %d, b_bufsize = %ld, b_bcount = %ld, b_resid = %ld\n"
4600 "b_bufobj = (%p), b_data = %p, b_blkno = %jd, b_lblkno = %jd, "
4601 "b_dep = %p\n",
4602 bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
4603 bp->b_bufobj, bp->b_data, (intmax_t)bp->b_blkno,
4604 (intmax_t)bp->b_lblkno, bp->b_dep.lh_first);
4605 if (bp->b_npages) {
4606 int i;
4607 db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
4608 for (i = 0; i < bp->b_npages; i++) {
4609 vm_page_t m;
4610 m = bp->b_pages[i];
4611 db_printf("(%p, 0x%lx, 0x%lx)", (void *)m->object,
4612 (u_long)m->pindex, (u_long)VM_PAGE_TO_PHYS(m));
4613 if ((i + 1) < bp->b_npages)
4614 db_printf(",");
4615 }
4616 db_printf("\n");
4617 }
4618 db_printf(" ");
4619 BUF_LOCKPRINTINFO(bp);
4620 }
4621
4622 DB_SHOW_COMMAND(lockedbufs, lockedbufs)
4623 {
4624 struct buf *bp;
4625 int i;
4626
4627 for (i = 0; i < nbuf; i++) {
4628 bp = &buf[i];
4629 if (BUF_ISLOCKED(bp)) {
4630 db_show_buffer((uintptr_t)bp, 1, 0, NULL);
4631 db_printf("\n");
4632 }
4633 }
4634 }
4635
4636 DB_SHOW_COMMAND(vnodebufs, db_show_vnodebufs)
4637 {
4638 struct vnode *vp;
4639 struct buf *bp;
4640
4641 if (!have_addr) {
4642 db_printf("usage: show vnodebufs <addr>\n");
4643 return;
4644 }
4645 vp = (struct vnode *)addr;
4646 db_printf("Clean buffers:\n");
4647 TAILQ_FOREACH(bp, &vp->v_bufobj.bo_clean.bv_hd, b_bobufs) {
4648 db_show_buffer((uintptr_t)bp, 1, 0, NULL);
4649 db_printf("\n");
4650 }
4651 db_printf("Dirty buffers:\n");
4652 TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs) {
4653 db_show_buffer((uintptr_t)bp, 1, 0, NULL);
4654 db_printf("\n");
4655 }
4656 }
4657
4658 DB_COMMAND(countfreebufs, db_countfreebufs)
4659 {
4660 struct buf *bp;
4661 int i, used = 0, nfree = 0;
4662
4663 if (have_addr) {
4664 db_printf("usage: countfreebufs\n");
4665 return;
4666 }
4667
4668 for (i = 0; i < nbuf; i++) {
4669 bp = &buf[i];
4670 if ((bp->b_vflags & BV_INFREECNT) != 0)
4671 nfree++;
4672 else
4673 used++;
4674 }
4675
4676 db_printf("Counted %d free, %d used (%d tot)\n", nfree, used,
4677 nfree + used);
4678 db_printf("numfreebuffers is %d\n", numfreebuffers);
4679 }
4680 #endif /* DDB */