FreeBSD/Linux Kernel Cross Reference
sys/kern/vfs_bio.c
1 /* $NetBSD: vfs_bio.c,v 1.167.2.1 2007/10/24 22:32:39 xtraeme Exp $ */
2
3 /*-
4 * Copyright (c) 1982, 1986, 1989, 1993
5 * The Regents of the University of California. All rights reserved.
6 * (c) UNIX System Laboratories, Inc.
7 * All or some portions of this file are derived from material licensed
8 * to the University of California by American Telephone and Telegraph
9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10 * the permission of UNIX System Laboratories, Inc.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)vfs_bio.c 8.6 (Berkeley) 1/11/94
37 */
38
39 /*-
40 * Copyright (c) 1994 Christopher G. Demetriou
41 *
42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that the following conditions
44 * are met:
45 * 1. Redistributions of source code must retain the above copyright
46 * notice, this list of conditions and the following disclaimer.
47 * 2. Redistributions in binary form must reproduce the above copyright
48 * notice, this list of conditions and the following disclaimer in the
49 * documentation and/or other materials provided with the distribution.
50 * 3. All advertising materials mentioning features or use of this software
51 * must display the following acknowledgement:
52 * This product includes software developed by the University of
53 * California, Berkeley and its contributors.
54 * 4. Neither the name of the University nor the names of its contributors
55 * may be used to endorse or promote products derived from this software
56 * without specific prior written permission.
57 *
58 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
59 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
60 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
61 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
62 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
63 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
64 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
65 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
66 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
67 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
68 * SUCH DAMAGE.
69 *
70 * @(#)vfs_bio.c 8.6 (Berkeley) 1/11/94
71 */
72
73 /*
74 * Some references:
75 * Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
76 * Leffler, et al.: The Design and Implementation of the 4.3BSD
77  *	UNIX Operating System (Addison Wesley, 1989)
78 */
79
80 #include "fs_ffs.h"
81 #include "opt_bufcache.h"
82 #include "opt_softdep.h"
83
84 #include <sys/cdefs.h>
85 __KERNEL_RCSID(0, "$NetBSD: vfs_bio.c,v 1.167.2.1 2007/10/24 22:32:39 xtraeme Exp $");
86
87 #include <sys/param.h>
88 #include <sys/systm.h>
89 #include <sys/kernel.h>
90 #include <sys/proc.h>
91 #include <sys/buf.h>
92 #include <sys/vnode.h>
93 #include <sys/mount.h>
94 #include <sys/malloc.h>
95 #include <sys/resourcevar.h>
96 #include <sys/sysctl.h>
97 #include <sys/conf.h>
98 #include <sys/kauth.h>
99
100 #include <uvm/uvm.h>
101
102 #include <miscfs/specfs/specdev.h>
103
104 #ifndef BUFPAGES
105 # define BUFPAGES 0
106 #endif
107
108 #ifdef BUFCACHE
109 # if (BUFCACHE < 5) || (BUFCACHE > 95)
110 # error BUFCACHE is not between 5 and 95
111 # endif
112 #else
113 # define BUFCACHE 15
114 #endif
115
116 u_int nbuf; /* XXX - for softdep_lockedbufs */
117 u_int bufpages = BUFPAGES; /* optional hardwired count */
118 u_int bufcache = BUFCACHE; /* max % of RAM to use for buffer cache */
119
120 /* Function prototypes */
121 struct bqueue;
122
123 static void buf_setwm(void);
124 static int buf_trim(void);
125 static void *bufpool_page_alloc(struct pool *, int);
126 static void bufpool_page_free(struct pool *, void *);
127 static inline struct buf *bio_doread(struct vnode *, daddr_t, int,
128 kauth_cred_t, int);
129 static struct buf *getnewbuf(int, int, int);
130 static int buf_lotsfree(void);
131 static int buf_canrelease(void);
132 static inline u_long buf_mempoolidx(u_long);
133 static inline u_long buf_roundsize(u_long);
134 static inline caddr_t buf_malloc(size_t);
135 static void buf_mrelease(caddr_t, size_t);
136 static inline void binsheadfree(struct buf *, struct bqueue *);
137 static inline void binstailfree(struct buf *, struct bqueue *);
138 int count_lock_queue(void); /* XXX */
139 #ifdef DEBUG
140 static int checkfreelist(struct buf *, struct bqueue *);
141 #endif
142
143 /*
144 * Definitions for the buffer hash lists.
145 */
146 #define BUFHASH(dvp, lbn) \
147 (&bufhashtbl[(((long)(dvp) >> 8) + (int)(lbn)) & bufhash])
148 LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
149 u_long bufhash;
150 #if !defined(SOFTDEP) || !defined(FFS)
151 struct bio_ops bioops; /* I/O operation notification */
152 #endif
153
154 /*
155 * Insq/Remq for the buffer hash lists.
156 */
157 #define binshash(bp, dp) LIST_INSERT_HEAD(dp, bp, b_hash)
158 #define bremhash(bp) LIST_REMOVE(bp, b_hash)
159
160 /*
161 * Definitions for the buffer free lists.
162 */
163 #define BQUEUES 3 /* number of free buffer queues */
164
165 #define BQ_LOCKED 0 /* super-blocks &c */
166 #define BQ_LRU 1 /* lru, useful buffers */
167 #define BQ_AGE 2 /* rubbish */
168
169 struct bqueue {
170 TAILQ_HEAD(, buf) bq_queue;
171 uint64_t bq_bytes;
172 } bufqueues[BQUEUES];
173 int needbuffer;
174
175 /*
176 * Buffer queue lock.
177 * Take this lock first if also taking some buffer's b_interlock.
178 */
179 struct simplelock bqueue_slock = SIMPLELOCK_INITIALIZER;
180
181 /*
182 * Buffer pool for I/O buffers.
183 */
184 static POOL_INIT(bufpool, sizeof(struct buf), 0, 0, 0, "bufpl",
185 &pool_allocator_nointr);
186
187
188 /* XXX - somewhat gross.. */
189 #if MAXBSIZE == 0x2000
190 #define NMEMPOOLS 5
191 #elif MAXBSIZE == 0x4000
192 #define NMEMPOOLS 6
193 #elif MAXBSIZE == 0x8000
194 #define NMEMPOOLS 7
195 #else
196 #define NMEMPOOLS 8
197 #endif
198
199 #define MEMPOOL_INDEX_OFFSET 9 /* smallest pool is 512 bytes */
200 #if (1 << (NMEMPOOLS + MEMPOOL_INDEX_OFFSET - 1)) != MAXBSIZE
201 #error update vfs_bio buffer memory parameters
202 #endif
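/*
 * Illustrative note (not in the original source): with the common
 * MAXBSIZE of 0x10000 (64 kB) the fall-through case above yields
 * NMEMPOOLS == 8, so the pools cover the power-of-two sizes
 *
 *	1 << 9  =   512 bytes	(MEMPOOL_INDEX_OFFSET, smallest pool)
 *	1 << 10 =   1 kB
 *	  ...
 *	1 << 16 =  64 kB	(== MAXBSIZE, largest pool)
 *
 * and the consistency check above holds:
 *	1 << (NMEMPOOLS + MEMPOOL_INDEX_OFFSET - 1) == 1 << 16 == MAXBSIZE
 */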
203
204 /* Buffer memory pools */
205 static struct pool bmempools[NMEMPOOLS];
206
207 struct vm_map *buf_map;
208
209 /*
210 * Buffer memory pool allocator.
211 */
212 static void *
213 bufpool_page_alloc(struct pool *pp, int flags)
214 {
215
216 return (void *)uvm_km_alloc(buf_map,
217 MAXBSIZE, MAXBSIZE,
218 ((flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK)
219 | UVM_KMF_WIRED);
220 }
221
222 static void
223 bufpool_page_free(struct pool *pp, void *v)
224 {
225
226 uvm_km_free(buf_map, (vaddr_t)v, MAXBSIZE, UVM_KMF_WIRED);
227 }
228
229 static struct pool_allocator bufmempool_allocator = {
230 .pa_alloc = bufpool_page_alloc,
231 .pa_free = bufpool_page_free,
232 .pa_pagesz = MAXBSIZE,
233 };
234
235 /* Buffer memory management variables */
236 uint64_t bufmem_valimit;
237 uint64_t bufmem_hiwater;
238 uint64_t bufmem_lowater;
239 uint64_t bufmem;
240
241 /*
242 * MD code can call this to set a hard limit on the amount
243 * of virtual memory used by the buffer cache.
244 */
245 int
246 buf_setvalimit(vsize_t sz)
247 {
248
249 /* We need to accommodate at least NMEMPOOLS of MAXBSIZE each */
250 if (sz < NMEMPOOLS * MAXBSIZE)
251 return EINVAL;
252
253 bufmem_valimit = sz;
254 return 0;
255 }
256
257 static void
258 buf_setwm(void)
259 {
260
261 bufmem_hiwater = buf_memcalc();
262 /* lowater is approx. 2% of memory (with bufcache = 15) */
263 #define BUFMEM_WMSHIFT 3
264 #define BUFMEM_HIWMMIN (64 * 1024 << BUFMEM_WMSHIFT)
265 if (bufmem_hiwater < BUFMEM_HIWMMIN)
266 /* Ensure a reasonable minimum value */
267 bufmem_hiwater = BUFMEM_HIWMMIN;
268 bufmem_lowater = bufmem_hiwater >> BUFMEM_WMSHIFT;
269 }
270
271 #ifdef DEBUG
272 int debug_verify_freelist = 0;
273 static int
274 checkfreelist(struct buf *bp, struct bqueue *dp)
275 {
276 struct buf *b;
277
278 TAILQ_FOREACH(b, &dp->bq_queue, b_freelist) {
279 if (b == bp)
280 return 1;
281 }
282 return 0;
283 }
284 #endif
285
286 /*
287  * Insq/Remq for the buffer free lists.
288 * Call with buffer queue locked.
289 */
290 static inline void
291 binsheadfree(struct buf *bp, struct bqueue *dp)
292 {
293
294 KASSERT(bp->b_freelistindex == -1);
295 TAILQ_INSERT_HEAD(&dp->bq_queue, bp, b_freelist);
296 dp->bq_bytes += bp->b_bufsize;
297 bp->b_freelistindex = dp - bufqueues;
298 }
299
300 static inline void
301 binstailfree(struct buf *bp, struct bqueue *dp)
302 {
303
304 KASSERT(bp->b_freelistindex == -1);
305 TAILQ_INSERT_TAIL(&dp->bq_queue, bp, b_freelist);
306 dp->bq_bytes += bp->b_bufsize;
307 bp->b_freelistindex = dp - bufqueues;
308 }
309
310 void
311 bremfree(struct buf *bp)
312 {
313 struct bqueue *dp;
314 int bqidx = bp->b_freelistindex;
315
316 LOCK_ASSERT(simple_lock_held(&bqueue_slock));
317
318 KASSERT(bqidx != -1);
319 dp = &bufqueues[bqidx];
320 KDASSERT(!debug_verify_freelist || checkfreelist(bp, dp));
321 KASSERT(dp->bq_bytes >= bp->b_bufsize);
322 TAILQ_REMOVE(&dp->bq_queue, bp, b_freelist);
323 dp->bq_bytes -= bp->b_bufsize;
324 #if defined(DIAGNOSTIC)
325 bp->b_freelistindex = -1;
326 #endif /* defined(DIAGNOSTIC) */
327 }
328
329 u_long
330 buf_memcalc(void)
331 {
332 u_long n;
333
334 /*
335 * Determine the upper bound of memory to use for buffers.
336 *
337 	 *	  - If bufpages is specified, use that as the number
338 	 *	    of pages.
339 *
340 * - Otherwise, use bufcache as the percentage of
341 * physical memory.
342 */
343 if (bufpages != 0) {
344 n = bufpages;
345 } else {
346 if (bufcache < 5) {
347 printf("forcing bufcache %d -> 5", bufcache);
348 bufcache = 5;
349 }
350 if (bufcache > 95) {
351 printf("forcing bufcache %d -> 95", bufcache);
352 bufcache = 95;
353 }
354 n = physmem / 100 * bufcache;
355 }
356
357 n <<= PAGE_SHIFT;
358 if (bufmem_valimit != 0 && n > bufmem_valimit)
359 n = bufmem_valimit;
360
361 return (n);
362 }
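/*
 * Worked example (illustrative, numbers are assumed): on a machine with
 * physmem == 131072 pages (512 MB of RAM with 4 kB pages) and the
 * default bufcache of 15, buf_memcalc() returns
 *
 *	n = 131072 / 100 * 15 = 19650 pages
 *	n <<= PAGE_SHIFT       -> about 76.8 MB
 *
 * buf_setwm() uses that value as bufmem_hiwater and derives
 * bufmem_lowater = bufmem_hiwater >> BUFMEM_WMSHIFT (about 9.6 MB here),
 * which is the "approx. 2% of memory" mentioned in buf_setwm().
 */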
363
364 /*
365 * Initialize buffers and hash links for buffers.
366 */
367 void
368 bufinit(void)
369 {
370 struct bqueue *dp;
371 int use_std;
372 u_int i;
373
374 /*
375 * Initialize buffer cache memory parameters.
376 */
377 bufmem = 0;
378 buf_setwm();
379
380 if (bufmem_valimit != 0) {
381 vaddr_t minaddr = 0, maxaddr;
382 buf_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
383 bufmem_valimit, 0, FALSE, 0);
384 if (buf_map == NULL)
385 panic("bufinit: cannot allocate submap");
386 } else
387 buf_map = kernel_map;
388
389 /* On "small" machines use small pool page sizes where possible */
390 use_std = (physmem < atop(16*1024*1024));
391
392 /*
393 * Also use them on systems that can map the pool pages using
394 * a direct-mapped segment.
395 */
396 #ifdef PMAP_MAP_POOLPAGE
397 use_std = 1;
398 #endif
399
400 bufmempool_allocator.pa_backingmap = buf_map;
401 for (i = 0; i < NMEMPOOLS; i++) {
402 struct pool_allocator *pa;
403 struct pool *pp = &bmempools[i];
404 u_int size = 1 << (i + MEMPOOL_INDEX_OFFSET);
405 char *name = malloc(8, M_TEMP, M_WAITOK);
406 if (__predict_true(size >= 1024))
407 (void)snprintf(name, 8, "buf%dk", size / 1024);
408 else
409 (void)snprintf(name, 8, "buf%db", size);
410 pa = (size <= PAGE_SIZE && use_std)
411 ? &pool_allocator_nointr
412 : &bufmempool_allocator;
413 pool_init(pp, size, 0, 0, 0, name, pa);
414 pool_setlowat(pp, 1);
415 pool_sethiwat(pp, 1);
416 }
417
418 /* Initialize the buffer queues */
419 for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++) {
420 TAILQ_INIT(&dp->bq_queue);
421 dp->bq_bytes = 0;
422 }
423
424 /*
425 * Estimate hash table size based on the amount of memory we
426 * intend to use for the buffer cache. The average buffer
427 * size is dependent on our clients (i.e. filesystems).
428 *
429 * For now, use an empirical 3K per buffer.
430 */
431 nbuf = (bufmem_hiwater / 1024) / 3;
432 bufhashtbl = hashinit(nbuf, HASH_LIST, M_CACHE, M_WAITOK, &bufhash);
433 }
434
435 static int
436 buf_lotsfree(void)
437 {
438 int try, thresh;
439 struct lwp *l = curlwp;
440
441 /* Always allocate if doing copy on write */
442 if (l->l_flag & L_COWINPROGRESS)
443 return 1;
444
445 /* Always allocate if less than the low water mark. */
446 if (bufmem < bufmem_lowater)
447 return 1;
448
449 /* Never allocate if greater than the high water mark. */
450 if (bufmem > bufmem_hiwater)
451 return 0;
452
453 /* If there's anything on the AGE list, it should be eaten. */
454 if (TAILQ_FIRST(&bufqueues[BQ_AGE].bq_queue) != NULL)
455 return 0;
456
457 /*
458 	 * The probability of getting a new allocation is inversely
459 * proportional to the current size of the cache, using
460 * a granularity of 16 steps.
461 */
462 try = random() & 0x0000000fL;
463
464 /* Don't use "16 * bufmem" here to avoid a 32-bit overflow. */
465 thresh = (bufmem - bufmem_lowater) /
466 ((bufmem_hiwater - bufmem_lowater) / 16);
467
468 if (try >= thresh)
469 return 1;
470
471 /* Otherwise don't allocate. */
472 return 0;
473 }
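/*
 * Worked example of the probabilistic step above (assumed, tunable
 * numbers): with bufmem_lowater = 8 MB and bufmem_hiwater = 72 MB, each
 * of the 16 steps is (72 MB - 8 MB) / 16 = 4 MB wide.  At bufmem = 40 MB,
 *
 *	thresh = (40 MB - 8 MB) / 4 MB = 8
 *
 * so an allocation is granted when (random() & 0xf) >= 8, i.e. with
 * probability 8/16 = 50%.  Just above the low water mark nearly every
 * request succeeds; close to the high water mark almost none do.
 */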
474
475 /*
476 * Return estimate of bytes we think need to be
477 * released to help resolve low memory conditions.
478 *
479 * => called at splbio.
480 * => called with bqueue_slock held.
481 */
482 static int
483 buf_canrelease(void)
484 {
485 int pagedemand, ninvalid = 0;
486
487 LOCK_ASSERT(simple_lock_held(&bqueue_slock));
488
489 if (bufmem < bufmem_lowater)
490 return 0;
491
492 if (bufmem > bufmem_hiwater)
493 return bufmem - bufmem_hiwater;
494
495 ninvalid += bufqueues[BQ_AGE].bq_bytes;
496
497 pagedemand = uvmexp.freetarg - uvmexp.free;
498 if (pagedemand < 0)
499 return ninvalid;
500 return MAX(ninvalid, MIN(2 * MAXBSIZE,
501 MIN((bufmem - bufmem_lowater) / 16, pagedemand * PAGE_SIZE)));
502 }
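/*
 * Worked example (assumed numbers): with an empty AGE queue, bufmem at
 * 40 MB, bufmem_lowater at 8 MB, MAXBSIZE of 64 kB and a pagedaemon
 * deficit of 256 4 kB pages, the expression above evaluates to
 *
 *	MIN((40 MB - 8 MB) / 16, 256 * 4 kB) = MIN(2 MB, 1 MB) = 1 MB
 *	MIN(2 * 64 kB, 1 MB)                 = 128 kB
 *	MAX(0, 128 kB)                       = 128 kB
 *
 * i.e. the estimate follows the pagedaemon's demand but is clamped to
 * two maximal-size buffers per call, unless the AGE queue already holds
 * more invalid data than that.
 */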
503
504 /*
505 * Buffer memory allocation helper functions
506 */
507 static inline u_long
508 buf_mempoolidx(u_long size)
509 {
510 u_int n = 0;
511
512 size -= 1;
513 size >>= MEMPOOL_INDEX_OFFSET;
514 while (size) {
515 size >>= 1;
516 n += 1;
517 }
518 if (n >= NMEMPOOLS)
519 panic("buf mem pool index %d", n);
520 return n;
521 }
522
523 static inline u_long
524 buf_roundsize(u_long size)
525 {
526 /* Round up to nearest power of 2 */
527 return (1 << (buf_mempoolidx(size) + MEMPOOL_INDEX_OFFSET));
528 }
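/*
 * Example mappings (illustrative): buf_mempoolidx()/buf_roundsize()
 * place a request in the smallest pool that can hold it, e.g.
 *
 *	size  512 -> index 0, rounded to  512
 *	size  513 -> index 1, rounded to 1024
 *	size 4096 -> index 3, rounded to 4096
 *	size 6000 -> index 4, rounded to 8192
 *
 * Anything larger than MAXBSIZE trips the panic in buf_mempoolidx().
 */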
529
530 static inline caddr_t
531 buf_malloc(size_t size)
532 {
533 u_int n = buf_mempoolidx(size);
534 caddr_t addr;
535 int s;
536
537 while (1) {
538 addr = pool_get(&bmempools[n], PR_NOWAIT);
539 if (addr != NULL)
540 break;
541
542 /* No memory, see if we can free some. If so, try again */
543 if (buf_drain(1) > 0)
544 continue;
545
546 /* Wait for buffers to arrive on the LRU queue */
547 s = splbio();
548 simple_lock(&bqueue_slock);
549 needbuffer = 1;
550 ltsleep(&needbuffer, PNORELOCK | (PRIBIO + 1),
551 "buf_malloc", 0, &bqueue_slock);
552 splx(s);
553 }
554
555 return addr;
556 }
557
558 static void
559 buf_mrelease(caddr_t addr, size_t size)
560 {
561
562 pool_put(&bmempools[buf_mempoolidx(size)], addr);
563 }
564
565 /*
566 * bread()/breadn() helper.
567 */
568 static inline struct buf *
569 bio_doread(struct vnode *vp, daddr_t blkno, int size, kauth_cred_t cred,
570 int async)
571 {
572 struct buf *bp;
573 struct lwp *l = (curlwp != NULL ? curlwp : &lwp0); /* XXX */
574 struct proc *p = l->l_proc;
575 struct mount *mp;
576
577 bp = getblk(vp, blkno, size, 0, 0);
578
579 #ifdef DIAGNOSTIC
580 if (bp == NULL) {
581 panic("bio_doread: no such buf");
582 }
583 #endif
584
585 /*
586 * If buffer does not have data valid, start a read.
587 * Note that if buffer is B_INVAL, getblk() won't return it.
588 * Therefore, it's valid if its I/O has completed or been delayed.
589 */
590 if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
591 /* Start I/O for the buffer. */
592 SET(bp->b_flags, B_READ | async);
593 if (async)
594 BIO_SETPRIO(bp, BPRIO_TIMELIMITED);
595 else
596 BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
597 VOP_STRATEGY(vp, bp);
598
599 /* Pay for the read. */
600 p->p_stats->p_ru.ru_inblock++;
601 } else if (async) {
602 brelse(bp);
603 }
604
605 if (vp->v_type == VBLK)
606 mp = vp->v_specmountpoint;
607 else
608 mp = vp->v_mount;
609
610 /*
611 * Collect statistics on synchronous and asynchronous reads.
612 * Reads from block devices are charged to their associated
613 * filesystem (if any).
614 */
615 if (mp != NULL) {
616 if (async == 0)
617 mp->mnt_stat.f_syncreads++;
618 else
619 mp->mnt_stat.f_asyncreads++;
620 }
621
622 return (bp);
623 }
624
625 /*
626 * Read a disk block.
627 * This algorithm described in Bach (p.54).
628 */
629 int
630 bread(struct vnode *vp, daddr_t blkno, int size, kauth_cred_t cred,
631 struct buf **bpp)
632 {
633 struct buf *bp;
634
635 /* Get buffer for block. */
636 bp = *bpp = bio_doread(vp, blkno, size, cred, 0);
637
638 /* Wait for the read to complete, and return result. */
639 return (biowait(bp));
640 }
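/*
 * Minimal usage sketch (hypothetical helper, not part of this file):
 * the usual filesystem pattern is to bread() a block, check the error,
 * use or copy bp->b_data, and hand the buffer back with brelse().
 */
#if 0
static int
example_read_block(struct vnode *vp, daddr_t lbn, int bsize,
    kauth_cred_t cred)
{
	struct buf *bp;
	int error;

	/* bread() returns the biowait() result; *bpp is set either way */
	error = bread(vp, lbn, bsize, cred, &bp);
	if (error) {
		brelse(bp);	/* release the buffer on error, too */
		return error;
	}

	/* ... examine or copy bp->b_data here ... */

	brelse(bp);		/* back onto the free lists */
	return 0;
}
#endif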
641
642 /*
643 * Read-ahead multiple disk blocks. The first is sync, the rest async.
644 * Trivial modification to the breada algorithm presented in Bach (p.55).
645 */
646 int
647 breadn(struct vnode *vp, daddr_t blkno, int size, daddr_t *rablks,
648 int *rasizes, int nrablks, kauth_cred_t cred, struct buf **bpp)
649 {
650 struct buf *bp;
651 int i;
652
653 bp = *bpp = bio_doread(vp, blkno, size, cred, 0);
654
655 /*
656 * For each of the read-ahead blocks, start a read, if necessary.
657 */
658 for (i = 0; i < nrablks; i++) {
659 /* If it's in the cache, just go on to next one. */
660 if (incore(vp, rablks[i]))
661 continue;
662
663 /* Get a buffer for the read-ahead block */
664 (void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC);
665 }
666
667 /* Otherwise, we had to start a read for it; wait until it's valid. */
668 return (biowait(bp));
669 }
670
671 /*
672 * Read with single-block read-ahead. Defined in Bach (p.55), but
673 * implemented as a call to breadn().
674 * XXX for compatibility with old file systems.
675 */
676 int
677 breada(struct vnode *vp, daddr_t blkno, int size, daddr_t rablkno,
678 int rabsize, kauth_cred_t cred, struct buf **bpp)
679 {
680
681 return (breadn(vp, blkno, size, &rablkno, &rabsize, 1, cred, bpp));
682 }
683
684 /*
685 * Block write. Described in Bach (p.56)
686 */
687 int
688 bwrite(struct buf *bp)
689 {
690 int rv, sync, wasdelayed, s;
691 struct lwp *l = (curlwp != NULL ? curlwp : &lwp0); /* XXX */
692 struct proc *p = l->l_proc;
693 struct vnode *vp;
694 struct mount *mp;
695
696 KASSERT(ISSET(bp->b_flags, B_BUSY));
697
698 vp = bp->b_vp;
699 if (vp != NULL) {
700 if (vp->v_type == VBLK)
701 mp = vp->v_specmountpoint;
702 else
703 mp = vp->v_mount;
704 } else {
705 mp = NULL;
706 }
707
708 /*
709 * Remember buffer type, to switch on it later. If the write was
710 * synchronous, but the file system was mounted with MNT_ASYNC,
711 * convert it to a delayed write.
712 * XXX note that this relies on delayed tape writes being converted
713 * to async, not sync writes (which is safe, but ugly).
714 */
715 sync = !ISSET(bp->b_flags, B_ASYNC);
716 if (sync && mp != NULL && ISSET(mp->mnt_flag, MNT_ASYNC)) {
717 bdwrite(bp);
718 return (0);
719 }
720
721 /*
722 * Collect statistics on synchronous and asynchronous writes.
723 * Writes to block devices are charged to their associated
724 * filesystem (if any).
725 */
726 if (mp != NULL) {
727 if (sync)
728 mp->mnt_stat.f_syncwrites++;
729 else
730 mp->mnt_stat.f_asyncwrites++;
731 }
732
733 s = splbio();
734 simple_lock(&bp->b_interlock);
735
736 wasdelayed = ISSET(bp->b_flags, B_DELWRI);
737
738 CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));
739
740 /*
741 * Pay for the I/O operation and make sure the buf is on the correct
742 * vnode queue.
743 */
744 if (wasdelayed)
745 reassignbuf(bp, bp->b_vp);
746 else
747 p->p_stats->p_ru.ru_oublock++;
748
749 /* Initiate disk write. Make sure the appropriate party is charged. */
750 V_INCR_NUMOUTPUT(bp->b_vp);
751 simple_unlock(&bp->b_interlock);
752 splx(s);
753
754 if (sync)
755 BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
756 else
757 BIO_SETPRIO(bp, BPRIO_TIMELIMITED);
758
759 VOP_STRATEGY(vp, bp);
760
761 if (sync) {
762 /* If I/O was synchronous, wait for it to complete. */
763 rv = biowait(bp);
764
765 /* Release the buffer. */
766 brelse(bp);
767
768 return (rv);
769 } else {
770 return (0);
771 }
772 }
773
774 int
775 vn_bwrite(void *v)
776 {
777 struct vop_bwrite_args *ap = v;
778
779 return (bwrite(ap->a_bp));
780 }
781
782 /*
783 * Delayed write.
784 *
785 * The buffer is marked dirty, but is not queued for I/O.
786 * This routine should be used when the buffer is expected
787 * to be modified again soon, typically a small write that
788 * partially fills a buffer.
789 *
790 * NB: magnetic tapes cannot be delayed; they must be
791 * written in the order that the writes are requested.
792 *
793 * Described in Leffler, et al. (pp. 208-213).
794 */
795 void
796 bdwrite(struct buf *bp)
797 {
798 struct lwp *l = (curlwp != NULL ? curlwp : &lwp0); /* XXX */
799 struct proc *p = l->l_proc;
800 const struct bdevsw *bdev;
801 int s;
802
803 /* If this is a tape block, write the block now. */
804 bdev = bdevsw_lookup(bp->b_dev);
805 if (bdev != NULL && bdev->d_type == D_TAPE) {
806 bawrite(bp);
807 return;
808 }
809
810 /*
811 * If the block hasn't been seen before:
812 * (1) Mark it as having been seen,
813 * (2) Charge for the write,
814 * (3) Make sure it's on its vnode's correct block list.
815 */
816 s = splbio();
817 simple_lock(&bp->b_interlock);
818
819 KASSERT(ISSET(bp->b_flags, B_BUSY));
820
821 if (!ISSET(bp->b_flags, B_DELWRI)) {
822 SET(bp->b_flags, B_DELWRI);
823 p->p_stats->p_ru.ru_oublock++;
824 reassignbuf(bp, bp->b_vp);
825 }
826
827 /* Otherwise, the "write" is done, so mark and release the buffer. */
828 CLR(bp->b_flags, B_DONE);
829 simple_unlock(&bp->b_interlock);
830 splx(s);
831
832 brelse(bp);
833 }
834
835 /*
836 * Asynchronous block write; just an asynchronous bwrite().
837 */
838 void
839 bawrite(struct buf *bp)
840 {
841 int s;
842
843 s = splbio();
844 simple_lock(&bp->b_interlock);
845
846 KASSERT(ISSET(bp->b_flags, B_BUSY));
847
848 SET(bp->b_flags, B_ASYNC);
849 simple_unlock(&bp->b_interlock);
850 splx(s);
851 VOP_BWRITE(bp);
852 }
853
854 /*
855 * Same as first half of bdwrite, mark buffer dirty, but do not release it.
856 * Call at splbio() and with the buffer interlock locked.
857 * Note: called only from biodone() through ffs softdep's bioops.io_complete()
858 */
859 void
860 bdirty(struct buf *bp)
861 {
862 struct lwp *l = (curlwp != NULL ? curlwp : &lwp0); /* XXX */
863 struct proc *p = l->l_proc;
864
865 LOCK_ASSERT(simple_lock_held(&bp->b_interlock));
866 KASSERT(ISSET(bp->b_flags, B_BUSY));
867
868 CLR(bp->b_flags, B_AGE);
869
870 if (!ISSET(bp->b_flags, B_DELWRI)) {
871 SET(bp->b_flags, B_DELWRI);
872 p->p_stats->p_ru.ru_oublock++;
873 reassignbuf(bp, bp->b_vp);
874 }
875 }
876
877 /*
878 * Release a buffer on to the free lists.
879 * Described in Bach (p. 46).
880 */
881 void
882 brelse(struct buf *bp)
883 {
884 struct bqueue *bufq;
885 int s;
886
887 /* Block disk interrupts. */
888 s = splbio();
889 simple_lock(&bqueue_slock);
890 simple_lock(&bp->b_interlock);
891
892 KASSERT(ISSET(bp->b_flags, B_BUSY));
893 KASSERT(!ISSET(bp->b_flags, B_CALL));
894
895 /* Wake up any processes waiting for any buffer to become free. */
896 if (needbuffer) {
897 needbuffer = 0;
898 wakeup(&needbuffer);
899 }
900
901 	/* Wake up any processes waiting for _this_ buffer to become free. */
902 if (ISSET(bp->b_flags, B_WANTED)) {
903 CLR(bp->b_flags, B_WANTED|B_AGE);
904 wakeup(bp);
905 }
906
907 /*
908 * Determine which queue the buffer should be on, then put it there.
909 */
910
911 /* If it's locked, don't report an error; try again later. */
912 if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
913 CLR(bp->b_flags, B_ERROR);
914
915 /* If it's not cacheable, or an error, mark it invalid. */
916 if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
917 SET(bp->b_flags, B_INVAL);
918
919 if (ISSET(bp->b_flags, B_VFLUSH)) {
920 /*
921 * This is a delayed write buffer that was just flushed to
922 * disk. It is still on the LRU queue. If it's become
923 * invalid, then we need to move it to a different queue;
924 * otherwise leave it in its current position.
925 */
926 CLR(bp->b_flags, B_VFLUSH);
927 if (!ISSET(bp->b_flags, B_ERROR|B_INVAL|B_LOCKED|B_AGE)) {
928 KDASSERT(!debug_verify_freelist || checkfreelist(bp, &bufqueues[BQ_LRU]));
929 goto already_queued;
930 } else {
931 bremfree(bp);
932 }
933 }
934
935 KDASSERT(!debug_verify_freelist || !checkfreelist(bp, &bufqueues[BQ_AGE]));
936 KDASSERT(!debug_verify_freelist || !checkfreelist(bp, &bufqueues[BQ_LRU]));
937 KDASSERT(!debug_verify_freelist || !checkfreelist(bp, &bufqueues[BQ_LOCKED]));
938
939 if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) {
940 /*
941 * If it's invalid or empty, dissociate it from its vnode
942 * and put on the head of the appropriate queue.
943 */
944 if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
945 (*bioops.io_deallocate)(bp);
946 CLR(bp->b_flags, B_DONE|B_DELWRI);
947 if (bp->b_vp) {
948 reassignbuf(bp, bp->b_vp);
949 brelvp(bp);
950 }
951 if (bp->b_bufsize <= 0)
952 /* no data */
953 goto already_queued;
954 else
955 /* invalid data */
956 bufq = &bufqueues[BQ_AGE];
957 binsheadfree(bp, bufq);
958 } else {
959 /*
960 * It has valid data. Put it on the end of the appropriate
961 * queue, so that it'll stick around for as long as possible.
962 * If buf is AGE, but has dependencies, must put it on last
963 * bufqueue to be scanned, ie LRU. This protects against the
964 * livelock where BQ_AGE only has buffers with dependencies,
965 * and we thus never get to the dependent buffers in BQ_LRU.
966 */
967 if (ISSET(bp->b_flags, B_LOCKED))
968 /* locked in core */
969 bufq = &bufqueues[BQ_LOCKED];
970 else if (!ISSET(bp->b_flags, B_AGE))
971 /* valid data */
972 bufq = &bufqueues[BQ_LRU];
973 else {
974 /* stale but valid data */
975 int has_deps;
976
977 if (LIST_FIRST(&bp->b_dep) != NULL &&
978 bioops.io_countdeps)
979 has_deps = (*bioops.io_countdeps)(bp, 0);
980 else
981 has_deps = 0;
982 bufq = has_deps ? &bufqueues[BQ_LRU] :
983 &bufqueues[BQ_AGE];
984 }
985 binstailfree(bp, bufq);
986 }
987
988 already_queued:
989 /* Unlock the buffer. */
990 CLR(bp->b_flags, B_AGE|B_ASYNC|B_BUSY|B_NOCACHE);
991 SET(bp->b_flags, B_CACHE);
992
993 /* Allow disk interrupts. */
994 simple_unlock(&bp->b_interlock);
995 simple_unlock(&bqueue_slock);
996 splx(s);
997 if (bp->b_bufsize <= 0) {
998 #ifdef DEBUG
999 memset((char *)bp, 0, sizeof(*bp));
1000 #endif
1001 pool_put(&bufpool, bp);
1002 }
1003 }
1004
1005 /*
1006 * Determine if a block is in the cache.
1007 * Just look on what would be its hash chain. If it's there, return
1008  * a pointer to it, unless it's marked invalid. Buffers that are
1009  * marked B_INVAL are treated as not cached and are never
1010  * returned here.
1011 */
1012 struct buf *
1013 incore(struct vnode *vp, daddr_t blkno)
1014 {
1015 struct buf *bp;
1016
1017 /* Search hash chain */
1018 LIST_FOREACH(bp, BUFHASH(vp, blkno), b_hash) {
1019 if (bp->b_lblkno == blkno && bp->b_vp == vp &&
1020 !ISSET(bp->b_flags, B_INVAL))
1021 return (bp);
1022 }
1023
1024 return (NULL);
1025 }
1026
1027 /*
1028 * Get a block of requested size that is associated with
1029 * a given vnode and block offset. If it is found in the
1030 * block cache, mark it as having been found, make it busy
1031 * and return it. Otherwise, return an empty block of the
1032  * correct size. It is up to the caller to ensure that the
1033  * cached blocks are of the correct size.
1034 */
1035 struct buf *
1036 getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo)
1037 {
1038 struct buf *bp;
1039 int s, err;
1040 int preserve;
1041
1042 start:
1043 s = splbio();
1044 simple_lock(&bqueue_slock);
1045 bp = incore(vp, blkno);
1046 if (bp != NULL) {
1047 simple_lock(&bp->b_interlock);
1048 if (ISSET(bp->b_flags, B_BUSY)) {
1049 simple_unlock(&bqueue_slock);
1050 if (curproc == uvm.pagedaemon_proc) {
1051 simple_unlock(&bp->b_interlock);
1052 splx(s);
1053 return NULL;
1054 }
1055 SET(bp->b_flags, B_WANTED);
1056 err = ltsleep(bp, slpflag | (PRIBIO + 1) | PNORELOCK,
1057 "getblk", slptimeo, &bp->b_interlock);
1058 splx(s);
1059 if (err)
1060 return (NULL);
1061 goto start;
1062 }
1063 #ifdef DIAGNOSTIC
1064 if (ISSET(bp->b_flags, B_DONE|B_DELWRI) &&
1065 bp->b_bcount < size && vp->v_type != VBLK)
1066 panic("getblk: block size invariant failed");
1067 #endif
1068 SET(bp->b_flags, B_BUSY);
1069 bremfree(bp);
1070 preserve = 1;
1071 } else {
1072 if ((bp = getnewbuf(slpflag, slptimeo, 0)) == NULL) {
1073 simple_unlock(&bqueue_slock);
1074 splx(s);
1075 goto start;
1076 }
1077
1078 binshash(bp, BUFHASH(vp, blkno));
1079 bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = blkno;
1080 bgetvp(vp, bp);
1081 preserve = 0;
1082 }
1083 simple_unlock(&bp->b_interlock);
1084 simple_unlock(&bqueue_slock);
1085 splx(s);
1086 /*
1087 * LFS can't track total size of B_LOCKED buffer (locked_queue_bytes)
1088 * if we re-size buffers here.
1089 */
1090 if (ISSET(bp->b_flags, B_LOCKED)) {
1091 KASSERT(bp->b_bufsize >= size);
1092 } else {
1093 allocbuf(bp, size, preserve);
1094 }
1095 BIO_SETPRIO(bp, BPRIO_DEFAULT);
1096 return (bp);
1097 }
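/*
 * Minimal usage sketch (hypothetical helper, not part of this file):
 * a writer that will overwrite the whole block does not need its old
 * contents, so it can use getblk() directly and schedule the buffer
 * with bdwrite() (or bwrite() for a synchronous write).
 */
#if 0
static void
example_write_block(struct vnode *vp, daddr_t lbn, int bsize,
    const void *data)
{
	struct buf *bp;

	bp = getblk(vp, lbn, bsize, 0, 0);	/* returned B_BUSY */
	memcpy(bp->b_data, data, bsize);	/* fill the whole block */
	bdwrite(bp);				/* mark dirty, write later */
}
#endif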
1098
1099 /*
1100 * Get an empty, disassociated buffer of given size.
1101 */
1102 struct buf *
1103 geteblk(int size)
1104 {
1105 struct buf *bp;
1106 int s;
1107
1108 s = splbio();
1109 simple_lock(&bqueue_slock);
1110 while ((bp = getnewbuf(0, 0, 0)) == 0)
1111 ;
1112
1113 SET(bp->b_flags, B_INVAL);
1114 binshash(bp, &invalhash);
1115 simple_unlock(&bqueue_slock);
1116 simple_unlock(&bp->b_interlock);
1117 splx(s);
1118 BIO_SETPRIO(bp, BPRIO_DEFAULT);
1119 allocbuf(bp, size, 0);
1120 return (bp);
1121 }
1122
1123 /*
1124 * Expand or contract the actual memory allocated to a buffer.
1125 *
1126 * If the buffer shrinks, data is lost, so it's up to the
1127 * caller to have written it out *first*; this routine will not
1128  * start a write.  If the buffer grows, it's the caller's
1129 * responsibility to fill out the buffer's additional contents.
1130 */
1131 void
1132 allocbuf(struct buf *bp, int size, int preserve)
1133 {
1134 vsize_t oldsize, desired_size;
1135 caddr_t addr;
1136 int s, delta;
1137
1138 desired_size = buf_roundsize(size);
1139 if (desired_size > MAXBSIZE)
1140 printf("allocbuf: buffer larger than MAXBSIZE requested");
1141
1142 bp->b_bcount = size;
1143
1144 oldsize = bp->b_bufsize;
1145 if (oldsize == desired_size)
1146 return;
1147
1148 /*
1149 * If we want a buffer of a different size, re-allocate the
1150 * buffer's memory; copy old content only if needed.
1151 */
1152 addr = buf_malloc(desired_size);
1153 if (preserve)
1154 memcpy(addr, bp->b_data, MIN(oldsize,desired_size));
1155 if (bp->b_data != NULL)
1156 buf_mrelease(bp->b_data, oldsize);
1157 bp->b_data = addr;
1158 bp->b_bufsize = desired_size;
1159
1160 /*
1161 * Update overall buffer memory counter (protected by bqueue_slock)
1162 */
1163 delta = (long)desired_size - (long)oldsize;
1164
1165 s = splbio();
1166 simple_lock(&bqueue_slock);
1167 if ((bufmem += delta) > bufmem_hiwater) {
1168 /*
1169 * Need to trim overall memory usage.
1170 */
1171 while (buf_canrelease()) {
1172 if (curcpu()->ci_schedstate.spc_flags &
1173 SPCF_SHOULDYIELD) {
1174 simple_unlock(&bqueue_slock);
1175 splx(s);
1176 preempt(1);
1177 s = splbio();
1178 simple_lock(&bqueue_slock);
1179 }
1180
1181 if (buf_trim() == 0)
1182 break;
1183 }
1184 }
1185
1186 simple_unlock(&bqueue_slock);
1187 splx(s);
1188 }
1189
1190 /*
1191 * Find a buffer which is available for use.
1192 * Select something from a free list.
1193 * Preference is to AGE list, then LRU list.
1194 *
1195 * Called at splbio and with buffer queues locked.
1196 * Return buffer locked.
1197 */
1198 struct buf *
1199 getnewbuf(int slpflag, int slptimeo, int from_bufq)
1200 {
1201 struct buf *bp;
1202
1203 start:
1204 LOCK_ASSERT(simple_lock_held(&bqueue_slock));
1205
1206 /*
1207 * Get a new buffer from the pool; but use NOWAIT because
1208 * we have the buffer queues locked.
1209 */
1210 if (!from_bufq && buf_lotsfree() &&
1211 (bp = pool_get(&bufpool, PR_NOWAIT)) != NULL) {
1212 memset((char *)bp, 0, sizeof(*bp));
1213 BUF_INIT(bp);
1214 bp->b_dev = NODEV;
1215 bp->b_vnbufs.le_next = NOLIST;
1216 bp->b_flags = B_BUSY;
1217 simple_lock(&bp->b_interlock);
1218 #if defined(DIAGNOSTIC)
1219 bp->b_freelistindex = -1;
1220 #endif /* defined(DIAGNOSTIC) */
1221 return (bp);
1222 }
1223
1224 if ((bp = TAILQ_FIRST(&bufqueues[BQ_AGE].bq_queue)) != NULL ||
1225 (bp = TAILQ_FIRST(&bufqueues[BQ_LRU].bq_queue)) != NULL) {
1226 simple_lock(&bp->b_interlock);
1227 bremfree(bp);
1228 } else {
1229 /*
1230 * XXX: !from_bufq should be removed.
1231 */
1232 if (!from_bufq || curproc != uvm.pagedaemon_proc) {
1233 /* wait for a free buffer of any kind */
1234 needbuffer = 1;
1235 ltsleep(&needbuffer, slpflag|(PRIBIO + 1),
1236 "getnewbuf", slptimeo, &bqueue_slock);
1237 }
1238 return (NULL);
1239 }
1240
1241 #ifdef DIAGNOSTIC
1242 if (bp->b_bufsize <= 0)
1243 panic("buffer %p: on queue but empty", bp);
1244 #endif
1245
1246 if (ISSET(bp->b_flags, B_VFLUSH)) {
1247 /*
1248 * This is a delayed write buffer being flushed to disk. Make
1249 * sure it gets aged out of the queue when it's finished, and
1250 * leave it off the LRU queue.
1251 */
1252 CLR(bp->b_flags, B_VFLUSH);
1253 SET(bp->b_flags, B_AGE);
1254 simple_unlock(&bp->b_interlock);
1255 goto start;
1256 }
1257
1258 /* Buffer is no longer on free lists. */
1259 SET(bp->b_flags, B_BUSY);
1260
1261 /*
1262 * If buffer was a delayed write, start it and return NULL
1263 * (since we might sleep while starting the write).
1264 */
1265 if (ISSET(bp->b_flags, B_DELWRI)) {
1266 /*
1267 * This buffer has gone through the LRU, so make sure it gets
1268 * reused ASAP.
1269 */
1270 SET(bp->b_flags, B_AGE);
1271 simple_unlock(&bp->b_interlock);
1272 simple_unlock(&bqueue_slock);
1273 bawrite(bp);
1274 simple_lock(&bqueue_slock);
1275 return (NULL);
1276 }
1277
1278 /* disassociate us from our vnode, if we had one... */
1279 if (bp->b_vp)
1280 brelvp(bp);
1281
1282 if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
1283 (*bioops.io_deallocate)(bp);
1284
1285 /* clear out various other fields */
1286 bp->b_flags = B_BUSY;
1287 bp->b_dev = NODEV;
1288 bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = 0;
1289 bp->b_iodone = 0;
1290 bp->b_error = 0;
1291 bp->b_resid = 0;
1292 bp->b_bcount = 0;
1293
1294 bremhash(bp);
1295 return (bp);
1296 }
1297
1298 /*
1299 * Attempt to free an aged buffer off the queues.
1300 * Called at splbio and with queue lock held.
1301 * Returns the amount of buffer memory freed.
1302 */
1303 static int
1304 buf_trim(void)
1305 {
1306 struct buf *bp;
1307 long size = 0;
1308
1309 /* Instruct getnewbuf() to get buffers off the queues */
1310 if ((bp = getnewbuf(PCATCH, 1, 1)) == NULL)
1311 return 0;
1312
1313 KASSERT(!ISSET(bp->b_flags, B_WANTED));
1314 simple_unlock(&bp->b_interlock);
1315 size = bp->b_bufsize;
1316 bufmem -= size;
1317 simple_unlock(&bqueue_slock);
1318 if (size > 0) {
1319 buf_mrelease(bp->b_data, size);
1320 bp->b_bcount = bp->b_bufsize = 0;
1321 }
1322 /* brelse() will return the buffer to the global buffer pool */
1323 brelse(bp);
1324 simple_lock(&bqueue_slock);
1325 return size;
1326 }
1327
1328 int
1329 buf_drain(int n)
1330 {
1331 int s, size = 0, sz;
1332
1333 s = splbio();
1334 simple_lock(&bqueue_slock);
1335
1336 while (size < n && bufmem > bufmem_lowater) {
1337 sz = buf_trim();
1338 if (sz <= 0)
1339 break;
1340 size += sz;
1341 }
1342
1343 simple_unlock(&bqueue_slock);
1344 splx(s);
1345 return size;
1346 }
1347
1348 /*
1349 * Wait for operations on the buffer to complete.
1350 * When they do, extract and return the I/O's error value.
1351 */
1352 int
1353 biowait(struct buf *bp)
1354 {
1355 int s, error;
1356
1357 s = splbio();
1358 simple_lock(&bp->b_interlock);
1359 while (!ISSET(bp->b_flags, B_DONE | B_DELWRI))
1360 ltsleep(bp, PRIBIO + 1, "biowait", 0, &bp->b_interlock);
1361
1362 /* check errors. */
1363 if (ISSET(bp->b_flags, B_ERROR))
1364 error = bp->b_error ? bp->b_error : EIO;
1365 else
1366 error = 0;
1367
1368 simple_unlock(&bp->b_interlock);
1369 splx(s);
1370 return (error);
1371 }
1372
1373 /*
1374 * Mark I/O complete on a buffer.
1375 *
1376 * If a callback has been requested, e.g. the pageout
1377 * daemon, do so. Otherwise, awaken waiting processes.
1378 *
1379 * [ Leffler, et al., says on p.247:
1380 * "This routine wakes up the blocked process, frees the buffer
1381 * for an asynchronous write, or, for a request by the pagedaemon
1382 * process, invokes a procedure specified in the buffer structure" ]
1383 *
1384 * In real life, the pagedaemon (or other system processes) wants
1385  * to do async stuff too, and doesn't want the buffer brelse()'d.
1386 * (for swap pager, that puts swap buffers on the free lists (!!!),
1387 * for the vn device, that puts malloc'd buffers on the free lists!)
1388 */
1389 void
1390 biodone(struct buf *bp)
1391 {
1392 int s = splbio();
1393
1394 simple_lock(&bp->b_interlock);
1395 if (ISSET(bp->b_flags, B_DONE))
1396 panic("biodone already");
1397 SET(bp->b_flags, B_DONE); /* note that it's done */
1398 BIO_SETPRIO(bp, BPRIO_DEFAULT);
1399
1400 if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete)
1401 (*bioops.io_complete)(bp);
1402
1403 	if (!ISSET(bp->b_flags, B_READ))	/* wake up vnode output waiters */
1404 vwakeup(bp);
1405
1406 /*
1407 * If necessary, call out. Unlock the buffer before calling
1408 	 * iodone() as the buffer isn't valid any more when it returns.
1409 */
1410 if (ISSET(bp->b_flags, B_CALL)) {
1411 CLR(bp->b_flags, B_CALL); /* but note callout done */
1412 simple_unlock(&bp->b_interlock);
1413 (*bp->b_iodone)(bp);
1414 } else {
1415 if (ISSET(bp->b_flags, B_ASYNC)) { /* if async, release */
1416 simple_unlock(&bp->b_interlock);
1417 brelse(bp);
1418 } else { /* or just wakeup the buffer */
1419 CLR(bp->b_flags, B_WANTED);
1420 wakeup(bp);
1421 simple_unlock(&bp->b_interlock);
1422 }
1423 }
1424
1425 splx(s);
1426 }
1427
1428 /*
1429 * Return a count of buffers on the "locked" queue.
1430 */
1431 int
1432 count_lock_queue(void)
1433 {
1434 struct buf *bp;
1435 int n = 0;
1436
1437 simple_lock(&bqueue_slock);
1438 TAILQ_FOREACH(bp, &bufqueues[BQ_LOCKED].bq_queue, b_freelist)
1439 n++;
1440 simple_unlock(&bqueue_slock);
1441 return (n);
1442 }
1443
1444 /*
1445 * Wait for all buffers to complete I/O
1446 * Return the number of "stuck" buffers.
1447 */
1448 int
1449 buf_syncwait(void)
1450 {
1451 struct buf *bp;
1452 int iter, nbusy, nbusy_prev = 0, dcount, s, ihash;
1453
1454 dcount = 10000;
1455 for (iter = 0; iter < 20;) {
1456 s = splbio();
1457 simple_lock(&bqueue_slock);
1458 nbusy = 0;
1459 for (ihash = 0; ihash < bufhash+1; ihash++) {
1460 LIST_FOREACH(bp, &bufhashtbl[ihash], b_hash) {
1461 if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
1462 nbusy++;
1463 /*
1464 * With soft updates, some buffers that are
1465 * written will be remarked as dirty until other
1466 * buffers are written.
1467 */
1468 if (bp->b_vp && bp->b_vp->v_mount
1469 && (bp->b_vp->v_mount->mnt_flag & MNT_SOFTDEP)
1470 && (bp->b_flags & B_DELWRI)) {
1471 simple_lock(&bp->b_interlock);
1472 bremfree(bp);
1473 bp->b_flags |= B_BUSY;
1474 nbusy++;
1475 simple_unlock(&bp->b_interlock);
1476 simple_unlock(&bqueue_slock);
1477 bawrite(bp);
1478 if (dcount-- <= 0) {
1479 printf("softdep ");
1480 splx(s);
1481 goto fail;
1482 }
1483 simple_lock(&bqueue_slock);
1484 }
1485 }
1486 }
1487
1488 simple_unlock(&bqueue_slock);
1489 splx(s);
1490
1491 if (nbusy == 0)
1492 break;
1493 if (nbusy_prev == 0)
1494 nbusy_prev = nbusy;
1495 printf("%d ", nbusy);
1496 tsleep(&nbusy, PRIBIO, "bflush",
1497 (iter == 0) ? 1 : hz / 25 * iter);
1498 if (nbusy >= nbusy_prev) /* we didn't flush anything */
1499 iter++;
1500 else
1501 nbusy_prev = nbusy;
1502 }
1503
1504 if (nbusy) {
1505 fail:;
1506 #if defined(DEBUG) || defined(DEBUG_HALT_BUSY)
1507 printf("giving up\nPrinting vnodes for busy buffers\n");
1508 s = splbio();
1509 for (ihash = 0; ihash < bufhash+1; ihash++) {
1510 LIST_FOREACH(bp, &bufhashtbl[ihash], b_hash) {
1511 if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
1512 vprint(NULL, bp->b_vp);
1513 }
1514 }
1515 splx(s);
1516 #endif
1517 }
1518
1519 return nbusy;
1520 }
1521
1522 static void
1523 sysctl_fillbuf(struct buf *i, struct buf_sysctl *o)
1524 {
1525
1526 o->b_flags = i->b_flags;
1527 o->b_error = i->b_error;
1528 o->b_prio = i->b_prio;
1529 o->b_dev = i->b_dev;
1530 o->b_bufsize = i->b_bufsize;
1531 o->b_bcount = i->b_bcount;
1532 o->b_resid = i->b_resid;
1533 o->b_addr = PTRTOUINT64(i->b_un.b_addr);
1534 o->b_blkno = i->b_blkno;
1535 o->b_rawblkno = i->b_rawblkno;
1536 o->b_iodone = PTRTOUINT64(i->b_iodone);
1537 o->b_proc = PTRTOUINT64(i->b_proc);
1538 o->b_vp = PTRTOUINT64(i->b_vp);
1539 o->b_saveaddr = PTRTOUINT64(i->b_saveaddr);
1540 o->b_lblkno = i->b_lblkno;
1541 }
1542
1543 #define KERN_BUFSLOP 20
1544 static int
1545 sysctl_dobuf(SYSCTLFN_ARGS)
1546 {
1547 struct buf *bp;
1548 struct buf_sysctl bs;
1549 char *dp;
1550 u_int i, op, arg;
1551 size_t len, needed, elem_size, out_size;
1552 int error, s, elem_count;
1553
1554 if (namelen == 1 && name[0] == CTL_QUERY)
1555 return (sysctl_query(SYSCTLFN_CALL(rnode)));
1556
1557 if (namelen != 4)
1558 return (EINVAL);
1559
1560 dp = oldp;
1561 len = (oldp != NULL) ? *oldlenp : 0;
1562 op = name[0];
1563 arg = name[1];
1564 elem_size = name[2];
1565 elem_count = name[3];
1566 out_size = MIN(sizeof(bs), elem_size);
1567
1568 /*
1569 * at the moment, these are just "placeholders" to make the
1570 * API for retrieving kern.buf data more extensible in the
1571 * future.
1572 *
1573 * XXX kern.buf currently has "netbsd32" issues. hopefully
1574 * these will be resolved at a later point.
1575 */
1576 if (op != KERN_BUF_ALL || arg != KERN_BUF_ALL ||
1577 elem_size < 1 || elem_count < 0)
1578 return (EINVAL);
1579
1580 error = 0;
1581 needed = 0;
1582 s = splbio();
1583 simple_lock(&bqueue_slock);
1584 for (i = 0; i < BQUEUES; i++) {
1585 TAILQ_FOREACH(bp, &bufqueues[i].bq_queue, b_freelist) {
1586 if (len >= elem_size && elem_count > 0) {
1587 sysctl_fillbuf(bp, &bs);
1588 error = copyout(&bs, dp, out_size);
1589 if (error)
1590 goto cleanup;
1591 dp += elem_size;
1592 len -= elem_size;
1593 }
1594 if (elem_count > 0) {
1595 needed += elem_size;
1596 if (elem_count != INT_MAX)
1597 elem_count--;
1598 }
1599 }
1600 }
1601 cleanup:
1602 simple_unlock(&bqueue_slock);
1603 splx(s);
1604
1605 *oldlenp = needed;
1606 if (oldp == NULL)
1607 *oldlenp += KERN_BUFSLOP * sizeof(struct buf);
1608
1609 return (error);
1610 }
1611
1612 static void
1613 sysctl_bufvm_common(void)
1614 {
1615 int64_t t;
1616
1617 /* Drain until below new high water mark */
1618 while ((t = (int64_t)bufmem - (int64_t)bufmem_hiwater) >= 0) {
1619 if (buf_drain(t / (2 * 1024)) <= 0)
1620 break;
1621 }
1622 }
1623
1624 static int
1625 sysctl_bufcache_update(SYSCTLFN_ARGS)
1626 {
1627 int t, error;
1628 struct sysctlnode node;
1629
1630 node = *rnode;
1631 node.sysctl_data = &t;
1632 t = *(int *)rnode->sysctl_data;
1633 error = sysctl_lookup(SYSCTLFN_CALL(&node));
1634 if (error || newp == NULL)
1635 return (error);
1636
1637 if (t < 0 || t > 100)
1638 return EINVAL;
1639 bufcache = t;
1640 buf_setwm();
1641
1642 sysctl_bufvm_common();
1643 return 0;
1644 }
1645
1646 static int
1647 sysctl_bufvm_update(SYSCTLFN_ARGS)
1648 {
1649 int64_t t;
1650 int error;
1651 struct sysctlnode node;
1652
1653 node = *rnode;
1654 node.sysctl_data = &t;
1655 t = *(int64_t *)rnode->sysctl_data;
1656 error = sysctl_lookup(SYSCTLFN_CALL(&node));
1657 if (error || newp == NULL)
1658 return (error);
1659
1660 if (t < 0)
1661 return EINVAL;
1662 if (rnode->sysctl_data == &bufmem_lowater) {
1663 if (bufmem_hiwater - t < 16)
1664 return (EINVAL);
1665 bufmem_lowater = t;
1666 } else if (rnode->sysctl_data == &bufmem_hiwater) {
1667 if (t - bufmem_lowater < 16)
1668 return (EINVAL);
1669 bufmem_hiwater = t;
1670 } else
1671 return (EINVAL);
1672
1673 sysctl_bufvm_common();
1674
1675 return 0;
1676 }
1677
1678 SYSCTL_SETUP(sysctl_kern_buf_setup, "sysctl kern.buf subtree setup")
1679 {
1680
1681 sysctl_createv(clog, 0, NULL, NULL,
1682 CTLFLAG_PERMANENT,
1683 CTLTYPE_NODE, "kern", NULL,
1684 NULL, 0, NULL, 0,
1685 CTL_KERN, CTL_EOL);
1686 sysctl_createv(clog, 0, NULL, NULL,
1687 CTLFLAG_PERMANENT,
1688 CTLTYPE_NODE, "buf",
1689 SYSCTL_DESCR("Kernel buffer cache information"),
1690 sysctl_dobuf, 0, NULL, 0,
1691 CTL_KERN, KERN_BUF, CTL_EOL);
1692 }
1693
1694 SYSCTL_SETUP(sysctl_vm_buf_setup, "sysctl vm.buf* subtree setup")
1695 {
1696
1697 sysctl_createv(clog, 0, NULL, NULL,
1698 CTLFLAG_PERMANENT,
1699 CTLTYPE_NODE, "vm", NULL,
1700 NULL, 0, NULL, 0,
1701 CTL_VM, CTL_EOL);
1702
1703 sysctl_createv(clog, 0, NULL, NULL,
1704 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
1705 CTLTYPE_INT, "bufcache",
1706 SYSCTL_DESCR("Percentage of physical memory to use for "
1707 "buffer cache"),
1708 sysctl_bufcache_update, 0, &bufcache, 0,
1709 CTL_VM, CTL_CREATE, CTL_EOL);
1710 sysctl_createv(clog, 0, NULL, NULL,
1711 CTLFLAG_PERMANENT|CTLFLAG_READONLY,
1712 CTLTYPE_QUAD, "bufmem",
1713 SYSCTL_DESCR("Amount of kernel memory used by buffer "
1714 "cache"),
1715 NULL, 0, &bufmem, 0,
1716 CTL_VM, CTL_CREATE, CTL_EOL);
1717 sysctl_createv(clog, 0, NULL, NULL,
1718 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
1719 CTLTYPE_QUAD, "bufmem_lowater",
1720 SYSCTL_DESCR("Minimum amount of kernel memory to "
1721 "reserve for buffer cache"),
1722 sysctl_bufvm_update, 0, &bufmem_lowater, 0,
1723 CTL_VM, CTL_CREATE, CTL_EOL);
1724 sysctl_createv(clog, 0, NULL, NULL,
1725 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
1726 CTLTYPE_QUAD, "bufmem_hiwater",
1727 SYSCTL_DESCR("Maximum amount of kernel memory to use "
1728 "for buffer cache"),
1729 sysctl_bufvm_update, 0, &bufmem_hiwater, 0,
1730 CTL_VM, CTL_CREATE, CTL_EOL);
1731 }
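/*
 * Illustrative userland view of the nodes created above (assumed usage,
 * not part of this file): "sysctl vm.bufmem" reads the current buffer
 * cache size and "sysctl -w vm.bufmem_hiwater=N" retunes the high water
 * mark.  Programmatically, for example:
 */
#if 0
/* Userland sketch, not kernel code. */
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int64_t val;
	size_t len = sizeof(val);

	/* vm.bufmem is CTLTYPE_QUAD and read-only */
	if (sysctlbyname("vm.bufmem", &val, &len, NULL, 0) == -1)
		return 1;
	printf("buffer cache currently uses %lld bytes\n", (long long)val);
	return 0;
}
#endif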
1732
1733 #ifdef DEBUG
1734 /*
1735 * Print out statistics on the current allocation of the buffer pool.
1736 * Can be enabled to print out on every ``sync'' by setting "syncprt"
1737 * in vfs_syscalls.c using sysctl.
1738 */
1739 void
1740 vfs_bufstats(void)
1741 {
1742 int s, i, j, count;
1743 struct buf *bp;
1744 struct bqueue *dp;
1745 int counts[(MAXBSIZE / PAGE_SIZE) + 1];
1746 static const char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE" };
1747
1748 for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
1749 count = 0;
1750 for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
1751 counts[j] = 0;
1752 s = splbio();
1753 TAILQ_FOREACH(bp, &dp->bq_queue, b_freelist) {
1754 counts[bp->b_bufsize/PAGE_SIZE]++;
1755 count++;
1756 }
1757 splx(s);
1758 printf("%s: total-%d", bname[i], count);
1759 for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
1760 if (counts[j] != 0)
1761 printf(", %d-%d", j * PAGE_SIZE, counts[j]);
1762 printf("\n");
1763 }
1764 }
1765 #endif /* DEBUG */
1766
1767 /* ------------------------------ */
1768
1769 static POOL_INIT(bufiopool, sizeof(struct buf), 0, 0, 0, "biopl", NULL);
1770
1771 static struct buf *
1772 getiobuf1(int prflags)
1773 {
1774 struct buf *bp;
1775 int s;
1776
1777 s = splbio();
1778 bp = pool_get(&bufiopool, prflags);
1779 splx(s);
1780 if (bp != NULL) {
1781 BUF_INIT(bp);
1782 }
1783 return bp;
1784 }
1785
1786 struct buf *
1787 getiobuf(void)
1788 {
1789
1790 return getiobuf1(PR_WAITOK);
1791 }
1792
1793 struct buf *
1794 getiobuf_nowait(void)
1795 {
1796
1797 return getiobuf1(PR_NOWAIT);
1798 }
1799
1800 void
1801 putiobuf(struct buf *bp)
1802 {
1803 int s;
1804
1805 s = splbio();
1806 pool_put(&bufiopool, bp);
1807 splx(s);
1808 }
1809
1810 /*
1811 * nestiobuf_iodone: b_iodone callback for nested buffers.
1812 */
1813
1814 void
1815 nestiobuf_iodone(struct buf *bp)
1816 {
1817 struct buf *mbp = bp->b_private;
1818 int error;
1819 int donebytes;
1820
1821 KASSERT(bp->b_bcount <= bp->b_bufsize);
1822 KASSERT(mbp != bp);
1823
1824 error = 0;
1825 if ((bp->b_flags & B_ERROR) != 0) {
1826 error = EIO;
1827 /* check if an error code was returned */
1828 if (bp->b_error)
1829 error = bp->b_error;
1830 } else if ((bp->b_bcount < bp->b_bufsize) || (bp->b_resid > 0)) {
1831 /*
1832 		 * Not all got transferred, raise an error. We have no way to
1833 * propagate these conditions to mbp.
1834 */
1835 error = EIO;
1836 }
1837
1838 donebytes = bp->b_bufsize;
1839
1840 putiobuf(bp);
1841 nestiobuf_done(mbp, donebytes, error);
1842 }
1843
1844 /*
1845 * nestiobuf_setup: setup a "nested" buffer.
1846 *
1847 * => 'mbp' is a "master" buffer which is being divided into sub pieces.
1848 * => 'bp' should be a buffer allocated by getiobuf or getiobuf_nowait.
1849 * => 'offset' is a byte offset in the master buffer.
1850 * => 'size' is a size in bytes of this nested buffer.
1851 */
1852
1853 void
1854 nestiobuf_setup(struct buf *mbp, struct buf *bp, int offset, size_t size)
1855 {
1856 const int b_read = mbp->b_flags & B_READ;
1857 struct vnode *vp = mbp->b_vp;
1858
1859 KASSERT(mbp->b_bcount >= offset + size);
1860 bp->b_vp = vp;
1861 bp->b_flags = B_BUSY | B_CALL | B_ASYNC | b_read;
1862 bp->b_iodone = nestiobuf_iodone;
1863 bp->b_data = mbp->b_data + offset;
1864 bp->b_resid = bp->b_bcount = size;
1865 bp->b_bufsize = bp->b_bcount;
1866 bp->b_private = mbp;
1867 BIO_COPYPRIO(bp, mbp);
1868 if (!b_read && vp != NULL) {
1869 int s;
1870
1871 s = splbio();
1872 V_INCR_NUMOUTPUT(vp);
1873 splx(s);
1874 }
1875 }
1876
1877 /*
1878 * nestiobuf_done: propagate completion to the master buffer.
1879 *
1880 * => 'donebytes' specifies how many bytes in the 'mbp' is completed.
1881 * => 'error' is an errno(2) that 'donebytes' has been completed with.
1882 */
1883
1884 void
1885 nestiobuf_done(struct buf *mbp, int donebytes, int error)
1886 {
1887 int s;
1888
1889 if (donebytes == 0) {
1890 return;
1891 }
1892 s = splbio();
1893 KASSERT(mbp->b_resid >= donebytes);
1894 if (error) {
1895 mbp->b_flags |= B_ERROR;
1896 mbp->b_error = error;
1897 }
1898 mbp->b_resid -= donebytes;
1899 if (mbp->b_resid == 0) {
1900 if ((mbp->b_flags & B_ERROR) != 0) {
1901 mbp->b_resid = mbp->b_bcount; /* be conservative */
1902 }
1903 biodone(mbp);
1904 }
1905 splx(s);
1906 }
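/*
 * Minimal usage sketch (hypothetical driver code, not part of this
 * file): a driver splitting one master buffer across two sub-requests
 * allocates nested buffers with getiobuf(), attaches them with
 * nestiobuf_setup() and issues them; each completion runs
 * nestiobuf_iodone()/nestiobuf_done(), and biodone(mbp) fires once the
 * whole master has been accounted for.  The sketch assumes the caller
 * has set mbp->b_resid = mbp->b_bcount and that "half" is a multiple
 * of DEV_BSIZE.
 */
#if 0
static void
example_split_io(struct buf *mbp)
{
	struct buf *bp;
	int half = mbp->b_bcount / 2;

	/* first half */
	bp = getiobuf();
	nestiobuf_setup(mbp, bp, 0, half);
	bp->b_blkno = mbp->b_blkno;
	VOP_STRATEGY(mbp->b_vp, bp);

	/* second half */
	bp = getiobuf();
	nestiobuf_setup(mbp, bp, half, mbp->b_bcount - half);
	bp->b_blkno = mbp->b_blkno + btodb(half);
	VOP_STRATEGY(mbp->b_vp, bp);
}
#endif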