FreeBSD/Linux Kernel Cross Reference
sys/ufs/lfs/lfs_bio.c
1 /* $NetBSD: lfs_bio.c,v 1.77 2004/01/28 10:54:23 yamt Exp $ */
2
3 /*-
4 * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Konrad E. Schroder <perseant@hhhh.org>.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38 /*
39 * Copyright (c) 1991, 1993
40 * The Regents of the University of California. All rights reserved.
41 *
42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that the following conditions
44 * are met:
45 * 1. Redistributions of source code must retain the above copyright
46 * notice, this list of conditions and the following disclaimer.
47 * 2. Redistributions in binary form must reproduce the above copyright
48 * notice, this list of conditions and the following disclaimer in the
49 * documentation and/or other materials provided with the distribution.
50 * 3. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)lfs_bio.c 8.10 (Berkeley) 6/10/95
67 */
68
69 #include <sys/cdefs.h>
70 __KERNEL_RCSID(0, "$NetBSD: lfs_bio.c,v 1.77 2004/01/28 10:54:23 yamt Exp $");
71
72 #include <sys/param.h>
73 #include <sys/systm.h>
74 #include <sys/proc.h>
75 #include <sys/buf.h>
76 #include <sys/vnode.h>
77 #include <sys/resourcevar.h>
78 #include <sys/mount.h>
79 #include <sys/kernel.h>
80
81 #include <ufs/ufs/inode.h>
82 #include <ufs/ufs/ufsmount.h>
83 #include <ufs/ufs/ufs_extern.h>
84
85 #include <ufs/lfs/lfs.h>
86 #include <ufs/lfs/lfs_extern.h>
87
88 #include <uvm/uvm.h>
89
90 /* Macros to clear/set/test flags. */
91 # define SET(t, f) (t) |= (f)
92 # define CLR(t, f) (t) &= ~(f)
93 # define ISSET(t, f) ((t) & (f))
94
95 /*
96 * LFS block write function.
97 *
98 * XXX
99 * No write cost accounting is done.
100 * This is almost certainly wrong for synchronous operations and NFS.
101 *
102 * protected by lfs_subsys_lock.
103 */
104 int locked_queue_count = 0; /* Count of locked-down buffers. */
105 long locked_queue_bytes = 0L; /* Total size of locked buffers. */
106 int lfs_subsys_pages = 0; /* Total number of LFS-written pages */
107 int lfs_writing = 0; /* Set if already kicked off a writer
108 because of buffer space */
109 /* Lock for the above */
110 struct simplelock lfs_subsys_lock = SIMPLELOCK_INITIALIZER;
111
112 extern int lfs_dostats;
113
114 /*
115 * reserved number/bytes of locked buffers
116 */
117 int locked_queue_rcount = 0;
118 long locked_queue_rbytes = 0L;
119
120 int lfs_fits_buf(struct lfs *, int, int);
121 int lfs_reservebuf(struct lfs *, struct vnode *vp, struct vnode *vp2,
122 int, int);
123 int lfs_reserveavail(struct lfs *, struct vnode *vp, struct vnode *vp2, int);
124
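/*
 * Return nonzero if "n" more locked-down buffers totalling "bytes" bytes
 * would still fit under the LFS_WAIT_BUFS / LFS_WAIT_BYTES limits,
 * counting both the current and the reserved totals.  Must be called
 * with lfs_subsys_lock held.
 */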
125 int
126 lfs_fits_buf(struct lfs *fs, int n, int bytes)
127 {
128 int count_fit, bytes_fit;
129
130 LOCK_ASSERT(simple_lock_held(&lfs_subsys_lock));
131
132 count_fit =
133 (locked_queue_count + locked_queue_rcount + n < LFS_WAIT_BUFS);
134 bytes_fit =
135 (locked_queue_bytes + locked_queue_rbytes + bytes < LFS_WAIT_BYTES);
136
137 #ifdef DEBUG_LFS
138 if (!count_fit) {
139 printf("lfs_fits_buf: no fit count: %d + %d + %d >= %d\n",
140 locked_queue_count, locked_queue_rcount,
141 n, LFS_WAIT_BUFS);
142 }
143 if (!bytes_fit) {
144 printf("lfs_fits_buf: no fit bytes: %ld + %ld + %d >= %ld\n",
145 locked_queue_bytes, locked_queue_rbytes,
146 bytes, LFS_WAIT_BYTES);
147 }
148 #endif /* DEBUG_LFS */
149
150 return (count_fit && bytes_fit);
151 }
152
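/*
 * Reserve "n" buffer headers and "bytes" bytes of locked-queue space for
 * a coming operation (or release them again if "n" is negative).  If the
 * reservation would push the totals past the wait thresholds, kick off a
 * flush and sleep until the space becomes available.
 */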
153 /* ARGSUSED */
154 int
155 lfs_reservebuf(struct lfs *fs, struct vnode *vp, struct vnode *vp2,
156 int n, int bytes)
157 {
158 KASSERT(locked_queue_rcount >= 0);
159 KASSERT(locked_queue_rbytes >= 0);
160
161 simple_lock(&lfs_subsys_lock);
162 while (n > 0 && !lfs_fits_buf(fs, n, bytes)) {
163 int error;
164
165 lfs_flush(fs, 0);
166
167 error = ltsleep(&locked_queue_count, PCATCH | PUSER,
168 "lfsresbuf", hz * LFS_BUFWAIT, &lfs_subsys_lock);
169 if (error && error != EWOULDBLOCK) {
170 simple_unlock(&lfs_subsys_lock);
171 return error;
172 }
173 }
174
175 locked_queue_rcount += n;
176 locked_queue_rbytes += bytes;
177
178 simple_unlock(&lfs_subsys_lock);
179
180 KASSERT(locked_queue_rcount >= 0);
181 KASSERT(locked_queue_rbytes >= 0);
182
183 return 0;
184 }
185
186 /*
187 * Try to reserve some blocks, prior to performing a sensitive operation that
188 * requires the vnode lock to be honored. If there is not enough space, give
189 * up the vnode lock temporarily and wait for the space to become available.
190 *
191 * Called with vp locked. (Note however that if fsb < 0, vp is ignored.)
192 *
193 * XXX YAMT - it isn't safe to unlock vp here
194 * because the node might be modified while we sleep.
195 * (eg. cached states like i_offset might be stale,
196 * the vnode might be truncated, etc..)
197 * maybe we should have a way to restart the vnodeop (EVOPRESTART?)
198 * or rearrange the vnodeop interface to leave vnode locking to file-system-
199 * specific code so that each file system can have its own vnode locking and
200 * vnode re-using strategies.
201 */
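/*
 * Illustrative sketch, not taken from this file: a caller of lfs_reserve()
 * (defined below) typically brackets a metadata-dirtying operation with a
 * reservation and a matching release, where "nblks" is a hypothetical fsb
 * count chosen by the caller:
 *
 *	if ((error = lfs_reserve(fs, vp, NULL, nblks)) != 0)
 *		return error;
 *	...operation that may dirty up to nblks fsb...
 *	lfs_reserve(fs, vp, NULL, -nblks);	release: does not block
 */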
202 int
203 lfs_reserveavail(struct lfs *fs, struct vnode *vp, struct vnode *vp2, int fsb)
204 {
205 CLEANERINFO *cip;
206 struct buf *bp;
207 int error, slept;
208
209 slept = 0;
210 while (fsb > 0 && !lfs_fits(fs, fsb + fs->lfs_ravail)) {
211 #if 0
212 /*
213 * XXX ideally, we should unlock vnodes here
214 * because we might sleep for a very long time.
215 */
216 VOP_UNLOCK(vp, 0);
217 if (vp2 != NULL) {
218 VOP_UNLOCK(vp2, 0);
219 }
220 #else
221 /*
222 * XXX since we'll sleep waiting for the cleaner while holding the vnode
223 * lock, a deadlock will occur if the cleaner tries to lock the vnode.
224 * (eg. lfs_markv -> lfs_fastvget -> getnewvnode -> vclean)
225 */
226 #endif
227
228 if (!slept) {
229 #ifdef DEBUG
230 printf("lfs_reserve: waiting for %ld (bfree = %d,"
231 " est_bfree = %d)\n",
232 fsb + fs->lfs_ravail, fs->lfs_bfree,
233 LFS_EST_BFREE(fs));
234 #endif
235 }
236 ++slept;
237
238 /* Wake up the cleaner */
239 LFS_CLEANERINFO(cip, fs, bp);
240 LFS_SYNC_CLEANERINFO(cip, fs, bp, 0);
241 wakeup(&lfs_allclean_wakeup);
242 wakeup(&fs->lfs_nextseg);
243
244 error = tsleep(&fs->lfs_avail, PCATCH | PUSER, "lfs_reserve",
245 0);
246 #if 0
247 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); /* XXX use lockstatus */
248 vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY); /* XXX use lockstatus */
249 #endif
250 if (error)
251 return error;
252 }
253 #ifdef DEBUG
254 if (slept)
255 printf("lfs_reserve: woke up\n");
256 #endif
257 fs->lfs_ravail += fsb;
258
259 return 0;
260 }
261
262 #ifdef DIAGNOSTIC
263 int lfs_rescount;
264 int lfs_rescountdirop;
265 #endif
266
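/*
 * Reserve fsb blocks (and a corresponding amount of locked buffer space)
 * on behalf of vp and, optionally, vp2; a negative fsb releases a prior
 * reservation.  Callers that must not wait (vp marked IN_ADIROP, or vp
 * being fs->lfs_unlockvp) are exempted and return immediately; under
 * DIAGNOSTIC they are tallied separately.
 */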
267 int
268 lfs_reserve(struct lfs *fs, struct vnode *vp, struct vnode *vp2, int fsb)
269 {
270 int error;
271 int cantwait;
272
273 KASSERT(fsb < 0 || VOP_ISLOCKED(vp));
274 KASSERT(vp2 == NULL || fsb < 0 || VOP_ISLOCKED(vp2));
275 KASSERT(vp2 == NULL || !(VTOI(vp2)->i_flag & IN_ADIROP));
276 KASSERT(vp2 == NULL || vp2 != fs->lfs_unlockvp);
277
278 cantwait = (VTOI(vp)->i_flag & IN_ADIROP) || fs->lfs_unlockvp == vp;
279 #ifdef DIAGNOSTIC
280 if (cantwait) {
281 if (fsb > 0)
282 lfs_rescountdirop++;
283 else if (fsb < 0)
284 lfs_rescountdirop--;
285 if (lfs_rescountdirop < 0)
286 panic("lfs_rescountdirop");
287 }
288 else {
289 if (fsb > 0)
290 lfs_rescount++;
291 else if (fsb < 0)
292 lfs_rescount--;
293 if (lfs_rescount < 0)
294 panic("lfs_rescount");
295 }
296 #endif
297 if (cantwait)
298 return 0;
299
300 /*
301 * XXX
302 * vref vnodes here so that cleaner doesn't try to reuse them.
303 * (see XXX comment in lfs_reserveavail)
304 */
305 lfs_vref(vp);
306 if (vp2 != NULL) {
307 lfs_vref(vp2);
308 }
309
310 error = lfs_reserveavail(fs, vp, vp2, fsb);
311 if (error)
312 goto done;
313
314 /*
315 * XXX just a guess. should be more precise.
316 */
317 error = lfs_reservebuf(fs, vp, vp2,
318 fragstoblks(fs, fsb), fsbtob(fs, fsb));
319 if (error)
320 lfs_reserveavail(fs, vp, vp2, -fsb);
321
322 done:
323 lfs_vunref(vp);
324 if (vp2 != NULL) {
325 lfs_vunref(vp2);
326 }
327
328 return error;
329 }
330
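/*
 * VOP_BWRITE entry point.  LFS buffers may not be written asynchronously
 * (bawrite) on a writable file system; assert that and hand the buffer
 * to lfs_bwrite_ext() with no flags.
 */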
331 int
332 lfs_bwrite(void *v)
333 {
334 struct vop_bwrite_args /* {
335 struct buf *a_bp;
336 } */ *ap = v;
337 struct buf *bp = ap->a_bp;
338
339 #ifdef DIAGNOSTIC
340 if (VTOI(bp->b_vp)->i_lfs->lfs_ronly == 0 && (bp->b_flags & B_ASYNC)) {
341 panic("bawrite LFS buffer");
342 }
343 #endif /* DIAGNOSTIC */
344 	return lfs_bwrite_ext(bp, 0);
345 }
346
347 /*
348 * Determine if there is enough room currently available to write fsb
349 * blocks. We need enough blocks for the new blocks, the current
350 * inode blocks (including potentially the ifile inode), a summary block,
351 * and the segment usage table, plus an ifile block.
352 */
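/*
 * (In the computation below, the shift by lfs_blktodb - lfs_fsbtodb
 * converts a count of whole blocks, i.e. the inode blocks, the segment
 * usage table blocks and one ifile block, into fsb units; both fields
 * are log2 shift amounts relative to DEV_BSIZE.)
 */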
353 int
354 lfs_fits(struct lfs *fs, int fsb)
355 {
356 int needed;
357
358 needed = fsb + btofsb(fs, fs->lfs_sumsize) +
359 ((howmany(fs->lfs_uinodes + 1, INOPB(fs)) + fs->lfs_segtabsz +
360 1) << (fs->lfs_blktodb - fs->lfs_fsbtodb));
361
362 if (needed >= fs->lfs_avail) {
363 #ifdef DEBUG
364 printf("lfs_fits: no fit: fsb = %d, uinodes = %d, "
365 "needed = %d, avail = %d\n",
366 fsb, fs->lfs_uinodes, needed, fs->lfs_avail);
367 #endif
368 return 0;
369 }
370 return 1;
371 }
372
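/*
 * Block until lfs_fits() reports room for fsb more blocks, repeatedly
 * waking the cleaner and sleeping on lfs_avail.  Writes issued under the
 * segment lock for a cleaning pass or a forced checkpoint are let
 * through unconditionally.
 */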
373 int
374 lfs_availwait(struct lfs *fs, int fsb)
375 {
376 int error;
377 CLEANERINFO *cip;
378 struct buf *cbp;
379
380 /* Push cleaner blocks through regardless */
381 simple_lock(&fs->lfs_interlock);
382 if (fs->lfs_seglock &&
383 fs->lfs_lockpid == curproc->p_pid &&
384 fs->lfs_sp->seg_flags & (SEGM_CLEAN | SEGM_FORCE_CKP)) {
385 simple_unlock(&fs->lfs_interlock);
386 return 0;
387 }
388 simple_unlock(&fs->lfs_interlock);
389
390 while (!lfs_fits(fs, fsb)) {
391 /*
392 * Out of space, need cleaner to run.
393 * Update the cleaner info, then wake it up.
394 * Note the cleanerinfo block is on the ifile
395 * so it CANT_WAIT.
396 */
397 LFS_CLEANERINFO(cip, fs, cbp);
398 LFS_SYNC_CLEANERINFO(cip, fs, cbp, 0);
399
400 printf("lfs_availwait: out of available space, "
401 "waiting on cleaner\n");
402
403 wakeup(&lfs_allclean_wakeup);
404 wakeup(&fs->lfs_nextseg);
405 #ifdef DIAGNOSTIC
406 if (fs->lfs_seglock && fs->lfs_lockpid == curproc->p_pid)
407 panic("lfs_availwait: deadlock");
408 #endif
409 error = tsleep(&fs->lfs_avail, PCATCH | PUSER, "cleaner", 0);
410 if (error)
411 return (error);
412 }
413 return 0;
414 }
415
416 int
417 lfs_bwrite_ext(struct buf *bp, int flags)
418 {
419 struct lfs *fs;
420 struct inode *ip;
421 int fsb, s;
422
423 KASSERT(bp->b_flags & B_BUSY);
424 KASSERT(flags & BW_CLEAN || !LFS_IS_MALLOC_BUF(bp));
425 KASSERT((bp->b_flags & (B_DELWRI|B_LOCKED)) != B_DELWRI);
426 KASSERT((bp->b_flags & (B_DELWRI|B_LOCKED)) != B_LOCKED);
427
428 /*
429 * Don't write *any* blocks if we're mounted read-only.
430 * In particular the cleaner can't write blocks either.
431 */
432 if (VTOI(bp->b_vp)->i_lfs->lfs_ronly) {
433 bp->b_flags &= ~(B_DELWRI | B_READ | B_ERROR);
434 LFS_UNLOCK_BUF(bp);
435 if (LFS_IS_MALLOC_BUF(bp))
436 bp->b_flags &= ~B_BUSY;
437 else
438 brelse(bp);
439 return EROFS;
440 }
441
442 /*
443 * Set the delayed write flag and use reassignbuf to move the buffer
444 * from the clean list to the dirty one.
445 *
446 * Set the B_LOCKED flag and unlock the buffer, causing brelse to move
447 * the buffer onto the LOCKED free list. This is necessary, otherwise
448 * getnewbuf() would try to reclaim the buffers using bawrite, which
449 * isn't going to work.
450 *
451 * XXX we don't let meta-data writes run out of space because they can
452 * come from the segment writer. We need to make sure that there is
453 * enough space reserved so that there's room to write meta-data
454 * blocks.
455 */
456 if (!(bp->b_flags & B_LOCKED)) {
457 fs = VFSTOUFS(bp->b_vp->v_mount)->um_lfs;
458 fsb = fragstofsb(fs, numfrags(fs, bp->b_bcount));
459
460 ip = VTOI(bp->b_vp);
461 if (flags & BW_CLEAN) {
462 LFS_SET_UINO(ip, IN_CLEANING);
463 } else {
464 LFS_SET_UINO(ip, IN_MODIFIED);
465 }
466 fs->lfs_avail -= fsb;
467 bp->b_flags |= B_DELWRI;
468
469 LFS_LOCK_BUF(bp);
470 bp->b_flags &= ~(B_READ | B_DONE | B_ERROR);
471 s = splbio();
472 reassignbuf(bp, bp->b_vp);
473 splx(s);
474 }
475
476 if (bp->b_flags & B_CALL)
477 bp->b_flags &= ~B_BUSY;
478 else
479 brelse(bp);
480
481 return (0);
482 }
483
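/*
 * Write everything dirty belonging to a single file system: take the
 * LFS writer lock to hold off new dirops, then call lfs_segwrite() on
 * the mount.
 */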
484 void
485 lfs_flush_fs(struct lfs *fs, int flags)
486 {
487 if (fs->lfs_ronly)
488 return;
489
490 lfs_writer_enter(fs, "fldirop");
491
492 if (lfs_dostats)
493 ++lfs_stats.flush_invoked;
494 lfs_segwrite(fs->lfs_ivnode->v_mount, flags);
495
496 lfs_writer_leave(fs);
497 }
498
499 /*
500 * XXX
501 * This routine flushes buffers out of the B_LOCKED queue when LFS has too
502 * many locked down. Eventually the pageout daemon will simply call LFS
503 * when pages need to be reclaimed. Note, we have one static count of locked
504 * buffers, so we can't have more than a single file system. To make this
505 * work for multiple file systems, put the count into the mount structure.
506 *
507 * Called and returns with lfs_subsys_lock held.
508 */
509 void
510 lfs_flush(struct lfs *fs, int flags)
511 {
512 struct mount *mp, *nmp;
513
514 LOCK_ASSERT(simple_lock_held(&lfs_subsys_lock));
515 KDASSERT(fs == NULL || !LFS_SEGLOCK_HELD(fs));
516
517 if (lfs_dostats)
518 ++lfs_stats.write_exceeded;
519 	if (lfs_writing && flags == 0) { /* XXX flags */
520 #ifdef DEBUG_LFS
521 printf("lfs_flush: not flushing because another flush is active\n");
522 #endif
523 return;
524 }
525 while (lfs_writing && (flags & SEGM_WRITERD))
526 ltsleep(&lfs_writing, PRIBIO + 1, "lfsflush", 0,
527 &lfs_subsys_lock);
528 lfs_writing = 1;
529
530 lfs_subsys_pages = 0; /* XXXUBC need a better way to count this */
531 simple_unlock(&lfs_subsys_lock);
532 wakeup(&lfs_subsys_pages);
533
534 simple_lock(&mountlist_slock);
535 for (mp = CIRCLEQ_FIRST(&mountlist); mp != (void *)&mountlist;
536 mp = nmp) {
537 if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock)) {
538 nmp = CIRCLEQ_NEXT(mp, mnt_list);
539 continue;
540 }
541 if (strncmp(&mp->mnt_stat.f_fstypename[0], MOUNT_LFS,
542 MFSNAMELEN) == 0)
543 lfs_flush_fs(VFSTOUFS(mp)->um_lfs, flags);
544 simple_lock(&mountlist_slock);
545 nmp = CIRCLEQ_NEXT(mp, mnt_list);
546 vfs_unbusy(mp);
547 }
548 simple_unlock(&mountlist_slock);
549 LFS_DEBUG_COUNTLOCKED("flush");
550
551 simple_lock(&lfs_subsys_lock);
552 KASSERT(lfs_writing);
553 lfs_writing = 0;
554 wakeup(&lfs_writing);
555 }
556
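/*
 * Estimates of how many buffer headers (INOCOUNT) and how many bytes
 * (INOBYTES) writing the file system's dirty inodes will add to the
 * locked queue.
 */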
557 #define INOCOUNT(fs) howmany((fs)->lfs_uinodes, INOPB(fs))
558 #define INOBYTES(fs) ((fs)->lfs_uinodes * sizeof (struct ufs1_dinode))
559
560 /*
561 * make sure that we don't have too many locked buffers.
562 * flush buffers if needed.
563 */
564 int
565 lfs_check(struct vnode *vp, daddr_t blkno, int flags)
566 {
567 int error;
568 struct lfs *fs;
569 struct inode *ip;
570
571 error = 0;
572 ip = VTOI(vp);
573
574 /* If out of buffers, wait on writer */
575 /* XXX KS - if it's the Ifile, we're probably the cleaner! */
576 if (ip->i_number == LFS_IFILE_INUM)
577 return 0;
578 /* If we're being called from inside a dirop, don't sleep */
579 if (ip->i_flag & IN_ADIROP)
580 return 0;
581
582 fs = ip->i_lfs;
583
584 /*
585 * If we would flush below, but dirops are active, sleep.
586 * Note that a dirop cannot ever reach this code!
587 */
588 simple_lock(&lfs_subsys_lock);
589 while (fs->lfs_dirops > 0 &&
590 (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
591 locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES ||
592 lfs_subsys_pages > LFS_MAX_PAGES ||
593 lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0))
594 {
595 ++fs->lfs_diropwait;
596 ltsleep(&fs->lfs_writer, PRIBIO+1, "bufdirop", 0,
597 &lfs_subsys_lock);
598 --fs->lfs_diropwait;
599 }
600
601 #ifdef DEBUG_LFS_FLUSH
602 if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS)
603 printf("lqc = %d, max %d\n",
604 locked_queue_count + INOCOUNT(fs), LFS_MAX_BUFS);
605 if (locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES)
606 printf("lqb = %ld, max %ld\n",
607 locked_queue_bytes + INOBYTES(fs), LFS_MAX_BYTES);
608 if (lfs_subsys_pages > LFS_MAX_PAGES)
609 printf("lssp = %d, max %d\n", lfs_subsys_pages, LFS_MAX_PAGES);
610 if (lfs_dirvcount > LFS_MAX_DIROP)
611 printf("ldvc = %d, max %d\n", lfs_dirvcount, LFS_MAX_DIROP);
612 if (fs->lfs_diropwait > 0)
613 printf("ldvw = %d\n", fs->lfs_diropwait);
614 #endif
615
616 if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
617 locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES ||
618 lfs_subsys_pages > LFS_MAX_PAGES ||
619 lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0) {
620 lfs_flush(fs, flags);
621 }
622
623 while (locked_queue_count + INOCOUNT(fs) > LFS_WAIT_BUFS ||
624 locked_queue_bytes + INOBYTES(fs) > LFS_WAIT_BYTES ||
625 lfs_subsys_pages > LFS_WAIT_PAGES ||
626 lfs_dirvcount > LFS_MAX_DIROP) {
627 simple_unlock(&lfs_subsys_lock);
628 if (lfs_dostats)
629 ++lfs_stats.wait_exceeded;
630 #ifdef DEBUG_LFS
631 printf("lfs_check: waiting: count=%d, bytes=%ld\n",
632 locked_queue_count, locked_queue_bytes);
633 #endif
634 error = tsleep(&locked_queue_count, PCATCH | PUSER,
635 "buffers", hz * LFS_BUFWAIT);
636 if (error != EWOULDBLOCK) {
637 simple_lock(&lfs_subsys_lock);
638 break;
639 }
640 /*
641 * lfs_flush might not flush all the buffers, if some of the
642 * inodes were locked or if most of them were Ifile blocks
643 * and we weren't asked to checkpoint. Try flushing again
644 * to keep us from blocking indefinitely.
645 */
646 simple_lock(&lfs_subsys_lock);
647 if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
648 locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES) {
649 lfs_flush(fs, flags | SEGM_CKP);
650 }
651 }
652 simple_unlock(&lfs_subsys_lock);
653 return (error);
654 }
655
656 /*
657 * Allocate a new buffer header (and, if size is nonzero, its data area).
658 */
659 struct buf *
660 lfs_newbuf(struct lfs *fs, struct vnode *vp, daddr_t daddr, size_t size, int type)
661 {
662 struct buf *bp;
663 size_t nbytes;
664 int s;
665
666 nbytes = roundup(size, fsbtob(fs, 1));
667
668 s = splbio();
669 bp = pool_get(&bufpool, PR_WAITOK);
670 splx(s);
671 memset(bp, 0, sizeof(struct buf));
672 BUF_INIT(bp);
673 if (nbytes) {
674 bp->b_data = lfs_malloc(fs, nbytes, type);
675 /* memset(bp->b_data, 0, nbytes); */
676 }
677 #ifdef DIAGNOSTIC
678 if (vp == NULL)
679 panic("vp is NULL in lfs_newbuf");
680 if (bp == NULL)
681 panic("bp is NULL after malloc in lfs_newbuf");
682 #endif
683 s = splbio();
684 bgetvp(vp, bp);
685 splx(s);
686
687 bp->b_bufsize = size;
688 bp->b_bcount = size;
689 bp->b_lblkno = daddr;
690 bp->b_blkno = daddr;
691 bp->b_error = 0;
692 bp->b_resid = 0;
693 bp->b_iodone = lfs_callback;
694 bp->b_flags |= B_BUSY | B_CALL | B_NOCACHE;
695 bp->b_private = fs;
696
697 return (bp);
698 }
699
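/*
 * Release a buffer obtained from lfs_newbuf(): detach it from its vnode,
 * free the lfs_malloc()ed data area (unless B_INVAL marks it as a "fake"
 * buffer whose data is owned elsewhere) and return the header to the
 * buffer pool.
 */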
700 void
701 lfs_freebuf(struct lfs *fs, struct buf *bp)
702 {
703 int s;
704
705 s = splbio();
706 if (bp->b_vp)
707 brelvp(bp);
708 if (!(bp->b_flags & B_INVAL)) { /* B_INVAL indicates a "fake" buffer */
709 lfs_free(fs, bp->b_data, LFS_NB_UNKNOWN);
710 bp->b_data = NULL;
711 }
712 pool_put(&bufpool, bp);
713 splx(s);
714 }
715
716 /*
717 * Definitions for the buffer free lists.
718 */
719 #define BQUEUES 4 /* number of free buffer queues */
720
721 #define BQ_LOCKED 0 /* super-blocks &c */
722 #define BQ_LRU 1 /* lru, useful buffers */
723 #define BQ_AGE 2 /* rubbish */
724 #define BQ_EMPTY 3 /* buffer headers with no memory */
725
726 extern TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
727 extern struct simplelock bqueue_slock;
728
729 /*
730 * Return a count of buffers on the "locked" queue.
731 * Don't count malloced buffers, since they don't detract from the total.
732 */
733 void
734 lfs_countlocked(int *count, long *bytes, char *msg)
735 {
736 struct buf *bp;
737 int n = 0;
738 long int size = 0L;
739 int s;
740
741 s = splbio();
742 simple_lock(&bqueue_slock);
743 TAILQ_FOREACH(bp, &bufqueues[BQ_LOCKED], b_freelist) {
744 KASSERT(!(bp->b_flags & B_CALL));
745 n++;
746 size += bp->b_bufsize;
747 #ifdef DEBUG_LOCKED_LIST
748 if (n > nbuf)
749 panic("lfs_countlocked: this can't happen: more"
750 " buffers locked than exist");
751 #endif
752 }
753 #ifdef DEBUG_LOCKED_LIST
754 /* Theoretically this function never really does anything */
755 if (n != *count)
756 printf("lfs_countlocked: %s: adjusted buf count from %d to %d\n",
757 msg, *count, n);
758 if (size != *bytes)
759 printf("lfs_countlocked: %s: adjusted byte count from %ld to %ld\n",
760 msg, *bytes, size);
761 #endif
762 *count = n;
763 *bytes = size;
764 simple_unlock(&bqueue_slock);
765 splx(s);
766 return;
767 }