/*-
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 * 	Copyright (c) 1995 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cluster.c	8.7 (Berkeley) 2/13/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/5.3/sys/kern/vfs_cluster.c 136588 2004-10-16 08:43:07Z cvs2svn $");

#include "opt_debug_cluster.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <sys/sysctl.h>

#if defined(CLUSTERDEBUG)
static int	rcluster = 0;
SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0,
    "Debug VFS clustering code");
#endif

static MALLOC_DEFINE(M_SEGMENT, "cluster_save buffer", "cluster_save buffer");

static struct cluster_save *
	cluster_collectbufs(struct vnode *vp, struct buf *last_bp);
static struct buf *
	cluster_rbuild(struct vnode *vp, u_quad_t filesize, daddr_t lbn,
	    daddr_t blkno, long size, int run, struct buf *fbp);

static int write_behind = 1;
SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0,
    "Cluster write-behind; 0: disable, 1: enable, 2: backed off");

static int read_max = 8;
SYSCTL_INT(_vfs, OID_AUTO, read_max, CTLFLAG_RW, &read_max, 0,
    "Cluster read-ahead max block count");

/* Page expended to mark partially backed buffers */
extern vm_page_t bogus_page;

/*
 * Number of physical bufs (pbufs) this subsystem is allowed.
 * Manipulated by vm_pager.c
 */
extern int cluster_pbuf_freecnt;

/*
 * Read data to a buf, including read-ahead if we find this to be beneficial.
 * cluster_read replaces bread.
 */
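/*
 * Example (an illustrative sketch of a caller, not code from this file):
 * a filesystem read path that knows the file size ip->i_size and has a
 * sequential-access hint seqcount would substitute
 *
 *	error = cluster_read(vp, ip->i_size, lbn, size, NOCRED,
 *	    uio->uio_resid, seqcount, &bp);
 *
 * for a plain bread(vp, lbn, size, NOCRED, &bp).
 */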
int
cluster_read(vp, filesize, lblkno, size, cred, totread, seqcount, bpp)
	struct vnode *vp;
	u_quad_t filesize;
	daddr_t lblkno;
	long size;
	struct ucred *cred;
	long totread;
	int seqcount;
	struct buf **bpp;
{
	struct buf *bp, *rbp, *reqbp;
	daddr_t blkno, origblkno;
	int maxra, racluster;
	int error, ncontig;
	int i;

	error = 0;

	/*
	 * Try to limit the amount of read-ahead by a few
	 * ad-hoc parameters.  This needs work!!!
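	 *
	 * Worked example (illustrative): with 16KB blocks and a 128KB
	 * mnt_iosize_max, racluster comes to 8 blocks per cluster I/O;
	 * with seqcount 16 and the default read_max of 8, maxra clamps
	 * to 8 blocks of read-ahead, further bounded by nbuf/8 and by
	 * the end of the file.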
	 */
	racluster = vp->v_mount->mnt_iosize_max / size;
	maxra = seqcount;
	maxra = min(read_max, maxra);
	maxra = min(nbuf/8, maxra);
	if (((u_quad_t)(lblkno + maxra + 1) * size) > filesize)
		maxra = (filesize / size) - lblkno;

	/*
	 * get the requested block
	 */
	*bpp = reqbp = bp = getblk(vp, lblkno, size, 0, 0, 0);
	origblkno = lblkno;

	/*
	 * if it is in the cache, then check to see if the reads have been
	 * sequential.  If they have, then try some read-ahead, otherwise
	 * back-off on prospective read-aheads.
	 */
	if (bp->b_flags & B_CACHE) {
		if (!seqcount) {
			return 0;
		} else if ((bp->b_flags & B_RAM) == 0) {
			return 0;
		} else {
			int s;
			bp->b_flags &= ~B_RAM;
			/*
			 * We do the spl here so that the buffers found by
			 * the gbincore() scan below cannot be reclaimed
			 * out from under us before we are done with them.
			 * We opt to keep the spl out of the loop for
			 * efficiency.
			 */
			s = splbio();
			VI_LOCK(vp);
			for (i = 1; i < maxra; i++) {
				/*
				 * Stop if the buffer does not exist or it
				 * is invalid (about to go away?)
				 */
				rbp = gbincore(vp, lblkno+i);
				if (rbp == NULL || (rbp->b_flags & B_INVAL))
					break;

				/*
				 * Set another read-ahead mark so we know
				 * to check again.
				 */
				if (((i % racluster) == (racluster - 1)) ||
				    (i == (maxra - 1)))
					rbp->b_flags |= B_RAM;
			}
			VI_UNLOCK(vp);
			splx(s);
			if (i >= maxra) {
				return 0;
			}
			lblkno += i;
		}
		reqbp = bp = NULL;
	/*
	 * If it isn't in the cache, then get a chunk from
	 * disk if sequential, otherwise just get the block.
	 */
	} else {
		off_t firstread = bp->b_offset;
		int nblks;

		KASSERT(bp->b_offset != NOOFFSET,
		    ("cluster_read: no buffer offset"));

		ncontig = 0;

		/*
		 * Compute the total number of blocks that we should read
		 * synchronously.
		 */
		if (firstread + totread > filesize)
			totread = filesize - firstread;
		nblks = howmany(totread, size);
		if (nblks > racluster)
			nblks = racluster;

		/*
		 * Now compute the number of contiguous blocks.
		 */
		if (nblks > 1) {
			error = VOP_BMAP(vp, lblkno, NULL,
			    &blkno, &ncontig, NULL);
			/*
			 * If this failed to map just do the original block.
			 */
			if (error || blkno == -1)
				ncontig = 0;
		}

		/*
		 * If we have contiguous data available do a cluster
		 * otherwise just read the requested block.
		 */
		if (ncontig) {
			/* Account for our first block. */
			ncontig = min(ncontig + 1, nblks);
			if (ncontig < nblks)
				nblks = ncontig;
			bp = cluster_rbuild(vp, filesize, lblkno,
			    blkno, size, nblks, bp);
			lblkno += (bp->b_bufsize / size);
		} else {
			bp->b_flags |= B_RAM;
			bp->b_iocmd = BIO_READ;
			lblkno += 1;
		}
	}

	/*
	 * handle the synchronous read so that it is available ASAP.
	 */
	if (bp) {
		if ((bp->b_flags & B_CLUSTER) == 0) {
			vfs_busy_pages(bp, 0);
		}
		bp->b_flags &= ~B_INVAL;
		bp->b_ioflags &= ~BIO_ERROR;
		if ((bp->b_flags & B_ASYNC) || bp->b_iodone != NULL)
			BUF_KERNPROC(bp);
		bp->b_iooffset = dbtob(bp->b_blkno);
		error = VOP_STRATEGY(vp, bp);
		curproc->p_stats->p_ru.ru_inblock++;
		if (error)
			return (error);
	}

	/*
	 * If we have been doing sequential I/O, then do some read-ahead.
	 */
	while (lblkno < (origblkno + maxra)) {
		error = VOP_BMAP(vp, lblkno, NULL, &blkno, &ncontig, NULL);
		if (error)
			break;

		if (blkno == -1)
			break;

		/*
		 * We could throttle ncontig here by maxra but we might as
		 * well read the data if it is contiguous.  We're throttled
		 * by racluster anyway.
		 */
		if (ncontig) {
			ncontig = min(ncontig + 1, racluster);
			rbp = cluster_rbuild(vp, filesize, lblkno, blkno,
			    size, ncontig, NULL);
			lblkno += (rbp->b_bufsize / size);
			if (rbp->b_flags & B_DELWRI) {
				bqrelse(rbp);
				continue;
			}
		} else {
			rbp = getblk(vp, lblkno, size, 0, 0, 0);
			lblkno += 1;
			if (rbp->b_flags & B_DELWRI) {
				bqrelse(rbp);
				continue;
			}
			rbp->b_flags |= B_ASYNC | B_RAM;
			rbp->b_iocmd = BIO_READ;
			rbp->b_blkno = blkno;
		}
		if (rbp->b_flags & B_CACHE) {
			rbp->b_flags &= ~B_ASYNC;
			bqrelse(rbp);
			continue;
		}
		if ((rbp->b_flags & B_CLUSTER) == 0) {
			vfs_busy_pages(rbp, 0);
		}
		rbp->b_flags &= ~B_INVAL;
		rbp->b_ioflags &= ~BIO_ERROR;
		if ((rbp->b_flags & B_ASYNC) || rbp->b_iodone != NULL)
			BUF_KERNPROC(rbp);
		rbp->b_iooffset = dbtob(rbp->b_blkno);
		(void) VOP_STRATEGY(vp, rbp);
		curproc->p_stats->p_ru.ru_inblock++;
	}

	if (reqbp)
		return (bufwait(reqbp));
	else
		return (error);
}

/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
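 *
 * The cluster is carried in a pbuf whose pages alias those of the
 * component buffers; cluster_callback() later hands the result back
 * to the individual buffers.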
 */
static struct buf *
cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
	struct vnode *vp;
	u_quad_t filesize;
	daddr_t lbn;
	daddr_t blkno;
	long size;
	int run;
	struct buf *fbp;
{
	struct buf *bp, *tbp;
	daddr_t bn;
	int i, inc, j;

	GIANT_REQUIRED;

	KASSERT(size == vp->v_mount->mnt_stat.f_iosize,
	    ("cluster_rbuild: size %ld != f_iosize %jd\n",
	    size, (intmax_t)vp->v_mount->mnt_stat.f_iosize));

	/*
	 * avoid a division
	 */
	while ((u_quad_t) size * (lbn + run) > filesize) {
		--run;
	}

	if (fbp) {
		tbp = fbp;
		tbp->b_iocmd = BIO_READ;
	} else {
		tbp = getblk(vp, lbn, size, 0, 0, 0);
		if (tbp->b_flags & B_CACHE)
			return tbp;
		tbp->b_flags |= B_ASYNC | B_RAM;
		tbp->b_iocmd = BIO_READ;
	}

	tbp->b_blkno = blkno;
	if ((tbp->b_flags & B_MALLOC) ||
	    ((tbp->b_flags & B_VMIO) == 0) || (run <= 1))
		return tbp;

	bp = trypbuf(&cluster_pbuf_freecnt);
	if (bp == NULL)
		return tbp;

	/*
	 * We are synthesizing a buffer out of vm_page_t's, but
	 * if the block size is not page aligned then the starting
	 * address may not be either.  Inherit the b_data offset
	 * from the original buffer.
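	 *
	 * For example (illustrative): a 1KB fragment mapped 0x400 bytes
	 * into its page leaves those low PAGE_MASK bits set in
	 * tbp->b_data, and they must carry over into bp->b_data below.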
	 */
	bp->b_data = (char *)((vm_offset_t)bp->b_data |
	    ((vm_offset_t)tbp->b_data & PAGE_MASK));
	bp->b_flags = B_ASYNC | B_CLUSTER | B_VMIO;
	bp->b_iocmd = BIO_READ;
	bp->b_iodone = cluster_callback;
	bp->b_blkno = blkno;
	bp->b_lblkno = lbn;
	bp->b_offset = tbp->b_offset;
	KASSERT(bp->b_offset != NOOFFSET, ("cluster_rbuild: no buffer offset"));
	pbgetvp(vp, bp);

	TAILQ_INIT(&bp->b_cluster.cluster_head);

	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_npages = 0;

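	/*
	 * btodb() converts bytes to DEV_BSIZE (512-byte) disk blocks, so
	 * e.g. a 16KB filesystem block advances bn by 32 each iteration.
	 */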
	inc = btodb(size);
	for (bn = blkno, i = 0; i < run; ++i, bn += inc) {
		if (i != 0) {
			if ((bp->b_npages * PAGE_SIZE) +
			    round_page(size) > vp->v_mount->mnt_iosize_max) {
				break;
			}

			tbp = getblk(vp, lbn + i, size, 0, 0, GB_LOCK_NOWAIT);

			/* Don't wait around for locked bufs. */
			if (tbp == NULL)
				break;

			/*
			 * Stop scanning if the buffer is fully valid
			 * (marked B_CACHE), or locked (may be doing a
			 * background write), or if the buffer is not
			 * VMIO backed.  The clustering code can only deal
			 * with VMIO-backed buffers.
			 */
			VI_LOCK(bp->b_vp);
			if ((tbp->b_vflags & BV_BKGRDINPROG) ||
			    (tbp->b_flags & B_CACHE) ||
			    (tbp->b_flags & B_VMIO) == 0) {
				VI_UNLOCK(bp->b_vp);
				bqrelse(tbp);
				break;
			}
			VI_UNLOCK(bp->b_vp);

			/*
			 * The buffer must be completely invalid in order to
			 * take part in the cluster.  If it is partially valid
			 * then we stop.
			 */
			VM_OBJECT_LOCK(tbp->b_object);
			for (j = 0; j < tbp->b_npages; j++) {
				VM_OBJECT_LOCK_ASSERT(tbp->b_pages[j]->object,
				    MA_OWNED);
				if (tbp->b_pages[j]->valid)
					break;
			}
			VM_OBJECT_UNLOCK(tbp->b_object);
			if (j != tbp->b_npages) {
				bqrelse(tbp);
				break;
			}

			/*
			 * Set a read-ahead mark as appropriate
			 */
			if ((fbp && (i == 1)) || (i == (run - 1)))
				tbp->b_flags |= B_RAM;

			/*
			 * Set the buffer up for an async read (XXX should
			 * we do this only if we do not wind up brelse()ing?).
			 * Set the block number if it isn't set, otherwise
			 * if it is make sure it matches the block number we
			 * expect.
			 */
			tbp->b_flags |= B_ASYNC;
			tbp->b_iocmd = BIO_READ;
			if (tbp->b_blkno == tbp->b_lblkno) {
				tbp->b_blkno = bn;
			} else if (tbp->b_blkno != bn) {
				brelse(tbp);
				break;
			}
		}
		/*
		 * XXX fbp from caller may not be B_ASYNC, but we are going
		 * to biodone() it in cluster_callback() anyway
		 */
		BUF_KERNPROC(tbp);
		TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
		    tbp, b_cluster.cluster_entry);
		VM_OBJECT_LOCK(tbp->b_object);
		vm_page_lock_queues();
		for (j = 0; j < tbp->b_npages; j += 1) {
			vm_page_t m;
			m = tbp->b_pages[j];
			vm_page_io_start(m);
			vm_object_pip_add(m->object, 1);
			if ((bp->b_npages == 0) ||
			    (bp->b_pages[bp->b_npages-1] != m)) {
				bp->b_pages[bp->b_npages] = m;
				bp->b_npages++;
			}
			if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL)
				tbp->b_pages[j] = bogus_page;
		}
		vm_page_unlock_queues();
		VM_OBJECT_UNLOCK(tbp->b_object);
		/*
		 * XXX shouldn't this be += size for both, like in
		 * cluster_wbuild()?
		 *
		 * Don't inherit tbp->b_bufsize as it may be larger due to
		 * a non-page-aligned size.  Instead just aggregate using
		 * 'size'.
		 */
		if (tbp->b_bcount != size)
			printf("warning: tbp->b_bcount wrong %ld vs %ld\n",
			    tbp->b_bcount, size);
		if (tbp->b_bufsize != size)
			printf("warning: tbp->b_bufsize wrong %ld vs %ld\n",
			    tbp->b_bufsize, size);
		bp->b_bcount += size;
		bp->b_bufsize += size;
	}

	/*
	 * Fully valid pages in the cluster are already good and do not need
	 * to be re-read from disk.  Replace the page with bogus_page
	 */
	VM_OBJECT_LOCK(bp->b_object);
	for (j = 0; j < bp->b_npages; j++) {
		VM_OBJECT_LOCK_ASSERT(bp->b_pages[j]->object, MA_OWNED);
		if ((bp->b_pages[j]->valid & VM_PAGE_BITS_ALL) ==
		    VM_PAGE_BITS_ALL) {
			bp->b_pages[j] = bogus_page;
		}
	}
	VM_OBJECT_UNLOCK(bp->b_object);
	if (bp->b_bufsize > bp->b_kvasize)
		panic("cluster_rbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
		    bp->b_bufsize, bp->b_kvasize);
	bp->b_kvasize = bp->b_bufsize;

	pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
	    (vm_page_t *)bp->b_pages, bp->b_npages);
	return (bp);
}

/*
 * Cleanup after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
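 *
 * This runs at biodone() time as the pbuf's b_iodone hook, installed by
 * cluster_rbuild() and cluster_wbuild().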
 */
void
cluster_callback(bp)
	struct buf *bp;
{
	struct buf *nbp, *tbp;
	int error = 0;

	GIANT_REQUIRED;

	/*
	 * Must propagate errors to all the components.
	 */
	if (bp->b_ioflags & BIO_ERROR)
		error = bp->b_error;

	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
	/*
	 * Move memory from the large cluster buffer into the component
	 * buffers and mark IO as done on these.
	 */
	for (tbp = TAILQ_FIRST(&bp->b_cluster.cluster_head);
	    tbp; tbp = nbp) {
		nbp = TAILQ_NEXT(&tbp->b_cluster, cluster_entry);
		if (error) {
			tbp->b_ioflags |= BIO_ERROR;
			tbp->b_error = error;
		} else {
			tbp->b_dirtyoff = tbp->b_dirtyend = 0;
			tbp->b_flags &= ~B_INVAL;
			tbp->b_ioflags &= ~BIO_ERROR;
			/*
			 * XXX the bdwrite()/bqrelse() issued during
			 * cluster building clears B_RELBUF (see bqrelse()
			 * comment).  If direct I/O was specified, we have
			 * to restore it here to allow the buffer and VM
			 * to be freed.
			 */
			if (tbp->b_flags & B_DIRECT)
				tbp->b_flags |= B_RELBUF;
		}
		bufdone(tbp);
	}
	relpbuf(bp, &cluster_pbuf_freecnt);
}

/*
 * cluster_wbuild_wb:
 *
 *	Implement modified write build for cluster.
 *
 *	write_behind = 0	write behind disabled
 *	write_behind = 1	write behind normal (default)
 *	write_behind = 2	write behind backed-off
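 *
 *	In the backed-off case the cluster pushed is the window just
 *	behind the current one; e.g. (illustrative) a call with
 *	start_lbn 64 and len 8 writes lbns 56..63 rather than 64..71.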
 */

static __inline int
cluster_wbuild_wb(struct vnode *vp, long size, daddr_t start_lbn, int len)
{
	int r = 0;

	switch (write_behind) {
	case 2:
		if (start_lbn < len)
			break;
		start_lbn -= len;
		/* FALLTHROUGH */
	case 1:
		r = cluster_wbuild(vp, size, start_lbn, len);
		/* FALLTHROUGH */
	default:
		/* FALLTHROUGH */
		break;
	}
	return (r);
}

/*
 * Do clustered write for FFS.
 *
 * Four cases:
 *	1.  Write is not sequential (write asynchronously)
 *	Write is sequential:
 *	2.  beginning of cluster - begin cluster
 *	3.  middle of a cluster - add to cluster
 *	4.  end of a cluster - asynchronously write cluster
 */
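/*
 * Clustering state is carried on the vnode between calls: v_cstart is
 * the first lbn of the cluster being built, v_clen caps how far the
 * cluster may grow past v_cstart, v_lastw is the last lbn written and
 * v_lasta its disk address.
 */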
void
cluster_write(bp, filesize, seqcount)
	struct buf *bp;
	u_quad_t filesize;
	int seqcount;
{
	struct vnode *vp;
	daddr_t lbn;
	int maxclen, cursize;
	int lblocksize;
	int async;

	vp = bp->b_vp;
	if (vp->v_type == VREG) {
		async = vp->v_mount->mnt_flag & MNT_ASYNC;
		lblocksize = vp->v_mount->mnt_stat.f_iosize;
	} else {
		async = 0;
		lblocksize = bp->b_bufsize;
	}
	lbn = bp->b_lblkno;
	KASSERT(bp->b_offset != NOOFFSET, ("cluster_write: no buffer offset"));

	/* Initialize vnode to beginning of file. */
	if (lbn == 0)
		vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;

	if (vp->v_clen == 0 || lbn != vp->v_lastw + 1 ||
	    (bp->b_blkno != vp->v_lasta + btodb(lblocksize))) {
		maxclen = vp->v_mount->mnt_iosize_max / lblocksize - 1;
		if (vp->v_clen != 0) {
			/*
			 * Next block is not sequential.
			 *
			 * If we are not writing at end of file, the process
			 * seeked to another point in the file since its last
			 * write, or we have reached our maximum cluster size,
			 * then push the previous cluster.  Otherwise try
			 * reallocating to make it sequential.
			 *
			 * Change to algorithm: only push previous cluster if
			 * it was sequential from the point of view of the
			 * seqcount heuristic, otherwise leave the buffer
			 * intact so we can potentially optimize the I/O
			 * later on in the buf_daemon or update daemon
			 * flush.
			 */
			cursize = vp->v_lastw - vp->v_cstart + 1;
			if (((u_quad_t) bp->b_offset + lblocksize) != filesize ||
			    lbn != vp->v_lastw + 1 || vp->v_clen <= cursize) {
				if (!async && seqcount > 0) {
					cluster_wbuild_wb(vp, lblocksize,
					    vp->v_cstart, cursize);
				}
			} else {
				struct buf **bpp, **endbp;
				struct cluster_save *buflist;

				buflist = cluster_collectbufs(vp, bp);
				endbp = &buflist->bs_children
				    [buflist->bs_nchildren - 1];
				if (VOP_REALLOCBLKS(vp, buflist)) {
					/*
					 * Failed, push the previous cluster
					 * if *really* writing sequentially
					 * in the logical file (seqcount > 1),
					 * otherwise delay it in the hopes that
					 * the low level disk driver can
					 * optimize the write ordering.
					 */
					for (bpp = buflist->bs_children;
					     bpp < endbp; bpp++)
						brelse(*bpp);
					free(buflist, M_SEGMENT);
					if (seqcount > 1) {
						cluster_wbuild_wb(vp,
						    lblocksize, vp->v_cstart,
						    cursize);
					}
				} else {
					/*
					 * Succeeded, keep building cluster.
					 */
					for (bpp = buflist->bs_children;
					     bpp <= endbp; bpp++)
						bdwrite(*bpp);
					free(buflist, M_SEGMENT);
					vp->v_lastw = lbn;
					vp->v_lasta = bp->b_blkno;
					return;
				}
			}
		}
		/*
		 * Consider beginning a cluster.  If at end of file, make
		 * cluster as large as possible, otherwise find size of
		 * existing cluster.
		 */
		if ((vp->v_type == VREG) &&
		    ((u_quad_t) bp->b_offset + lblocksize) != filesize &&
		    (bp->b_blkno == bp->b_lblkno) &&
		    (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen, NULL) ||
		     bp->b_blkno == -1)) {
			bawrite(bp);
			vp->v_clen = 0;
			vp->v_lasta = bp->b_blkno;
			vp->v_cstart = lbn + 1;
			vp->v_lastw = lbn;
			return;
		}
		vp->v_clen = maxclen;
		if (!async && maxclen == 0) {	/* I/O not contiguous */
			vp->v_cstart = lbn + 1;
			bawrite(bp);
		} else {	/* Wait for rest of cluster */
			vp->v_cstart = lbn;
			bdwrite(bp);
		}
	} else if (lbn == vp->v_cstart + vp->v_clen) {
		/*
		 * At end of cluster, write it out if seqcount tells us we
		 * are operating sequentially, otherwise let the buf or
		 * update daemon handle it.
		 */
		bdwrite(bp);
		if (seqcount > 1)
			cluster_wbuild_wb(vp, lblocksize, vp->v_cstart,
			    vp->v_clen + 1);
		vp->v_clen = 0;
		vp->v_cstart = lbn + 1;
	} else if (vm_page_count_severe()) {
		/*
		 * We are low on memory, get it going NOW
		 */
		bawrite(bp);
	} else {
		/*
		 * In the middle of a cluster, so just delay the I/O for now.
		 */
		bdwrite(bp);
	}
	vp->v_lastw = lbn;
	vp->v_lasta = bp->b_blkno;
}

/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * Scan up to len blocks starting at start_lbn, gathering adjacent dirty
 * buffers into clustered writes and issuing them asynchronously.
 * Returns the number of bytes written.
 */
int
cluster_wbuild(vp, size, start_lbn, len)
	struct vnode *vp;
	long size;
	daddr_t start_lbn;
	int len;
{
	struct buf *bp, *tbp;
	int i, j, s;
	int totalwritten = 0;
	int dbsize = btodb(size);

	GIANT_REQUIRED;

	while (len > 0) {
		s = splbio();
		/*
		 * If the buffer is not delayed-write (i.e. dirty), or it
		 * is delayed-write but either locked or inval, it cannot
		 * partake in the clustered write.
		 */
		VI_LOCK(vp);
		if ((tbp = gbincore(vp, start_lbn)) == NULL ||
		    (tbp->b_vflags & BV_BKGRDINPROG)) {
			VI_UNLOCK(vp);
			++start_lbn;
			--len;
			splx(s);
			continue;
		}
		if (BUF_LOCK(tbp,
		    LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, VI_MTX(vp))) {
			++start_lbn;
			--len;
			splx(s);
			continue;
		}
		if ((tbp->b_flags & (B_INVAL | B_DELWRI)) != B_DELWRI) {
			BUF_UNLOCK(tbp);
			++start_lbn;
			--len;
			splx(s);
			continue;
		}
		bremfree(tbp);
		tbp->b_flags &= ~B_DONE;
		splx(s);

		/*
		 * Extra memory in the buffer, punt on this buffer.
		 * XXX we could handle this in most cases, but we would
		 * have to push the extra memory down to after our max
		 * possible cluster size and then potentially pull it back
		 * up if the cluster was terminated prematurely--too much
		 * hassle.
		 */
		if (((tbp->b_flags & (B_CLUSTEROK | B_MALLOC | B_VMIO)) !=
		    (B_CLUSTEROK | B_VMIO)) ||
		    (tbp->b_bcount != tbp->b_bufsize) ||
		    (tbp->b_bcount != size) ||
		    (len == 1) ||
		    ((bp = getpbuf(&cluster_pbuf_freecnt)) == NULL)) {
			totalwritten += tbp->b_bufsize;
			bawrite(tbp);
			++start_lbn;
			--len;
			continue;
		}

		/*
		 * We got a pbuf to make the cluster in, so initialize it.
		 */
		TAILQ_INIT(&bp->b_cluster.cluster_head);
		bp->b_bcount = 0;
		bp->b_magic = tbp->b_magic;
		bp->b_op = tbp->b_op;
		bp->b_bufsize = 0;
		bp->b_npages = 0;
		if (tbp->b_wcred != NOCRED)
			bp->b_wcred = crhold(tbp->b_wcred);

		bp->b_blkno = tbp->b_blkno;
		bp->b_lblkno = tbp->b_lblkno;
		bp->b_offset = tbp->b_offset;

		/*
		 * We are synthesizing a buffer out of vm_page_t's, but
		 * if the block size is not page aligned then the starting
		 * address may not be either.  Inherit the b_data offset
		 * from the original buffer.
		 */
		bp->b_data = (char *)((vm_offset_t)bp->b_data |
		    ((vm_offset_t)tbp->b_data & PAGE_MASK));
		bp->b_flags |= B_CLUSTER |
		    (tbp->b_flags & (B_VMIO | B_NEEDCOMMIT));
		bp->b_iodone = cluster_callback;
		pbgetvp(vp, bp);
		/*
		 * From this location in the file, scan forward to see
		 * if there are buffers with adjacent data that need to
		 * be written as well.
		 */
		for (i = 0; i < len; ++i, ++start_lbn) {
			if (i != 0) { /* If not the first buffer */
				s = splbio();
				/*
				 * If the adjacent data is not even in core it
				 * can't need to be written.
				 */
				VI_LOCK(vp);
				if ((tbp = gbincore(vp, start_lbn)) == NULL ||
				    (tbp->b_vflags & BV_BKGRDINPROG)) {
					VI_UNLOCK(vp);
					splx(s);
					break;
				}

				/*
				 * If it IS in core, but has different
				 * characteristics, or is locked (which
				 * means it could be undergoing a background
				 * I/O or be in a weird state), then don't
				 * cluster with it.
				 */
				if (BUF_LOCK(tbp,
				    LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK,
				    VI_MTX(vp))) {
					splx(s);
					break;
				}

				if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
				    B_INVAL | B_DELWRI | B_NEEDCOMMIT))
				    != (B_DELWRI | B_CLUSTEROK |
				    (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
				    tbp->b_wcred != bp->b_wcred) {
					BUF_UNLOCK(tbp);
					splx(s);
					break;
				}

				/*
				 * Check that the combined cluster
				 * would make sense with regard to pages
				 * and would not be too large
				 */
				if ((tbp->b_bcount != size) ||
				    ((bp->b_blkno + (dbsize * i)) !=
				    tbp->b_blkno) ||
				    ((tbp->b_npages + bp->b_npages) >
				    (vp->v_mount->mnt_iosize_max / PAGE_SIZE))) {
					BUF_UNLOCK(tbp);
					splx(s);
					break;
				}
				/*
				 * Ok, it's passed all the tests,
				 * so remove it from the free list
				 * and mark it busy. We will use it.
				 */
				bremfree(tbp);
				tbp->b_flags &= ~B_DONE;
				splx(s);
			} /* end of code for non-first buffers only */
			/* check for latent dependencies to be handled */
			if ((LIST_FIRST(&tbp->b_dep)) != NULL) {
				tbp->b_iocmd = BIO_WRITE;
				buf_start(tbp);
			}
			/*
			 * If the IO is via the VM then we do some
			 * special VM hackery (yuck).  Since the buffer's
			 * block size may not be page-aligned it is possible
			 * for a page to be shared between two buffers.  We
			 * have to get rid of the duplication when building
			 * the cluster.
			 */
			if (tbp->b_flags & B_VMIO) {
				vm_page_t m;

				if (i != 0) { /* if not first buffer */
					for (j = 0; j < tbp->b_npages; j += 1) {
						m = tbp->b_pages[j];
						if (m->flags & PG_BUSY) {
							bqrelse(tbp);
							goto finishcluster;
						}
					}
				}
				if (tbp->b_object != NULL)
					VM_OBJECT_LOCK(tbp->b_object);
				vm_page_lock_queues();
				for (j = 0; j < tbp->b_npages; j += 1) {
					m = tbp->b_pages[j];
					vm_page_io_start(m);
					vm_object_pip_add(m->object, 1);
					if ((bp->b_npages == 0) ||
					    (bp->b_pages[bp->b_npages - 1] != m)) {
						bp->b_pages[bp->b_npages] = m;
						bp->b_npages++;
					}
				}
				vm_page_unlock_queues();
				if (tbp->b_object != NULL)
					VM_OBJECT_UNLOCK(tbp->b_object);
			}
			bp->b_bcount += size;
			bp->b_bufsize += size;

			s = splbio();
			bundirty(tbp);
			tbp->b_flags &= ~B_DONE;
			tbp->b_ioflags &= ~BIO_ERROR;
			tbp->b_flags |= B_ASYNC;
			tbp->b_iocmd = BIO_WRITE;
			reassignbuf(tbp);		/* put on clean list */
			VI_LOCK(tbp->b_vp);
			++tbp->b_vp->v_numoutput;
			VI_UNLOCK(tbp->b_vp);
			splx(s);
			BUF_KERNPROC(tbp);
			TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
			    tbp, b_cluster.cluster_entry);
		}
	finishcluster:
		pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
		    (vm_page_t *) bp->b_pages, bp->b_npages);
		if (bp->b_bufsize > bp->b_kvasize)
			panic(
			    "cluster_wbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
			    bp->b_bufsize, bp->b_kvasize);
		bp->b_kvasize = bp->b_bufsize;
		totalwritten += bp->b_bufsize;
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = bp->b_bufsize;
		bawrite(bp);

		len -= i;
	}
	return totalwritten;
}

/*
 * Collect all the buffers in a cluster, plus add one additional
 * buffer (last_bp) passed in by the caller.
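 *
 * The resulting list is handed to VOP_REALLOCBLKS() by cluster_write()
 * in the hope of rearranging the blocks to be physically contiguous.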
 */
static struct cluster_save *
cluster_collectbufs(vp, last_bp)
	struct vnode *vp;
	struct buf *last_bp;
{
	struct cluster_save *buflist;
	struct buf *bp;
	daddr_t lbn;
	int i, len;

	len = vp->v_lastw - vp->v_cstart + 1;
	buflist = malloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
	    M_SEGMENT, M_WAITOK);
	buflist->bs_nchildren = 0;
	buflist->bs_children = (struct buf **) (buflist + 1);
	for (lbn = vp->v_cstart, i = 0; i < len; lbn++, i++) {
		(void) bread(vp, lbn, last_bp->b_bcount, NOCRED, &bp);
		buflist->bs_children[i] = bp;
		if (bp->b_blkno == bp->b_lblkno)
			VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno,
			    NULL, NULL);
	}
	buflist->bs_children[i] = bp = last_bp;
	if (bp->b_blkno == bp->b_lblkno)
		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno,
		    NULL, NULL);
	buflist->bs_nchildren = i + 1;
	return (buflist);
}