/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 * 	Copyright (c) 1995 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cluster.c	8.7 (Berkeley) 2/13/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_debug_cluster.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <sys/sysctl.h>

#if defined(CLUSTERDEBUG)
static int	rcluster = 0;
SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0,
    "Debug VFS clustering code");
#endif

static MALLOC_DEFINE(M_SEGMENT, "cl_savebuf", "cluster_save buffer");
static uma_zone_t cluster_pbuf_zone;

static void cluster_init(void *);
static struct cluster_save *cluster_collectbufs(struct vnode *vp,
	    struct buf *last_bp, int gbflags);
static struct buf *cluster_rbuild(struct vnode *vp, u_quad_t filesize,
	    daddr_t lbn, daddr_t blkno, long size, int run, int gbflags,
	    struct buf *fbp);
static void cluster_callback(struct buf *);

static int write_behind = 1;
SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0,
    "Cluster write-behind; 0: disable, 1: enable, 2: backed off");

static int read_max = 64;
SYSCTL_INT(_vfs, OID_AUTO, read_max, CTLFLAG_RW, &read_max, 0,
    "Cluster read-ahead max block count");

static int read_min = 1;
SYSCTL_INT(_vfs, OID_AUTO, read_min, CTLFLAG_RW, &read_min, 0,
    "Cluster read min block count");

SYSINIT(cluster, SI_SUB_CPU, SI_ORDER_ANY, cluster_init, NULL);

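/*
 * Create the pbuf zone used to assemble cluster I/O requests.  The zone
 * is capped at half of nswbuf, presumably so that cluster I/O cannot
 * exhaust the shared pbuf reserve.
 */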
static void
cluster_init(void *dummy)
{

	cluster_pbuf_zone = pbuf_zsecond_create("clpbuf", nswbuf / 2);
}

/*
 * Read data to a buf, including read-ahead if we find this to be beneficial.
 * cluster_read replaces bread.
 */
int
cluster_read(struct vnode *vp, u_quad_t filesize, daddr_t lblkno, long size,
    struct ucred *cred, long totread, int seqcount, int gbflags,
    struct buf **bpp)
{
	struct buf *bp, *rbp, *reqbp;
	struct bufobj *bo;
	struct thread *td;
	daddr_t blkno, origblkno;
	int maxra, racluster;
	int error, ncontig;
	int i;

	error = 0;
	td = curthread;
	bo = &vp->v_bufobj;
	if (!unmapped_buf_allowed)
		gbflags &= ~GB_UNMAPPED;

	/*
	 * Try to limit the amount of read-ahead by a few ad-hoc parameters:
	 * the detected sequential-access count, the vfs.read_max tunable,
	 * a fraction of the buffer cache, and the remaining file size.
	 * This needs work!!!
	 */
	racluster = vp->v_mount->mnt_iosize_max / size;
	maxra = seqcount;
	maxra = min(read_max, maxra);
	maxra = min(nbuf/8, maxra);
	if (((u_quad_t)(lblkno + maxra + 1) * size) > filesize)
		maxra = (filesize / size) - lblkno;

	/*
	 * Get the requested block.
	 */
	error = getblkx(vp, lblkno, lblkno, size, 0, 0, gbflags, &bp);
	if (error != 0) {
		*bpp = NULL;
		return (error);
	}
	gbflags &= ~GB_NOSPARSE;
	origblkno = lblkno;
	*bpp = reqbp = bp;

	/*
	 * If it is in the cache, then check to see if the reads have been
	 * sequential.  If they have, then try some read-ahead, otherwise
	 * back-off on prospective read-aheads.
	 */
	if (bp->b_flags & B_CACHE) {
		if (!seqcount) {
			return (0);
		} else if ((bp->b_flags & B_RAM) == 0) {
			return (0);
		} else {
			bp->b_flags &= ~B_RAM;
			BO_RLOCK(bo);
			for (i = 1; i < maxra; i++) {
				/*
				 * Stop if the buffer does not exist or it
				 * is invalid (about to go away?)
				 */
				rbp = gbincore(&vp->v_bufobj, lblkno+i);
				if (rbp == NULL || (rbp->b_flags & B_INVAL))
					break;

				/*
				 * Set another read-ahead mark so we know
				 * to check again.  (If we can lock the
				 * buffer without waiting)
				 */
				if ((((i % racluster) == (racluster - 1)) ||
				    (i == (maxra - 1))) &&
				    BUF_LOCK(rbp,
				    LK_EXCLUSIVE | LK_NOWAIT, NULL) == 0) {
					rbp->b_flags |= B_RAM;
					BUF_UNLOCK(rbp);
				}
			}
			BO_RUNLOCK(bo);
			if (i >= maxra) {
				return (0);
			}
			lblkno += i;
		}
		reqbp = bp = NULL;
	/*
	 * If it isn't in the cache, then get a chunk from
	 * disk if sequential, otherwise just get the block.
	 */
	} else {
		off_t firstread = bp->b_offset;
		int nblks;
		long minread;

		KASSERT(bp->b_offset != NOOFFSET,
		    ("cluster_read: no buffer offset"));

		ncontig = 0;

		/*
		 * Adjust totread if needed
		 */
		minread = read_min * size;
		if (minread > totread)
			totread = minread;

		/*
		 * Compute the total number of blocks that we should read
		 * synchronously.
		 */
		if (firstread + totread > filesize)
			totread = filesize - firstread;
		nblks = howmany(totread, size);
		if (nblks > racluster)
			nblks = racluster;

		/*
		 * Now compute the number of contiguous blocks.
		 */
		if (nblks > 1) {
			error = VOP_BMAP(vp, lblkno, NULL,
			    &blkno, &ncontig, NULL);
			/*
			 * If this failed to map just do the original block.
			 */
			if (error || blkno == -1)
				ncontig = 0;
		}

		/*
		 * If we have contiguous data available do a cluster
		 * otherwise just read the requested block.
		 */
		if (ncontig) {
			/* Account for our first block. */
			ncontig = min(ncontig + 1, nblks);
			if (ncontig < nblks)
				nblks = ncontig;
			bp = cluster_rbuild(vp, filesize, lblkno,
			    blkno, size, nblks, gbflags, bp);
			lblkno += (bp->b_bufsize / size);
		} else {
			bp->b_flags |= B_RAM;
			bp->b_iocmd = BIO_READ;
			lblkno += 1;
		}
	}

	/*
	 * Handle the synchronous read so that it is available ASAP.
	 */
	if (bp) {
		if ((bp->b_flags & B_CLUSTER) == 0) {
			vfs_busy_pages(bp, 0);
		}
		bp->b_flags &= ~B_INVAL;
		bp->b_ioflags &= ~BIO_ERROR;
		if ((bp->b_flags & B_ASYNC) || bp->b_iodone != NULL)
			BUF_KERNPROC(bp);
		bp->b_iooffset = dbtob(bp->b_blkno);
		bstrategy(bp);
#ifdef RACCT
		if (racct_enable) {
			PROC_LOCK(td->td_proc);
			racct_add_buf(td->td_proc, bp, 0);
			PROC_UNLOCK(td->td_proc);
		}
#endif /* RACCT */
		td->td_ru.ru_inblock++;
	}

	/*
	 * If we have been doing sequential I/O, then do some read-ahead.
	 */
	while (lblkno < (origblkno + maxra)) {
		error = VOP_BMAP(vp, lblkno, NULL, &blkno, &ncontig, NULL);
		if (error)
			break;

		if (blkno == -1)
			break;

		/*
		 * We could throttle ncontig here by maxra but we might as
		 * well read the data if it is contiguous.  We're throttled
		 * by racluster anyway.
		 */
		if (ncontig) {
			ncontig = min(ncontig + 1, racluster);
			rbp = cluster_rbuild(vp, filesize, lblkno, blkno,
			    size, ncontig, gbflags, NULL);
			lblkno += (rbp->b_bufsize / size);
			if (rbp->b_flags & B_DELWRI) {
				bqrelse(rbp);
				continue;
			}
		} else {
			rbp = getblk(vp, lblkno, size, 0, 0, gbflags);
			lblkno += 1;
			if (rbp->b_flags & B_DELWRI) {
				bqrelse(rbp);
				continue;
			}
			rbp->b_flags |= B_ASYNC | B_RAM;
			rbp->b_iocmd = BIO_READ;
			rbp->b_blkno = blkno;
		}
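		/* The read-ahead buffer is already valid; no I/O is needed. */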
		if (rbp->b_flags & B_CACHE) {
			rbp->b_flags &= ~B_ASYNC;
			bqrelse(rbp);
			continue;
		}
		if ((rbp->b_flags & B_CLUSTER) == 0) {
			vfs_busy_pages(rbp, 0);
		}
		rbp->b_flags &= ~B_INVAL;
		rbp->b_ioflags &= ~BIO_ERROR;
		if ((rbp->b_flags & B_ASYNC) || rbp->b_iodone != NULL)
			BUF_KERNPROC(rbp);
		rbp->b_iooffset = dbtob(rbp->b_blkno);
		bstrategy(rbp);
#ifdef RACCT
		if (racct_enable) {
			PROC_LOCK(td->td_proc);
			racct_add_buf(td->td_proc, rbp, 0);
			PROC_UNLOCK(td->td_proc);
		}
#endif /* RACCT */
		td->td_ru.ru_inblock++;
	}

	if (reqbp) {
		/*
		 * Like bread, always brelse() the buffer when
		 * returning an error.
		 */
		error = bufwait(reqbp);
		if (error != 0) {
			brelse(reqbp);
			*bpp = NULL;
		}
	}
	return (error);
}

/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 */
static struct buf *
cluster_rbuild(struct vnode *vp, u_quad_t filesize, daddr_t lbn,
    daddr_t blkno, long size, int run, int gbflags, struct buf *fbp)
{
	struct buf *bp, *tbp;
	daddr_t bn;
	off_t off;
	long tinc, tsize;
	int i, inc, j, k, toff;

	KASSERT(size == vp->v_mount->mnt_stat.f_iosize,
	    ("cluster_rbuild: size %ld != f_iosize %jd\n",
	    size, (intmax_t)vp->v_mount->mnt_stat.f_iosize));

	/*
	 * avoid a division
	 */
	while ((u_quad_t) size * (lbn + run) > filesize) {
		--run;
	}

	if (fbp) {
		tbp = fbp;
		tbp->b_iocmd = BIO_READ;
	} else {
		tbp = getblk(vp, lbn, size, 0, 0, gbflags);
		if (tbp->b_flags & B_CACHE)
			return (tbp);
		tbp->b_flags |= B_ASYNC | B_RAM;
		tbp->b_iocmd = BIO_READ;
	}
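	/*
	 * Buffers backed by malloc'ed memory or not backed by VMIO pages
	 * cannot be assembled into a cluster, and a run of one block has
	 * nothing to cluster with; hand back the single buffer in those
	 * cases.
	 */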
	tbp->b_blkno = blkno;
	if ((tbp->b_flags & B_MALLOC) ||
	    ((tbp->b_flags & B_VMIO) == 0) || (run <= 1))
		return (tbp);

	bp = uma_zalloc(cluster_pbuf_zone, M_NOWAIT);
	if (bp == NULL)
		return (tbp);
	MPASS((bp->b_flags & B_MAXPHYS) != 0);

	/*
	 * We are synthesizing a buffer out of vm_page_t's, but
	 * if the block size is not page aligned then the starting
	 * address may not be either.  Inherit the b_data offset
	 * from the original buffer.
	 */
	bp->b_flags = B_ASYNC | B_CLUSTER | B_VMIO;
	if ((gbflags & GB_UNMAPPED) != 0) {
		bp->b_data = unmapped_buf;
	} else {
		bp->b_data = (char *)((vm_offset_t)bp->b_data |
		    ((vm_offset_t)tbp->b_data & PAGE_MASK));
	}
	bp->b_iocmd = BIO_READ;
	bp->b_iodone = cluster_callback;
	bp->b_blkno = blkno;
	bp->b_lblkno = lbn;
	bp->b_offset = tbp->b_offset;
	KASSERT(bp->b_offset != NOOFFSET, ("cluster_rbuild: no buffer offset"));
	pbgetvp(vp, bp);

	TAILQ_INIT(&bp->b_cluster.cluster_head);

	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_npages = 0;

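	/*
	 * Walk the run of contiguous blocks, appending each component
	 * buffer to the cluster.  The first pass uses the buffer obtained
	 * above; later passes try to pick up additional blocks with
	 * non-blocking getblk() calls and stop at the first buffer that
	 * cannot join the cluster.
	 */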
	inc = btodb(size);
	for (bn = blkno, i = 0; i < run; ++i, bn += inc) {
		if (i == 0) {
			vm_object_pip_add(tbp->b_bufobj->bo_object,
			    tbp->b_npages);
			vfs_busy_pages_acquire(tbp);
		} else {
			if ((bp->b_npages * PAGE_SIZE) +
			    round_page(size) > vp->v_mount->mnt_iosize_max) {
				break;
			}

			tbp = getblk(vp, lbn + i, size, 0, 0, GB_LOCK_NOWAIT |
			    (gbflags & GB_UNMAPPED));

			/* Don't wait around for locked bufs. */
			if (tbp == NULL)
				break;

			/*
			 * Stop scanning if the buffer is fully valid
			 * (marked B_CACHE), or locked (may be doing a
			 * background write), or if the buffer is not
			 * VMIO backed.  The clustering code can only deal
			 * with VMIO-backed buffers.  The bo lock is not
			 * required for the BKGRDINPROG check since it
			 * can not be set without the buf lock.
			 */
			if ((tbp->b_vflags & BV_BKGRDINPROG) ||
			    (tbp->b_flags & B_CACHE) ||
			    (tbp->b_flags & B_VMIO) == 0) {
				bqrelse(tbp);
				break;
			}

			/*
			 * The buffer must be completely invalid in order to
			 * take part in the cluster.  If it is partially valid
			 * then we stop.
			 */
			off = tbp->b_offset;
			tsize = size;
			for (j = 0; tsize > 0; j++) {
				toff = off & PAGE_MASK;
				tinc = tsize;
				if (toff + tinc > PAGE_SIZE)
					tinc = PAGE_SIZE - toff;
				if (vm_page_trysbusy(tbp->b_pages[j]) == 0)
					break;
				if ((tbp->b_pages[j]->valid &
				    vm_page_bits(toff, tinc)) != 0) {
					vm_page_sunbusy(tbp->b_pages[j]);
					break;
				}
				vm_object_pip_add(tbp->b_bufobj->bo_object, 1);
				off += tinc;
				tsize -= tinc;
			}
			if (tsize > 0) {
				/*
				 * A page was partially valid or could not
				 * be busied, or (via the goto below) the
				 * buffer's block number did not match; back
				 * out the page busying and pip references
				 * taken above and stop scanning.
				 */
clean_sbusy:
				vm_object_pip_wakeupn(tbp->b_bufobj->bo_object,
				    j);
				for (k = 0; k < j; k++)
					vm_page_sunbusy(tbp->b_pages[k]);
				bqrelse(tbp);
				break;
			}

			/*
			 * Set a read-ahead mark as appropriate
			 */
			if ((fbp && (i == 1)) || (i == (run - 1)))
				tbp->b_flags |= B_RAM;

			/*
			 * Set the buffer up for an async read (XXX should
			 * we do this only if we do not wind up brelse()ing?).
			 * Set the block number if it isn't set, otherwise
			 * if it is make sure it matches the block number we
			 * expect.
			 */
			tbp->b_flags |= B_ASYNC;
			tbp->b_iocmd = BIO_READ;
			if (tbp->b_blkno == tbp->b_lblkno) {
				tbp->b_blkno = bn;
			} else if (tbp->b_blkno != bn) {
				goto clean_sbusy;
			}
		}
		/*
		 * XXX fbp from caller may not be B_ASYNC, but we are going
		 * to biodone() it in cluster_callback() anyway
		 */
		BUF_KERNPROC(tbp);
		TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
		    tbp, b_cluster.cluster_entry);
		for (j = 0; j < tbp->b_npages; j += 1) {
			vm_page_t m;

			m = tbp->b_pages[j];
			/*
			 * Share the page with the cluster buffer, skipping
			 * a page already appended by the previous component
			 * buffer (possible when buffers straddle a page).
			 */
			if ((bp->b_npages == 0) ||
			    (bp->b_pages[bp->b_npages-1] != m)) {
				bp->b_pages[bp->b_npages] = m;
				bp->b_npages++;
			}
			/*
			 * A fully valid page needs no I/O; substitute
			 * bogus_page so the read does not overwrite the
			 * valid copy.
			 */
			if (vm_page_all_valid(m))
				tbp->b_pages[j] = bogus_page;
		}

		/*
		 * Don't inherit tbp->b_bufsize as it may be larger due to
		 * a non-page-aligned size.  Instead just aggregate using
		 * 'size'.
		 */
		if (tbp->b_bcount != size)
			printf("warning: tbp->b_bcount wrong %ld vs %ld\n",
			    tbp->b_bcount, size);
		if (tbp->b_bufsize != size)
			printf("warning: tbp->b_bufsize wrong %ld vs %ld\n",
			    tbp->b_bufsize, size);
		bp->b_bcount += size;
		bp->b_bufsize += size;
	}

	/*
	 * Fully valid pages in the cluster are already good and do not need
	 * to be re-read from disk.  Replace the page with bogus_page
	 */
	for (j = 0; j < bp->b_npages; j++) {
		if (vm_page_all_valid(bp->b_pages[j]))
			bp->b_pages[j] = bogus_page;
	}
	if (bp->b_bufsize > bp->b_kvasize)
		panic("cluster_rbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
		    bp->b_bufsize, bp->b_kvasize);

	if (buf_mapped(bp)) {
		pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
		    (vm_page_t *)bp->b_pages, bp->b_npages);
	}
	return (bp);
}

/*
 * Cleanup after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
 */
static void
cluster_callback(struct buf *bp)
{
	struct buf *nbp, *tbp;
	int error = 0;

	/*
	 * Must propagate errors to all the components.
	 */
	if (bp->b_ioflags & BIO_ERROR)
		error = bp->b_error;

	if (buf_mapped(bp)) {
		pmap_qremove(trunc_page((vm_offset_t) bp->b_data),
		    bp->b_npages);
	}
	/*
	 * Move memory from the large cluster buffer into the component
	 * buffers and mark IO as done on these.
	 */
	for (tbp = TAILQ_FIRST(&bp->b_cluster.cluster_head);
	    tbp; tbp = nbp) {
		nbp = TAILQ_NEXT(&tbp->b_cluster, cluster_entry);
		if (error) {
			tbp->b_ioflags |= BIO_ERROR;
			tbp->b_error = error;
		} else {
			tbp->b_dirtyoff = tbp->b_dirtyend = 0;
			tbp->b_flags &= ~B_INVAL;
			tbp->b_ioflags &= ~BIO_ERROR;
			/*
			 * XXX the bdwrite()/bqrelse() issued during
			 * cluster building clears B_RELBUF (see bqrelse()
			 * comment).  If direct I/O was specified, we have
			 * to restore it here to allow the buffer and VM
			 * to be freed.
			 */
			if (tbp->b_flags & B_DIRECT)
				tbp->b_flags |= B_RELBUF;
		}
		bufdone(tbp);
	}
	pbrelvp(bp);
	uma_zfree(cluster_pbuf_zone, bp);
}

/*
 * cluster_wbuild_wb:
 *
 *	Implement modified write build for cluster.
 *
 *	write_behind = 0	write behind disabled
 *	write_behind = 1	write behind normal (default)
 *	write_behind = 2	write behind backed-off
 *
 *	In backed-off mode the window is shifted back by one full cluster:
 *	for example, a call with start_lbn = 32 and len = 16 issues the
 *	write for the cluster beginning at logical block 16, and is skipped
 *	entirely if fewer than len blocks precede start_lbn.
 */

static __inline int
cluster_wbuild_wb(struct vnode *vp, long size, daddr_t start_lbn, int len,
    int gbflags)
{
	int r = 0;

	switch (write_behind) {
	case 2:
		if (start_lbn < len)
			break;
		start_lbn -= len;
		/* FALLTHROUGH */
	case 1:
		r = cluster_wbuild(vp, size, start_lbn, len, gbflags);
		/* FALLTHROUGH */
	default:
		/* FALLTHROUGH */
		break;
	}
	return (r);
}

/*
 * Do clustered write for FFS.
 *
 * Four cases:
 *	1.	Write is not sequential (write asynchronously)
 *	Write is sequential:
 *	2.	beginning of cluster - begin cluster
 *	3.	middle of a cluster - add to cluster
 *	4.	end of a cluster - asynchronously write cluster
 */
void
cluster_write(struct vnode *vp, struct buf *bp, u_quad_t filesize, int seqcount,
    int gbflags)
{
	daddr_t lbn;
	int maxclen, cursize;
	int lblocksize;
	int async;

	if (!unmapped_buf_allowed)
		gbflags &= ~GB_UNMAPPED;

	if (vp->v_type == VREG) {
		async = DOINGASYNC(vp);
		lblocksize = vp->v_mount->mnt_stat.f_iosize;
	} else {
		async = 0;
		lblocksize = bp->b_bufsize;
	}
	lbn = bp->b_lblkno;
	KASSERT(bp->b_offset != NOOFFSET, ("cluster_write: no buffer offset"));

	/* Initialize vnode to beginning of file. */
	if (lbn == 0)
		vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;

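	/*
	 * A write extends the pending cluster only if it is the next
	 * logical block after the last write (v_lastw) and its disk
	 * address is physically adjacent to the previous one (v_lasta).
	 * Anything else means the pending cluster, if any, must be
	 * resolved before new cluster state is recorded.
	 */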
	if (vp->v_clen == 0 || lbn != vp->v_lastw + 1 ||
	    (bp->b_blkno != vp->v_lasta + btodb(lblocksize))) {
		maxclen = vp->v_mount->mnt_iosize_max / lblocksize - 1;
		if (vp->v_clen != 0) {
			/*
			 * Next block is not sequential.
			 *
			 * If we are not writing at end of file, the process
			 * seeked to another point in the file since its last
			 * write, or we have reached our maximum cluster size,
			 * then push the previous cluster.  Otherwise try
			 * reallocating to make it sequential.
			 *
			 * Change to algorithm: only push previous cluster if
			 * it was sequential from the point of view of the
			 * seqcount heuristic, otherwise leave the buffer
			 * intact so we can potentially optimize the I/O
			 * later on in the buf_daemon or update daemon
			 * flush.
			 */
			cursize = vp->v_lastw - vp->v_cstart + 1;
			if (((u_quad_t) bp->b_offset + lblocksize) != filesize ||
			    lbn != vp->v_lastw + 1 || vp->v_clen <= cursize) {
				if (!async && seqcount > 0) {
					cluster_wbuild_wb(vp, lblocksize,
					    vp->v_cstart, cursize, gbflags);
				}
			} else {
				struct buf **bpp, **endbp;
				struct cluster_save *buflist;

				buflist = cluster_collectbufs(vp, bp, gbflags);
				if (buflist == NULL) {
					/*
					 * Cluster build failed so just write
					 * it now.
					 */
					bawrite(bp);
					return;
				}
				endbp = &buflist->bs_children
				    [buflist->bs_nchildren - 1];
				if (VOP_REALLOCBLKS(vp, buflist)) {
					/*
					 * Failed, push the previous cluster
					 * if *really* writing sequentially
					 * in the logical file (seqcount > 1),
					 * otherwise delay it in the hopes that
					 * the low level disk driver can
					 * optimize the write ordering.
					 */
					for (bpp = buflist->bs_children;
					    bpp < endbp; bpp++)
						brelse(*bpp);
					free(buflist, M_SEGMENT);
					if (seqcount > 1) {
						cluster_wbuild_wb(vp,
						    lblocksize, vp->v_cstart,
						    cursize, gbflags);
					}
				} else {
					/*
					 * Succeeded, keep building cluster.
					 */
					for (bpp = buflist->bs_children;
					    bpp <= endbp; bpp++)
						bdwrite(*bpp);
					free(buflist, M_SEGMENT);
					vp->v_lastw = lbn;
					vp->v_lasta = bp->b_blkno;
					return;
				}
			}
		}
		/*
		 * Consider beginning a cluster.  If at end of file, make
		 * cluster as large as possible, otherwise find size of
		 * existing cluster.
		 */
		if ((vp->v_type == VREG) &&
		    ((u_quad_t) bp->b_offset + lblocksize) != filesize &&
		    (bp->b_blkno == bp->b_lblkno) &&
		    (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen, NULL) ||
		    bp->b_blkno == -1)) {
			bawrite(bp);
			vp->v_clen = 0;
			vp->v_lasta = bp->b_blkno;
			vp->v_cstart = lbn + 1;
			vp->v_lastw = lbn;
			return;
		}
		vp->v_clen = maxclen;
		if (!async && maxclen == 0) {	/* I/O not contiguous */
			vp->v_cstart = lbn + 1;
			bawrite(bp);
		} else {	/* Wait for rest of cluster */
			vp->v_cstart = lbn;
			bdwrite(bp);
		}
	} else if (lbn == vp->v_cstart + vp->v_clen) {
		/*
		 * At end of cluster, write it out if seqcount tells us we
		 * are operating sequentially, otherwise let the buf or
		 * update daemon handle it.
		 */
		bdwrite(bp);
		if (seqcount > 1) {
			cluster_wbuild_wb(vp, lblocksize, vp->v_cstart,
			    vp->v_clen + 1, gbflags);
		}
		vp->v_clen = 0;
		vp->v_cstart = lbn + 1;
	} else if (vm_page_count_severe()) {
		/*
		 * We are low on memory, get it going NOW
		 */
		bawrite(bp);
	} else {
		/*
		 * In the middle of a cluster, so just delay the I/O for now.
		 */
		bdwrite(bp);
	}
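	/* Remember this block for the next call's sequentiality test. */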
	vp->v_lastw = lbn;
	vp->v_lasta = bp->b_blkno;
}

/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * Scan forward from start_lbn for up to len blocks, collecting dirty,
 * clusterable buffers into maximal contiguous clusters and issuing the
 * writes asynchronously.
 */
int
cluster_wbuild(struct vnode *vp, long size, daddr_t start_lbn, int len,
    int gbflags)
{
	struct buf *bp, *tbp;
	struct bufobj *bo;
	int i, j;
	int totalwritten = 0;
	int dbsize = btodb(size);

	if (!unmapped_buf_allowed)
		gbflags &= ~GB_UNMAPPED;

	bo = &vp->v_bufobj;
	while (len > 0) {
		/*
		 * If the buffer is not delayed-write (i.e. dirty), or it
		 * is delayed-write but either locked or inval, it cannot
		 * partake in the clustered write.
		 */
		BO_LOCK(bo);
		if ((tbp = gbincore(&vp->v_bufobj, start_lbn)) == NULL ||
		    (tbp->b_vflags & BV_BKGRDINPROG)) {
			BO_UNLOCK(bo);
			++start_lbn;
			--len;
			continue;
		}
		if (BUF_LOCK(tbp,
		    LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, BO_LOCKPTR(bo))) {
			++start_lbn;
			--len;
			continue;
		}
		if ((tbp->b_flags & (B_INVAL | B_DELWRI)) != B_DELWRI) {
			BUF_UNLOCK(tbp);
			++start_lbn;
			--len;
			continue;
		}
		bremfree(tbp);
		tbp->b_flags &= ~B_DONE;

		/*
		 * Extra memory in the buffer, punt on this buffer.
		 * XXX we could handle this in most cases, but we would
		 * have to push the extra memory down to after our max
		 * possible cluster size and then potentially pull it back
		 * up if the cluster was terminated prematurely--too much
		 * hassle.
		 */
		if (((tbp->b_flags & (B_CLUSTEROK | B_MALLOC | B_VMIO)) !=
		    (B_CLUSTEROK | B_VMIO)) ||
		    (tbp->b_bcount != tbp->b_bufsize) ||
		    (tbp->b_bcount != size) ||
		    (len == 1) ||
		    ((bp = uma_zalloc(cluster_pbuf_zone, M_NOWAIT)) == NULL)) {
			totalwritten += tbp->b_bufsize;
			bawrite(tbp);
			++start_lbn;
			--len;
			continue;
		}
		MPASS((bp->b_flags & B_MAXPHYS) != 0);

		/*
		 * We got a pbuf to make the cluster in; initialize it.
		 */
		TAILQ_INIT(&bp->b_cluster.cluster_head);
		bp->b_bcount = 0;
		bp->b_bufsize = 0;
		bp->b_npages = 0;
		if (tbp->b_wcred != NOCRED)
			bp->b_wcred = crhold(tbp->b_wcred);

		bp->b_blkno = tbp->b_blkno;
		bp->b_lblkno = tbp->b_lblkno;
		bp->b_offset = tbp->b_offset;

		/*
		 * We are synthesizing a buffer out of vm_page_t's, but
		 * if the block size is not page aligned then the starting
		 * address may not be either.  Inherit the b_data offset
		 * from the original buffer.
		 */
		if ((gbflags & GB_UNMAPPED) == 0 ||
		    (tbp->b_flags & B_VMIO) == 0) {
			bp->b_data = (char *)((vm_offset_t)bp->b_data |
			    ((vm_offset_t)tbp->b_data & PAGE_MASK));
		} else {
			bp->b_data = unmapped_buf;
		}
		bp->b_flags |= B_CLUSTER | (tbp->b_flags & (B_VMIO |
		    B_NEEDCOMMIT));
		bp->b_iodone = cluster_callback;
		pbgetvp(vp, bp);
		/*
		 * From this location in the file, scan forward to see
		 * if there are buffers with adjacent data that need to
		 * be written as well.
		 */
		for (i = 0; i < len; ++i, ++start_lbn) {
			if (i != 0) { /* If not the first buffer */
				/*
				 * If the adjacent data is not even in core it
				 * can't need to be written.
				 */
				BO_LOCK(bo);
				if ((tbp = gbincore(bo, start_lbn)) == NULL ||
				    (tbp->b_vflags & BV_BKGRDINPROG)) {
					BO_UNLOCK(bo);
					break;
				}

				/*
				 * If it IS in core, but has different
				 * characteristics, or is locked (which
				 * means it could be undergoing a background
				 * I/O or be in a weird state), then don't
				 * cluster with it.
				 */
				if (BUF_LOCK(tbp,
				    LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK,
				    BO_LOCKPTR(bo)))
					break;

				if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
				    B_INVAL | B_DELWRI | B_NEEDCOMMIT))
				    != (B_DELWRI | B_CLUSTEROK |
				    (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
				    tbp->b_wcred != bp->b_wcred) {
					BUF_UNLOCK(tbp);
					break;
				}

				/*
				 * Check that the combined cluster
				 * would make sense with regard to pages
				 * and would not be too large
				 */
				if ((tbp->b_bcount != size) ||
				    ((bp->b_blkno + (dbsize * i)) !=
				    tbp->b_blkno) ||
				    ((tbp->b_npages + bp->b_npages) >
				    (vp->v_mount->mnt_iosize_max / PAGE_SIZE))) {
					BUF_UNLOCK(tbp);
					break;
				}

				/*
				 * Ok, it's passed all the tests,
				 * so remove it from the free list
				 * and mark it busy.  We will use it.
				 */
				bremfree(tbp);
				tbp->b_flags &= ~B_DONE;
			} /* end of code for non-first buffers only */
			/*
			 * If the IO is via the VM then we do some
			 * special VM hackery (yuck).  Since the buffer's
			 * block size may not be page-aligned it is possible
			 * for a page to be shared between two buffers.  We
			 * have to get rid of the duplication when building
			 * the cluster.
			 */
			if (tbp->b_flags & B_VMIO) {
				vm_page_t m;

				if (i == 0) {
					vfs_busy_pages_acquire(tbp);
				} else { /* if not first buffer */
					for (j = 0; j < tbp->b_npages; j += 1) {
						m = tbp->b_pages[j];
						if (vm_page_trysbusy(m) == 0) {
							for (j--; j >= 0; j--)
								vm_page_sunbusy(
								    tbp->b_pages[j]);
							bqrelse(tbp);
							goto finishcluster;
						}
					}
				}
				vm_object_pip_add(tbp->b_bufobj->bo_object,
				    tbp->b_npages);
				for (j = 0; j < tbp->b_npages; j += 1) {
					m = tbp->b_pages[j];
					if ((bp->b_npages == 0) ||
					    (bp->b_pages[bp->b_npages - 1] != m)) {
						bp->b_pages[bp->b_npages] = m;
						bp->b_npages++;
					}
				}
			}
			bp->b_bcount += size;
			bp->b_bufsize += size;
			/*
			 * If any of the clustered buffers have their
			 * B_BARRIER flag set, transfer that request to
			 * the cluster.
			 */
			bp->b_flags |= (tbp->b_flags & B_BARRIER);
			tbp->b_flags &= ~(B_DONE | B_BARRIER);
			tbp->b_flags |= B_ASYNC;
			tbp->b_ioflags &= ~BIO_ERROR;
			tbp->b_iocmd = BIO_WRITE;
			bundirty(tbp);
			reassignbuf(tbp);	/* put on clean list */
			bufobj_wref(tbp->b_bufobj);
			BUF_KERNPROC(tbp);
			buf_track(tbp, __func__);
			TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
			    tbp, b_cluster.cluster_entry);
		}
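		/*
		 * The cluster is complete (or scanning stopped early); map
		 * its pages into the pbuf's KVA if needed and issue the
		 * write.
		 */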
	finishcluster:
		if (buf_mapped(bp)) {
			pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
			    (vm_page_t *)bp->b_pages, bp->b_npages);
		}
		if (bp->b_bufsize > bp->b_kvasize)
			panic(
			    "cluster_wbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
			    bp->b_bufsize, bp->b_kvasize);
		totalwritten += bp->b_bufsize;
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = bp->b_bufsize;
		bawrite(bp);

		len -= i;
	}
	return (totalwritten);
}

/*
 * Collect together all the buffers in a cluster, plus the additional
 * buffer passed in by the caller (last_bp).
 */
static struct cluster_save *
cluster_collectbufs(struct vnode *vp, struct buf *last_bp, int gbflags)
{
	struct cluster_save *buflist;
	struct buf *bp;
	daddr_t lbn;
	int i, j, len, error;

	len = vp->v_lastw - vp->v_cstart + 1;
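	/*
	 * Allocate the cluster_save header and the child pointer array in
	 * one allocation; the array lives immediately after the header.
	 */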
	buflist = malloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
	    M_SEGMENT, M_WAITOK);
	buflist->bs_nchildren = 0;
	buflist->bs_children = (struct buf **) (buflist + 1);
	for (lbn = vp->v_cstart, i = 0; i < len; lbn++, i++) {
		error = bread_gb(vp, lbn, last_bp->b_bcount, NOCRED,
		    gbflags, &bp);
		if (error != 0) {
			/*
			 * If read fails, release collected buffers
			 * and return failure.
			 */
			for (j = 0; j < i; j++)
				brelse(buflist->bs_children[j]);
			free(buflist, M_SEGMENT);
			return (NULL);
		}
		buflist->bs_children[i] = bp;
		if (bp->b_blkno == bp->b_lblkno)
			VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno,
			    NULL, NULL);
	}
	buflist->bs_children[i] = bp = last_bp;
	if (bp->b_blkno == bp->b_lblkno)
		VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
	buflist->bs_nchildren = i + 1;
	return (buflist);
}