/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 *	Copyright (c) 1995 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cluster.c	8.7 (Berkeley) 2/13/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_debug_cluster.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <sys/sysctl.h>

#if defined(CLUSTERDEBUG)
static int rcluster = 0;
SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0,
    "Debug VFS clustering code");
#endif

static MALLOC_DEFINE(M_SEGMENT, "cl_savebuf", "cluster_save buffer");

static struct cluster_save *cluster_collectbufs(struct vnode *vp,
	    struct buf *last_bp, int gbflags);
static struct buf *cluster_rbuild(struct vnode *vp, u_quad_t filesize,
	    daddr_t lbn, daddr_t blkno, long size, int run, int gbflags,
	    struct buf *fbp);
static void cluster_callback(struct buf *);

static int write_behind = 1;
SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0,
    "Cluster write-behind; 0: disable, 1: enable, 2: backed off");

static int read_max = 64;
SYSCTL_INT(_vfs, OID_AUTO, read_max, CTLFLAG_RW, &read_max, 0,
    "Cluster read-ahead max block count");

static int read_min = 1;
SYSCTL_INT(_vfs, OID_AUTO, read_min, CTLFLAG_RW, &read_min, 0,
    "Cluster read min block count");

/*
 * Read data to a buf, including read-ahead if we find this to be beneficial.
 * cluster_read replaces bread.
 */
int
cluster_read(struct vnode *vp, u_quad_t filesize, daddr_t lblkno, long size,
    struct ucred *cred, long totread, int seqcount, int gbflags,
    struct buf **bpp)
{
	struct buf *bp, *rbp, *reqbp;
	struct bufobj *bo;
	struct thread *td;
	daddr_t blkno, origblkno;
	int maxra, racluster;
	int error, ncontig;
	int i;

	error = 0;
	td = curthread;
	bo = &vp->v_bufobj;
	if (!unmapped_buf_allowed)
		gbflags &= ~GB_UNMAPPED;

	/*
	 * Try to limit the amount of read-ahead by a few
	 * ad-hoc parameters.  This needs work!!!
	 */
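	/*
	 * maxra is the read-ahead window in logical blocks: start from the
	 * caller's sequential-access estimate (seqcount), cap it at the
	 * vfs.read_max tunable and at 1/8 of the buffer headers, and never
	 * let it extend past the end of the file.
	 */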
	racluster = vp->v_mount->mnt_iosize_max / size;
	maxra = seqcount;
	maxra = min(read_max, maxra);
	maxra = min(nbuf/8, maxra);
	if (((u_quad_t)(lblkno + maxra + 1) * size) > filesize)
		maxra = (filesize / size) - lblkno;

	/*
	 * get the requested block
	 */
	error = getblkx(vp, lblkno, size, 0, 0, gbflags, &bp);
	if (error != 0) {
		*bpp = NULL;
		return (error);
	}
	gbflags &= ~GB_NOSPARSE;
	origblkno = lblkno;
	*bpp = reqbp = bp;

	/*
	 * If it is in the cache, then check to see if the reads have been
	 * sequential.  If they have, then try some read-ahead, otherwise
	 * back off on prospective read-aheads.
	 */
	if (bp->b_flags & B_CACHE) {
		if (!seqcount) {
			return (0);
		} else if ((bp->b_flags & B_RAM) == 0) {
			return (0);
		} else {
			bp->b_flags &= ~B_RAM;
			BO_RLOCK(bo);
			for (i = 1; i < maxra; i++) {
				/*
				 * Stop if the buffer does not exist or it
				 * is invalid (about to go away?)
				 */
				rbp = gbincore(&vp->v_bufobj, lblkno+i);
				if (rbp == NULL || (rbp->b_flags & B_INVAL))
					break;

				/*
				 * Set another read-ahead mark so we know
				 * to check again. (If we can lock the
				 * buffer without waiting)
				 */
				if ((((i % racluster) == (racluster - 1)) ||
				    (i == (maxra - 1)))
				    && (0 == BUF_LOCK(rbp,
				    LK_EXCLUSIVE | LK_NOWAIT, NULL))) {
					rbp->b_flags |= B_RAM;
					BUF_UNLOCK(rbp);
				}
			}
			BO_RUNLOCK(bo);
			if (i >= maxra) {
				return (0);
			}
			lblkno += i;
		}
		reqbp = bp = NULL;
	/*
	 * If it isn't in the cache, then get a chunk from
	 * disk if sequential, otherwise just get the block.
	 */
	} else {
		off_t firstread = bp->b_offset;
		int nblks;
		long minread;

		KASSERT(bp->b_offset != NOOFFSET,
		    ("cluster_read: no buffer offset"));

		ncontig = 0;

		/*
		 * Adjust totread if needed
		 */
		minread = read_min * size;
		if (minread > totread)
			totread = minread;

		/*
		 * Compute the total number of blocks that we should read
		 * synchronously.
		 */
		if (firstread + totread > filesize)
			totread = filesize - firstread;
		nblks = howmany(totread, size);
		if (nblks > racluster)
			nblks = racluster;

		/*
		 * Now compute the number of contiguous blocks.
		 */
		if (nblks > 1) {
			error = VOP_BMAP(vp, lblkno, NULL,
			    &blkno, &ncontig, NULL);
			/*
			 * If this failed to map just do the original block.
			 */
			if (error || blkno == -1)
				ncontig = 0;
		}

		/*
		 * If we have contiguous data available, do a cluster read;
		 * otherwise just read the requested block.
		 */
		if (ncontig) {
			/* Account for our first block. */
			ncontig = min(ncontig + 1, nblks);
			if (ncontig < nblks)
				nblks = ncontig;
			bp = cluster_rbuild(vp, filesize, lblkno,
			    blkno, size, nblks, gbflags, bp);
			lblkno += (bp->b_bufsize / size);
		} else {
			bp->b_flags |= B_RAM;
			bp->b_iocmd = BIO_READ;
			lblkno += 1;
		}
	}

	/*
	 * handle the synchronous read so that it is available ASAP.
	 */
	if (bp) {
		if ((bp->b_flags & B_CLUSTER) == 0) {
			vfs_busy_pages(bp, 0);
		}
		bp->b_flags &= ~B_INVAL;
		bp->b_ioflags &= ~BIO_ERROR;
		if ((bp->b_flags & B_ASYNC) || bp->b_iodone != NULL)
			BUF_KERNPROC(bp);
		bp->b_iooffset = dbtob(bp->b_blkno);
		bstrategy(bp);
#ifdef RACCT
		if (racct_enable) {
			PROC_LOCK(td->td_proc);
			racct_add_buf(td->td_proc, bp, 0);
			PROC_UNLOCK(td->td_proc);
		}
#endif /* RACCT */
		td->td_ru.ru_inblock++;
	}

	/*
	 * If we have been doing sequential I/O, then do some read-ahead.
	 */
	while (lblkno < (origblkno + maxra)) {
		error = VOP_BMAP(vp, lblkno, NULL, &blkno, &ncontig, NULL);
		if (error)
			break;

		if (blkno == -1)
			break;

		/*
		 * We could throttle ncontig here by maxra but we might as
		 * well read the data if it is contiguous.  We're throttled
		 * by racluster anyway.
		 */
		if (ncontig) {
			ncontig = min(ncontig + 1, racluster);
			rbp = cluster_rbuild(vp, filesize, lblkno, blkno,
			    size, ncontig, gbflags, NULL);
			lblkno += (rbp->b_bufsize / size);
			if (rbp->b_flags & B_DELWRI) {
				bqrelse(rbp);
				continue;
			}
		} else {
			rbp = getblk(vp, lblkno, size, 0, 0, gbflags);
			lblkno += 1;
			if (rbp->b_flags & B_DELWRI) {
				bqrelse(rbp);
				continue;
			}
			rbp->b_flags |= B_ASYNC | B_RAM;
			rbp->b_iocmd = BIO_READ;
			rbp->b_blkno = blkno;
		}
		if (rbp->b_flags & B_CACHE) {
			rbp->b_flags &= ~B_ASYNC;
			bqrelse(rbp);
			continue;
		}
		if ((rbp->b_flags & B_CLUSTER) == 0) {
			vfs_busy_pages(rbp, 0);
		}
		rbp->b_flags &= ~B_INVAL;
		rbp->b_ioflags &= ~BIO_ERROR;
		if ((rbp->b_flags & B_ASYNC) || rbp->b_iodone != NULL)
			BUF_KERNPROC(rbp);
		rbp->b_iooffset = dbtob(rbp->b_blkno);
		bstrategy(rbp);
#ifdef RACCT
		if (racct_enable) {
			PROC_LOCK(td->td_proc);
			racct_add_buf(td->td_proc, rbp, 0);
			PROC_UNLOCK(td->td_proc);
		}
#endif /* RACCT */
		td->td_ru.ru_inblock++;
	}

	if (reqbp) {
		/*
		 * Like bread, always brelse() the buffer when
		 * returning an error.
		 */
		error = bufwait(reqbp);
		if (error != 0) {
			brelse(reqbp);
			*bpp = NULL;
		}
	}
	return (error);
}

/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 */
static struct buf *
cluster_rbuild(struct vnode *vp, u_quad_t filesize, daddr_t lbn,
    daddr_t blkno, long size, int run, int gbflags, struct buf *fbp)
{
	struct buf *bp, *tbp;
	daddr_t bn;
	off_t off;
	long tinc, tsize;
	int i, inc, j, k, toff;

	KASSERT(size == vp->v_mount->mnt_stat.f_iosize,
	    ("cluster_rbuild: size %ld != f_iosize %jd\n",
	    size, (intmax_t)vp->v_mount->mnt_stat.f_iosize));

	/*
	 * Trim the run so that the cluster does not extend past the end
	 * of the file (done with a loop to avoid a division).
	 */
	while ((u_quad_t) size * (lbn + run) > filesize) {
		--run;
	}

	if (fbp) {
		tbp = fbp;
		tbp->b_iocmd = BIO_READ;
	} else {
		tbp = getblk(vp, lbn, size, 0, 0, gbflags);
		if (tbp->b_flags & B_CACHE)
			return (tbp);
		tbp->b_flags |= B_ASYNC | B_RAM;
		tbp->b_iocmd = BIO_READ;
	}
	tbp->b_blkno = blkno;
	if ((tbp->b_flags & B_MALLOC) ||
	    ((tbp->b_flags & B_VMIO) == 0) || (run <= 1))
		return (tbp);

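	/*
	 * Grab a pbuf to describe the whole cluster.  trypbuf() does not
	 * sleep waiting for a free pbuf; if none is available we simply
	 * fall back to reading only the single requested block.
	 */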
	bp = trypbuf(&cluster_pbuf_freecnt);
	if (bp == NULL)
		return (tbp);

	/*
	 * We are synthesizing a buffer out of vm_page_t's, but
	 * if the block size is not page aligned then the starting
	 * address may not be either.  Inherit the b_data offset
	 * from the original buffer.
	 */
	bp->b_flags = B_ASYNC | B_CLUSTER | B_VMIO;
	if ((gbflags & GB_UNMAPPED) != 0) {
		bp->b_data = unmapped_buf;
	} else {
		bp->b_data = (char *)((vm_offset_t)bp->b_data |
		    ((vm_offset_t)tbp->b_data & PAGE_MASK));
	}
	bp->b_iocmd = BIO_READ;
	bp->b_iodone = cluster_callback;
	bp->b_blkno = blkno;
	bp->b_lblkno = lbn;
	bp->b_offset = tbp->b_offset;
	KASSERT(bp->b_offset != NOOFFSET, ("cluster_rbuild: no buffer offset"));
	pbgetvp(vp, bp);

	TAILQ_INIT(&bp->b_cluster.cluster_head);

	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_npages = 0;

	inc = btodb(size);
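	/* Disk address increment, in DEV_BSIZE units, per logical block. */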
	for (bn = blkno, i = 0; i < run; ++i, bn += inc) {
		if (i == 0) {
			VM_OBJECT_WLOCK(tbp->b_bufobj->bo_object);
			vfs_drain_busy_pages(tbp);
			vm_object_pip_add(tbp->b_bufobj->bo_object,
			    tbp->b_npages);
			for (k = 0; k < tbp->b_npages; k++)
				vm_page_sbusy(tbp->b_pages[k]);
			VM_OBJECT_WUNLOCK(tbp->b_bufobj->bo_object);
		} else {
			if ((bp->b_npages * PAGE_SIZE) +
			    round_page(size) > vp->v_mount->mnt_iosize_max) {
				break;
			}

			tbp = getblk(vp, lbn + i, size, 0, 0, GB_LOCK_NOWAIT |
			    (gbflags & GB_UNMAPPED));

			/* Don't wait around for locked bufs. */
			if (tbp == NULL)
				break;

			/*
			 * Stop scanning if the buffer is fully valid
			 * (marked B_CACHE), or locked (may be doing a
			 * background write), or if the buffer is not
			 * VMIO backed.  The clustering code can only deal
			 * with VMIO-backed buffers.  The bo lock is not
			 * required for the BKGRDINPROG check since it
			 * can not be set without the buf lock.
			 */
			if ((tbp->b_vflags & BV_BKGRDINPROG) ||
			    (tbp->b_flags & B_CACHE) ||
			    (tbp->b_flags & B_VMIO) == 0) {
				bqrelse(tbp);
				break;
			}

			/*
			 * The buffer must be completely invalid in order to
			 * take part in the cluster.  If it is partially valid
			 * then we stop.
			 */
			off = tbp->b_offset;
			tsize = size;
			VM_OBJECT_WLOCK(tbp->b_bufobj->bo_object);
			for (j = 0; tsize > 0; j++) {
				toff = off & PAGE_MASK;
				tinc = tsize;
				if (toff + tinc > PAGE_SIZE)
					tinc = PAGE_SIZE - toff;
				VM_OBJECT_ASSERT_WLOCKED(tbp->b_pages[j]->object);
				if ((tbp->b_pages[j]->valid &
				    vm_page_bits(toff, tinc)) != 0)
					break;
				if (vm_page_xbusied(tbp->b_pages[j]))
					break;
				vm_object_pip_add(tbp->b_bufobj->bo_object, 1);
				vm_page_sbusy(tbp->b_pages[j]);
				off += tinc;
				tsize -= tinc;
			}
			if (tsize > 0) {
clean_sbusy:
				vm_object_pip_add(tbp->b_bufobj->bo_object, -j);
				for (k = 0; k < j; k++)
					vm_page_sunbusy(tbp->b_pages[k]);
				VM_OBJECT_WUNLOCK(tbp->b_bufobj->bo_object);
				bqrelse(tbp);
				break;
			}
			VM_OBJECT_WUNLOCK(tbp->b_bufobj->bo_object);

			/*
			 * Set a read-ahead mark as appropriate
			 */
			if ((fbp && (i == 1)) || (i == (run - 1)))
				tbp->b_flags |= B_RAM;

			/*
			 * Set the buffer up for an async read (XXX should
			 * we do this only if we do not wind up brelse()ing?).
			 * Set the block number if it isn't set; otherwise
			 * make sure it matches the block number we expect.
			 */
			tbp->b_flags |= B_ASYNC;
			tbp->b_iocmd = BIO_READ;
			if (tbp->b_blkno == tbp->b_lblkno) {
				tbp->b_blkno = bn;
			} else if (tbp->b_blkno != bn) {
				VM_OBJECT_WLOCK(tbp->b_bufobj->bo_object);
				goto clean_sbusy;
			}
		}
		/*
		 * XXX fbp from caller may not be B_ASYNC, but we are going
		 * to biodone() it in cluster_callback() anyway
		 */
		BUF_KERNPROC(tbp);
		TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
		    tbp, b_cluster.cluster_entry);
		VM_OBJECT_WLOCK(tbp->b_bufobj->bo_object);
		for (j = 0; j < tbp->b_npages; j += 1) {
			vm_page_t m;
			m = tbp->b_pages[j];
			if ((bp->b_npages == 0) ||
			    (bp->b_pages[bp->b_npages-1] != m)) {
				bp->b_pages[bp->b_npages] = m;
				bp->b_npages++;
			}
			if (m->valid == VM_PAGE_BITS_ALL)
				tbp->b_pages[j] = bogus_page;
		}
		VM_OBJECT_WUNLOCK(tbp->b_bufobj->bo_object);
		/*
		 * Don't inherit tbp->b_bufsize as it may be larger due to
		 * a non-page-aligned size.  Instead just aggregate using
		 * 'size'.
		 */
		if (tbp->b_bcount != size)
			printf("warning: tbp->b_bcount wrong %ld vs %ld\n",
			    tbp->b_bcount, size);
		if (tbp->b_bufsize != size)
			printf("warning: tbp->b_bufsize wrong %ld vs %ld\n",
			    tbp->b_bufsize, size);
		bp->b_bcount += size;
		bp->b_bufsize += size;
	}

	/*
	 * Fully valid pages in the cluster are already good and do not need
	 * to be re-read from disk.  Replace the page with bogus_page.
	 */
	VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
	for (j = 0; j < bp->b_npages; j++) {
		VM_OBJECT_ASSERT_WLOCKED(bp->b_pages[j]->object);
		if (bp->b_pages[j]->valid == VM_PAGE_BITS_ALL)
			bp->b_pages[j] = bogus_page;
	}
	VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
	if (bp->b_bufsize > bp->b_kvasize)
		panic("cluster_rbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
		    bp->b_bufsize, bp->b_kvasize);

	if (buf_mapped(bp)) {
		pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
		    (vm_page_t *)bp->b_pages, bp->b_npages);
	}
	return (bp);
}

/*
 * Clean up after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
 */
static void
cluster_callback(struct buf *bp)
{
	struct buf *nbp, *tbp;
	int error = 0;

	/*
	 * Must propagate errors to all the components.
	 */
	if (bp->b_ioflags & BIO_ERROR)
		error = bp->b_error;

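	/*
	 * Tear down the temporary kernel mappings that cluster_rbuild()
	 * or cluster_wbuild() set up with pmap_qenter() for the pbuf.
	 */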
	if (buf_mapped(bp)) {
		pmap_qremove(trunc_page((vm_offset_t) bp->b_data),
		    bp->b_npages);
	}
	/*
	 * Move memory from the large cluster buffer into the component
	 * buffers and mark IO as done on these.
	 */
	for (tbp = TAILQ_FIRST(&bp->b_cluster.cluster_head);
	     tbp; tbp = nbp) {
		nbp = TAILQ_NEXT(&tbp->b_cluster, cluster_entry);
		if (error) {
			tbp->b_ioflags |= BIO_ERROR;
			tbp->b_error = error;
		} else {
			tbp->b_dirtyoff = tbp->b_dirtyend = 0;
			tbp->b_flags &= ~B_INVAL;
			tbp->b_ioflags &= ~BIO_ERROR;
			/*
			 * XXX the bdwrite()/bqrelse() issued during
			 * cluster building clears B_RELBUF (see bqrelse()
			 * comment).  If direct I/O was specified, we have
			 * to restore it here to allow the buffer and VM
			 * to be freed.
			 */
			if (tbp->b_flags & B_DIRECT)
				tbp->b_flags |= B_RELBUF;
		}
		bufdone(tbp);
	}
	pbrelvp(bp);
	relpbuf(bp, &cluster_pbuf_freecnt);
}

/*
 * cluster_wbuild_wb:
 *
 *	Implement modified write build for cluster.
 *
 *	write_behind = 0	write behind disabled
 *	write_behind = 1	write behind normal (default)
 *	write_behind = 2	write behind backed-off
 */

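/*
 * In the backed-off mode (write_behind == 2) the request is shifted one
 * window back: nothing is written until at least `len' blocks precede
 * start_lbn, after which the window of `len' blocks just before the
 * original start is pushed instead of the current one.
 */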
static __inline int
cluster_wbuild_wb(struct vnode *vp, long size, daddr_t start_lbn, int len,
    int gbflags)
{
	int r = 0;

	switch (write_behind) {
	case 2:
		if (start_lbn < len)
			break;
		start_lbn -= len;
		/* FALLTHROUGH */
	case 1:
		r = cluster_wbuild(vp, size, start_lbn, len, gbflags);
		/* FALLTHROUGH */
	default:
		/* FALLTHROUGH */
		break;
	}
	return (r);
}

/*
 * Do clustered write for FFS.
 *
 * Four cases:
 *	1. Write is not sequential (write asynchronously)
 *	Write is sequential:
 *	2.	beginning of cluster - begin cluster
 *	3.	middle of a cluster - add to cluster
 *	4.	end of a cluster - asynchronously write cluster
 */
void
cluster_write(struct vnode *vp, struct buf *bp, u_quad_t filesize, int seqcount,
    int gbflags)
{
	daddr_t lbn;
	int maxclen, cursize;
	int lblocksize;
	int async;

	if (!unmapped_buf_allowed)
		gbflags &= ~GB_UNMAPPED;

	if (vp->v_type == VREG) {
		async = DOINGASYNC(vp);
		lblocksize = vp->v_mount->mnt_stat.f_iosize;
	} else {
		async = 0;
		lblocksize = bp->b_bufsize;
	}
	lbn = bp->b_lblkno;
	KASSERT(bp->b_offset != NOOFFSET, ("cluster_write: no buffer offset"));

	/* Initialize vnode to beginning of file. */
	if (lbn == 0)
		vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;

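	/*
	 * Clustering state is kept in the vnode: v_cstart is the logical
	 * block at which the current write cluster starts, v_clen is the
	 * number of blocks the cluster may still grow beyond v_cstart,
	 * v_lastw is the last logical block written and v_lasta the disk
	 * address of that last write.
	 */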
	if (vp->v_clen == 0 || lbn != vp->v_lastw + 1 ||
	    (bp->b_blkno != vp->v_lasta + btodb(lblocksize))) {
		maxclen = vp->v_mount->mnt_iosize_max / lblocksize - 1;
		if (vp->v_clen != 0) {
			/*
			 * Next block is not sequential.
			 *
			 * If we are not writing at end of file, the process
			 * has seeked to another point in the file since its
			 * last write, or we have reached our maximum cluster
			 * size, then push the previous cluster.  Otherwise
			 * try reallocating to make it sequential.
			 *
			 * Change to the algorithm: only push the previous
			 * cluster if it was sequential from the point of
			 * view of the seqcount heuristic, otherwise leave
			 * the buffer intact so we can potentially optimize
			 * the I/O later on in the buf_daemon or update
			 * daemon flush.
			 */
			cursize = vp->v_lastw - vp->v_cstart + 1;
			if (((u_quad_t) bp->b_offset + lblocksize) != filesize ||
			    lbn != vp->v_lastw + 1 || vp->v_clen <= cursize) {
				if (!async && seqcount > 0) {
					cluster_wbuild_wb(vp, lblocksize,
					    vp->v_cstart, cursize, gbflags);
				}
			} else {
				struct buf **bpp, **endbp;
				struct cluster_save *buflist;

				buflist = cluster_collectbufs(vp, bp, gbflags);
				if (buflist == NULL) {
					/*
					 * Cluster build failed so just write
					 * it now.
					 */
					bawrite(bp);
					return;
				}
				endbp = &buflist->bs_children
				    [buflist->bs_nchildren - 1];
				if (VOP_REALLOCBLKS(vp, buflist)) {
					/*
					 * Failed, push the previous cluster
					 * if *really* writing sequentially
					 * in the logical file (seqcount > 1),
					 * otherwise delay it in the hopes that
					 * the low level disk driver can
					 * optimize the write ordering.
					 */
					for (bpp = buflist->bs_children;
					     bpp < endbp; bpp++)
						brelse(*bpp);
					free(buflist, M_SEGMENT);
					if (seqcount > 1) {
						cluster_wbuild_wb(vp,
						    lblocksize, vp->v_cstart,
						    cursize, gbflags);
					}
				} else {
					/*
					 * Succeeded, keep building cluster.
					 */
					for (bpp = buflist->bs_children;
					     bpp <= endbp; bpp++)
						bdwrite(*bpp);
					free(buflist, M_SEGMENT);
					vp->v_lastw = lbn;
					vp->v_lasta = bp->b_blkno;
					return;
				}
			}
		}
		/*
		 * Consider beginning a cluster.  If at end of file, make
		 * cluster as large as possible, otherwise find size of
		 * existing cluster.
		 */
		if ((vp->v_type == VREG) &&
		    ((u_quad_t) bp->b_offset + lblocksize) != filesize &&
		    (bp->b_blkno == bp->b_lblkno) &&
		    (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen, NULL) ||
		     bp->b_blkno == -1)) {
			bawrite(bp);
			vp->v_clen = 0;
			vp->v_lasta = bp->b_blkno;
			vp->v_cstart = lbn + 1;
			vp->v_lastw = lbn;
			return;
		}
		vp->v_clen = maxclen;
		if (!async && maxclen == 0) {	/* I/O not contiguous */
			vp->v_cstart = lbn + 1;
			bawrite(bp);
		} else {	/* Wait for rest of cluster */
			vp->v_cstart = lbn;
			bdwrite(bp);
		}
	} else if (lbn == vp->v_cstart + vp->v_clen) {
		/*
		 * At end of cluster, write it out if seqcount tells us we
		 * are operating sequentially, otherwise let the buf or
		 * update daemon handle it.
		 */
		bdwrite(bp);
		if (seqcount > 1) {
			cluster_wbuild_wb(vp, lblocksize, vp->v_cstart,
			    vp->v_clen + 1, gbflags);
		}
		vp->v_clen = 0;
		vp->v_cstart = lbn + 1;
	} else if (vm_page_count_severe()) {
		/*
		 * We are low on memory, get it going NOW
		 */
		bawrite(bp);
	} else {
		/*
		 * In the middle of a cluster, so just delay the I/O for now.
		 */
		bdwrite(bp);
	}
	vp->v_lastw = lbn;
	vp->v_lasta = bp->b_blkno;
}


/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * The last lbn argument is the current block on which I/O is being
 * performed.  Check to see that it doesn't fall in the middle of
 * the current block (if last_bp == NULL).
 */
int
cluster_wbuild(struct vnode *vp, long size, daddr_t start_lbn, int len,
    int gbflags)
{
	struct buf *bp, *tbp;
	struct bufobj *bo;
	int i, j;
	int totalwritten = 0;
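	/* One logical block expressed in DEV_BSIZE disk blocks. */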
	int dbsize = btodb(size);

	if (!unmapped_buf_allowed)
		gbflags &= ~GB_UNMAPPED;

	bo = &vp->v_bufobj;
	while (len > 0) {
		/*
		 * If the buffer is not delayed-write (i.e. dirty), or it
		 * is delayed-write but either locked or inval, it cannot
		 * partake in the clustered write.
		 */
		BO_LOCK(bo);
		if ((tbp = gbincore(&vp->v_bufobj, start_lbn)) == NULL ||
		    (tbp->b_vflags & BV_BKGRDINPROG)) {
			BO_UNLOCK(bo);
			++start_lbn;
			--len;
			continue;
		}
		if (BUF_LOCK(tbp,
		    LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, BO_LOCKPTR(bo))) {
			++start_lbn;
			--len;
			continue;
		}
		if ((tbp->b_flags & (B_INVAL | B_DELWRI)) != B_DELWRI) {
			BUF_UNLOCK(tbp);
			++start_lbn;
			--len;
			continue;
		}
		bremfree(tbp);
		tbp->b_flags &= ~B_DONE;

		/*
		 * Extra memory in the buffer, punt on this buffer.
		 * XXX we could handle this in most cases, but we would
		 * have to push the extra memory down to after our max
		 * possible cluster size and then potentially pull it back
		 * up if the cluster was terminated prematurely--too much
		 * hassle.
		 */
		if (((tbp->b_flags & (B_CLUSTEROK | B_MALLOC | B_VMIO)) !=
		    (B_CLUSTEROK | B_VMIO)) ||
		    (tbp->b_bcount != tbp->b_bufsize) ||
		    (tbp->b_bcount != size) ||
		    (len == 1) ||
		    ((bp = (vp->v_vflag & VV_MD) != 0 ?
		    trypbuf(&cluster_pbuf_freecnt) :
		    getpbuf(&cluster_pbuf_freecnt)) == NULL)) {
			totalwritten += tbp->b_bufsize;
			bawrite(tbp);
			++start_lbn;
			--len;
			continue;
		}

		/*
		 * We got a pbuf to make the cluster in, so initialize it.
		 */
		TAILQ_INIT(&bp->b_cluster.cluster_head);
		bp->b_bcount = 0;
		bp->b_bufsize = 0;
		bp->b_npages = 0;
		if (tbp->b_wcred != NOCRED)
			bp->b_wcred = crhold(tbp->b_wcred);

		bp->b_blkno = tbp->b_blkno;
		bp->b_lblkno = tbp->b_lblkno;
		bp->b_offset = tbp->b_offset;

		/*
		 * We are synthesizing a buffer out of vm_page_t's, but
		 * if the block size is not page aligned then the starting
		 * address may not be either.  Inherit the b_data offset
		 * from the original buffer.
		 */
		if ((gbflags & GB_UNMAPPED) == 0 ||
		    (tbp->b_flags & B_VMIO) == 0) {
			bp->b_data = (char *)((vm_offset_t)bp->b_data |
			    ((vm_offset_t)tbp->b_data & PAGE_MASK));
		} else {
			bp->b_data = unmapped_buf;
		}
		bp->b_flags |= B_CLUSTER | (tbp->b_flags & (B_VMIO |
		    B_NEEDCOMMIT));
		bp->b_iodone = cluster_callback;
		pbgetvp(vp, bp);
		/*
		 * From this location in the file, scan forward to see
		 * if there are buffers with adjacent data that need to
		 * be written as well.
		 */
		for (i = 0; i < len; ++i, ++start_lbn) {
			if (i != 0) { /* If not the first buffer */
				/*
				 * If the adjacent data is not even in core it
				 * can't need to be written.
				 */
				BO_LOCK(bo);
				if ((tbp = gbincore(bo, start_lbn)) == NULL ||
				    (tbp->b_vflags & BV_BKGRDINPROG)) {
					BO_UNLOCK(bo);
					break;
				}

				/*
				 * If it IS in core, but has different
				 * characteristics, or is locked (which
				 * means it could be undergoing a background
				 * I/O or be in a weird state), then don't
				 * cluster with it.
				 */
				if (BUF_LOCK(tbp,
				    LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK,
				    BO_LOCKPTR(bo)))
					break;

				if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
				    B_INVAL | B_DELWRI | B_NEEDCOMMIT))
				    != (B_DELWRI | B_CLUSTEROK |
				    (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
				    tbp->b_wcred != bp->b_wcred) {
					BUF_UNLOCK(tbp);
					break;
				}

				/*
				 * Check that the combined cluster
				 * would make sense with regard to pages
				 * and would not be too large
				 */
				if ((tbp->b_bcount != size) ||
				    ((bp->b_blkno + (dbsize * i)) !=
				    tbp->b_blkno) ||
				    ((tbp->b_npages + bp->b_npages) >
				    (vp->v_mount->mnt_iosize_max / PAGE_SIZE))) {
					BUF_UNLOCK(tbp);
					break;
				}

				/*
				 * Ok, it's passed all the tests,
				 * so remove it from the free list
				 * and mark it busy. We will use it.
				 */
				bremfree(tbp);
				tbp->b_flags &= ~B_DONE;
			} /* end of code for non-first buffers only */
			/*
			 * If the IO is via the VM then we do some
			 * special VM hackery (yuck).  Since the buffer's
			 * block size may not be page-aligned it is possible
			 * for a page to be shared between two buffers.  We
			 * have to get rid of the duplication when building
			 * the cluster.
			 */
			if (tbp->b_flags & B_VMIO) {
				vm_page_t m;

				VM_OBJECT_WLOCK(tbp->b_bufobj->bo_object);
				if (i == 0) {
					vfs_drain_busy_pages(tbp);
				} else { /* if not first buffer */
					for (j = 0; j < tbp->b_npages; j += 1) {
						m = tbp->b_pages[j];
						if (vm_page_xbusied(m)) {
							VM_OBJECT_WUNLOCK(
							    tbp->b_bufobj->bo_object);
							bqrelse(tbp);
							goto finishcluster;
						}
					}
				}
				for (j = 0; j < tbp->b_npages; j += 1) {
					m = tbp->b_pages[j];
					vm_page_sbusy(m);
					vm_object_pip_add(m->object, 1);
					if ((bp->b_npages == 0) ||
					    (bp->b_pages[bp->b_npages - 1] != m)) {
						bp->b_pages[bp->b_npages] = m;
						bp->b_npages++;
					}
				}
				VM_OBJECT_WUNLOCK(tbp->b_bufobj->bo_object);
			}
			bp->b_bcount += size;
			bp->b_bufsize += size;
			/*
			 * If any of the clustered buffers have their
			 * B_BARRIER flag set, transfer that request to
			 * the cluster.
			 */
			bp->b_flags |= (tbp->b_flags & B_BARRIER);
			tbp->b_flags &= ~(B_DONE | B_BARRIER);
			tbp->b_flags |= B_ASYNC;
			tbp->b_ioflags &= ~BIO_ERROR;
			tbp->b_iocmd = BIO_WRITE;
			bundirty(tbp);
			reassignbuf(tbp);	/* put on clean list */
			bufobj_wref(tbp->b_bufobj);
			BUF_KERNPROC(tbp);
			buf_track(tbp, __func__);
			TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
			    tbp, b_cluster.cluster_entry);
		}
	finishcluster:
		if (buf_mapped(bp)) {
			pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
			    (vm_page_t *)bp->b_pages, bp->b_npages);
		}
		if (bp->b_bufsize > bp->b_kvasize)
			panic(
			    "cluster_wbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
			    bp->b_bufsize, bp->b_kvasize);
		totalwritten += bp->b_bufsize;
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = bp->b_bufsize;
		bawrite(bp);

		len -= i;
	}
	return (totalwritten);
}

/*
 * Collect together all the buffers in a cluster, plus add one
 * additional buffer.
 */
static struct cluster_save *
cluster_collectbufs(struct vnode *vp, struct buf *last_bp, int gbflags)
{
	struct cluster_save *buflist;
	struct buf *bp;
	daddr_t lbn;
	int i, j, len, error;

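	/*
	 * A single allocation holds both the cluster_save header and the
	 * bs_children pointer array; bs_children points just past the
	 * header, so one free() releases everything.
	 */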
	len = vp->v_lastw - vp->v_cstart + 1;
	buflist = malloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
	    M_SEGMENT, M_WAITOK);
	buflist->bs_nchildren = 0;
	buflist->bs_children = (struct buf **) (buflist + 1);
	for (lbn = vp->v_cstart, i = 0; i < len; lbn++, i++) {
		error = bread_gb(vp, lbn, last_bp->b_bcount, NOCRED,
		    gbflags, &bp);
		if (error != 0) {
			/*
			 * If read fails, release collected buffers
			 * and return failure.
			 */
			for (j = 0; j < i; j++)
				brelse(buflist->bs_children[j]);
			free(buflist, M_SEGMENT);
			return (NULL);
		}
		buflist->bs_children[i] = bp;
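		/*
		 * A b_blkno still equal to b_lblkno means the buffer has not
		 * been translated to a disk address yet; resolve it now,
		 * presumably so VOP_REALLOCBLKS() can work with physical
		 * block numbers.
		 */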
		if (bp->b_blkno == bp->b_lblkno)
			VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno,
			    NULL, NULL);
	}
	buflist->bs_children[i] = bp = last_bp;
	if (bp->b_blkno == bp->b_lblkno)
		VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
	buflist->bs_nchildren = i + 1;
	return (buflist);
}