/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <rpc/rpcclnt.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfsclient/nfs.h>
#include <nfsclient/nfsmount.h>
#include <nfsclient/nfsnode.h>

#include <nfs4client/nfs4.h>

/*
 * Just call nfs_writebp() with the force argument set to 1.
 *
 * NOTE: B_DONE may or may not be set in a_bp on call.
 */
static int
nfs4_bwrite(struct buf *bp)
{

	return (nfs4_writebp(bp, 1, curthread));
}

static int
nfs_bwrite(struct buf *bp)
{

	return (nfs_writebp(bp, 1, curthread));
}

struct buf_ops buf_ops_nfs4 = {
	"buf_ops_nfs4",
	nfs4_bwrite
};

struct buf_ops buf_ops_nfs = {
	"buf_ops_nfs",
	nfs_bwrite
};

static struct buf *nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size,
		    struct thread *td);
/*
 * Vnode op for VM getpages.
 */
int
nfs_getpages(struct vop_getpages_args *ap)
{
	int i, error, nextoff, size, toff, count, npages;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct nfsmount *nmp;
	vm_object_t object;
	vm_page_t *pages;

	GIANT_REQUIRED;

	vp = ap->a_vp;
	td = curthread;				/* XXX */
	cred = curthread->td_ucred;		/* XXX */
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;

	if ((object = vp->v_object) == NULL) {
		printf("nfs_getpages: called with non-merged cache vnode??\n");
		return VM_PAGER_ERROR;
	}

	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		/* We'll never get here for v4, because we always have fsinfo */
		(void)nfs_fsinfo(nmp, vp, cred, td);
	}

	npages = btoc(count);

	/*
	 * If the requested page is partially valid, just return it and
	 * allow the pager to zero-out the blanks.  Partially valid pages
	 * can only occur at the file EOF.
	 */

	{
		vm_page_t m = pages[ap->a_reqpage];

		VM_OBJECT_LOCK(object);
		vm_page_lock_queues();
		if (m->valid != 0) {
			/* handled by vm_fault now	  */
			/* vm_page_zero_invalid(m, TRUE); */
			for (i = 0; i < npages; ++i) {
				if (i != ap->a_reqpage)
					vm_page_free(pages[i]);
			}
			vm_page_unlock_queues();
			VM_OBJECT_UNLOCK(object);
			return(0);
		}
		vm_page_unlock_queues();
		VM_OBJECT_UNLOCK(object);
	}
	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = getpbuf(&nfs_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	cnt.v_vnodein++;
	cnt.v_vnodepgsin += npages;

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = (nmp->nm_rpcops->nr_readrpc)(vp, &uio, cred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &nfs_pbuf_freecnt);

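	/*
	 * A totally failed read (error with nothing transferred): free
	 * every page except the one actually requested; the VM system
	 * disposes of that one when we return VM_PAGER_ERROR.
	 */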
	if (error && (uio.uio_resid == count)) {
		printf("nfs_getpages: error %d\n", error);
		VM_OBJECT_LOCK(object);
		vm_page_lock_queues();
		for (i = 0; i < npages; ++i) {
			if (i != ap->a_reqpage)
				vm_page_free(pages[i]);
		}
		vm_page_unlock_queues();
		VM_OBJECT_UNLOCK(object);
		return VM_PAGER_ERROR;
	}

	/*
	 * Calculate the number of bytes read and validate only that number
	 * of bytes.  Note that due to pending writes, size may be 0.  This
	 * does not mean that the remaining data is invalid!
	 */

	size = count - uio.uio_resid;
	VM_OBJECT_LOCK(object);
	vm_page_lock_queues();
	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page
			 */
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(m);
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			m->valid = 0;
			vm_page_set_validclean(m, 0, size - toff);
			/* handled by vm_fault now	  */
			/* vm_page_zero_invalid(m, TRUE); */
		} else {
			/*
			 * Read operation was short.  If no error occurred
			 * we may have hit a zero-fill section.  We simply
			 * leave valid set to 0.
			 */
			;
		}
		if (i != ap->a_reqpage) {
			/*
			 * Whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere (it already is in the object).  Empirical
			 * results suggest that deactivating pages works best.
			 */

			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error) {
				if (m->flags & PG_WANTED)
					vm_page_activate(m);
				else
					vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vm_page_free(m);
			}
		}
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(object);
	return 0;
}

/*
 * Vnode op for VM putpages.
 */
int
nfs_putpages(struct vop_putpages_args *ap)
{
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int iomode, must_commit, i, error, npages, count;
	off_t offset;
	int *rtvals;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct nfsmount *nmp;
	struct nfsnode *np;
	vm_page_t *pages;

	GIANT_REQUIRED;

	vp = ap->a_vp;
	np = VTONFS(vp);
	td = curthread;				/* XXX */
	cred = curthread->td_ucred;		/* XXX */
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);
	offset = IDX_TO_OFF(pages[0]->pindex);

	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		(void)nfs_fsinfo(nmp, vp, cred, td);
	}

	for (i = 0; i < npages; i++)
		rtvals[i] = VM_PAGER_AGAIN;

	/*
	 * When putting pages, do not extend file past EOF.
	 */

	if (offset + count > np->n_size) {
		count = np->n_size - offset;
		if (count < 0)
			count = 0;
	}
	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = getpbuf(&nfs_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	cnt.v_vnodeout++;
	cnt.v_vnodepgsout += npages;	/* v_vnodepgsout counts pages, not bytes */

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = offset;
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;

	if ((ap->a_sync & VM_PAGER_PUT_SYNC) == 0)
		iomode = NFSV3WRITE_UNSTABLE;
	else
		iomode = NFSV3WRITE_FILESYNC;

	error = (nmp->nm_rpcops->nr_writerpc)(vp, &uio, cred, &iomode, &must_commit);

	pmap_qremove(kva, npages);
	relpbuf(bp, &nfs_pbuf_freecnt);

	if (!error) {
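		/*
		 * round_page() rounds the residual byte count up, so a
		 * final partially-written page is marked clean along with
		 * the fully-written ones.
		 */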
		int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE;
		for (i = 0; i < nwritten; i++) {
			rtvals[i] = VM_PAGER_OK;
			vm_page_undirty(pages[i]);
		}
		if (must_commit) {
			nfs_clearcommit(vp->v_mount);
		}
	}
	return rtvals[0];
}

/*
 * Vnode op for read using bio
 */
int
nfs_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
{
	struct nfsnode *np = VTONFS(vp);
	int biosize, i;
	struct buf *bp = 0, *rabp;
	struct vattr vattr;
	struct thread *td;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn, rabn;
	int bcount;
	int seqcount;
	int nra, error = 0, n = 0, on = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("nfs_read mode");
#endif
	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0)	/* XXX VDIR cookies can be negative */
		return (EINVAL);
	td = uio->uio_td;

	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0)
		(void)nfs_fsinfo(nmp, vp, cred, td);
	if (vp->v_type != VDIR &&
	    (uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
		return (EFBIG);
	biosize = vp->v_mount->mnt_stat.f_iosize;
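	/*
	 * The upper bits of ioflag carry the caller's sequential-access
	 * heuristic; scale it into a block count used to clamp read-ahead
	 * below.
	 */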
	seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / BKVASIZE);
	/*
	 * For nfs, cache consistency can only be maintained approximately.
	 * Although RFC1094 does not specify the criteria, the following is
	 * believed to be compatible with the reference port.
	 * For nfs:
	 * If the file's modify time on the server has changed since the
	 * last read rpc or you have written to the file,
	 * you may have lost data cache consistency with the
	 * server, so flush all of the file's data out of the cache.
	 * Then force a getattr rpc to ensure that you have up to date
	 * attributes.
	 * NB: This implies that cache data can be read when up to
	 * NFS_ATTRTIMEO seconds out of date. If you find that you need current
	 * attributes this could be forced by setting n_attrstamp to 0 before
	 * the VOP_GETATTR() call.
	 */
	if (np->n_flag & NMODIFIED) {
		if (vp->v_type != VREG) {
			if (vp->v_type != VDIR)
				panic("nfs: bioread, not dir");
			(nmp->nm_rpcops->nr_invaldir)(vp);
			error = nfs_vinvalbuf(vp, V_SAVE, cred, td, 1);
			if (error)
				return (error);
		}
		np->n_attrstamp = 0;
		error = VOP_GETATTR(vp, &vattr, cred, td);
		if (error)
			return (error);
		np->n_mtime = vattr.va_mtime.tv_sec;
	} else {
		error = VOP_GETATTR(vp, &vattr, cred, td);
		if (error)
			return (error);
		if ((np->n_flag & NSIZECHANGED)
		    || (np->n_mtime != vattr.va_mtime.tv_sec)) {
			if (vp->v_type == VDIR)
				(nmp->nm_rpcops->nr_invaldir)(vp);
			error = nfs_vinvalbuf(vp, V_SAVE, cred, td, 1);
			if (error)
				return (error);
			np->n_mtime = vattr.va_mtime.tv_sec;
			np->n_flag &= ~NSIZECHANGED;
		}
	}
	do {
	    switch (vp->v_type) {
	    case VREG:
		nfsstats.biocache_reads++;
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize - 1);

		/*
		 * Start the read ahead(s), as required.
		 */
		if (nmp->nm_readahead > 0) {
		    for (nra = 0; nra < nmp->nm_readahead && nra < seqcount &&
			(off_t)(lbn + 1 + nra) * biosize < np->n_size; nra++) {
			rabn = lbn + 1 + nra;
			if (incore(vp, rabn) == NULL) {
			    rabp = nfs_getcacheblk(vp, rabn, biosize, td);
			    if (!rabp) {
				error = nfs_sigintr(nmp, NULL, td);
				return (error ? error : EINTR);
			    }
			    if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
				rabp->b_flags |= B_ASYNC;
				rabp->b_iocmd = BIO_READ;
				vfs_busy_pages(rabp, 0);
				if (nfs_asyncio(rabp, cred, td)) {
				    rabp->b_flags |= B_INVAL;
				    rabp->b_ioflags |= BIO_ERROR;
				    vfs_unbusy_pages(rabp);
				    brelse(rabp);
				    break;
				}
			    } else {
				brelse(rabp);
			    }
			}
		    }
		}

		/*
		 * Obtain the buffer cache block.  Figure out the buffer size
		 * when we are at EOF.  If we are modifying the size of the
		 * buffer based on an EOF condition we need to hold
		 * nfs_rslock() through obtaining the buffer to prevent
		 * a potential writer-appender from messing with n_size.
		 * Otherwise we may accidentally truncate the buffer and
		 * lose dirty data.
		 *
		 * Note that bcount is *not* DEV_BSIZE aligned.
		 */

again:
		bcount = biosize;
		if ((off_t)lbn * biosize >= np->n_size) {
			bcount = 0;
		} else if ((off_t)(lbn + 1) * biosize > np->n_size) {
			bcount = np->n_size - (off_t)lbn * biosize;
		}
		if (bcount != biosize) {
			switch(nfs_rslock(np, td)) {
			case ENOLCK:
				goto again;
				/* not reached */
			case EIO:
				return (EIO);
			case EINTR:
			case ERESTART:
				return(EINTR);
				/* not reached */
			default:
				break;
			}
		}

		bp = nfs_getcacheblk(vp, lbn, bcount, td);

		if (bcount != biosize)
			nfs_rsunlock(np, td);
		if (!bp) {
			error = nfs_sigintr(nmp, NULL, td);
			return (error ? error : EINTR);
		}

		/*
		 * If B_CACHE is not set, we must issue the read.  If this
		 * fails, we return an error.
		 */

		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_iocmd = BIO_READ;
		    vfs_busy_pages(bp, 0);
		    error = nfs_doio(bp, cred, td);
		    if (error) {
			brelse(bp);
			return (error);
		    }
		}

		/*
		 * on is the offset into the current bp.  Figure out how many
		 * bytes we can copy out of the bp.  Note that bcount is
		 * NOT DEV_BSIZE aligned.
		 *
		 * Then figure out how many bytes we can copy into the uio.
		 */

		n = 0;
		if (on < bcount)
			n = min((unsigned)(bcount - on), uio->uio_resid);
		break;
	    case VLNK:
		nfsstats.biocache_readlinks++;
		bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, td);
		if (!bp) {
			error = nfs_sigintr(nmp, NULL, td);
			return (error ? error : EINTR);
		}
		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_iocmd = BIO_READ;
		    vfs_busy_pages(bp, 0);
		    error = nfs_doio(bp, cred, td);
		    if (error) {
			bp->b_ioflags |= BIO_ERROR;
			brelse(bp);
			return (error);
		    }
		}
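		/*
		 * b_resid is the part of the NFS_MAXPATHLEN buffer the
		 * readlink reply did not fill, so the difference is the
		 * actual length of the link target.
		 */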
		n = min(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
		on = 0;
		break;
	    case VDIR:
		nfsstats.biocache_readdirs++;
		if (np->n_direofoffset
		    && uio->uio_offset >= np->n_direofoffset) {
			return (0);
		}
		lbn = (uoff_t)uio->uio_offset / NFS_DIRBLKSIZ;
		on = uio->uio_offset & (NFS_DIRBLKSIZ - 1);
		bp = nfs_getcacheblk(vp, lbn, NFS_DIRBLKSIZ, td);
		if (!bp) {
			error = nfs_sigintr(nmp, NULL, td);
			return (error ? error : EINTR);
		}
		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_iocmd = BIO_READ;
		    vfs_busy_pages(bp, 0);
		    error = nfs_doio(bp, cred, td);
		    if (error) {
			    brelse(bp);
		    }
		    while (error == NFSERR_BAD_COOKIE) {
			(nmp->nm_rpcops->nr_invaldir)(vp);
			error = nfs_vinvalbuf(vp, 0, cred, td, 1);
			/*
			 * Yuck! The directory has been modified on the
			 * server. The only way to get the block is by
			 * reading from the beginning to get all the
			 * offset cookies.
			 *
			 * Leave the last bp intact unless there is an error.
			 * Loop back up to the while if the error is another
			 * NFSERR_BAD_COOKIE (double yuck!).
			 */
			for (i = 0; i <= lbn && !error; i++) {
			    if (np->n_direofoffset
				&& (i * NFS_DIRBLKSIZ) >= np->n_direofoffset)
				    return (0);
			    bp = nfs_getcacheblk(vp, i, NFS_DIRBLKSIZ, td);
			    if (!bp) {
				error = nfs_sigintr(nmp, NULL, td);
				return (error ? error : EINTR);
			    }
			    if ((bp->b_flags & B_CACHE) == 0) {
				    bp->b_iocmd = BIO_READ;
				    vfs_busy_pages(bp, 0);
				    error = nfs_doio(bp, cred, td);
				    /*
				     * no error + B_INVAL == directory EOF,
				     * use the block.
				     */
				    if (error == 0 && (bp->b_flags & B_INVAL))
					    break;
			    }
			    /*
			     * An error will throw away the block and the
			     * for loop will break out.  If no error and this
			     * is not the block we want, we throw away the
			     * block and go for the next one via the for loop.
			     */
			    if (error || i < lbn)
				    brelse(bp);
			}
		    }
		    /*
		     * The above while is repeated if we hit another cookie
		     * error.  If we hit an error and it wasn't a cookie error,
		     * we give up.
		     */
		    if (error)
			    return (error);
		}

		/*
		 * If not eof and read aheads are enabled, start one.
		 * (You need the current block first, so that you have the
		 *  directory offset cookie of the next block.)
		 */
		if (nmp->nm_readahead > 0 &&
		    (bp->b_flags & B_INVAL) == 0 &&
		    (np->n_direofoffset == 0 ||
		    (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) &&
		    incore(vp, lbn + 1) == NULL) {
			rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, td);
			if (rabp) {
			    if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
				rabp->b_flags |= B_ASYNC;
				rabp->b_iocmd = BIO_READ;
				vfs_busy_pages(rabp, 0);
				if (nfs_asyncio(rabp, cred, td)) {
				    rabp->b_flags |= B_INVAL;
				    rabp->b_ioflags |= BIO_ERROR;
				    vfs_unbusy_pages(rabp);
				    brelse(rabp);
				}
			    } else {
				brelse(rabp);
			    }
			}
		}
		/*
		 * Unlike VREG files, whose buffer size (bp->b_bcount) is
		 * chopped for the EOF condition, we cannot tell how large
		 * NFS directories are going to be until we hit EOF.  So
		 * an NFS directory buffer is *not* chopped to its EOF.  Now,
		 * it just so happens that b_resid will effectively chop it
		 * to EOF.  *BUT* this information is lost if the buffer goes
		 * away and is reconstituted into a B_CACHE state (due to
		 * being VMIO) later.  So we keep track of the directory eof
		 * in np->n_direofoffset and chop it off as an extra step
		 * right here.
		 */
		n = lmin(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid - on);
		if (np->n_direofoffset && n > np->n_direofoffset - uio->uio_offset)
			n = np->n_direofoffset - uio->uio_offset;
		break;
	    default:
		printf(" nfs_bioread: type %x unexpected\n", vp->v_type);
		break;
	    }

	    if (n > 0) {
		    error = uiomove(bp->b_data + on, (int)n, uio);
	    }
	    switch (vp->v_type) {
	    case VREG:
		break;
	    case VLNK:
		n = 0;
		break;
	    case VDIR:
		break;
	    default:
		printf(" nfs_bioread: type %x unexpected\n", vp->v_type);
	    }
	    brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
	return (error);
}

/*
 * Vnode op for write using bio
 */
int
nfs_write(struct vop_write_args *ap)
{
	int biosize;
	struct uio *uio = ap->a_uio;
	struct thread *td = uio->uio_td;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct ucred *cred = ap->a_cred;
	int ioflag = ap->a_ioflag;
	struct buf *bp;
	struct vattr vattr;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn;
	int bcount;
	int n, on, error = 0;
	int haverslock = 0;
	struct proc *p = td ? td->td_proc : NULL;

	GIANT_REQUIRED;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("nfs_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_td != curthread)
		panic("nfs_write proc");
#endif
	if (vp->v_type != VREG)
		return (EIO);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		return (np->n_error);
	}
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0)
		(void)nfs_fsinfo(nmp, vp, cred, td);

	/*
	 * Synchronously flush pending buffers if we are in synchronous
	 * mode or if we are appending.
	 */
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
#ifdef notyet /* Needs matching nonblock semantics elsewhere, too. */
			/*
			 * Require non-blocking, synchronous writes to
			 * dirty files to inform the program it needs
			 * to fsync(2) explicitly.
			 */
			if (ioflag & IO_NDELAY)
				return (EAGAIN);
#endif
flush_and_restart:
			np->n_attrstamp = 0;
			error = nfs_vinvalbuf(vp, V_SAVE, cred, td, 1);
			if (error)
				return (error);
		}
	}

	/*
	 * If IO_APPEND then load uio_offset.  We restart here if we cannot
	 * get the append lock.
	 */
restart:
	if (ioflag & IO_APPEND) {
		np->n_attrstamp = 0;
		error = VOP_GETATTR(vp, &vattr, cred, td);
		if (error)
			return (error);
		uio->uio_offset = np->n_size;
	}

	if (uio->uio_offset < 0)
		return (EINVAL);
	if ((uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
		return (EFBIG);
	if (uio->uio_resid == 0)
		return (0);
	/*
	 * We need to obtain the rslock if we intend to modify np->n_size
	 * in order to guarantee the append point with multiple contending
	 * writers, to guarantee that no other appenders modify n_size
	 * while we are trying to obtain a truncated buffer (i.e. to avoid
	 * accidentally truncating data written by another appender due to
	 * the race), and to ensure that the buffer is populated prior to
	 * our extending of the file.  We hold rslock through the entire
	 * operation.
	 *
	 * Note that we do not synchronize the case where someone truncates
	 * the file while we are appending to it because attempting to lock
	 * this case may deadlock other parts of the system unexpectedly.
	 */
	if ((ioflag & IO_APPEND) ||
	    uio->uio_offset + uio->uio_resid > np->n_size) {
		switch(nfs_rslock(np, td)) {
		case ENOLCK:
			goto restart;
			/* not reached */
		case EIO:
			return (EIO);
		case EINTR:
		case ERESTART:
			return(EINTR);
			/* not reached */
		default:
			break;
		}
		haverslock = 1;
	}

	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	if (p != NULL) {
		PROC_LOCK(p);
		if (uio->uio_offset + uio->uio_resid >
		    lim_cur(p, RLIMIT_FSIZE)) {
			psignal(p, SIGXFSZ);
			PROC_UNLOCK(p);
			if (haverslock)
				nfs_rsunlock(np, td);
			return (EFBIG);
		}
		PROC_UNLOCK(p);
	}

	biosize = vp->v_mount->mnt_stat.f_iosize;
	/*
	 * Find all of this file's B_NEEDCOMMIT buffers.  If our writes
	 * would exceed the local maximum per-file write commit size when
	 * combined with those, we must decide whether to flush,
	 * go synchronous, or return error.
	 */
	if (!(ioflag & IO_SYNC)) {
		int needrestart = 0;
		if (nmp->nm_wcommitsize < uio->uio_resid) {
			/*
			 * If this request could not possibly be completed
			 * without exceeding the maximum outstanding write
			 * commit size, see if we can convert it into a
			 * synchronous write operation.
			 */
			if (ioflag & IO_NDELAY)
				return (EAGAIN);
			ioflag |= IO_SYNC;
			if (np->n_flag & NMODIFIED)
				needrestart = 1;
		} else if (np->n_flag & NMODIFIED) {
			int wouldcommit = 0;
			VI_LOCK(vp);
			TAILQ_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
				if (bp->b_flags & B_NEEDCOMMIT)
					wouldcommit += bp->b_bcount;
			}
			VI_UNLOCK(vp);
			/*
			 * Since we're not operating synchronously and
			 * bypassing the buffer cache, we are in a commit
			 * and holding all of these buffers whether
			 * transmitted or not.  If not limited, this
			 * will lead to the buffer cache deadlocking,
			 * as no one else can flush our uncommitted buffers.
			 */
			wouldcommit += uio->uio_resid;
			/*
			 * If we would initially exceed the maximum
			 * outstanding write commit size, flush and restart.
			 */
			if (wouldcommit > nmp->nm_wcommitsize)
				needrestart = 1;
		}
		if (needrestart) {
			if (haverslock) {
				nfs_rsunlock(np, td);
				haverslock = 0;
			}
			goto flush_and_restart;
		}
	}

	do {
		nfsstats.biocache_writes++;
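		/*
		 * Carve the write into biosize-aligned pieces: lbn is the
		 * logical block number, on the offset within that block,
		 * and n the number of bytes to move this iteration.
		 */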
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize - 1);
		n = min((unsigned)(biosize - on), uio->uio_resid);
again:
		/*
		 * Handle direct append and file extension cases, calculate
		 * unaligned buffer size.
		 */

		if (uio->uio_offset == np->n_size && n) {
			/*
			 * Get the buffer (in its pre-append state to maintain
			 * B_CACHE if it was previously set).  Resize the
			 * nfsnode after we have locked the buffer to prevent
			 * readers from reading garbage.
			 */
			bcount = on;
			bp = nfs_getcacheblk(vp, lbn, bcount, td);

			if (bp != NULL) {
				long save;

				np->n_size = uio->uio_offset + n;
				np->n_flag |= NMODIFIED;
				vnode_pager_setsize(vp, np->n_size);

				save = bp->b_flags & B_CACHE;
				bcount += n;
				allocbuf(bp, bcount);
				bp->b_flags |= save;
				bp->b_magic = B_MAGIC_NFS;
				if ((nmp->nm_flag & NFSMNT_NFSV4) != 0)
					bp->b_op = &buf_ops_nfs4;
				else
					bp->b_op = &buf_ops_nfs;
			}
		} else {
			/*
			 * Obtain the locked cache block first, and then
			 * adjust the file's size as appropriate.
			 */
			bcount = on + n;
			if ((off_t)lbn * biosize + bcount < np->n_size) {
				if ((off_t)(lbn + 1) * biosize < np->n_size)
					bcount = biosize;
				else
					bcount = np->n_size - (off_t)lbn * biosize;
			}
			bp = nfs_getcacheblk(vp, lbn, bcount, td);
			if (uio->uio_offset + n > np->n_size) {
				np->n_size = uio->uio_offset + n;
				np->n_flag |= NMODIFIED;
				vnode_pager_setsize(vp, np->n_size);
			}
		}

		if (!bp) {
			error = nfs_sigintr(nmp, NULL, td);
			if (!error)
				error = EINTR;
			break;
		}

		/*
		 * Issue a READ if B_CACHE is not set.  In special-append
		 * mode, B_CACHE is based on the buffer prior to the write
		 * op and is typically set, avoiding the read.  If a read
		 * is required in special append mode, the server will
		 * probably send us a short-read since we extended the file
		 * on our end, resulting in b_resid == 0 and thus
		 * B_CACHE being set.
		 *
		 * We can also avoid issuing the read if the write covers
		 * the entire buffer.  We have to make sure the buffer state
		 * is reasonable in this case since we will not be initiating
		 * I/O.  See the comments in kern/vfs_bio.c's getblk() for
		 * more information.
		 *
		 * B_CACHE may also be set due to the buffer being cached
		 * normally.
		 */

		if (on == 0 && n == bcount) {
			bp->b_flags |= B_CACHE;
			bp->b_flags &= ~B_INVAL;
			bp->b_ioflags &= ~BIO_ERROR;
		}

		if ((bp->b_flags & B_CACHE) == 0) {
			bp->b_iocmd = BIO_READ;
			vfs_busy_pages(bp, 0);
			error = nfs_doio(bp, cred, td);
			if (error) {
				brelse(bp);
				break;
			}
		}
		if (bp->b_wcred == NOCRED)
			bp->b_wcred = crhold(cred);
		np->n_flag |= NMODIFIED;

		/*
		 * If dirtyend exceeds file size, chop it down.  This should
		 * not normally occur but there is an append race where it
		 * might occur XXX, so we log it.
		 *
		 * If the chopping creates a reverse-indexed or degenerate
		 * situation with dirtyoff/end, we 0 both of them.
		 */

		if (bp->b_dirtyend > bcount) {
			printf("NFS append race @%lx:%d\n",
			    (long)bp->b_blkno * DEV_BSIZE,
			    bp->b_dirtyend - bcount);
			bp->b_dirtyend = bcount;
		}

		if (bp->b_dirtyoff >= bp->b_dirtyend)
			bp->b_dirtyoff = bp->b_dirtyend = 0;

		/*
		 * If the new write will leave a contiguous dirty
		 * area, just update the b_dirtyoff and b_dirtyend,
		 * otherwise force a write rpc of the old dirty area.
		 *
		 * While it is possible to merge discontiguous writes due to
		 * our having a B_CACHE buffer (and thus valid read data
		 * for the hole), we don't because it could lead to
		 * significant cache coherency problems with multiple clients,
		 * especially if locking is implemented later on.
		 *
		 * As an optimization we could theoretically maintain
		 * a linked list of discontinuous areas, but we would still
		 * have to commit them separately so there isn't much
		 * advantage to it except perhaps a bit of asynchronization.
		 */

		if (bp->b_dirtyend > 0 &&
		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
			if (bwrite(bp) == EINTR) {
				error = EINTR;
				break;
			}
			goto again;
		}

		error = uiomove((char *)bp->b_data + on, n, uio);

		/*
		 * Since this block is being modified, it must be written
		 * again and not just committed.  Since write clustering does
		 * not work for the stage 1 data write, only the stage 2
		 * commit rpc, we have to clear B_CLUSTEROK as well.
		 */
		bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);

		if (error) {
			bp->b_ioflags |= BIO_ERROR;
			brelse(bp);
			break;
		}

		/*
		 * Only update dirtyoff/dirtyend if not a degenerate
		 * condition.
		 */
		if (n) {
			if (bp->b_dirtyend > 0) {
				bp->b_dirtyoff = min(on, bp->b_dirtyoff);
				bp->b_dirtyend = max((on + n), bp->b_dirtyend);
			} else {
				bp->b_dirtyoff = on;
				bp->b_dirtyend = on + n;
			}
			vfs_bio_set_validclean(bp, on, n);
		}

		/*
		 * If IO_SYNC do bwrite().
		 *
		 * IO_INVAL appears to be unused.  The idea appears to be
		 * to turn off caching in this case.  Very odd.  XXX
		 */
		if ((ioflag & IO_SYNC)) {
			if (ioflag & IO_INVAL)
				bp->b_flags |= B_NOCACHE;
			error = bwrite(bp);
			if (error)
				break;
		} else if ((n + on) == biosize) {
			bp->b_flags |= B_ASYNC;
			(void) (nmp->nm_rpcops->nr_writebp)(bp, 0, 0);
		} else {
			bdwrite(bp);
		}
	} while (uio->uio_resid > 0 && n > 0);

	if (haverslock)
		nfs_rsunlock(np, td);

	return (error);
}

/*
 * Get an nfs cache block.
 *
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy.  If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 *
 * The caller must carefully deal with the possible B_INVAL state of
 * the buffer.  nfs_doio() clears B_INVAL (and nfs_asyncio() clears it
 * indirectly), so synchronous reads can be issued without worrying about
 * the B_INVAL state.  We have to be a little more careful when dealing
 * with writes (see comments in nfs_write()) when extending a file past
 * its EOF.
 */
static struct buf *
nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct thread *td)
{
	struct buf *bp;
	struct mount *mp;
	struct nfsmount *nmp;

	mp = vp->v_mount;
	nmp = VFSTONFS(mp);

	if (nmp->nm_flag & NFSMNT_INT) {
		bp = getblk(vp, bn, size, PCATCH, 0, 0);
		while (bp == NULL) {
			if (nfs_sigintr(nmp, NULL, td))
				return (NULL);
			bp = getblk(vp, bn, size, 0, 2 * hz, 0);
		}
	} else {
		bp = getblk(vp, bn, size, 0, 0, 0);
	}

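	/*
	 * For regular files bn is in biosize units; translate it into
	 * the DEV_BSIZE units that b_blkno and the rest of the buffer
	 * cache expect.
	 */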
	if (vp->v_type == VREG) {
		int biosize;

		biosize = mp->mnt_stat.f_iosize;
		bp->b_blkno = bn * (biosize / DEV_BSIZE);
	}
	return (bp);
}

/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 */
int
nfs_vinvalbuf(struct vnode *vp, int flags, struct ucred *cred,
    struct thread *td, int intrflg)
{
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, slpflag, slptimeo;

	ASSERT_VOP_LOCKED(vp, "nfs_vinvalbuf");

	/*
	 * XXX This check stops us from needlessly doing a vinvalbuf when
	 * being called through vclean().  It is not clear that this is
	 * unsafe.
	 */
	if (vp->v_iflag & VI_XLOCK)
		return (0);

	if ((nmp->nm_flag & NFSMNT_INT) == 0)
		intrflg = 0;
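	/*
	 * On interruptible mounts sleep with PCATCH and a bounded timeout
	 * so that a pending signal can break us out of a stuck flush.
	 */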
	if (intrflg) {
		slpflag = PCATCH;
		slptimeo = 2 * hz;
	} else {
		slpflag = 0;
		slptimeo = 0;
	}
	/*
	 * First wait for any other process doing a flush to complete.
	 */
	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		error = tsleep(&np->n_flag, PRIBIO + 2, "nfsvinval",
		    slptimeo);
		if (error && intrflg &&
		    nfs_sigintr(nmp, NULL, td))
			return (EINTR);
	}

	/*
	 * Now, flush as required.
	 */
	np->n_flag |= NFLUSHINPROG;
	error = vinvalbuf(vp, flags, cred, td, slpflag, 0);
	while (error) {
		if (intrflg && (error = nfs_sigintr(nmp, NULL, td))) {
			np->n_flag &= ~NFLUSHINPROG;
			if (np->n_flag & NFLUSHWANT) {
				np->n_flag &= ~NFLUSHWANT;
				wakeup(&np->n_flag);
			}
			return (error);
		}
		error = vinvalbuf(vp, flags, cred, td, 0, slptimeo);
	}
	np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup(&np->n_flag);
	}
	return (0);
}

/*
 * Initiate asynchronous I/O.  Return an error if no nfsiods are available.
 * This is mainly to avoid queueing async I/O requests when the nfsiods
 * are all hung on a dead server.
 *
 * Note: nfs_asyncio() does not clear (BIO_ERROR|B_INVAL) but when the bp
 * is eventually dequeued by the async daemon, nfs_doio() *will*.
 */
int
nfs_asyncio(struct buf *bp, struct ucred *cred, struct thread *td)
{
	struct nfsmount *nmp;
	int iod;
	int gotiod;
	int slpflag = 0;
	int slptimeo = 0;
	int error, error2;

	nmp = VFSTONFS(bp->b_vp->v_mount);

	/*
	 * Commits are usually short and sweet so let's save some cpu and
	 * leave the async daemons for more important rpc's (such as reads
	 * and writes).
	 */
	if (bp->b_iocmd == BIO_WRITE && (bp->b_flags & B_NEEDCOMMIT) &&
	    (nmp->nm_bufqiods > nfs_numasync / 2)) {
		return(EIO);
	}

again:
	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;
	gotiod = FALSE;

	/*
	 * Find a free iod to process this request.
	 */
	for (iod = 0; iod < nfs_numasync; iod++)
		if (nfs_iodwant[iod]) {
			gotiod = TRUE;
			break;
		}

	/*
	 * Try to create one if none are free.
	 */
	if (!gotiod) {
		iod = nfs_nfsiodnew();
		if (iod != -1)
			gotiod = TRUE;
	}

	if (gotiod) {
		/*
		 * Found one, so wake it up and tell it which
		 * mount to process.
		 */
		NFS_DPF(ASYNCIO, ("nfs_asyncio: waking iod %d for mount %p\n",
		    iod, nmp));
		nfs_iodwant[iod] = NULL;
		nfs_iodmount[iod] = nmp;
		nmp->nm_bufqiods++;
		wakeup(&nfs_iodwant[iod]);
	}

	/*
	 * If none are free, we may already have an iod working on this mount
	 * point.  If so, it will process our request.
	 */
	if (!gotiod) {
		if (nmp->nm_bufqiods > 0) {
			NFS_DPF(ASYNCIO,
			    ("nfs_asyncio: %d iods are already processing mount %p\n",
			    nmp->nm_bufqiods, nmp));
			gotiod = TRUE;
		}
	}

	/*
	 * If we have an iod which can process the request, then queue
	 * the buffer.
	 */
	if (gotiod) {
		/*
		 * Ensure that the queue never grows too large.  We still want
		 * to asynchronize so we block rather than return EIO.
		 */
		while (nmp->nm_bufqlen >= 2*nfs_numasync) {
			NFS_DPF(ASYNCIO,
			    ("nfs_asyncio: waiting for mount %p queue to drain\n", nmp));
			nmp->nm_bufqwant = TRUE;
			error = tsleep(&nmp->nm_bufq, slpflag | PRIBIO,
			    "nfsaio", slptimeo);
			if (error) {
				error2 = nfs_sigintr(nmp, NULL, td);
				if (error2)
					return (error2);
				if (slpflag == PCATCH) {
					slpflag = 0;
					slptimeo = 2 * hz;
				}
			}
			/*
			 * We might have lost our iod while sleeping,
			 * so check and loop if necessary.
			 */
			if (nmp->nm_bufqiods == 0) {
				NFS_DPF(ASYNCIO,
				    ("nfs_asyncio: no iods after mount %p queue was drained, looping\n", nmp));
				goto again;
			}
		}

		if (bp->b_iocmd == BIO_READ) {
			if (bp->b_rcred == NOCRED && cred != NOCRED)
				bp->b_rcred = crhold(cred);
		} else {
			bp->b_flags |= B_WRITEINPROG;
			if (bp->b_wcred == NOCRED && cred != NOCRED)
				bp->b_wcred = crhold(cred);
		}

		BUF_KERNPROC(bp);
		TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
		nmp->nm_bufqlen++;
		return (0);
	}

	/*
	 * All the iods are busy on other mounts, so return EIO to
	 * force the caller to process the i/o synchronously.
	 */
	NFS_DPF(ASYNCIO, ("nfs_asyncio: no iods available, i/o is synchronous\n"));
	return (EIO);
}

/*
 * Do an I/O operation to/from a cache block. This may be called
 * synchronously or from an nfsiod.
 */
int
nfs_doio(struct buf *bp, struct ucred *cr, struct thread *td)
{
	struct uio *uiop;
	struct vnode *vp;
	struct nfsnode *np;
	struct nfsmount *nmp;
	int error = 0, iomode, must_commit = 0;
	struct uio uio;
	struct iovec io;
	struct proc *p = td ? td->td_proc : NULL;

	vp = bp->b_vp;
	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);
	uiop = &uio;
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = td;

	/*
	 * clear BIO_ERROR and B_INVAL state prior to initiating the I/O.  We
	 * do this here so we do not have to do it in all the code that
	 * calls us.
	 */
	bp->b_flags &= ~B_INVAL;
	bp->b_ioflags &= ~BIO_ERROR;

	KASSERT(!(bp->b_flags & B_DONE), ("nfs_doio: bp %p already marked done", bp));

	if (bp->b_iocmd == BIO_READ) {
	    io.iov_len = uiop->uio_resid = bp->b_bcount;
	    io.iov_base = bp->b_data;
	    uiop->uio_rw = UIO_READ;

	    switch (vp->v_type) {
	    case VREG:
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
		nfsstats.read_bios++;
		error = (nmp->nm_rpcops->nr_readrpc)(vp, uiop, cr);

		if (!error) {
		    if (uiop->uio_resid) {
			/*
			 * If we had a short read with no error, we must have
			 * hit a file hole.  We should zero-fill the remainder.
			 * This can also occur if the server hits the file EOF.
			 *
			 * Holes used to be able to occur due to pending
			 * writes, but that is not possible any longer.
			 */
			int nread = bp->b_bcount - uiop->uio_resid;
			int left = uiop->uio_resid;

			if (left > 0)
				bzero((char *)bp->b_data + nread, left);
			uiop->uio_resid = 0;
		    }
		}
		/* ASSERT_VOP_LOCKED(vp, "nfs_doio"); */
		if (p && (vp->v_vflag & VV_TEXT) &&
		    (np->n_mtime != np->n_vattr.va_mtime.tv_sec)) {
			PROC_LOCK(p);
			killproc(p, "text file modification");
			PROC_UNLOCK(p);
		}
		break;
	    case VLNK:
		uiop->uio_offset = (off_t)0;
		nfsstats.readlink_bios++;
		error = (nmp->nm_rpcops->nr_readlinkrpc)(vp, uiop, cr);
		break;
	    case VDIR:
		nfsstats.readdir_bios++;
		uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ;
		if ((nmp->nm_flag & NFSMNT_NFSV4) != 0)
			error = nfs4_readdirrpc(vp, uiop, cr);
		else {
			if ((nmp->nm_flag & NFSMNT_RDIRPLUS) != 0) {
				error = nfs_readdirplusrpc(vp, uiop, cr);
				if (error == NFSERR_NOTSUPP)
					nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
			}
			if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
				error = nfs_readdirrpc(vp, uiop, cr);
		}
		/*
		 * end-of-directory sets B_INVAL but does not generate an
		 * error.
		 */
		if (error == 0 && uiop->uio_resid == bp->b_bcount)
			bp->b_flags |= B_INVAL;
		break;
	    default:
		printf("nfs_doio: type %x unexpected\n", vp->v_type);
		break;
	    }
	    if (error) {
		bp->b_ioflags |= BIO_ERROR;
		bp->b_error = error;
	    }
	} else {
	    /*
	     * If we only need to commit, try to commit
	     */
	    if (bp->b_flags & B_NEEDCOMMIT) {
		    int retv;
		    off_t off;

		    off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
		    bp->b_flags |= B_WRITEINPROG;
		    retv = (nmp->nm_rpcops->nr_commit)(
				bp->b_vp, off, bp->b_dirtyend-bp->b_dirtyoff,
				bp->b_wcred, td);
		    bp->b_flags &= ~B_WRITEINPROG;
		    if (retv == 0) {
			    bp->b_dirtyoff = bp->b_dirtyend = 0;
			    bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
			    bp->b_resid = 0;
			    bufdone(bp);
			    return (0);
		    }
		    if (retv == NFSERR_STALEWRITEVERF) {
			    nfs_clearcommit(bp->b_vp->v_mount);
		    }
	    }

	    /*
	     * Setup for actual write
	     */

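	    /*
	     * Clamp the dirty region so we never write past the file's
	     * current EOF; a truncate may have shrunk the file after the
	     * buffer was dirtied.
	     */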
	    if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > np->n_size)
		bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * DEV_BSIZE;

	    if (bp->b_dirtyend > bp->b_dirtyoff) {
		io.iov_len = uiop->uio_resid = bp->b_dirtyend
		    - bp->b_dirtyoff;
		uiop->uio_offset = (off_t)bp->b_blkno * DEV_BSIZE
		    + bp->b_dirtyoff;
		io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
		uiop->uio_rw = UIO_WRITE;
		nfsstats.write_bios++;

		if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == B_ASYNC)
		    iomode = NFSV3WRITE_UNSTABLE;
		else
		    iomode = NFSV3WRITE_FILESYNC;

		bp->b_flags |= B_WRITEINPROG;
		error = (nmp->nm_rpcops->nr_writerpc)(vp, uiop, cr, &iomode, &must_commit);

		/*
		 * When setting B_NEEDCOMMIT also set B_CLUSTEROK to try
		 * to cluster the buffers needing commit.  This will allow
		 * the system to submit a single commit rpc for the whole
		 * cluster.  We can do this even if the buffer is not 100%
		 * dirty (relative to the NFS blocksize), so we optimize the
		 * append-to-file-case.
		 *
		 * (when clearing B_NEEDCOMMIT, B_CLUSTEROK must also be
		 * cleared because write clustering only works for commit
		 * rpc's, not for the data portion of the write).
		 */

		if (!error && iomode == NFSV3WRITE_UNSTABLE) {
		    bp->b_flags |= B_NEEDCOMMIT;
		    if (bp->b_dirtyoff == 0
			&& bp->b_dirtyend == bp->b_bcount)
			bp->b_flags |= B_CLUSTEROK;
		} else {
		    bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
		}
		bp->b_flags &= ~B_WRITEINPROG;

		/*
		 * For an interrupted write, the buffer is still valid
		 * and the write hasn't been pushed to the server yet,
		 * so we can't set BIO_ERROR and report the interruption
		 * by setting B_EINTR.  For the B_ASYNC case, B_EINTR
		 * is not relevant, so the rpc attempt is essentially
		 * a noop.  For the case of a V3 write rpc not being
		 * committed to stable storage, the block is still
		 * dirty and requires either a commit rpc or another
		 * write rpc with iomode == NFSV3WRITE_FILESYNC before
		 * the block is reused. This is indicated by setting
		 * the B_DELWRI and B_NEEDCOMMIT flags.
		 *
		 * If the buffer is marked B_PAGING, it does not reside on
		 * the vp's paging queues so we cannot call bdirty().  The
		 * bp in this case is not an NFS cache block so we should
		 * be safe. XXX
		 */
		if (error == EINTR || error == EIO
		    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
			int s;

			s = splbio();
			bp->b_flags &= ~(B_INVAL|B_NOCACHE);
			if ((bp->b_flags & B_PAGING) == 0) {
			    bdirty(bp);
			    bp->b_flags &= ~B_DONE;
			}
			if (error && (bp->b_flags & B_ASYNC) == 0)
			    bp->b_flags |= B_EINTR;
			splx(s);
		} else {
		    if (error) {
			bp->b_ioflags |= BIO_ERROR;
			bp->b_error = np->n_error = error;
			np->n_flag |= NWRITEERR;
		    }
		    bp->b_dirtyoff = bp->b_dirtyend = 0;
		}
	    } else {
		bp->b_resid = 0;
		bufdone(bp);
		return (0);
	    }
	}
	bp->b_resid = uiop->uio_resid;
	if (must_commit)
		nfs_clearcommit(vp->v_mount);
	bufdone(bp);
	return (error);
}

/*
 * Used to aid in handling ftruncate() operations on the NFS client side.
 * Truncation creates a number of special problems for NFS.  We have to
 * throw away VM pages and buffer cache buffers that are beyond EOF, and
 * we have to properly handle VM pages or (potentially dirty) buffers
 * that straddle the truncation point.
 */

int
nfs_meta_setsize(struct vnode *vp, struct ucred *cred, struct thread *td, u_quad_t nsize)
{
	struct nfsnode *np = VTONFS(vp);
	u_quad_t tsize = np->n_size;
	int biosize = vp->v_mount->mnt_stat.f_iosize;
	int error = 0;

	np->n_size = nsize;

	if (np->n_size < tsize) {
		struct buf *bp;
		daddr_t lbn;
		int bufsize;

		/*
		 * vtruncbuf() doesn't get the buffer overlapping the
		 * truncation point.  We may have a B_DELWRI and/or B_CACHE
		 * buffer that now needs to be truncated.
		 */
		error = vtruncbuf(vp, cred, td, nsize, biosize);
		lbn = nsize / biosize;
		bufsize = nsize & (biosize - 1);
		bp = nfs_getcacheblk(vp, lbn, bufsize, td);
		/* nfs_getcacheblk() can return NULL on an interruptible mount. */
		if (bp != NULL) {
			if (bp->b_dirtyoff > bp->b_bcount)
				bp->b_dirtyoff = bp->b_bcount;
			if (bp->b_dirtyend > bp->b_bcount)
				bp->b_dirtyend = bp->b_bcount;
			bp->b_flags |= B_RELBUF;  /* don't leave garbage around */
			brelse(bp);
		}
	} else {
		vnode_pager_setsize(vp, nsize);
	}
	return(error);
}
