1 /*-
2 * Copyright (c) 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * Rick Macklem at The University of Guelph.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 4. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 * @(#)nfs_bio.c 8.9 (Berkeley) 3/30/95
33 */
34
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD: releng/6.3/sys/nfsclient/nfs_bio.c 171478 2007-07-17 21:02:08Z jhb $");
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/bio.h>
41 #include <sys/buf.h>
42 #include <sys/kernel.h>
43 #include <sys/mount.h>
44 #include <sys/proc.h>
45 #include <sys/resourcevar.h>
46 #include <sys/signalvar.h>
47 #include <sys/vmmeter.h>
48 #include <sys/vnode.h>
49
50 #include <vm/vm.h>
51 #include <vm/vm_extern.h>
52 #include <vm/vm_page.h>
53 #include <vm/vm_object.h>
54 #include <vm/vm_pager.h>
55 #include <vm/vnode_pager.h>
56
57 #include <rpc/rpcclnt.h>
58
59 #include <nfs/rpcv2.h>
60 #include <nfs/nfsproto.h>
61 #include <nfsclient/nfs.h>
62 #include <nfsclient/nfsmount.h>
63 #include <nfsclient/nfsnode.h>
64
65 #include <nfs4client/nfs4.h>
66
67 static struct buf *nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size,
68 struct thread *td);
69 static int nfs_directio_write(struct vnode *vp, struct uio *uiop,
70 struct ucred *cred, int ioflag);
71
72 extern int nfs_directio_enable;
73 extern int nfs_directio_allow_mmap;
74 /*
75 * Vnode op for VM getpages.
76 */
77 int
78 nfs_getpages(struct vop_getpages_args *ap)
79 {
80 int i, error, nextoff, size, toff, count, npages;
81 struct uio uio;
82 struct iovec iov;
83 vm_offset_t kva;
84 struct buf *bp;
85 struct vnode *vp;
86 struct thread *td;
87 struct ucred *cred;
88 struct nfsmount *nmp;
89 vm_object_t object;
90 vm_page_t *pages;
91 struct nfsnode *np;
92
93 GIANT_REQUIRED;
94
95 vp = ap->a_vp;
96 np = VTONFS(vp);
97 td = curthread; /* XXX */
98 cred = curthread->td_ucred; /* XXX */
99 nmp = VFSTONFS(vp->v_mount);
100 pages = ap->a_m;
101 count = ap->a_count;
102
103 if ((object = vp->v_object) == NULL) {
104 printf("nfs_getpages: called with non-merged cache vnode??\n");
105 return VM_PAGER_ERROR;
106 }
107
108 if (nfs_directio_enable && !nfs_directio_allow_mmap &&
109 (np->n_flag & NNONCACHE) &&
110 (vp->v_type == VREG)) {
111 printf("nfs_getpages: called on non-cacheable vnode??\n");
112 return VM_PAGER_ERROR;
113 }
114
115 if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
116 (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
117 /* We'll never get here for v4, because we always have fsinfo */
118 (void)nfs_fsinfo(nmp, vp, cred, td);
119 }
120
121 npages = btoc(count);
122
123 /*
124 * If the requested page is partially valid, just return it and
125 * allow the pager to zero-out the blanks. Partially valid pages
126 * can only occur at the file EOF.
127 */
128
129 {
130 vm_page_t m = pages[ap->a_reqpage];
131
132 VM_OBJECT_LOCK(object);
133 vm_page_lock_queues();
134 if (m->valid != 0) {
135 /* handled by vm_fault now */
136 /* vm_page_zero_invalid(m, TRUE); */
137 for (i = 0; i < npages; ++i) {
138 if (i != ap->a_reqpage)
139 vm_page_free(pages[i]);
140 }
141 vm_page_unlock_queues();
142 VM_OBJECT_UNLOCK(object);
143 return(0);
144 }
145 vm_page_unlock_queues();
146 VM_OBJECT_UNLOCK(object);
147 }
148
149 /*
150 * We use only the kva address for the buffer, but this is extremely
151 * convenient and fast.
152 */
153 bp = getpbuf(&nfs_pbuf_freecnt);
154
155 kva = (vm_offset_t) bp->b_data;
156 pmap_qenter(kva, pages, npages);
157 cnt.v_vnodein++;
158 cnt.v_vnodepgsin += npages;
159
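	/*
	 * Build a single-segment system-space uio over the pbuf's KVA that
	 * the pages were just mapped into, so the read RPC fills the pages
	 * directly.
	 */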
160 iov.iov_base = (caddr_t) kva;
161 iov.iov_len = count;
162 uio.uio_iov = &iov;
163 uio.uio_iovcnt = 1;
164 uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
165 uio.uio_resid = count;
166 uio.uio_segflg = UIO_SYSSPACE;
167 uio.uio_rw = UIO_READ;
168 uio.uio_td = td;
169
170 error = (nmp->nm_rpcops->nr_readrpc)(vp, &uio, cred);
171 pmap_qremove(kva, npages);
172
173 relpbuf(bp, &nfs_pbuf_freecnt);
174
175 if (error && (uio.uio_resid == count)) {
176 printf("nfs_getpages: error %d\n", error);
177 VM_OBJECT_LOCK(object);
178 vm_page_lock_queues();
179 for (i = 0; i < npages; ++i) {
180 if (i != ap->a_reqpage)
181 vm_page_free(pages[i]);
182 }
183 vm_page_unlock_queues();
184 VM_OBJECT_UNLOCK(object);
185 return VM_PAGER_ERROR;
186 }
187
188 /*
189 * Calculate the number of bytes read and validate only that number
190 * of bytes. Note that due to pending writes, size may be 0. This
191 * does not mean that the remaining data is invalid!
192 */
193
194 size = count - uio.uio_resid;
195 VM_OBJECT_LOCK(object);
196 vm_page_lock_queues();
197 for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
198 vm_page_t m;
199 nextoff = toff + PAGE_SIZE;
200 m = pages[i];
201
202 if (nextoff <= size) {
203 /*
204 * Read operation filled an entire page
205 */
206 m->valid = VM_PAGE_BITS_ALL;
207 vm_page_undirty(m);
208 } else if (size > toff) {
209 /*
210 * Read operation filled a partial page.
211 */
212 m->valid = 0;
213 vm_page_set_validclean(m, 0, size - toff);
214 /* handled by vm_fault now */
215 /* vm_page_zero_invalid(m, TRUE); */
216 } else {
217 /*
218 * Read operation was short. If no error occurred
219 * we may have hit a zero-fill section. We simply
220 * leave valid set to 0.
221 */
222 ;
223 }
224 if (i != ap->a_reqpage) {
225 /*
226 * Whether or not to leave the page activated is up in
227 * the air, but we should put the page on a page queue
228 * somewhere (it already is in the object). Empirical
229 * results suggest that deactivating pages
230 * is best.
231 */
232
233 /*
234 * Just in case someone was asking for this page we
235 * now tell them that it is ok to use.
236 */
237 if (!error) {
238 if (m->flags & PG_WANTED)
239 vm_page_activate(m);
240 else
241 vm_page_deactivate(m);
242 vm_page_wakeup(m);
243 } else {
244 vm_page_free(m);
245 }
246 }
247 }
248 vm_page_unlock_queues();
249 VM_OBJECT_UNLOCK(object);
250 return 0;
251 }
252
253 /*
254 * Vnode op for VM putpages.
255 */
256 int
257 nfs_putpages(struct vop_putpages_args *ap)
258 {
259 struct uio uio;
260 struct iovec iov;
261 vm_offset_t kva;
262 struct buf *bp;
263 int iomode, must_commit, i, error, npages, count;
264 off_t offset;
265 int *rtvals;
266 struct vnode *vp;
267 struct thread *td;
268 struct ucred *cred;
269 struct nfsmount *nmp;
270 struct nfsnode *np;
271 vm_page_t *pages;
272
273 GIANT_REQUIRED;
274
275 vp = ap->a_vp;
276 np = VTONFS(vp);
277 td = curthread; /* XXX */
278 cred = curthread->td_ucred; /* XXX */
279 nmp = VFSTONFS(vp->v_mount);
280 pages = ap->a_m;
281 count = ap->a_count;
282 rtvals = ap->a_rtvals;
283 npages = btoc(count);
284 offset = IDX_TO_OFF(pages[0]->pindex);
285
286 if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
287 (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
288 (void)nfs_fsinfo(nmp, vp, cred, td);
289 }
290
291 if (nfs_directio_enable && !nfs_directio_allow_mmap &&
292 (np->n_flag & NNONCACHE) && (vp->v_type == VREG))
293 printf("nfs_putpages: called on non-cacheable vnode??\n");
294
295 for (i = 0; i < npages; i++)
296 rtvals[i] = VM_PAGER_AGAIN;
297
298 /*
299 * When putting pages, do not extend file past EOF.
300 */
301
302 if (offset + count > np->n_size) {
303 count = np->n_size - offset;
304 if (count < 0)
305 count = 0;
306 }
307
308 /*
309 * We use only the kva address for the buffer, but this is extremely
310 * convenient and fast.
311 */
312 bp = getpbuf(&nfs_pbuf_freecnt);
313
314 kva = (vm_offset_t) bp->b_data;
315 pmap_qenter(kva, pages, npages);
316 cnt.v_vnodeout++;
317 cnt.v_vnodepgsout += count;
318
319 iov.iov_base = (caddr_t) kva;
320 iov.iov_len = count;
321 uio.uio_iov = &iov;
322 uio.uio_iovcnt = 1;
323 uio.uio_offset = offset;
324 uio.uio_resid = count;
325 uio.uio_segflg = UIO_SYSSPACE;
326 uio.uio_rw = UIO_WRITE;
327 uio.uio_td = td;
328
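	/*
	 * Use an unstable write unless the pager asked for a synchronous
	 * put, in which case the data must reach stable storage before the
	 * RPC returns.
	 */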
329 if ((ap->a_sync & VM_PAGER_PUT_SYNC) == 0)
330 iomode = NFSV3WRITE_UNSTABLE;
331 else
332 iomode = NFSV3WRITE_FILESYNC;
333
334 error = (nmp->nm_rpcops->nr_writerpc)(vp, &uio, cred, &iomode, &must_commit);
335
336 pmap_qremove(kva, npages);
337 relpbuf(bp, &nfs_pbuf_freecnt);
338
339 if (!error) {
340 int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE;
341 for (i = 0; i < nwritten; i++) {
342 rtvals[i] = VM_PAGER_OK;
343 vm_page_undirty(pages[i]);
344 }
345 if (must_commit) {
346 nfs_clearcommit(vp->v_mount);
347 }
348 }
349 return rtvals[0];
350 }
351
352 /*
353 * Vnode op for read using bio
354 */
355 int
356 nfs_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
357 {
358 struct nfsnode *np = VTONFS(vp);
359 int biosize, i;
360 struct buf *bp, *rabp;
361 struct vattr vattr;
362 struct thread *td;
363 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
364 daddr_t lbn, rabn;
365 int bcount;
366 int seqcount;
367 int nra, error = 0, n = 0, on = 0;
368
369 #ifdef DIAGNOSTIC
370 if (uio->uio_rw != UIO_READ)
371 panic("nfs_read mode");
372 #endif
373 if (uio->uio_resid == 0)
374 return (0);
375 if (uio->uio_offset < 0) /* XXX VDIR cookies can be negative */
376 return (EINVAL);
377 td = uio->uio_td;
378
379 if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
380 (nmp->nm_state & NFSSTA_GOTFSINFO) == 0)
381 (void)nfs_fsinfo(nmp, vp, cred, td);
382 if (vp->v_type != VDIR &&
383 (uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
384 return (EFBIG);
385
386 if (nfs_directio_enable && (ioflag & IO_DIRECT) && (vp->v_type == VREG))
387 /* No caching, no readaheads. Just read data into the user buffer */
388 return nfs_readrpc(vp, uio, cred);
389
390 biosize = vp->v_mount->mnt_stat.f_iosize;
391 seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / BKVASIZE);
392 /*
393 * For nfs, cache consistency can only be maintained approximately.
394 * Although RFC1094 does not specify the criteria, the following is
395 * believed to be compatible with the reference port.
396 * For nfs:
397 * If the file's modify time on the server has changed since the
398 * last read rpc or you have written to the file,
399 * you may have lost data cache consistency with the
400 * server, so flush all of the file's data out of the cache.
401 * Then force a getattr rpc to ensure that you have up to date
402 * attributes.
403 * NB: This implies that cache data can be read when up to
404 * NFS_ATTRTIMEO seconds out of date. If you find that you need current
405 * attributes this could be forced by setting n_attrstamp to 0 before
406 * the VOP_GETATTR() call.
407 */
408 if (np->n_flag & NMODIFIED) {
409 if (vp->v_type != VREG) {
410 if (vp->v_type != VDIR)
411 panic("nfs: bioread, not dir");
412 (nmp->nm_rpcops->nr_invaldir)(vp);
413 error = nfs_vinvalbuf(vp, V_SAVE, td, 1);
414 if (error)
415 return (error);
416 }
417 np->n_attrstamp = 0;
418 error = VOP_GETATTR(vp, &vattr, cred, td);
419 if (error)
420 return (error);
421 np->n_mtime = vattr.va_mtime;
422 } else {
423 error = VOP_GETATTR(vp, &vattr, cred, td);
424 if (error)
425 return (error);
426 if ((np->n_flag & NSIZECHANGED)
427 || (NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime))) {
428 if (vp->v_type == VDIR)
429 (nmp->nm_rpcops->nr_invaldir)(vp);
430 error = nfs_vinvalbuf(vp, V_SAVE, td, 1);
431 if (error)
432 return (error);
433 np->n_mtime = vattr.va_mtime;
434 np->n_flag &= ~NSIZECHANGED;
435 }
436 }
437 do {
438 switch (vp->v_type) {
439 case VREG:
440 nfsstats.biocache_reads++;
441 lbn = uio->uio_offset / biosize;
442 on = uio->uio_offset & (biosize - 1);
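			/*
			 * Example: with an 8192-byte biosize and uio_offset
			 * 10000, lbn is 1 and on is 1808, i.e. the read starts
			 * 1808 bytes into the second cache block.
			 */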
443
444 /*
445 * Start the read ahead(s), as required.
446 * The readahead is kicked off only if sequential access
447 * is detected, based on the readahead hint (ra_expect_lbn).
448 */
449 if (nmp->nm_readahead > 0 && np->ra_expect_lbn == lbn) {
450 for (nra = 0; nra < nmp->nm_readahead && nra < seqcount &&
451 (off_t)(lbn + 1 + nra) * biosize < np->n_size; nra++) {
452 rabn = lbn + 1 + nra;
453 if (incore(&vp->v_bufobj, rabn) == NULL) {
454 rabp = nfs_getcacheblk(vp, rabn, biosize, td);
455 if (!rabp) {
456 error = nfs_sigintr(nmp, NULL, td);
457 return (error ? error : EINTR);
458 }
459 if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
460 rabp->b_flags |= B_ASYNC;
461 rabp->b_iocmd = BIO_READ;
462 vfs_busy_pages(rabp, 0);
463 if (nfs_asyncio(nmp, rabp, cred, td)) {
464 rabp->b_flags |= B_INVAL;
465 rabp->b_ioflags |= BIO_ERROR;
466 vfs_unbusy_pages(rabp);
467 brelse(rabp);
468 break;
469 }
470 } else {
471 brelse(rabp);
472 }
473 }
474 }
475 np->ra_expect_lbn = lbn + 1;
476 }
477
478 /*
479 * Obtain the buffer cache block. Figure out the buffer size
480 * when we are at EOF. If we are modifying the size of the
481 * buffer based on an EOF condition we need to hold
482 * nfs_rslock() through obtaining the buffer to prevent
483 * a potential writer-appender from messing with n_size.
484 * Otherwise we may accidentally truncate the buffer and
485 * lose dirty data.
486 *
487 * Note that bcount is *not* DEV_BSIZE aligned.
488 */
489
490 again:
491 bcount = biosize;
492 if ((off_t)lbn * biosize >= np->n_size) {
493 bcount = 0;
494 } else if ((off_t)(lbn + 1) * biosize > np->n_size) {
495 bcount = np->n_size - (off_t)lbn * biosize;
496 }
497 if (bcount != biosize) {
498 switch(nfs_rslock(np, td)) {
499 case ENOLCK:
500 goto again;
501 /* not reached */
502 case EIO:
503 return (EIO);
504 case EINTR:
505 case ERESTART:
506 return(EINTR);
507 /* not reached */
508 default:
509 break;
510 }
511 }
512
513 bp = nfs_getcacheblk(vp, lbn, bcount, td);
514
515 if (bcount != biosize)
516 nfs_rsunlock(np, td);
517 if (!bp) {
518 error = nfs_sigintr(nmp, NULL, td);
519 return (error ? error : EINTR);
520 }
521
522 /*
523 * If B_CACHE is not set, we must issue the read. If this
524 * fails, we return an error.
525 */
526
527 if ((bp->b_flags & B_CACHE) == 0) {
528 bp->b_iocmd = BIO_READ;
529 vfs_busy_pages(bp, 0);
530 error = nfs_doio(vp, bp, cred, td);
531 if (error) {
532 brelse(bp);
533 return (error);
534 }
535 }
536
537 /*
538 * on is the offset into the current bp. Figure out how many
539 * bytes we can copy out of the bp. Note that bcount is
540 * NOT DEV_BSIZE aligned.
541 *
542 * Then figure out how many bytes we can copy into the uio.
543 */
544
545 n = 0;
546 if (on < bcount)
547 n = min((unsigned)(bcount - on), uio->uio_resid);
548 break;
549 case VLNK:
550 nfsstats.biocache_readlinks++;
551 bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, td);
552 if (!bp) {
553 error = nfs_sigintr(nmp, NULL, td);
554 return (error ? error : EINTR);
555 }
556 if ((bp->b_flags & B_CACHE) == 0) {
557 bp->b_iocmd = BIO_READ;
558 vfs_busy_pages(bp, 0);
559 error = nfs_doio(vp, bp, cred, td);
560 if (error) {
561 bp->b_ioflags |= BIO_ERROR;
562 brelse(bp);
563 return (error);
564 }
565 }
566 n = min(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
567 on = 0;
568 break;
569 case VDIR:
570 nfsstats.biocache_readdirs++;
571 if (np->n_direofoffset
572 && uio->uio_offset >= np->n_direofoffset) {
573 return (0);
574 }
575 lbn = (uoff_t)uio->uio_offset / NFS_DIRBLKSIZ;
576 on = uio->uio_offset & (NFS_DIRBLKSIZ - 1);
577 bp = nfs_getcacheblk(vp, lbn, NFS_DIRBLKSIZ, td);
578 if (!bp) {
579 error = nfs_sigintr(nmp, NULL, td);
580 return (error ? error : EINTR);
581 }
582 if ((bp->b_flags & B_CACHE) == 0) {
583 bp->b_iocmd = BIO_READ;
584 vfs_busy_pages(bp, 0);
585 error = nfs_doio(vp, bp, cred, td);
586 if (error) {
587 brelse(bp);
588 }
589 while (error == NFSERR_BAD_COOKIE) {
590 (nmp->nm_rpcops->nr_invaldir)(vp);
591 error = nfs_vinvalbuf(vp, 0, td, 1);
592 /*
593 * Yuck! The directory has been modified on the
594 * server. The only way to get the block is by
595 * reading from the beginning to get all the
596 * offset cookies.
597 *
598 * Leave the last bp intact unless there is an error.
599 * Loop back up to the while if the error is another
600 * NFSERR_BAD_COOKIE (double yuck!).
601 */
602 for (i = 0; i <= lbn && !error; i++) {
603 if (np->n_direofoffset
604 && (i * NFS_DIRBLKSIZ) >= np->n_direofoffset)
605 return (0);
606 bp = nfs_getcacheblk(vp, i, NFS_DIRBLKSIZ, td);
607 if (!bp) {
608 error = nfs_sigintr(nmp, NULL, td);
609 return (error ? error : EINTR);
610 }
611 if ((bp->b_flags & B_CACHE) == 0) {
612 bp->b_iocmd = BIO_READ;
613 vfs_busy_pages(bp, 0);
614 error = nfs_doio(vp, bp, cred, td);
615 /*
616 * no error + B_INVAL == directory EOF,
617 * use the block.
618 */
619 if (error == 0 && (bp->b_flags & B_INVAL))
620 break;
621 }
622 /*
623 * An error will throw away the block and the
624 * for loop will break out. If no error and this
625 * is not the block we want, we throw away the
626 * block and go for the next one via the for loop.
627 */
628 if (error || i < lbn)
629 brelse(bp);
630 }
631 }
632 /*
633 * The above while is repeated if we hit another cookie
634 * error. If we hit an error and it wasn't a cookie error,
635 * we give up.
636 */
637 if (error)
638 return (error);
639 }
640
641 /*
642 * If not eof and read aheads are enabled, start one.
643 * (You need the current block first, so that you have the
644 * directory offset cookie of the next block.)
645 */
646 if (nmp->nm_readahead > 0 &&
647 (bp->b_flags & B_INVAL) == 0 &&
648 (np->n_direofoffset == 0 ||
649 (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) &&
650 incore(&vp->v_bufobj, lbn + 1) == NULL) {
651 rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, td);
652 if (rabp) {
653 if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
654 rabp->b_flags |= B_ASYNC;
655 rabp->b_iocmd = BIO_READ;
656 vfs_busy_pages(rabp, 0);
657 if (nfs_asyncio(nmp, rabp, cred, td)) {
658 rabp->b_flags |= B_INVAL;
659 rabp->b_ioflags |= BIO_ERROR;
660 vfs_unbusy_pages(rabp);
661 brelse(rabp);
662 }
663 } else {
664 brelse(rabp);
665 }
666 }
667 }
668 /*
669 * Unlike VREG files, whose buffer size ( bp->b_bcount ) is
670 * chopped for the EOF condition, we cannot tell how large
671 * NFS directories are going to be until we hit EOF. So
672 * an NFS directory buffer is *not* chopped to its EOF. Now,
673 * it just so happens that b_resid will effectively chop it
674 * to EOF. *BUT* this information is lost if the buffer goes
675 * away and is reconstituted into a B_CACHE state ( due to
676 * being VMIO ) later. So we keep track of the directory eof
677 * in np->n_direofoffset and chop it off as an extra step
678 * right here.
679 */
680 n = lmin(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid - on);
681 if (np->n_direofoffset && n > np->n_direofoffset - uio->uio_offset)
682 n = np->n_direofoffset - uio->uio_offset;
683 break;
684 default:
685 printf(" nfs_bioread: type %x unexpected\n", vp->v_type);
686 bp = NULL;
687 break;
688 };
689
690 if (n > 0) {
691 error = uiomove(bp->b_data + on, (int)n, uio);
692 }
693 if (vp->v_type == VLNK)
694 n = 0;
695 if (bp != NULL)
696 brelse(bp);
697 } while (error == 0 && uio->uio_resid > 0 && n > 0);
698 return (error);
699 }
700
701 /*
702 * The NFS write path cannot handle iovecs with len > 1. So we need to
703 * break up iovecs accordingly (restricting them to wsize).
704 * For the SYNC case, we can do this with 1 copy (user buffer -> mbuf).
705 * For the ASYNC case, 2 copies are needed. The first a copy from the
706 * user buffer to a staging buffer and then a second copy from the staging
707 * buffer to mbufs. This can be optimized by copying from the user buffer
708 * directly into mbufs and passing the chain down, but that requires a
709 * fair amount of re-working of the relevant codepaths (and can be done
710 * later).
711 */
712 static int
713 nfs_directio_write(struct vnode *vp, struct uio *uiop, struct ucred *cred,
714     int ioflag)
718 {
719 int error;
720 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
721 struct thread *td = uiop->uio_td;
722 int size;
723
724 if (ioflag & IO_SYNC) {
725 int iomode, must_commit;
726 struct uio uio;
727 struct iovec iov;
728 do_sync:
729 while (uiop->uio_resid > 0) {
730 size = min(uiop->uio_resid, nmp->nm_wsize);
731 size = min(uiop->uio_iov->iov_len, size);
732 iov.iov_base = uiop->uio_iov->iov_base;
733 iov.iov_len = size;
734 uio.uio_iov = &iov;
735 uio.uio_iovcnt = 1;
736 uio.uio_offset = uiop->uio_offset;
737 uio.uio_resid = size;
738 uio.uio_segflg = UIO_USERSPACE;
739 uio.uio_rw = UIO_WRITE;
740 uio.uio_td = td;
741 iomode = NFSV3WRITE_FILESYNC;
742 error = (nmp->nm_rpcops->nr_writerpc)(vp, &uio, cred,
743 &iomode, &must_commit);
744 KASSERT((must_commit == 0),
745 ("nfs_directio_write: Did not commit write"));
746 if (error)
747 return (error);
748 uiop->uio_offset += size;
749 uiop->uio_resid -= size;
750 if (uiop->uio_iov->iov_len <= size) {
751 uiop->uio_iovcnt--;
752 uiop->uio_iov++;
753 } else {
754 uiop->uio_iov->iov_base =
755 (char *)uiop->uio_iov->iov_base + size;
756 uiop->uio_iov->iov_len -= size;
757 }
758 }
759 } else {
760 struct uio *t_uio;
761 struct iovec *t_iov;
762 struct buf *bp;
763
764 /*
765 * Break up the write into blocksize chunks and hand these
766 * over to nfsiod's for write back.
767 * Unfortunately, this incurs a copy of the data, since
768 * the user could otherwise modify the buffer before the
769 * write is initiated.
770 *
771 * The obvious optimization here is that one of the 2 copies
772 * in the async write path can be eliminated by copying the
773 * data here directly into mbufs and passing the mbuf chain
774 * down. But that will require a fair amount of re-working
775 * of the code and can be done if there's enough interest
776 * in NFS directio access.
777 */
778 while (uiop->uio_resid > 0) {
779 size = min(uiop->uio_resid, nmp->nm_wsize);
780 size = min(uiop->uio_iov->iov_len, size);
781 bp = getpbuf(&nfs_pbuf_freecnt);
782 t_uio = malloc(sizeof(struct uio), M_NFSDIRECTIO, M_WAITOK);
783 t_iov = malloc(sizeof(struct iovec), M_NFSDIRECTIO, M_WAITOK);
784 t_iov->iov_base = malloc(size, M_NFSDIRECTIO, M_WAITOK);
785 t_iov->iov_len = size;
786 t_uio->uio_iov = t_iov;
787 t_uio->uio_iovcnt = 1;
788 t_uio->uio_offset = uiop->uio_offset;
789 t_uio->uio_resid = size;
790 t_uio->uio_segflg = UIO_SYSSPACE;
791 t_uio->uio_rw = UIO_WRITE;
792 t_uio->uio_td = td;
793 bcopy(uiop->uio_iov->iov_base, t_iov->iov_base, size);
794 bp->b_flags |= B_DIRECT;
795 bp->b_iocmd = BIO_WRITE;
796 if (cred != NOCRED) {
797 crhold(cred);
798 bp->b_wcred = cred;
799 } else
800 bp->b_wcred = NOCRED;
801 bp->b_caller1 = (void *)t_uio;
802 bp->b_vp = vp;
803 error = nfs_asyncio(nmp, bp, NOCRED, td);
804 if (error) {
805 free(t_iov->iov_base, M_NFSDIRECTIO);
806 free(t_iov, M_NFSDIRECTIO);
807 free(t_uio, M_NFSDIRECTIO);
808 bp->b_vp = NULL;
809 relpbuf(bp, &nfs_pbuf_freecnt);
810 if (error == EINTR)
811 return (error);
812 goto do_sync;
813 }
814 uiop->uio_offset += size;
815 uiop->uio_resid -= size;
816 if (uiop->uio_iov->iov_len <= size) {
817 uiop->uio_iovcnt--;
818 uiop->uio_iov++;
819 } else {
820 uiop->uio_iov->iov_base =
821 (char *)uiop->uio_iov->iov_base + size;
822 uiop->uio_iov->iov_len -= size;
823 }
824 }
825 }
826 return (0);
827 }
828
829 /*
830 * Vnode op for write using bio
831 */
832 int
833 nfs_write(struct vop_write_args *ap)
834 {
835 int biosize;
836 struct uio *uio = ap->a_uio;
837 struct thread *td = uio->uio_td;
838 struct vnode *vp = ap->a_vp;
839 struct nfsnode *np = VTONFS(vp);
840 struct ucred *cred = ap->a_cred;
841 int ioflag = ap->a_ioflag;
842 struct buf *bp;
843 struct vattr vattr;
844 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
845 daddr_t lbn;
846 int bcount;
847 int n, on, error = 0;
848 int haverslock = 0;
849 struct proc *p = td?td->td_proc:NULL;
850
851 GIANT_REQUIRED;
852
853 #ifdef DIAGNOSTIC
854 if (uio->uio_rw != UIO_WRITE)
855 panic("nfs_write mode");
856 if (uio->uio_segflg == UIO_USERSPACE && uio->uio_td != curthread)
857 panic("nfs_write proc");
858 #endif
859 if (vp->v_type != VREG)
860 return (EIO);
861 if (np->n_flag & NWRITEERR) {
862 np->n_flag &= ~NWRITEERR;
863 return (np->n_error);
864 }
865 if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
866 (nmp->nm_state & NFSSTA_GOTFSINFO) == 0)
867 (void)nfs_fsinfo(nmp, vp, cred, td);
868
869 /*
870 * Synchronously flush pending buffers if we are in synchronous
871 * mode or if we are appending.
872 */
873 if (ioflag & (IO_APPEND | IO_SYNC)) {
874 if (np->n_flag & NMODIFIED) {
875 #ifdef notyet /* Needs matching nonblock semantics elsewhere, too. */
876 /*
877 * Require non-blocking, synchronous writes to
878 * dirty files to inform the program it needs
879 * to fsync(2) explicitly.
880 */
881 if (ioflag & IO_NDELAY)
882 return (EAGAIN);
883 #endif
884 flush_and_restart:
885 np->n_attrstamp = 0;
886 error = nfs_vinvalbuf(vp, V_SAVE, td, 1);
887 if (error)
888 return (error);
889 }
890 }
891
892 /*
893 * If IO_APPEND then load uio_offset. We restart here if we cannot
894 * get the append lock.
895 */
896 restart:
897 if (ioflag & IO_APPEND) {
898 np->n_attrstamp = 0;
899 error = VOP_GETATTR(vp, &vattr, cred, td);
900 if (error)
901 return (error);
902 uio->uio_offset = np->n_size;
903 }
904
905 if (uio->uio_offset < 0)
906 return (EINVAL);
907 if ((uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
908 return (EFBIG);
909 if (uio->uio_resid == 0)
910 return (0);
911
912 if (nfs_directio_enable && (ioflag & IO_DIRECT) && vp->v_type == VREG)
913 return nfs_directio_write(vp, uio, cred, ioflag);
914
915 /*
916 * We need to obtain the rslock if we intend to modify np->n_size
917 * in order to guarantee the append point with multiple contending
918 * writers, to guarantee that no other appenders modify n_size
919 * while we are trying to obtain a truncated buffer (i.e. to avoid
920 * accidentally truncating data written by another appender due to
921 * the race), and to ensure that the buffer is populated prior to
922 * our extending of the file. We hold rslock through the entire
923 * operation.
924 *
925 * Note that we do not synchronize the case where someone truncates
926 * the file while we are appending to it because attempting to lock
927 * this case may deadlock other parts of the system unexpectedly.
928 */
929 if ((ioflag & IO_APPEND) ||
930 uio->uio_offset + uio->uio_resid > np->n_size) {
931 switch(nfs_rslock(np, td)) {
932 case ENOLCK:
933 goto restart;
934 /* not reached */
935 case EIO:
936 return (EIO);
937 case EINTR:
938 case ERESTART:
939 return(EINTR);
940 /* not reached */
941 default:
942 break;
943 }
944 haverslock = 1;
945 }
946
947 /*
948 * Maybe this should be above the vnode op call, but so long as
949 * file servers have no limits, I don't think it matters.
950 */
951 if (p != NULL) {
952 PROC_LOCK(p);
953 if (uio->uio_offset + uio->uio_resid >
954 lim_cur(p, RLIMIT_FSIZE)) {
955 psignal(p, SIGXFSZ);
956 PROC_UNLOCK(p);
957 if (haverslock)
958 nfs_rsunlock(np, td);
959 return (EFBIG);
960 }
961 PROC_UNLOCK(p);
962 }
963
964 biosize = vp->v_mount->mnt_stat.f_iosize;
965 /*
966 * Find all of this file's B_NEEDCOMMIT buffers. If our writes
967 * would exceed the local maximum per-file write commit size when
968 * combined with those, we must decide whether to flush,
969 * go synchronous, or return error. We don't bother checking
970 * IO_UNIT -- we just make all writes atomic anyway, as there's
971 * no point optimizing for something that really won't ever happen.
972 */
973 if (!(ioflag & IO_SYNC)) {
974 int needrestart = 0;
975 if (nmp->nm_wcommitsize < uio->uio_resid) {
976 /*
977 * If this request could not possibly be completed
978 * without exceeding the maximum outstanding write
979 * commit size, see if we can convert it into a
980 * synchronous write operation.
981 */
982 if (ioflag & IO_NDELAY)
983 return (EAGAIN);
984 ioflag |= IO_SYNC;
985 if (np->n_flag & NMODIFIED)
986 needrestart = 1;
987 } else if (np->n_flag & NMODIFIED) {
988 int wouldcommit = 0;
989 BO_LOCK(&vp->v_bufobj);
990 if (vp->v_bufobj.bo_dirty.bv_cnt != 0) {
991 TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd,
992 b_bobufs) {
993 if (bp->b_flags & B_NEEDCOMMIT)
994 wouldcommit += bp->b_bcount;
995 }
996 }
997 BO_UNLOCK(&vp->v_bufobj);
998 /*
999 * Since we're not operating synchronously and
1000 * bypassing the buffer cache, we are in a commit
1001 * and holding all of these buffers whether
1002 * transmitted or not. If not limited, this
1003 * will lead to the buffer cache deadlocking,
1004 * as no one else can flush our uncommitted buffers.
1005 */
1006 wouldcommit += uio->uio_resid;
1007 /*
1008 * If we would initially exceed the maximum
1009 * outstanding write commit size, flush and restart.
1010 */
1011 if (wouldcommit > nmp->nm_wcommitsize)
1012 needrestart = 1;
1013 }
1014 if (needrestart) {
1015 if (haverslock) {
1016 nfs_rsunlock(np, td);
1017 haverslock = 0;
1018 }
1019 goto flush_and_restart;
1020 }
1021 }
1022
1023 do {
1024 nfsstats.biocache_writes++;
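		/*
		 * Split the write offset into a cache block number (lbn), an
		 * offset within that block (on), and the number of bytes (n)
		 * this pass will copy into the block.
		 */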
1025 lbn = uio->uio_offset / biosize;
1026 on = uio->uio_offset & (biosize-1);
1027 n = min((unsigned)(biosize - on), uio->uio_resid);
1028 again:
1029 /*
1030 * Handle direct append and file extension cases, calculate
1031 * unaligned buffer size.
1032 */
1033
1034 if (uio->uio_offset == np->n_size && n) {
1035 /*
1036 * Get the buffer (in its pre-append state to maintain
1037 * B_CACHE if it was previously set). Resize the
1038 * nfsnode after we have locked the buffer to prevent
1039 * readers from reading garbage.
1040 */
1041 bcount = on;
1042 bp = nfs_getcacheblk(vp, lbn, bcount, td);
1043
1044 if (bp != NULL) {
1045 long save;
1046
1047 np->n_size = uio->uio_offset + n;
1048 np->n_flag |= NMODIFIED;
1049 vnode_pager_setsize(vp, np->n_size);
1050
1051 save = bp->b_flags & B_CACHE;
1052 bcount += n;
1053 allocbuf(bp, bcount);
1054 bp->b_flags |= save;
1055 }
1056 } else {
1057 /*
1058 * Obtain the locked cache block first, and then
1059 * adjust the file's size as appropriate.
1060 */
1061 bcount = on + n;
1062 if ((off_t)lbn * biosize + bcount < np->n_size) {
1063 if ((off_t)(lbn + 1) * biosize < np->n_size)
1064 bcount = biosize;
1065 else
1066 bcount = np->n_size - (off_t)lbn * biosize;
1067 }
1068 bp = nfs_getcacheblk(vp, lbn, bcount, td);
1069 if (uio->uio_offset + n > np->n_size) {
1070 np->n_size = uio->uio_offset + n;
1071 np->n_flag |= NMODIFIED;
1072 vnode_pager_setsize(vp, np->n_size);
1073 }
1074 }
1075
1076 if (!bp) {
1077 error = nfs_sigintr(nmp, NULL, td);
1078 if (!error)
1079 error = EINTR;
1080 break;
1081 }
1082
1083 /*
1084 * Issue a READ if B_CACHE is not set. In special-append
1085 * mode, B_CACHE is based on the buffer prior to the write
1086 * op and is typically set, avoiding the read. If a read
1087 * is required in special append mode, the server will
1088 * probably send us a short-read since we extended the file
1089 * on our end, resulting in b_resid == 0 and, thus,
1090 * B_CACHE getting set.
1091 *
1092 * We can also avoid issuing the read if the write covers
1093 * the entire buffer. We have to make sure the buffer state
1094 * is reasonable in this case since we will not be initiating
1095 * I/O. See the comments in kern/vfs_bio.c's getblk() for
1096 * more information.
1097 *
1098 * B_CACHE may also be set due to the buffer being cached
1099 * normally.
1100 */
1101
1102 if (on == 0 && n == bcount) {
1103 bp->b_flags |= B_CACHE;
1104 bp->b_flags &= ~B_INVAL;
1105 bp->b_ioflags &= ~BIO_ERROR;
1106 }
1107
1108 if ((bp->b_flags & B_CACHE) == 0) {
1109 bp->b_iocmd = BIO_READ;
1110 vfs_busy_pages(bp, 0);
1111 error = nfs_doio(vp, bp, cred, td);
1112 if (error) {
1113 brelse(bp);
1114 break;
1115 }
1116 }
1117 if (bp->b_wcred == NOCRED)
1118 bp->b_wcred = crhold(cred);
1119 np->n_flag |= NMODIFIED;
1120
1121 /*
1122 * If dirtyend exceeds file size, chop it down. This should
1123 * not normally occur but there is an append race where it
1124 * might occur XXX, so we log it.
1125 *
1126 * If the chopping creates a reverse-indexed or degenerate
1127 * situation with dirtyoff/end, we 0 both of them.
1128 */
1129
1130 if (bp->b_dirtyend > bcount) {
1131 printf("NFS append race @%lx:%d\n",
1132 (long)bp->b_blkno * DEV_BSIZE,
1133 bp->b_dirtyend - bcount);
1134 bp->b_dirtyend = bcount;
1135 }
1136
1137 if (bp->b_dirtyoff >= bp->b_dirtyend)
1138 bp->b_dirtyoff = bp->b_dirtyend = 0;
1139
1140 /*
1141 * If the new write will leave a contiguous dirty
1142 * area, just update the b_dirtyoff and b_dirtyend,
1143 * otherwise force a write rpc of the old dirty area.
1144 *
1145 * While it is possible to merge discontiguous writes due to
1146 * our having a B_CACHE buffer ( and thus valid read data
1147 * for the hole), we don't because it could lead to
1148 * significant cache coherency problems with multiple clients,
1149 * especially if locking is implemented later on.
1150 *
1151 * As an optimization we could theoretically maintain
1152 * a linked list of discontinuous areas, but we would still
1153 * have to commit them separately so there isn't much
1154 * advantage to it except perhaps a bit of asynchronization.
1155 */
1156
1157 if (bp->b_dirtyend > 0 &&
1158 (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
1159 if (bwrite(bp) == EINTR) {
1160 error = EINTR;
1161 break;
1162 }
1163 goto again;
1164 }
1165
1166 error = uiomove((char *)bp->b_data + on, n, uio);
1167
1168 /*
1169 * Since this block is being modified, it must be written
1170 * again and not just committed. Since write clustering does
1171 * not work for the stage 1 data write, only the stage 2
1172 * commit rpc, we have to clear B_CLUSTEROK as well.
1173 */
1174 bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
1175
1176 if (error) {
1177 bp->b_ioflags |= BIO_ERROR;
1178 brelse(bp);
1179 break;
1180 }
1181
1182 /*
1183 * Only update dirtyoff/dirtyend if not a degenerate
1184 * condition.
1185 */
1186 if (n) {
1187 if (bp->b_dirtyend > 0) {
1188 bp->b_dirtyoff = min(on, bp->b_dirtyoff);
1189 bp->b_dirtyend = max((on + n), bp->b_dirtyend);
1190 } else {
1191 bp->b_dirtyoff = on;
1192 bp->b_dirtyend = on + n;
1193 }
1194 vfs_bio_set_validclean(bp, on, n);
1195 }
1196
1197 /*
1198 * If IO_SYNC do bwrite().
1199 *
1200 * IO_INVAL appears to be unused. The idea appears to be
1201 * to turn off caching in this case. Very odd. XXX
1202 */
1203 if ((ioflag & IO_SYNC)) {
1204 if (ioflag & IO_INVAL)
1205 bp->b_flags |= B_NOCACHE;
1206 error = bwrite(bp);
1207 if (error)
1208 break;
1209 } else if ((n + on) == biosize) {
1210 bp->b_flags |= B_ASYNC;
1211 (void) (nmp->nm_rpcops->nr_writebp)(bp, 0, 0);
1212 } else {
1213 bdwrite(bp);
1214 }
1215 } while (uio->uio_resid > 0 && n > 0);
1216
1217 if (haverslock)
1218 nfs_rsunlock(np, td);
1219
1220 return (error);
1221 }
1222
1223 /*
1224 * Get an nfs cache block.
1225 *
1226 * Allocate a new one if the block isn't currently in the cache
1227 * and return the block marked busy. If the calling process is
1228 * interrupted by a signal for an interruptible mount point, return
1229 * NULL.
1230 *
1231 * The caller must carefully deal with the possible B_INVAL state of
1232 * the buffer. nfs_doio() clears B_INVAL (and nfs_asyncio() clears it
1233 * indirectly), so synchronous reads can be issued without worrying about
1234 * the B_INVAL state. We have to be a little more careful when dealing
1235 * with writes (see comments in nfs_write()) when extending a file past
1236 * its EOF.
1237 */
1238 static struct buf *
1239 nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct thread *td)
1240 {
1241 struct buf *bp;
1242 struct mount *mp;
1243 struct nfsmount *nmp;
1244
1245 mp = vp->v_mount;
1246 nmp = VFSTONFS(mp);
1247
1248 if (nmp->nm_flag & NFSMNT_INT) {
1249 sigset_t oldset;
1250
1251 nfs_set_sigmask(td, &oldset);
1252 bp = getblk(vp, bn, size, PCATCH, 0, 0);
1253 nfs_restore_sigmask(td, &oldset);
1254 while (bp == NULL) {
1255 if (nfs_sigintr(nmp, NULL, td))
1256 return (NULL);
1257 bp = getblk(vp, bn, size, 0, 2 * hz, 0);
1258 }
1259 } else {
1260 bp = getblk(vp, bn, size, 0, 0, 0);
1261 }
1262
1263 if (vp->v_type == VREG) {
1264 int biosize;
1265
1266 biosize = mp->mnt_stat.f_iosize;
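		/*
		 * b_blkno is expressed in DEV_BSIZE (512-byte) units, so
		 * scale the NFS logical block number by the mount's I/O size.
		 */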
1267 bp->b_blkno = bn * (biosize / DEV_BSIZE);
1268 }
1269 return (bp);
1270 }
1271
1272 /*
1273 * Flush and invalidate all dirty buffers. If another process is already
1274 * doing the flush, just wait for completion.
1275 */
1276 int
1277 nfs_vinvalbuf(struct vnode *vp, int flags, struct thread *td, int intrflg)
1278 {
1279 struct nfsnode *np = VTONFS(vp);
1280 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
1281 int error = 0, slpflag, slptimeo;
1282 int old_lock = 0;
1283
1284 ASSERT_VOP_LOCKED(vp, "nfs_vinvalbuf");
1285
1286 /*
1287 * XXX This check stops us from needlessly doing a vinvalbuf when
1288 * being called through vclean(). It is not clear that this is
1289 * unsafe.
1290 */
1291 if (vp->v_iflag & VI_DOOMED)
1292 return (0);
1293
1294 if ((nmp->nm_flag & NFSMNT_INT) == 0)
1295 intrflg = 0;
1296 if (intrflg) {
1297 slpflag = PCATCH;
1298 slptimeo = 2 * hz;
1299 } else {
1300 slpflag = 0;
1301 slptimeo = 0;
1302 }
1303
1304 if ((old_lock = VOP_ISLOCKED(vp, td)) != LK_EXCLUSIVE) {
1305 if (old_lock == LK_SHARED) {
1306 /* Upgrade to exclusive lock, this might block */
1307 vn_lock(vp, LK_UPGRADE | LK_RETRY, td);
1308 } else {
1309 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
1310 }
1311 }
1312
1313 /*
1314 * Now, flush as required.
1315 */
1316 if ((flags & V_SAVE) && (vp->v_bufobj.bo_object != NULL)) {
1317 VM_OBJECT_LOCK(vp->v_bufobj.bo_object);
1318 vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
1319 VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object);
1320 /*
1321 * If the page clean was interrupted, fail the invalidation.
1322 * Not doing so, we run the risk of losing dirty pages in the
1323 * vinvalbuf() call below.
1324 */
1325 if (intrflg && (error = nfs_sigintr(nmp, NULL, td)))
1326 goto out;
1327 }
1328
1329 error = vinvalbuf(vp, flags, td, slpflag, 0);
1330 while (error) {
1331 if (intrflg && (error = nfs_sigintr(nmp, NULL, td)))
1332 goto out;
1333 error = vinvalbuf(vp, flags, td, 0, slptimeo);
1334 }
1335 if (np->n_directio_asyncwr == 0)
1336 np->n_flag &= ~NMODIFIED;
1337 out:
1338 if (old_lock != LK_EXCLUSIVE) {
1339 if (old_lock == LK_SHARED) {
1340 /* Downgrade from exclusive lock, this might block */
1341 vn_lock(vp, LK_DOWNGRADE, td);
1342 } else {
1343 VOP_UNLOCK(vp, 0, td);
1344 }
1345 }
1346 return error;
1347 }
1348
1349 /*
1350 * Initiate asynchronous I/O. Return an error if no nfsiods are available.
1351 * This is mainly to avoid queueing async I/O requests when the nfsiods
1352 * are all hung on a dead server.
1353 *
1354 * Note: nfs_asyncio() does not clear (BIO_ERROR|B_INVAL) but when the bp
1355 * is eventually dequeued by the async daemon, nfs_doio() *will*.
1356 */
1357 int
1358 nfs_asyncio(struct nfsmount *nmp, struct buf *bp, struct ucred *cred, struct thread *td)
1359 {
1360 int iod;
1361 int gotiod;
1362 int slpflag = 0;
1363 int slptimeo = 0;
1364 int error, error2;
1365
1366 /*
1367 * Commits are usually short and sweet, so let's save some CPU and
1368 * leave the async daemons for more important rpc's (such as reads
1369 * and writes).
1370 */
1371 if (bp->b_iocmd == BIO_WRITE && (bp->b_flags & B_NEEDCOMMIT) &&
1372 (nmp->nm_bufqiods > nfs_numasync / 2)) {
1373 return(EIO);
1374 }
1375
1376 again:
1377 if (nmp->nm_flag & NFSMNT_INT)
1378 slpflag = PCATCH;
1379 gotiod = FALSE;
1380
1381 /*
1382 * Find a free iod to process this request.
1383 */
1384 for (iod = 0; iod < nfs_numasync; iod++)
1385 if (nfs_iodwant[iod]) {
1386 gotiod = TRUE;
1387 break;
1388 }
1389
1390 /*
1391 * Try to create one if none are free.
1392 */
1393 if (!gotiod) {
1394 iod = nfs_nfsiodnew();
1395 if (iod != -1)
1396 gotiod = TRUE;
1397 }
1398
1399 if (gotiod) {
1400 /*
1401 * Found one, so wake it up and tell it which
1402 * mount to process.
1403 */
1404 NFS_DPF(ASYNCIO, ("nfs_asyncio: waking iod %d for mount %p\n",
1405 iod, nmp));
1406 nfs_iodwant[iod] = NULL;
1407 nfs_iodmount[iod] = nmp;
1408 nmp->nm_bufqiods++;
1409 wakeup(&nfs_iodwant[iod]);
1410 }
1411
1412 /*
1413 * If none are free, we may already have an iod working on this mount
1414 * point. If so, it will process our request.
1415 */
1416 if (!gotiod) {
1417 if (nmp->nm_bufqiods > 0) {
1418 NFS_DPF(ASYNCIO,
1419 ("nfs_asyncio: %d iods are already processing mount %p\n",
1420 nmp->nm_bufqiods, nmp));
1421 gotiod = TRUE;
1422 }
1423 }
1424
1425 /*
1426 * If we have an iod which can process the request, then queue
1427 * the buffer.
1428 */
1429 if (gotiod) {
1430 /*
1431 * Ensure that the queue never grows too large. We still want
1432 * to asynchronize, so we block rather than return EIO.
1433 */
1434 while (nmp->nm_bufqlen >= 2*nfs_numasync) {
1435 NFS_DPF(ASYNCIO,
1436 ("nfs_asyncio: waiting for mount %p queue to drain\n", nmp));
1437 nmp->nm_bufqwant = TRUE;
1438 error = nfs_tsleep(td, &nmp->nm_bufq, slpflag | PRIBIO,
1439 "nfsaio", slptimeo);
1440 if (error) {
1441 error2 = nfs_sigintr(nmp, NULL, td);
1442 if (error2)
1443 return (error2);
1444 if (slpflag == PCATCH) {
1445 slpflag = 0;
1446 slptimeo = 2 * hz;
1447 }
1448 }
1449 /*
1450 * We might have lost our iod while sleeping,
1451 * so check and loop if necessary.
1452 */
1453 if (nmp->nm_bufqiods == 0) {
1454 NFS_DPF(ASYNCIO,
1455 ("nfs_asyncio: no iods after mount %p queue was drained, looping\n", nmp));
1456 goto again;
1457 }
1458 }
1459
1460 if (bp->b_iocmd == BIO_READ) {
1461 if (bp->b_rcred == NOCRED && cred != NOCRED)
1462 bp->b_rcred = crhold(cred);
1463 } else {
1464 if (bp->b_wcred == NOCRED && cred != NOCRED)
1465 bp->b_wcred = crhold(cred);
1466 }
1467
1468 if (bp->b_flags & B_REMFREE)
1469 bremfreef(bp);
1470 BUF_KERNPROC(bp);
1471 TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
1472 nmp->nm_bufqlen++;
1473 if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
1474 VTONFS(bp->b_vp)->n_flag |= NMODIFIED;
1475 VTONFS(bp->b_vp)->n_directio_asyncwr++;
1476 }
1477 return (0);
1478 }
1479
1480 /*
1481 * All the iods are busy on other mounts, so return EIO to
1482 * force the caller to process the i/o synchronously.
1483 */
1484 NFS_DPF(ASYNCIO, ("nfs_asyncio: no iods available, i/o is synchronous\n"));
1485 return (EIO);
1486 }
1487
1488 void
1489 nfs_doio_directwrite(struct buf *bp)
1490 {
1491 int iomode, must_commit;
1492 struct uio *uiop = (struct uio *)bp->b_caller1;
1493 char *iov_base = uiop->uio_iov->iov_base;
1494 struct nfsmount *nmp = VFSTONFS(bp->b_vp->v_mount);
1495
1496 iomode = NFSV3WRITE_FILESYNC;
1497 uiop->uio_td = NULL; /* NULL since we're in nfsiod */
1498 (nmp->nm_rpcops->nr_writerpc)(bp->b_vp, uiop, bp->b_wcred, &iomode, &must_commit);
1499 KASSERT((must_commit == 0), ("nfs_doio_directwrite: Did not commit write"));
1500 free(iov_base, M_NFSDIRECTIO);
1501 free(uiop->uio_iov, M_NFSDIRECTIO);
1502 free(uiop, M_NFSDIRECTIO);
1503 if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
1504 struct nfsnode *np = VTONFS(bp->b_vp);
1505
1506 np->n_directio_asyncwr--;
1507 if (np->n_directio_asyncwr == 0) {
1508 VTONFS(bp->b_vp)->n_flag &= ~NMODIFIED;
1509 if ((np->n_flag & NFSYNCWAIT)) {
1510 np->n_flag &= ~NFSYNCWAIT;
1511 wakeup((caddr_t)&np->n_directio_asyncwr);
1512 }
1513 }
1514 }
1515 bp->b_vp = NULL;
1516 relpbuf(bp, &nfs_pbuf_freecnt);
1517 }
1518
1519 /*
1520 * Do an I/O operation to/from a cache block. This may be called
1521 * synchronously or from an nfsiod.
1522 */
1523 int
1524 nfs_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td)
1525 {
1526 struct uio *uiop;
1527 struct nfsnode *np;
1528 struct nfsmount *nmp;
1529 int error = 0, iomode, must_commit = 0;
1530 struct uio uio;
1531 struct iovec io;
1532 struct proc *p = td ? td->td_proc : NULL;
1533
1534 np = VTONFS(vp);
1535 nmp = VFSTONFS(vp->v_mount);
1536 uiop = &uio;
1537 uiop->uio_iov = &io;
1538 uiop->uio_iovcnt = 1;
1539 uiop->uio_segflg = UIO_SYSSPACE;
1540 uiop->uio_td = td;
1541
1542 /*
1543 * clear BIO_ERROR and B_INVAL state prior to initiating the I/O. We
1544 * do this here so we do not have to do it in all the code that
1545 * calls us.
1546 */
1547 bp->b_flags &= ~B_INVAL;
1548 bp->b_ioflags &= ~BIO_ERROR;
1549
1550 KASSERT(!(bp->b_flags & B_DONE), ("nfs_doio: bp %p already marked done", bp));
1551
1552 if (bp->b_iocmd == BIO_READ) {
1553 io.iov_len = uiop->uio_resid = bp->b_bcount;
1554 io.iov_base = bp->b_data;
1555 uiop->uio_rw = UIO_READ;
1556
1557 switch (vp->v_type) {
1558 case VREG:
1559 uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
1560 nfsstats.read_bios++;
1561 error = (nmp->nm_rpcops->nr_readrpc)(vp, uiop, cr);
1562
1563 if (!error) {
1564 if (uiop->uio_resid) {
1565 /*
1566 * If we had a short read with no error, we must have
1567 * hit a file hole. We should zero-fill the remainder.
1568 * This can also occur if the server hits the file EOF.
1569 *
1570 * Holes used to be able to occur due to pending
1571 * writes, but that is not possible any longer.
1572 */
1573 int nread = bp->b_bcount - uiop->uio_resid;
1574 int left = uiop->uio_resid;
1575
1576 if (left > 0)
1577 bzero((char *)bp->b_data + nread, left);
1578 uiop->uio_resid = 0;
1579 }
1580 }
1581 /* ASSERT_VOP_LOCKED(vp, "nfs_doio"); */
1582 if (p && (vp->v_vflag & VV_TEXT) &&
1583 (NFS_TIMESPEC_COMPARE(&np->n_mtime, &np->n_vattr.va_mtime))) {
1584 PROC_LOCK(p);
1585 killproc(p, "text file modification");
1586 PROC_UNLOCK(p);
1587 }
1588 break;
1589 case VLNK:
1590 uiop->uio_offset = (off_t)0;
1591 nfsstats.readlink_bios++;
1592 error = (nmp->nm_rpcops->nr_readlinkrpc)(vp, uiop, cr);
1593 break;
1594 case VDIR:
1595 nfsstats.readdir_bios++;
1596 uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ;
1597 if ((nmp->nm_flag & NFSMNT_NFSV4) != 0)
1598 error = nfs4_readdirrpc(vp, uiop, cr);
1599 else {
1600 if ((nmp->nm_flag & NFSMNT_RDIRPLUS) != 0) {
1601 error = nfs_readdirplusrpc(vp, uiop, cr);
1602 if (error == NFSERR_NOTSUPP)
1603 nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
1604 }
1605 if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
1606 error = nfs_readdirrpc(vp, uiop, cr);
1607 }
1608 /*
1609 * end-of-directory sets B_INVAL but does not generate an
1610 * error.
1611 */
1612 if (error == 0 && uiop->uio_resid == bp->b_bcount)
1613 bp->b_flags |= B_INVAL;
1614 break;
1615 default:
1616 printf("nfs_doio: type %x unexpected\n", vp->v_type);
1617 break;
1618 };
1619 if (error) {
1620 bp->b_ioflags |= BIO_ERROR;
1621 bp->b_error = error;
1622 }
1623 } else {
1624 /*
1625 * If we only need to commit, try to commit
1626 */
1627 if (bp->b_flags & B_NEEDCOMMIT) {
1628 int retv;
1629 off_t off;
1630
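			/*
			 * Commit only the dirty range of the buffer: b_blkno
			 * (in DEV_BSIZE units) plus b_dirtyoff gives the
			 * starting file offset, b_dirtyend - b_dirtyoff the
			 * length.
			 */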
1631 off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
1632 retv = (nmp->nm_rpcops->nr_commit)(
1633 vp, off, bp->b_dirtyend-bp->b_dirtyoff,
1634 bp->b_wcred, td);
1635 if (retv == 0) {
1636 bp->b_dirtyoff = bp->b_dirtyend = 0;
1637 bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
1638 bp->b_resid = 0;
1639 bufdone(bp);
1640 return (0);
1641 }
1642 if (retv == NFSERR_STALEWRITEVERF) {
1643 nfs_clearcommit(vp->v_mount);
1644 }
1645 }
1646
1647 /*
1648 * Setup for actual write
1649 */
1650
1651 if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > np->n_size)
1652 bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * DEV_BSIZE;
1653
1654 if (bp->b_dirtyend > bp->b_dirtyoff) {
1655 io.iov_len = uiop->uio_resid = bp->b_dirtyend
1656 - bp->b_dirtyoff;
1657 uiop->uio_offset = (off_t)bp->b_blkno * DEV_BSIZE
1658 + bp->b_dirtyoff;
1659 io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
1660 uiop->uio_rw = UIO_WRITE;
1661 nfsstats.write_bios++;
1662
1663 if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == B_ASYNC)
1664 iomode = NFSV3WRITE_UNSTABLE;
1665 else
1666 iomode = NFSV3WRITE_FILESYNC;
1667
1668 error = (nmp->nm_rpcops->nr_writerpc)(vp, uiop, cr, &iomode, &must_commit);
1669
1670 /*
1671 * When setting B_NEEDCOMMIT also set B_CLUSTEROK to try
1672 * to cluster the buffers needing commit. This will allow
1673 * the system to submit a single commit rpc for the whole
1674 * cluster. We can do this even if the buffer is not 100%
1675 * dirty (relative to the NFS blocksize), so we optimize the
1676 * append-to-file-case.
1677 *
1678 * (when clearing B_NEEDCOMMIT, B_CLUSTEROK must also be
1679 * cleared because write clustering only works for commit
1680 * rpc's, not for the data portion of the write).
1681 */
1682
1683 if (!error && iomode == NFSV3WRITE_UNSTABLE) {
1684 bp->b_flags |= B_NEEDCOMMIT;
1685 if (bp->b_dirtyoff == 0
1686 && bp->b_dirtyend == bp->b_bcount)
1687 bp->b_flags |= B_CLUSTEROK;
1688 } else {
1689 bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
1690 }
1691
1692 /*
1693 * For an interrupted write, the buffer is still valid
1694 * and the write hasn't been pushed to the server yet,
1695 * so we can't set BIO_ERROR and report the interruption
1696 * by setting B_EINTR. For the B_ASYNC case, B_EINTR
1697 * is not relevant, so the rpc attempt is essentially
1698 * a noop. For the case of a V3 write rpc not being
1699 * committed to stable storage, the block is still
1700 * dirty and requires either a commit rpc or another
1701 * write rpc with iomode == NFSV3WRITE_FILESYNC before
1702 * the block is reused. This is indicated by setting
1703 * the B_DELWRI and B_NEEDCOMMIT flags.
1704 *
1705 * If the buffer is marked B_PAGING, it does not reside on
1706 * the vp's paging queues so we cannot call bdirty(). The
1707 * bp in this case is not an NFS cache block so we should
1708 * be safe. XXX
1709 *
1710 * The logic below breaks up errors into recoverable and
1711 * unrecoverable. For the former, we clear B_INVAL|B_NOCACHE
1712 * and keep the buffer around for potential write retries.
1713 * For the latter (eg ESTALE), we toss the buffer away (B_INVAL)
1714 * and save the error in the nfsnode. This is less than ideal
1715 * but necessary. Keeping such buffers around could potentially
1716 * cause buffer exhaustion eventually (they can never be written
1717 * out, so they will constantly be re-dirtied). It also causes
1718 * all sorts of vfs panics. For non-recoverable write errors,
1719 * also invalidate the attrcache, so we'll be forced to go over
1720 * the wire for this object, returning an error to user on next
1721 * call (most of the time).
1722 */
1723 if (error == EINTR || error == EIO || error == ETIMEDOUT
1724 || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
1725 int s;
1726
1727 s = splbio();
1728 bp->b_flags &= ~(B_INVAL|B_NOCACHE);
1729 if ((bp->b_flags & B_PAGING) == 0) {
1730 bdirty(bp);
1731 bp->b_flags &= ~B_DONE;
1732 }
1733 if (error && (bp->b_flags & B_ASYNC) == 0)
1734 bp->b_flags |= B_EINTR;
1735 splx(s);
1736 } else {
1737 if (error) {
1738 bp->b_ioflags |= BIO_ERROR;
1739 bp->b_flags |= B_INVAL;
1740 bp->b_error = np->n_error = error;
1741 np->n_flag |= NWRITEERR;
1742 np->n_attrstamp = 0;
1743 }
1744 bp->b_dirtyoff = bp->b_dirtyend = 0;
1745 }
1746 } else {
1747 bp->b_resid = 0;
1748 bufdone(bp);
1749 return (0);
1750 }
1751 }
1752 bp->b_resid = uiop->uio_resid;
1753 if (must_commit)
1754 nfs_clearcommit(vp->v_mount);
1755 bufdone(bp);
1756 return (error);
1757 }
1758
1759 /*
1760 * Used to aid in handling ftruncate() operations on the NFS client side.
1761 * Truncation creates a number of special problems for NFS. We have to
1762 * throw away VM pages and buffer cache buffers that are beyond EOF, and
1763 * we have to properly handle VM pages or (potentially dirty) buffers
1764 * that straddle the truncation point.
1765 */
1766
1767 int
1768 nfs_meta_setsize(struct vnode *vp, struct ucred *cred, struct thread *td, u_quad_t nsize)
1769 {
1770 struct nfsnode *np = VTONFS(vp);
1771 u_quad_t tsize = np->n_size;
1772 int biosize = vp->v_mount->mnt_stat.f_iosize;
1773 int error = 0;
1774
1775 np->n_size = nsize;
1776
1777 if (np->n_size < tsize) {
1778 struct buf *bp;
1779 daddr_t lbn;
1780 int bufsize;
1781
1782 /*
1783 * vtruncbuf() doesn't get the buffer overlapping the
1784 * truncation point. We may have a B_DELWRI and/or B_CACHE
1785 * buffer that now needs to be truncated.
1786 */
1787 error = vtruncbuf(vp, cred, td, nsize, biosize);
1788 lbn = nsize / biosize;
1789 bufsize = nsize & (biosize - 1);
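		/*
		 * Example: with an 8192-byte biosize and nsize 10000, the
		 * buffer straddling the new EOF is lbn 1 and bufsize is 1808,
		 * so only the first 1808 bytes of that block remain valid.
		 */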
1790 bp = nfs_getcacheblk(vp, lbn, bufsize, td);
1791 if (!bp)
1792 return EINTR;
1793 if (bp->b_dirtyoff > bp->b_bcount)
1794 bp->b_dirtyoff = bp->b_bcount;
1795 if (bp->b_dirtyend > bp->b_bcount)
1796 bp->b_dirtyend = bp->b_bcount;
1797 bp->b_flags |= B_RELBUF; /* don't leave garbage around */
1798 brelse(bp);
1799 } else {
1800 vnode_pager_setsize(vp, nsize);
1801 }
1802 return(error);
1803 }
1804