/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/9.0/sys/fs/nfsclient/nfs_clbio.c 223280 2011-06-18 23:02:53Z rmacklem $");

#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <fs/nfs/nfsport.h>
#include <fs/nfsclient/nfsmount.h>
#include <fs/nfsclient/nfs.h>
#include <fs/nfsclient/nfsnode.h>
#include <fs/nfsclient/nfs_kdtrace.h>

extern int newnfs_directio_allow_mmap;
extern struct nfsstats newnfsstats;
extern struct mtx ncl_iod_mutex;
extern int ncl_numasync;
extern enum nfsiod_state ncl_iodwant[NFS_MAXASYNCDAEMON];
extern struct nfsmount *ncl_iodmount[NFS_MAXASYNCDAEMON];
extern int newnfs_directio_enable;

int ncl_pbuf_freecnt = -1;	/* start out unlimited */

static struct buf *nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size,
    struct thread *td);
static int nfs_directio_write(struct vnode *vp, struct uio *uiop,
    struct ucred *cred, int ioflag);

/*
 * Vnode op for VM getpages.
 */
int
ncl_getpages(struct vop_getpages_args *ap)
{
	int i, error, nextoff, size, toff, count, npages;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct nfsmount *nmp;
	vm_object_t object;
	vm_page_t *pages;
	struct nfsnode *np;

	vp = ap->a_vp;
	np = VTONFS(vp);
	td = curthread;			/* XXX */
	cred = curthread->td_ucred;	/* XXX */
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;

	if ((object = vp->v_object) == NULL) {
		ncl_printf("nfs_getpages: called with non-merged cache vnode??\n");
		return (VM_PAGER_ERROR);
	}

	if (newnfs_directio_enable && !newnfs_directio_allow_mmap) {
		mtx_lock(&np->n_mtx);
		if ((np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
			mtx_unlock(&np->n_mtx);
			ncl_printf("nfs_getpages: called on non-cacheable vnode??\n");
			return (VM_PAGER_ERROR);
		} else
			mtx_unlock(&np->n_mtx);
	}

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		/* We'll never get here for v4, because we always have fsinfo */
		(void)ncl_fsinfo(nmp, vp, cred, td);
	} else
		mtx_unlock(&nmp->nm_mtx);

	npages = btoc(count);

	/*
	 * If the requested page is partially valid, just return it and
	 * allow the pager to zero-out the blanks.  Partially valid pages
	 * can only occur at the file EOF.
	 */
	VM_OBJECT_LOCK(object);
	if (pages[ap->a_reqpage]->valid != 0) {
		for (i = 0; i < npages; ++i) {
			if (i != ap->a_reqpage) {
				vm_page_lock(pages[i]);
				vm_page_free(pages[i]);
				vm_page_unlock(pages[i]);
			}
		}
		VM_OBJECT_UNLOCK(object);
		return (0);
	}
	VM_OBJECT_UNLOCK(object);

	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = getpbuf(&ncl_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodein);
	PCPU_ADD(cnt.v_vnodepgsin, npages);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = ncl_readrpc(vp, &uio, cred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &ncl_pbuf_freecnt);

	if (error && (uio.uio_resid == count)) {
		ncl_printf("nfs_getpages: error %d\n", error);
		VM_OBJECT_LOCK(object);
		for (i = 0; i < npages; ++i) {
			if (i != ap->a_reqpage) {
				vm_page_lock(pages[i]);
				vm_page_free(pages[i]);
				vm_page_unlock(pages[i]);
			}
		}
		VM_OBJECT_UNLOCK(object);
		return (VM_PAGER_ERROR);
	}

	/*
	 * Calculate the number of bytes read and validate only that number
	 * of bytes.  Note that due to pending writes, size may be 0.  This
	 * does not mean that the remaining data is invalid!
	 */

	size = count - uio.uio_resid;
	VM_OBJECT_LOCK(object);
	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page
			 */
			m->valid = VM_PAGE_BITS_ALL;
			KASSERT(m->dirty == 0,
			    ("nfs_getpages: page %p is dirty", m));
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			m->valid = 0;
			vm_page_set_valid(m, 0, size - toff);
			KASSERT(m->dirty == 0,
			    ("nfs_getpages: page %p is dirty", m));
		} else {
			/*
			 * Read operation was short.  If no error occurred
			 * we may have hit a zero-fill section.  We simply
			 * leave valid set to 0.
			 */
			;
		}
		if (i != ap->a_reqpage) {
			/*
			 * Whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere (it already is in the object).  Empirical
			 * results suggest that deactivating pages is best.
			 */

			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error) {
				if (m->oflags & VPO_WANTED) {
					vm_page_lock(m);
					vm_page_activate(m);
					vm_page_unlock(m);
				} else {
					vm_page_lock(m);
					vm_page_deactivate(m);
					vm_page_unlock(m);
				}
				vm_page_wakeup(m);
			} else {
				vm_page_lock(m);
				vm_page_free(m);
				vm_page_unlock(m);
			}
		}
	}
	VM_OBJECT_UNLOCK(object);
	return (0);
}
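
/*
 * A worked example of the mapping above, assuming 4 KB pages
 * (PAGE_SIZE = 4096): a getpages request of count = 16384 bytes starting
 * at page index 5 becomes npages = btoc(16384) = 4 pages and one read
 * RPC at uio_offset = IDX_TO_OFF(5) = 20480.  If that RPC returns only
 * 9000 bytes (size = 9000), pages 0 and 1 are fully valid (their nextoff
 * of 4096 and 8192 is <= 9000), page 2 is partially valid for
 * 9000 - 8192 = 808 bytes, and page 3 is left invalid as a possible
 * zero-fill or EOF section.
 */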

/*
 * Vnode op for VM putpages.
 */
int
ncl_putpages(struct vop_putpages_args *ap)
{
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int iomode, must_commit, i, error, npages, count;
	off_t offset;
	int *rtvals;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct nfsmount *nmp;
	struct nfsnode *np;
	vm_page_t *pages;

	vp = ap->a_vp;
	np = VTONFS(vp);
	td = curthread;			/* XXX */
	cred = curthread->td_ucred;	/* XXX */
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);
	offset = IDX_TO_OFF(pages[0]->pindex);

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		(void)ncl_fsinfo(nmp, vp, cred, td);
	} else
		mtx_unlock(&nmp->nm_mtx);

	mtx_lock(&np->n_mtx);
	if (newnfs_directio_enable && !newnfs_directio_allow_mmap &&
	    (np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
		mtx_unlock(&np->n_mtx);
		ncl_printf("ncl_putpages: called on non-cacheable vnode??\n");
		mtx_lock(&np->n_mtx);
	}

	for (i = 0; i < npages; i++)
		rtvals[i] = VM_PAGER_ERROR;

	/*
	 * When putting pages, do not extend file past EOF.
	 */
	if (offset + count > np->n_size) {
		count = np->n_size - offset;
		if (count < 0)
			count = 0;
	}
	mtx_unlock(&np->n_mtx);

	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = getpbuf(&ncl_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodeout);
	PCPU_ADD(cnt.v_vnodepgsout, count);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = offset;
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;

	if ((ap->a_sync & VM_PAGER_PUT_SYNC) == 0)
		iomode = NFSWRITE_UNSTABLE;
	else
		iomode = NFSWRITE_FILESYNC;

	error = ncl_writerpc(vp, &uio, cred, &iomode, &must_commit, 0);

	pmap_qremove(kva, npages);
	relpbuf(bp, &ncl_pbuf_freecnt);

	vnode_pager_undirty_pages(pages, rtvals, count - uio.uio_resid);
	if (must_commit)
		ncl_clearcommit(vp->v_mount);
	return (rtvals[0]);
}
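
/*
 * A minimal sketch of how the pageout path above is driven, for a
 * hypothetical caller whose dirty pages have already been busied by the
 * VM system:
 *
 *	struct vop_putpages_args a;
 *	int rtvals[npages];
 *
 *	a.a_vp = vp;
 *	a.a_m = pages;
 *	a.a_count = npages * PAGE_SIZE;
 *	a.a_sync = VM_PAGER_PUT_SYNC;
 *	a.a_rtvals = rtvals;
 *	(void) ncl_putpages(&a);
 *
 * With VM_PAGER_PUT_SYNC set the write RPC goes out NFSWRITE_FILESYNC;
 * without it NFSWRITE_UNSTABLE is used and a must_commit reply makes us
 * drop the mount's commit state via ncl_clearcommit().
 */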

/*
 * For nfs, cache consistency can only be maintained approximately.
 * Although RFC1094 does not specify the criteria, the following is
 * believed to be compatible with the reference port.
 * For nfs:
 * If the file's modify time on the server has changed since the
 * last read rpc or you have written to the file,
 * you may have lost data cache consistency with the
 * server, so flush all of the file's data out of the cache.
 * Then force a getattr rpc to ensure that you have up to date
 * attributes.
 * NB: This implies that cache data can be read when up to
 * NFS_ATTRTIMEO seconds out of date.  If you find that you need current
 * attributes this could be forced by setting n_attrstamp to 0 before
 * the VOP_GETATTR() call.
 */
static inline int
nfs_bioread_check_cons(struct vnode *vp, struct thread *td, struct ucred *cred)
{
	int error = 0;
	struct vattr vattr;
	struct nfsnode *np = VTONFS(vp);
	int old_lock;

	/*
	 * Grab the exclusive lock before checking whether the cache is
	 * consistent.
	 * XXX - We can make this cheaper later (by acquiring cheaper locks).
	 * But for now, this suffices.
	 */
	old_lock = ncl_upgrade_vnlock(vp);
	if (vp->v_iflag & VI_DOOMED) {
		ncl_downgrade_vnlock(vp, old_lock);
		return (EBADF);
	}

	mtx_lock(&np->n_mtx);
	if (np->n_flag & NMODIFIED) {
		mtx_unlock(&np->n_mtx);
		if (vp->v_type != VREG) {
			if (vp->v_type != VDIR)
				panic("nfs: bioread, not dir");
			ncl_invaldir(vp);
			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				goto out;
		}
		np->n_attrstamp = 0;
		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			goto out;
		mtx_lock(&np->n_mtx);
		np->n_mtime = vattr.va_mtime;
		mtx_unlock(&np->n_mtx);
	} else {
		mtx_unlock(&np->n_mtx);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return (error);
		mtx_lock(&np->n_mtx);
		if ((np->n_flag & NSIZECHANGED)
		    || (NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime))) {
			mtx_unlock(&np->n_mtx);
			if (vp->v_type == VDIR)
				ncl_invaldir(vp);
			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				goto out;
			mtx_lock(&np->n_mtx);
			np->n_mtime = vattr.va_mtime;
			np->n_flag &= ~NSIZECHANGED;
		}
		mtx_unlock(&np->n_mtx);
	}
out:
	ncl_downgrade_vnlock(vp, old_lock);
	return (error);
}
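
/*
 * A minimal sketch of the "force current attributes" idiom mentioned in
 * the consistency comment above, for a hypothetical caller that cannot
 * tolerate attributes up to NFS_ATTRTIMEO seconds stale:
 *
 *	struct vattr vattr;
 *	struct nfsnode *np = VTONFS(vp);
 *
 *	np->n_attrstamp = 0;
 *	error = VOP_GETATTR(vp, &vattr, cred);
 *
 * Zeroing n_attrstamp invalidates the cached attributes, so the
 * VOP_GETATTR() is forced over the wire instead of being answered from
 * the attribute cache.
 */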

/*
 * Vnode op for read using bio
 */
int
ncl_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
{
	struct nfsnode *np = VTONFS(vp);
	int biosize, i;
	struct buf *bp, *rabp;
	struct thread *td;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn, rabn;
	int bcount;
	int seqcount;
	int nra, error = 0, n = 0, on = 0;
	off_t tmp_off;

	KASSERT(uio->uio_rw == UIO_READ, ("ncl_read mode"));
	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0)	/* XXX VDIR cookies can be negative */
		return (EINVAL);
	td = uio->uio_td;

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		(void)ncl_fsinfo(nmp, vp, cred, td);
		mtx_lock(&nmp->nm_mtx);
	}
	if (nmp->nm_rsize == 0 || nmp->nm_readdirsize == 0)
		(void) newnfs_iosize(nmp);

	tmp_off = uio->uio_offset + uio->uio_resid;
	if (vp->v_type != VDIR &&
	    (tmp_off > nmp->nm_maxfilesize || tmp_off < uio->uio_offset)) {
		mtx_unlock(&nmp->nm_mtx);
		return (EFBIG);
	}
	mtx_unlock(&nmp->nm_mtx);

	if (newnfs_directio_enable && (ioflag & IO_DIRECT) && (vp->v_type == VREG))
		/* No caching / no readaheads.  Just read data into the user buffer */
		return (ncl_readrpc(vp, uio, cred));

	biosize = vp->v_mount->mnt_stat.f_iosize;
	seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / BKVASIZE);

	error = nfs_bioread_check_cons(vp, td, cred);
	if (error)
		return (error);

	do {
		u_quad_t nsize;

		mtx_lock(&np->n_mtx);
		nsize = np->n_size;
		mtx_unlock(&np->n_mtx);

		switch (vp->v_type) {
		case VREG:
			NFSINCRGLOBAL(newnfsstats.biocache_reads);
			lbn = uio->uio_offset / biosize;
			on = uio->uio_offset & (biosize - 1);

			/*
			 * Start the read ahead(s), as required.
			 */
			if (nmp->nm_readahead > 0) {
				for (nra = 0; nra < nmp->nm_readahead && nra < seqcount &&
				    (off_t)(lbn + 1 + nra) * biosize < nsize; nra++) {
					rabn = lbn + 1 + nra;
					if (incore(&vp->v_bufobj, rabn) == NULL) {
						rabp = nfs_getcacheblk(vp, rabn, biosize, td);
						if (!rabp) {
							error = newnfs_sigintr(nmp, td);
							return (error ? error : EINTR);
						}
						if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
							rabp->b_flags |= B_ASYNC;
							rabp->b_iocmd = BIO_READ;
							vfs_busy_pages(rabp, 0);
							if (ncl_asyncio(nmp, rabp, cred, td)) {
								rabp->b_flags |= B_INVAL;
								rabp->b_ioflags |= BIO_ERROR;
								vfs_unbusy_pages(rabp);
								brelse(rabp);
								break;
							}
						} else {
							brelse(rabp);
						}
					}
				}
			}

			/* Note that bcount is *not* DEV_BSIZE aligned. */
			bcount = biosize;
			if ((off_t)lbn * biosize >= nsize) {
				bcount = 0;
			} else if ((off_t)(lbn + 1) * biosize > nsize) {
				bcount = nsize - (off_t)lbn * biosize;
			}
			bp = nfs_getcacheblk(vp, lbn, bcount, td);

			if (!bp) {
				error = newnfs_sigintr(nmp, td);
				return (error ? error : EINTR);
			}

			/*
			 * If B_CACHE is not set, we must issue the read.  If this
			 * fails, we return an error.
			 */

			if ((bp->b_flags & B_CACHE) == 0) {
				bp->b_iocmd = BIO_READ;
				vfs_busy_pages(bp, 0);
				error = ncl_doio(vp, bp, cred, td, 0);
				if (error) {
					brelse(bp);
					return (error);
				}
			}

			/*
			 * on is the offset into the current bp.  Figure out how many
			 * bytes we can copy out of the bp.  Note that bcount is
			 * NOT DEV_BSIZE aligned.
			 *
			 * Then figure out how many bytes we can copy into the uio.
			 */

			n = 0;
			if (on < bcount)
				n = min((unsigned)(bcount - on), uio->uio_resid);
			break;
		case VLNK:
			NFSINCRGLOBAL(newnfsstats.biocache_readlinks);
			bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, td);
			if (!bp) {
				error = newnfs_sigintr(nmp, td);
				return (error ? error : EINTR);
			}
			if ((bp->b_flags & B_CACHE) == 0) {
				bp->b_iocmd = BIO_READ;
				vfs_busy_pages(bp, 0);
				error = ncl_doio(vp, bp, cred, td, 0);
				if (error) {
					bp->b_ioflags |= BIO_ERROR;
					brelse(bp);
					return (error);
				}
			}
			n = min(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
			on = 0;
			break;
		case VDIR:
			NFSINCRGLOBAL(newnfsstats.biocache_readdirs);
			if (np->n_direofoffset
			    && uio->uio_offset >= np->n_direofoffset) {
				return (0);
			}
			lbn = (uoff_t)uio->uio_offset / NFS_DIRBLKSIZ;
			on = uio->uio_offset & (NFS_DIRBLKSIZ - 1);
			bp = nfs_getcacheblk(vp, lbn, NFS_DIRBLKSIZ, td);
			if (!bp) {
				error = newnfs_sigintr(nmp, td);
				return (error ? error : EINTR);
			}
			if ((bp->b_flags & B_CACHE) == 0) {
				bp->b_iocmd = BIO_READ;
				vfs_busy_pages(bp, 0);
				error = ncl_doio(vp, bp, cred, td, 0);
				if (error) {
					brelse(bp);
				}
				while (error == NFSERR_BAD_COOKIE) {
					ncl_invaldir(vp);
					error = ncl_vinvalbuf(vp, 0, td, 1);
					/*
					 * Yuck! The directory has been modified on the
					 * server.  The only way to get the block is by
					 * reading from the beginning to get all the
					 * offset cookies.
					 *
					 * Leave the last bp intact unless there is an error.
					 * Loop back up to the while if the error is another
					 * NFSERR_BAD_COOKIE (double yuch!).
					 */
					for (i = 0; i <= lbn && !error; i++) {
						if (np->n_direofoffset
						    && (i * NFS_DIRBLKSIZ) >= np->n_direofoffset)
							return (0);
						bp = nfs_getcacheblk(vp, i, NFS_DIRBLKSIZ, td);
						if (!bp) {
							error = newnfs_sigintr(nmp, td);
							return (error ? error : EINTR);
						}
						if ((bp->b_flags & B_CACHE) == 0) {
							bp->b_iocmd = BIO_READ;
							vfs_busy_pages(bp, 0);
							error = ncl_doio(vp, bp, cred, td, 0);
							/*
							 * no error + B_INVAL == directory EOF,
							 * use the block.
							 */
							if (error == 0 && (bp->b_flags & B_INVAL))
								break;
						}
						/*
						 * An error will throw away the block and the
						 * for loop will break out.  If no error and this
						 * is not the block we want, we throw away the
						 * block and go for the next one via the for loop.
						 */
						if (error || i < lbn)
							brelse(bp);
					}
				}
				/*
				 * The above while is repeated if we hit another cookie
				 * error.  If we hit an error and it wasn't a cookie error,
				 * we give up.
				 */
				if (error)
					return (error);
			}

			/*
			 * If not eof and read aheads are enabled, start one.
			 * (You need the current block first, so that you have the
			 *  directory offset cookie of the next block.)
			 */
			if (nmp->nm_readahead > 0 &&
			    (bp->b_flags & B_INVAL) == 0 &&
			    (np->n_direofoffset == 0 ||
			    (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) &&
			    incore(&vp->v_bufobj, lbn + 1) == NULL) {
				rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, td);
				if (rabp) {
					if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
						rabp->b_flags |= B_ASYNC;
						rabp->b_iocmd = BIO_READ;
						vfs_busy_pages(rabp, 0);
						if (ncl_asyncio(nmp, rabp, cred, td)) {
							rabp->b_flags |= B_INVAL;
							rabp->b_ioflags |= BIO_ERROR;
							vfs_unbusy_pages(rabp);
							brelse(rabp);
						}
					} else {
						brelse(rabp);
					}
				}
			}
			/*
			 * Unlike VREG files, whose buffer size ( bp->b_bcount ) is
			 * chopped for the EOF condition, we cannot tell how large
			 * NFS directories are going to be until we hit EOF.  So
			 * an NFS directory buffer is *not* chopped to its EOF.  Now,
			 * it just so happens that b_resid will effectively chop it
			 * to EOF.  *BUT* this information is lost if the buffer goes
			 * away and is reconstituted into a B_CACHE state ( due to
			 * being VMIO ) later.  So we keep track of the directory eof
			 * in np->n_direofoffset and chop it off as an extra step
			 * right here.
			 */
			n = lmin(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid - on);
			if (np->n_direofoffset && n > np->n_direofoffset - uio->uio_offset)
				n = np->n_direofoffset - uio->uio_offset;
			break;
		default:
			ncl_printf(" ncl_bioread: type %x unexpected\n", vp->v_type);
			bp = NULL;
			break;
		}

		if (n > 0) {
			error = uiomove(bp->b_data + on, (int)n, uio);
		}
		if (vp->v_type == VLNK)
			n = 0;
		if (bp != NULL)
			brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
	return (error);
}
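
/*
 * A worked example of the VREG block arithmetic above, assuming
 * biosize = 8192: a read at uio_offset = 20000 of a 21000 byte file
 * gives lbn = 20000 / 8192 = 2 and on = 20000 & 8191 = 3616.  Since
 * (lbn + 1) * biosize = 24576 > nsize = 21000, the buffer is chopped to
 * bcount = 21000 - 2 * 8192 = 4616 bytes, and at most
 * n = bcount - on = 1000 bytes can be copied out to the caller on this
 * pass of the loop.
 */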

/*
 * The NFS write path cannot handle iovecs with len > 1.  So we need to
 * break up iovecs accordingly (restricting them to wsize).
 * For the SYNC case, we can do this with 1 copy (user buffer -> mbuf).
 * For the ASYNC case, 2 copies are needed.  The first a copy from the
 * user buffer to a staging buffer and then a second copy from the staging
 * buffer to mbufs.  This can be optimized by copying from the user buffer
 * directly into mbufs and passing the chain down, but that requires a
 * fair amount of re-working of the relevant codepaths (and can be done
 * later).
 */
static int
nfs_directio_write(struct vnode *vp, struct uio *uiop, struct ucred *cred,
    int ioflag)
{
	int error;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	struct thread *td = uiop->uio_td;
	int size;
	int wsize;

	mtx_lock(&nmp->nm_mtx);
	wsize = nmp->nm_wsize;
	mtx_unlock(&nmp->nm_mtx);
	if (ioflag & IO_SYNC) {
		int iomode, must_commit;
		struct uio uio;
		struct iovec iov;
do_sync:
		while (uiop->uio_resid > 0) {
			size = min(uiop->uio_resid, wsize);
			size = min(uiop->uio_iov->iov_len, size);
			iov.iov_base = uiop->uio_iov->iov_base;
			iov.iov_len = size;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = uiop->uio_offset;
			uio.uio_resid = size;
			uio.uio_segflg = UIO_USERSPACE;
			uio.uio_rw = UIO_WRITE;
			uio.uio_td = td;
			iomode = NFSWRITE_FILESYNC;
			error = ncl_writerpc(vp, &uio, cred, &iomode,
			    &must_commit, 0);
			KASSERT((must_commit == 0),
			    ("ncl_directio_write: Did not commit write"));
			if (error)
				return (error);
			uiop->uio_offset += size;
			uiop->uio_resid -= size;
			if (uiop->uio_iov->iov_len <= size) {
				uiop->uio_iovcnt--;
				uiop->uio_iov++;
			} else {
				uiop->uio_iov->iov_base =
				    (char *)uiop->uio_iov->iov_base + size;
				uiop->uio_iov->iov_len -= size;
			}
		}
	} else {
		struct uio *t_uio;
		struct iovec *t_iov;
		struct buf *bp;

		/*
		 * Break up the write into blocksize chunks and hand these
		 * over to nfsiod's for write back.
		 * Unfortunately, this incurs a copy of the data, since the
		 * user could modify the buffer before the write is
		 * initiated.
		 *
		 * The obvious optimization here is that one of the 2 copies
		 * in the async write path can be eliminated by copying the
		 * data here directly into mbufs and passing the mbuf chain
		 * down.  But that will require a fair amount of re-working
		 * of the code and can be done if there's enough interest
		 * in NFS directio access.
		 */
		while (uiop->uio_resid > 0) {
			size = min(uiop->uio_resid, wsize);
			size = min(uiop->uio_iov->iov_len, size);
			bp = getpbuf(&ncl_pbuf_freecnt);
			t_uio = malloc(sizeof(struct uio), M_NFSDIRECTIO, M_WAITOK);
			t_iov = malloc(sizeof(struct iovec), M_NFSDIRECTIO, M_WAITOK);
			t_iov->iov_base = malloc(size, M_NFSDIRECTIO, M_WAITOK);
			t_iov->iov_len = size;
			t_uio->uio_iov = t_iov;
			t_uio->uio_iovcnt = 1;
			t_uio->uio_offset = uiop->uio_offset;
			t_uio->uio_resid = size;
			t_uio->uio_segflg = UIO_SYSSPACE;
			t_uio->uio_rw = UIO_WRITE;
			t_uio->uio_td = td;
			bcopy(uiop->uio_iov->iov_base, t_iov->iov_base, size);
			bp->b_flags |= B_DIRECT;
			bp->b_iocmd = BIO_WRITE;
			if (cred != NOCRED) {
				crhold(cred);
				bp->b_wcred = cred;
			} else
				bp->b_wcred = NOCRED;
			bp->b_caller1 = (void *)t_uio;
			bp->b_vp = vp;
			error = ncl_asyncio(nmp, bp, NOCRED, td);
			if (error) {
				free(t_iov->iov_base, M_NFSDIRECTIO);
				free(t_iov, M_NFSDIRECTIO);
				free(t_uio, M_NFSDIRECTIO);
				bp->b_vp = NULL;
				relpbuf(bp, &ncl_pbuf_freecnt);
				if (error == EINTR)
					return (error);
				goto do_sync;
			}
			uiop->uio_offset += size;
			uiop->uio_resid -= size;
			if (uiop->uio_iov->iov_len <= size) {
				uiop->uio_iovcnt--;
				uiop->uio_iov++;
			} else {
				uiop->uio_iov->iov_base =
				    (char *)uiop->uio_iov->iov_base + size;
				uiop->uio_iov->iov_len -= size;
			}
		}
	}
	return (0);
}
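
/*
 * A worked example of the chunking above, assuming nm_wsize = 32768: a
 * single 100000 byte user iovec goes out as write RPCs of 32768, 32768,
 * 32768 and 1696 bytes, advancing iov_base/iov_len in step with
 * uio_offset/uio_resid.  In the IO_SYNC case each chunk is one
 * NFSWRITE_FILESYNC RPC taken straight from the user buffer; in the
 * async case each chunk is first copied into a malloc'd staging buffer
 * and handed to an nfsiod, falling back to the synchronous loop if no
 * daemon will take the request (unless the failure was EINTR, which is
 * returned to the caller).
 */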

/*
 * Vnode op for write using bio
 */
int
ncl_write(struct vop_write_args *ap)
{
	int biosize;
	struct uio *uio = ap->a_uio;
	struct thread *td = uio->uio_td;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct ucred *cred = ap->a_cred;
	int ioflag = ap->a_ioflag;
	struct buf *bp;
	struct vattr vattr;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn;
	int bcount;
	int n, on, error = 0;
	off_t tmp_off;

	KASSERT(uio->uio_rw == UIO_WRITE, ("ncl_write mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("ncl_write proc"));
	if (vp->v_type != VREG)
		return (EIO);
	mtx_lock(&np->n_mtx);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		mtx_unlock(&np->n_mtx);
		return (np->n_error);
	} else
		mtx_unlock(&np->n_mtx);
	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		(void)ncl_fsinfo(nmp, vp, cred, td);
		mtx_lock(&nmp->nm_mtx);
	}
	if (nmp->nm_wsize == 0)
		(void) newnfs_iosize(nmp);
	mtx_unlock(&nmp->nm_mtx);

	/*
	 * Synchronously flush pending buffers if we are in synchronous
	 * mode or if we are appending.
	 */
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		mtx_lock(&np->n_mtx);
		if (np->n_flag & NMODIFIED) {
			mtx_unlock(&np->n_mtx);
#ifdef notyet /* Needs matching nonblock semantics elsewhere, too. */
			/*
			 * Require non-blocking, synchronous writes to
			 * dirty files to inform the program it needs
			 * to fsync(2) explicitly.
			 */
			if (ioflag & IO_NDELAY)
				return (EAGAIN);
#endif
flush_and_restart:
			np->n_attrstamp = 0;
			KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				return (error);
		} else
			mtx_unlock(&np->n_mtx);
	}

	/*
	 * If IO_APPEND then load uio_offset.  We restart here if we cannot
	 * get the append lock.
	 */
	if (ioflag & IO_APPEND) {
		np->n_attrstamp = 0;
		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return (error);
		mtx_lock(&np->n_mtx);
		uio->uio_offset = np->n_size;
		mtx_unlock(&np->n_mtx);
	}

	if (uio->uio_offset < 0)
		return (EINVAL);
	tmp_off = uio->uio_offset + uio->uio_resid;
	if (tmp_off > nmp->nm_maxfilesize || tmp_off < uio->uio_offset)
		return (EFBIG);
	if (uio->uio_resid == 0)
		return (0);

	if (newnfs_directio_enable && (ioflag & IO_DIRECT) && vp->v_type == VREG)
		return (nfs_directio_write(vp, uio, cred, ioflag));

	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	if (vn_rlimit_fsize(vp, uio, td))
		return (EFBIG);

	biosize = vp->v_mount->mnt_stat.f_iosize;
	/*
	 * Find all of this file's B_NEEDCOMMIT buffers.  If our writes
	 * would exceed the local maximum per-file write commit size when
	 * combined with those, we must decide whether to flush,
	 * go synchronous, or return error.  We don't bother checking
	 * IO_UNIT -- we just make all writes atomic anyway, as there's
	 * no point optimizing for something that really won't ever happen.
	 */
	if (!(ioflag & IO_SYNC)) {
		int nflag, needrestart = 0;

		mtx_lock(&np->n_mtx);
		nflag = np->n_flag;
		mtx_unlock(&np->n_mtx);
		if (nmp->nm_wcommitsize < uio->uio_resid) {
			/*
			 * If this request could not possibly be completed
			 * without exceeding the maximum outstanding write
			 * commit size, see if we can convert it into a
			 * synchronous write operation.
			 */
			if (ioflag & IO_NDELAY)
				return (EAGAIN);
			ioflag |= IO_SYNC;
			if (nflag & NMODIFIED)
				needrestart = 1;
		} else if (nflag & NMODIFIED) {
			int wouldcommit = 0;

			BO_LOCK(&vp->v_bufobj);
			if (vp->v_bufobj.bo_dirty.bv_cnt != 0) {
				TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd,
				    b_bobufs) {
					if (bp->b_flags & B_NEEDCOMMIT)
						wouldcommit += bp->b_bcount;
				}
			}
			BO_UNLOCK(&vp->v_bufobj);
			/*
			 * Since we're not operating synchronously and
			 * bypassing the buffer cache, we are in a commit
			 * and holding all of these buffers whether
			 * transmitted or not.  If not limited, this
			 * will lead to the buffer cache deadlocking,
			 * as no one else can flush our uncommitted buffers.
			 */
			wouldcommit += uio->uio_resid;
			/*
			 * If we would initially exceed the maximum
			 * outstanding write commit size, flush and restart.
			 */
			if (wouldcommit > nmp->nm_wcommitsize)
				needrestart = 1;
		}
		if (needrestart)
			goto flush_and_restart;
	}

	do {
		NFSINCRGLOBAL(newnfsstats.biocache_writes);
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize - 1);
		n = min((unsigned)(biosize - on), uio->uio_resid);
again:
		/*
		 * Handle direct append and file extension cases, calculate
		 * unaligned buffer size.
		 */
		mtx_lock(&np->n_mtx);
		if (uio->uio_offset == np->n_size && n) {
			mtx_unlock(&np->n_mtx);
			/*
			 * Get the buffer (in its pre-append state to maintain
			 * B_CACHE if it was previously set).  Resize the
			 * nfsnode after we have locked the buffer to prevent
			 * readers from reading garbage.
			 */
			bcount = on;
			bp = nfs_getcacheblk(vp, lbn, bcount, td);

			if (bp != NULL) {
				long save;

				mtx_lock(&np->n_mtx);
				np->n_size = uio->uio_offset + n;
				np->n_flag |= NMODIFIED;
				vnode_pager_setsize(vp, np->n_size);
				mtx_unlock(&np->n_mtx);

				save = bp->b_flags & B_CACHE;
				bcount += n;
				allocbuf(bp, bcount);
				bp->b_flags |= save;
			}
		} else {
			/*
			 * Obtain the locked cache block first, and then
			 * adjust the file's size as appropriate.
			 */
			bcount = on + n;
			if ((off_t)lbn * biosize + bcount < np->n_size) {
				if ((off_t)(lbn + 1) * biosize < np->n_size)
					bcount = biosize;
				else
					bcount = np->n_size - (off_t)lbn * biosize;
			}
			mtx_unlock(&np->n_mtx);
			bp = nfs_getcacheblk(vp, lbn, bcount, td);
			mtx_lock(&np->n_mtx);
			if (uio->uio_offset + n > np->n_size) {
				np->n_size = uio->uio_offset + n;
				np->n_flag |= NMODIFIED;
				vnode_pager_setsize(vp, np->n_size);
			}
			mtx_unlock(&np->n_mtx);
		}

		if (!bp) {
			error = newnfs_sigintr(nmp, td);
			if (!error)
				error = EINTR;
			break;
		}

		/*
		 * Issue a READ if B_CACHE is not set.  In special-append
		 * mode, B_CACHE is based on the buffer prior to the write
		 * op and is typically set, avoiding the read.  If a read
		 * is required in special append mode, the server will
		 * probably send us a short-read since we extended the file
		 * on our end, resulting in b_resid == 0 and, thusly,
		 * B_CACHE getting set.
		 *
		 * We can also avoid issuing the read if the write covers
		 * the entire buffer.  We have to make sure the buffer state
		 * is reasonable in this case since we will not be initiating
		 * I/O.  See the comments in kern/vfs_bio.c's getblk() for
		 * more information.
		 *
		 * B_CACHE may also be set due to the buffer being cached
		 * normally.
		 */

		if (on == 0 && n == bcount) {
			bp->b_flags |= B_CACHE;
			bp->b_flags &= ~B_INVAL;
			bp->b_ioflags &= ~BIO_ERROR;
		}

		if ((bp->b_flags & B_CACHE) == 0) {
			bp->b_iocmd = BIO_READ;
			vfs_busy_pages(bp, 0);
			error = ncl_doio(vp, bp, cred, td, 0);
			if (error) {
				brelse(bp);
				break;
			}
		}
		if (bp->b_wcred == NOCRED)
			bp->b_wcred = crhold(cred);
		mtx_lock(&np->n_mtx);
		np->n_flag |= NMODIFIED;
		mtx_unlock(&np->n_mtx);

		/*
		 * If dirtyend exceeds file size, chop it down.  This should
		 * not normally occur but there is an append race where it
		 * might occur XXX, so we log it.
		 *
		 * If the chopping creates a reverse-indexed or degenerate
		 * situation with dirtyoff/end, we 0 both of them.
		 */

		if (bp->b_dirtyend > bcount) {
			ncl_printf("NFS append race @%lx:%d\n",
			    (long)bp->b_blkno * DEV_BSIZE,
			    bp->b_dirtyend - bcount);
			bp->b_dirtyend = bcount;
		}

		if (bp->b_dirtyoff >= bp->b_dirtyend)
			bp->b_dirtyoff = bp->b_dirtyend = 0;

		/*
		 * If the new write will leave a contiguous dirty
		 * area, just update the b_dirtyoff and b_dirtyend,
		 * otherwise force a write rpc of the old dirty area.
		 *
		 * While it is possible to merge discontiguous writes due to
		 * our having a B_CACHE buffer ( and thus valid read data
		 * for the hole), we don't because it could lead to
		 * significant cache coherency problems with multiple clients,
		 * especially if locking is implemented later on.
		 *
		 * As an optimization we could theoretically maintain
		 * a linked list of discontinuous areas, but we would still
		 * have to commit them separately so there isn't much
		 * advantage to it except perhaps a bit of asynchronization.
		 */
1163
1164 if (bp->b_dirtyend > 0 &&
1165 (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
1166 if (bwrite(bp) == EINTR) {
1167 error = EINTR;
1168 break;
1169 }
1170 goto again;
1171 }
1172
1173 error = uiomove((char *)bp->b_data + on, n, uio);
1174
1175 /*
1176 * Since this block is being modified, it must be written
1177 * again and not just committed. Since write clustering does
1178 * not work for the stage 1 data write, only the stage 2
1179 * commit rpc, we have to clear B_CLUSTEROK as well.
1180 */
1181 bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
1182
1183 if (error) {
1184 bp->b_ioflags |= BIO_ERROR;
1185 brelse(bp);
1186 break;
1187 }
1188
1189 /*
1190 * Only update dirtyoff/dirtyend if not a degenerate
1191 * condition.
1192 */
1193 if (n) {
1194 if (bp->b_dirtyend > 0) {
1195 bp->b_dirtyoff = min(on, bp->b_dirtyoff);
1196 bp->b_dirtyend = max((on + n), bp->b_dirtyend);
1197 } else {
1198 bp->b_dirtyoff = on;
1199 bp->b_dirtyend = on + n;
1200 }
1201 vfs_bio_set_valid(bp, on, n);
1202 }
1203
1204 /*
1205 * If IO_SYNC do bwrite().
1206 *
1207 * IO_INVAL appears to be unused. The idea appears to be
1208 * to turn off caching in this case. Very odd. XXX
1209 */
1210 if ((ioflag & IO_SYNC)) {
1211 if (ioflag & IO_INVAL)
1212 bp->b_flags |= B_NOCACHE;
1213 error = bwrite(bp);
1214 if (error)
1215 break;
1216 } else if ((n + on) == biosize) {
1217 bp->b_flags |= B_ASYNC;
1218 (void) ncl_writebp(bp, 0, NULL);
1219 } else {
1220 bdwrite(bp);
1221 }
1222 } while (uio->uio_resid > 0 && n > 0);
1223
1224 return (error);
1225 }
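
/*
 * A worked example of the dirty-region logic above, assuming
 * biosize = 8192: if a buffer already has b_dirtyoff = 0 and
 * b_dirtyend = 512, a new write covering on = 4096, n = 1024 is
 * discontiguous (on > b_dirtyend), so the old dirty area is pushed out
 * with bwrite() and the write restarts at "again:".  A new write
 * covering on = 256, n = 1024 instead merges to
 * b_dirtyoff = min(256, 0) = 0 and b_dirtyend = max(1280, 512) = 1280.
 */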

/*
 * Get an nfs cache block.
 *
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy.  If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 *
 * The caller must carefully deal with the possible B_INVAL state of
 * the buffer.  ncl_doio() clears B_INVAL (and ncl_asyncio() clears it
 * indirectly), so synchronous reads can be issued without worrying about
 * the B_INVAL state.  We have to be a little more careful when dealing
 * with writes (see comments in nfs_write()) when extending a file past
 * its EOF.
 */
static struct buf *
nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct thread *td)
{
	struct buf *bp;
	struct mount *mp;
	struct nfsmount *nmp;

	mp = vp->v_mount;
	nmp = VFSTONFS(mp);

	if (nmp->nm_flag & NFSMNT_INT) {
		sigset_t oldset;

		newnfs_set_sigmask(td, &oldset);
		bp = getblk(vp, bn, size, NFS_PCATCH, 0, 0);
		newnfs_restore_sigmask(td, &oldset);
		while (bp == NULL) {
			if (newnfs_sigintr(nmp, td))
				return (NULL);
			bp = getblk(vp, bn, size, 0, 2 * hz, 0);
		}
	} else {
		bp = getblk(vp, bn, size, 0, 0, 0);
	}

	if (vp->v_type == VREG) {
		int biosize;

		biosize = mp->mnt_stat.f_iosize;
		bp->b_blkno = bn * (biosize / DEV_BSIZE);
	}
	return (bp);
}
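
/*
 * A worked example of the b_blkno mapping above, assuming
 * biosize = f_iosize = 8192 and DEV_BSIZE = 512: logical block bn = 3 of
 * a regular file maps to b_blkno = 3 * (8192 / 512) = 48, i.e. the
 * buffer's position in 512 byte device blocks, which ncl_doio() later
 * multiplies by DEV_BSIZE to recover the byte offset 24576.
 */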

/*
 * Flush and invalidate all dirty buffers.  If another process is already
 * doing the flush, just wait for completion.
 */
int
ncl_vinvalbuf(struct vnode *vp, int flags, struct thread *td, int intrflg)
{
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, slpflag, slptimeo;
	int old_lock = 0;

	ASSERT_VOP_LOCKED(vp, "ncl_vinvalbuf");

	if ((nmp->nm_flag & NFSMNT_INT) == 0)
		intrflg = 0;
	if ((nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF))
		intrflg = 1;
	if (intrflg) {
		slpflag = NFS_PCATCH;
		slptimeo = 2 * hz;
	} else {
		slpflag = 0;
		slptimeo = 0;
	}

	old_lock = ncl_upgrade_vnlock(vp);
	if (vp->v_iflag & VI_DOOMED) {
		/*
		 * Since vgonel() uses the generic vinvalbuf() to flush
		 * dirty buffers and it does not call this function, it
		 * is safe to just return OK when VI_DOOMED is set.
		 */
		ncl_downgrade_vnlock(vp, old_lock);
		return (0);
	}

	/*
	 * Now, flush as required.
	 */
	if ((flags & V_SAVE) && (vp->v_bufobj.bo_object != NULL)) {
		VM_OBJECT_LOCK(vp->v_bufobj.bo_object);
		vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
		VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object);
		/*
		 * If the page clean was interrupted, fail the invalidation.
		 * Not doing so, we run the risk of losing dirty pages in the
		 * vinvalbuf() call below.
		 */
		if (intrflg && (error = newnfs_sigintr(nmp, td)))
			goto out;
	}

	error = vinvalbuf(vp, flags, slpflag, 0);
	while (error) {
		if (intrflg && (error = newnfs_sigintr(nmp, td)))
			goto out;
		error = vinvalbuf(vp, flags, 0, slptimeo);
	}
	mtx_lock(&np->n_mtx);
	if (np->n_directio_asyncwr == 0)
		np->n_flag &= ~NMODIFIED;
	mtx_unlock(&np->n_mtx);
out:
	ncl_downgrade_vnlock(vp, old_lock);
	return (error);
}
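
/*
 * A minimal usage sketch for the flush above, as a hypothetical caller
 * that must push back and then discard all cached data would issue it
 * (the vnode lock must already be held, per the ASSERT_VOP_LOCKED
 * above):
 *
 *	error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
 *
 * V_SAVE writes dirty buffers back before invalidating them, and
 * intrflg = 1 lets a pending signal on an interruptible (NFSMNT_INT)
 * mount abort the wait instead of hanging on a dead server.
 */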

/*
 * Initiate asynchronous I/O.  Return an error if no nfsiods are available.
 * This is mainly to avoid queueing async I/O requests when the nfsiods
 * are all hung on a dead server.
 *
 * Note: ncl_asyncio() does not clear (BIO_ERROR|B_INVAL) but when the bp
 * is eventually dequeued by the async daemon, ncl_doio() *will*.
 */
int
ncl_asyncio(struct nfsmount *nmp, struct buf *bp, struct ucred *cred, struct thread *td)
{
	int iod;
	int gotiod;
	int slpflag = 0;
	int slptimeo = 0;
	int error, error2;

	/*
	 * Commits are usually short and sweet so let's save some CPU and
	 * leave the async daemons for more important rpc's (such as reads
	 * and writes).
	 */
	mtx_lock(&ncl_iod_mutex);
	if (bp->b_iocmd == BIO_WRITE && (bp->b_flags & B_NEEDCOMMIT) &&
	    (nmp->nm_bufqiods > ncl_numasync / 2)) {
		mtx_unlock(&ncl_iod_mutex);
		return (EIO);
	}
again:
	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = NFS_PCATCH;
	gotiod = FALSE;

	/*
	 * Find a free iod to process this request.
	 */
	for (iod = 0; iod < ncl_numasync; iod++)
		if (ncl_iodwant[iod] == NFSIOD_AVAILABLE) {
			gotiod = TRUE;
			break;
		}

	/*
	 * Try to create one if none are free.
	 */
	if (!gotiod)
		ncl_nfsiodnew();
	else {
		/*
		 * Found one, so wake it up and tell it which
		 * mount to process.
		 */
		NFS_DPF(ASYNCIO, ("ncl_asyncio: waking iod %d for mount %p\n",
		    iod, nmp));
		ncl_iodwant[iod] = NFSIOD_NOT_AVAILABLE;
		ncl_iodmount[iod] = nmp;
		nmp->nm_bufqiods++;
		wakeup(&ncl_iodwant[iod]);
	}

	/*
	 * If none are free, we may already have an iod working on this mount
	 * point.  If so, it will process our request.
	 */
	if (!gotiod) {
		if (nmp->nm_bufqiods > 0) {
			NFS_DPF(ASYNCIO,
			    ("ncl_asyncio: %d iods are already processing mount %p\n",
			    nmp->nm_bufqiods, nmp));
			gotiod = TRUE;
		}
	}

	/*
	 * If we have an iod which can process the request, then queue
	 * the buffer.
	 */
	if (gotiod) {
		/*
		 * Ensure that the queue never grows too large.  We still want
		 * to asynchronize so we block rather than return EIO.
		 */
		while (nmp->nm_bufqlen >= 2 * ncl_numasync) {
			NFS_DPF(ASYNCIO,
			    ("ncl_asyncio: waiting for mount %p queue to drain\n", nmp));
			nmp->nm_bufqwant = TRUE;
			error = newnfs_msleep(td, &nmp->nm_bufq,
			    &ncl_iod_mutex, slpflag | PRIBIO, "nfsaio",
			    slptimeo);
			if (error) {
				error2 = newnfs_sigintr(nmp, td);
				if (error2) {
					mtx_unlock(&ncl_iod_mutex);
					return (error2);
				}
				if (slpflag == NFS_PCATCH) {
					slpflag = 0;
					slptimeo = 2 * hz;
				}
			}
			/*
			 * We might have lost our iod while sleeping,
			 * so check and loop if necessary.
			 */
			goto again;
		}

		/* We might have lost our nfsiod */
		if (nmp->nm_bufqiods == 0) {
			NFS_DPF(ASYNCIO,
			    ("ncl_asyncio: no iods after mount %p queue was drained, looping\n", nmp));
			goto again;
		}

		if (bp->b_iocmd == BIO_READ) {
			if (bp->b_rcred == NOCRED && cred != NOCRED)
				bp->b_rcred = crhold(cred);
		} else {
			if (bp->b_wcred == NOCRED && cred != NOCRED)
				bp->b_wcred = crhold(cred);
		}

		if (bp->b_flags & B_REMFREE)
			bremfreef(bp);
		BUF_KERNPROC(bp);
		TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
		nmp->nm_bufqlen++;
		if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
			mtx_lock(&(VTONFS(bp->b_vp))->n_mtx);
			VTONFS(bp->b_vp)->n_flag |= NMODIFIED;
			VTONFS(bp->b_vp)->n_directio_asyncwr++;
			mtx_unlock(&(VTONFS(bp->b_vp))->n_mtx);
		}
		mtx_unlock(&ncl_iod_mutex);
		return (0);
	}

	mtx_unlock(&ncl_iod_mutex);

	/*
	 * All the iods are busy on other mounts, so return EIO to
	 * force the caller to process the i/o synchronously.
	 */
	NFS_DPF(ASYNCIO, ("ncl_asyncio: no iods available, i/o is synchronous\n"));
	return (EIO);
}
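
/*
 * A worked example of the queue bound above: with ncl_numasync = 20
 * daemons, a mount's async queue may grow to 2 * ncl_numasync = 40
 * buffers before producers are put to sleep on nm_bufq ("nfsaio") until
 * an iod drains it.  Callers that get EIO back either fall back to
 * synchronous I/O (as nfs_directio_write()'s do_sync loop does) or drop
 * the optimistic request entirely (as the read-ahead paths in
 * ncl_bioread() do by brelse()ing the buffer).
 */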

void
ncl_doio_directwrite(struct buf *bp)
{
	int iomode, must_commit;
	struct uio *uiop = (struct uio *)bp->b_caller1;
	char *iov_base = uiop->uio_iov->iov_base;

	iomode = NFSWRITE_FILESYNC;
	uiop->uio_td = NULL;	/* NULL since we're in nfsiod */
	ncl_writerpc(bp->b_vp, uiop, bp->b_wcred, &iomode, &must_commit, 0);
	KASSERT((must_commit == 0), ("ncl_doio_directwrite: Did not commit write"));
	free(iov_base, M_NFSDIRECTIO);
	free(uiop->uio_iov, M_NFSDIRECTIO);
	free(uiop, M_NFSDIRECTIO);
	if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
		struct nfsnode *np = VTONFS(bp->b_vp);

		mtx_lock(&np->n_mtx);
		np->n_directio_asyncwr--;
		if (np->n_directio_asyncwr == 0) {
			np->n_flag &= ~NMODIFIED;
			if ((np->n_flag & NFSYNCWAIT)) {
				np->n_flag &= ~NFSYNCWAIT;
				wakeup((caddr_t)&np->n_directio_asyncwr);
			}
		}
		mtx_unlock(&np->n_mtx);
	}
	bp->b_vp = NULL;
	relpbuf(bp, &ncl_pbuf_freecnt);
}

/*
 * Do an I/O operation to/from a cache block.  This may be called
 * synchronously or from an nfsiod.
 */
int
ncl_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td,
    int called_from_strategy)
{
	struct uio *uiop;
	struct nfsnode *np;
	struct nfsmount *nmp;
	int error = 0, iomode, must_commit = 0;
	struct uio uio;
	struct iovec io;
	struct proc *p = td ? td->td_proc : NULL;
	uint8_t iocmd;

	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);
	uiop = &uio;
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = td;

	/*
	 * clear BIO_ERROR and B_INVAL state prior to initiating the I/O.  We
	 * do this here so we do not have to do it in all the code that
	 * calls us.
	 */
	bp->b_flags &= ~B_INVAL;
	bp->b_ioflags &= ~BIO_ERROR;

	KASSERT(!(bp->b_flags & B_DONE), ("ncl_doio: bp %p already marked done", bp));
	iocmd = bp->b_iocmd;
	if (iocmd == BIO_READ) {
		io.iov_len = uiop->uio_resid = bp->b_bcount;
		io.iov_base = bp->b_data;
		uiop->uio_rw = UIO_READ;

		switch (vp->v_type) {
		case VREG:
			uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
			NFSINCRGLOBAL(newnfsstats.read_bios);
			error = ncl_readrpc(vp, uiop, cr);

			if (!error) {
				if (uiop->uio_resid) {
					/*
					 * If we had a short read with no error, we must have
					 * hit a file hole.  We should zero-fill the remainder.
					 * This can also occur if the server hits the file EOF.
					 *
					 * Holes used to be able to occur due to pending
					 * writes, but that is not possible any longer.
					 */
					int nread = bp->b_bcount - uiop->uio_resid;
					int left = uiop->uio_resid;

					if (left > 0)
						bzero((char *)bp->b_data + nread, left);
					uiop->uio_resid = 0;
				}
			}
			/* ASSERT_VOP_LOCKED(vp, "ncl_doio"); */
			if (p && (vp->v_vflag & VV_TEXT)) {
				mtx_lock(&np->n_mtx);
				if (NFS_TIMESPEC_COMPARE(&np->n_mtime, &np->n_vattr.na_mtime)) {
					mtx_unlock(&np->n_mtx);
					PROC_LOCK(p);
					killproc(p, "text file modification");
					PROC_UNLOCK(p);
				} else
					mtx_unlock(&np->n_mtx);
			}
			break;
		case VLNK:
			uiop->uio_offset = (off_t)0;
			NFSINCRGLOBAL(newnfsstats.readlink_bios);
			error = ncl_readlinkrpc(vp, uiop, cr);
			break;
		case VDIR:
			NFSINCRGLOBAL(newnfsstats.readdir_bios);
			uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ;
			if ((nmp->nm_flag & NFSMNT_RDIRPLUS) != 0) {
				error = ncl_readdirplusrpc(vp, uiop, cr, td);
				if (error == NFSERR_NOTSUPP)
					nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
			}
			if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
				error = ncl_readdirrpc(vp, uiop, cr, td);
			/*
			 * end-of-directory sets B_INVAL but does not generate an
			 * error.
			 */
			if (error == 0 && uiop->uio_resid == bp->b_bcount)
				bp->b_flags |= B_INVAL;
			break;
		default:
			ncl_printf("ncl_doio: type %x unexpected\n", vp->v_type);
			break;
		}
		if (error) {
			bp->b_ioflags |= BIO_ERROR;
			bp->b_error = error;
		}
	} else {
		/*
		 * If we only need to commit, try to commit
		 */
		if (bp->b_flags & B_NEEDCOMMIT) {
			int retv;
			off_t off;

			off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
			retv = ncl_commit(vp, off, bp->b_dirtyend - bp->b_dirtyoff,
			    bp->b_wcred, td);
			if (retv == 0) {
				bp->b_dirtyoff = bp->b_dirtyend = 0;
				bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
				bp->b_resid = 0;
				bufdone(bp);
				return (0);
			}
			if (retv == NFSERR_STALEWRITEVERF) {
				ncl_clearcommit(vp->v_mount);
			}
		}

		/*
		 * Setup for actual write
		 */
		mtx_lock(&np->n_mtx);
		if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > np->n_size)
			bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * DEV_BSIZE;
		mtx_unlock(&np->n_mtx);

		if (bp->b_dirtyend > bp->b_dirtyoff) {
			io.iov_len = uiop->uio_resid = bp->b_dirtyend
			    - bp->b_dirtyoff;
			uiop->uio_offset = (off_t)bp->b_blkno * DEV_BSIZE
			    + bp->b_dirtyoff;
			io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
			uiop->uio_rw = UIO_WRITE;
			NFSINCRGLOBAL(newnfsstats.write_bios);

			if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == B_ASYNC)
				iomode = NFSWRITE_UNSTABLE;
			else
				iomode = NFSWRITE_FILESYNC;

			error = ncl_writerpc(vp, uiop, cr, &iomode, &must_commit,
			    called_from_strategy);

			/*
			 * When setting B_NEEDCOMMIT also set B_CLUSTEROK to try
			 * to cluster the buffers needing commit.  This will allow
			 * the system to submit a single commit rpc for the whole
			 * cluster.  We can do this even if the buffer is not 100%
			 * dirty (relative to the NFS blocksize), so we optimize the
			 * append-to-file-case.
			 *
			 * (when clearing B_NEEDCOMMIT, B_CLUSTEROK must also be
			 * cleared because write clustering only works for commit
			 * rpc's, not for the data portion of the write).
			 */

			if (!error && iomode == NFSWRITE_UNSTABLE) {
				bp->b_flags |= B_NEEDCOMMIT;
				if (bp->b_dirtyoff == 0
				    && bp->b_dirtyend == bp->b_bcount)
					bp->b_flags |= B_CLUSTEROK;
			} else {
				bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
			}

			/*
			 * For an interrupted write, the buffer is still valid
			 * and the write hasn't been pushed to the server yet,
			 * so we can't set BIO_ERROR and report the interruption
			 * by setting B_EINTR.  For the B_ASYNC case, B_EINTR
			 * is not relevant, so the rpc attempt is essentially
			 * a noop.  For the case of a V3 write rpc not being
			 * committed to stable storage, the block is still
			 * dirty and requires either a commit rpc or another
			 * write rpc with iomode == NFSV3WRITE_FILESYNC before
			 * the block is reused.  This is indicated by setting
			 * the B_DELWRI and B_NEEDCOMMIT flags.
			 *
			 * EIO is returned by ncl_writerpc() to indicate a recoverable
			 * write error and is handled as above, except that
			 * B_EINTR isn't set.  One cause of this is a stale stateid
			 * error for the RPC that indicates recovery is required,
			 * when called with called_from_strategy != 0.
			 *
			 * If the buffer is marked B_PAGING, it does not reside on
			 * the vp's paging queues so we cannot call bdirty().  The
			 * bp in this case is not an NFS cache block so we should
			 * be safe. XXX
			 *
1721 *
1722 * The logic below breaks up errors into recoverable and
1723 * unrecoverable. For the former, we clear B_INVAL|B_NOCACHE
1724 * and keep the buffer around for potential write retries.
1725 * For the latter (eg ESTALE), we toss the buffer away (B_INVAL)
1726 * and save the error in the nfsnode. This is less than ideal
1727 * but necessary. Keeping such buffers around could potentially
1728 * cause buffer exhaustion eventually (they can never be written
1729 * out, so will get constantly be re-dirtied). It also causes
1730 * all sorts of vfs panics. For non-recoverable write errors,
1731 * also invalidate the attrcache, so we'll be forced to go over
1732 * the wire for this object, returning an error to user on next
1733 * call (most of the time).
1734 */
			if (error == EINTR || error == EIO || error == ETIMEDOUT
			    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
				int s;

				s = splbio();
				bp->b_flags &= ~(B_INVAL|B_NOCACHE);
				if ((bp->b_flags & B_PAGING) == 0) {
					bdirty(bp);
					bp->b_flags &= ~B_DONE;
				}
				if ((error == EINTR || error == ETIMEDOUT) &&
				    (bp->b_flags & B_ASYNC) == 0)
					bp->b_flags |= B_EINTR;
				splx(s);
			} else {
				if (error) {
					bp->b_ioflags |= BIO_ERROR;
					bp->b_flags |= B_INVAL;
					bp->b_error = np->n_error = error;
					mtx_lock(&np->n_mtx);
					np->n_flag |= NWRITEERR;
					np->n_attrstamp = 0;
					KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
					mtx_unlock(&np->n_mtx);
				}
				bp->b_dirtyoff = bp->b_dirtyend = 0;
			}
		} else {
			bp->b_resid = 0;
			bufdone(bp);
			return (0);
		}
	}
	bp->b_resid = uiop->uio_resid;
	if (must_commit)
		ncl_clearcommit(vp->v_mount);
	bufdone(bp);
	return (error);
}
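
/*
 * A worked example of the commit fast path above, assuming
 * DEV_BSIZE = 512: a B_NEEDCOMMIT buffer with b_blkno = 48,
 * b_dirtyoff = 0 and b_dirtyend = 8192 produces a commit RPC for
 * off = 48 * 512 + 0 = 24576 covering 8192 bytes.  A
 * NFSERR_STALEWRITEVERF reply means the server rebooted and lost the
 * unstable data, so ncl_clearcommit() strips B_NEEDCOMMIT across the
 * mount and the data is written again rather than merely committed.
 */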

/*
 * Used to aid in handling ftruncate() operations on the NFS client side.
 * Truncation creates a number of special problems for NFS.  We have to
 * throw away VM pages and buffer cache buffers that are beyond EOF, and
 * we have to properly handle VM pages or (potentially dirty) buffers
 * that straddle the truncation point.
 */
int
ncl_meta_setsize(struct vnode *vp, struct ucred *cred, struct thread *td, u_quad_t nsize)
{
	struct nfsnode *np = VTONFS(vp);
	u_quad_t tsize;
	int biosize = vp->v_mount->mnt_stat.f_iosize;
	int error = 0;

	mtx_lock(&np->n_mtx);
	tsize = np->n_size;
	np->n_size = nsize;
	mtx_unlock(&np->n_mtx);

	if (nsize < tsize) {
		struct buf *bp;
		daddr_t lbn;
		int bufsize;

		/*
		 * vtruncbuf() doesn't get the buffer overlapping the
		 * truncation point.  We may have a B_DELWRI and/or B_CACHE
		 * buffer that now needs to be truncated.
		 */
		error = vtruncbuf(vp, cred, td, nsize, biosize);
		lbn = nsize / biosize;
		bufsize = nsize & (biosize - 1);
		bp = nfs_getcacheblk(vp, lbn, bufsize, td);
		if (!bp)
			return (EINTR);
		if (bp->b_dirtyoff > bp->b_bcount)
			bp->b_dirtyoff = bp->b_bcount;
		if (bp->b_dirtyend > bp->b_bcount)
			bp->b_dirtyend = bp->b_bcount;
		bp->b_flags |= B_RELBUF;	/* don't leave garbage around */
		brelse(bp);
	} else {
		vnode_pager_setsize(vp, nsize);
	}
	return (error);
}
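
/*
 * A worked example of the truncation above, assuming biosize = 8192: an
 * ftruncate() to nsize = 20000 of a larger file computes
 * lbn = 20000 / 8192 = 2 and bufsize = 20000 & 8191 = 3616, so the
 * buffer straddling the truncation point is re-fetched at its clipped
 * size, any b_dirtyoff/b_dirtyend beyond the new b_bcount is trimmed,
 * and the buffer is released with B_RELBUF so no stale garbage survives.
 */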