1 /*-
2 * Copyright (c) 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * Rick Macklem at The University of Guelph.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 4. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 * @(#)nfs_bio.c 8.9 (Berkeley) 3/30/95
33 */
34
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/bio.h>
41 #include <sys/buf.h>
42 #include <sys/kernel.h>
43 #include <sys/mount.h>
44 #include <sys/vmmeter.h>
45 #include <sys/vnode.h>
46
47 #include <vm/vm.h>
48 #include <vm/vm_extern.h>
49 #include <vm/vm_page.h>
50 #include <vm/vm_object.h>
51 #include <vm/vm_pager.h>
52 #include <vm/vnode_pager.h>
53
54 #include <fs/nfs/nfsport.h>
55 #include <fs/nfsclient/nfsmount.h>
56 #include <fs/nfsclient/nfs.h>
57 #include <fs/nfsclient/nfsnode.h>
58
59 extern int newnfs_directio_allow_mmap;
60 extern struct nfsstats newnfsstats;
61 extern struct mtx ncl_iod_mutex;
62 extern int ncl_numasync;
63 extern enum nfsiod_state ncl_iodwant[NFS_MAXASYNCDAEMON];
64 extern struct nfsmount *ncl_iodmount[NFS_MAXASYNCDAEMON];
65 extern int newnfs_directio_enable;
66 extern int newnfs_keep_dirty_on_error;
67
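/*
 * Free count for the physical buffers allocated via getpbuf() in this file;
 * they supply temporary kernel VA for getpages/putpages and stage the
 * asynchronous direct writes in nfs_directio_write().
 */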
68 int ncl_pbuf_freecnt = -1; /* start out unlimited */
69
70 static struct buf *nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size,
71 struct thread *td);
72 static int nfs_directio_write(struct vnode *vp, struct uio *uiop,
73 struct ucred *cred, int ioflag);
74
75 /*
76 * Vnode op for VM getpages.
77 */
78 int
79 ncl_getpages(struct vop_getpages_args *ap)
80 {
81 int i, error, nextoff, size, toff, count, npages;
82 struct uio uio;
83 struct iovec iov;
84 vm_offset_t kva;
85 struct buf *bp;
86 struct vnode *vp;
87 struct thread *td;
88 struct ucred *cred;
89 struct nfsmount *nmp;
90 vm_object_t object;
91 vm_page_t *pages;
92 struct nfsnode *np;
93
94 vp = ap->a_vp;
95 np = VTONFS(vp);
96 td = curthread; /* XXX */
97 cred = curthread->td_ucred; /* XXX */
98 nmp = VFSTONFS(vp->v_mount);
99 pages = ap->a_m;
100 count = ap->a_count;
101
102 if ((object = vp->v_object) == NULL) {
103 ncl_printf("nfs_getpages: called with non-merged cache vnode??\n");
104 return (VM_PAGER_ERROR);
105 }
106
107 if (newnfs_directio_enable && !newnfs_directio_allow_mmap) {
108 mtx_lock(&np->n_mtx);
109 if ((np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
110 mtx_unlock(&np->n_mtx);
111 ncl_printf("nfs_getpages: called on non-cacheable vnode??\n");
112 return (VM_PAGER_ERROR);
113 } else
114 mtx_unlock(&np->n_mtx);
115 }
116
117 mtx_lock(&nmp->nm_mtx);
118 if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
119 (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
120 mtx_unlock(&nmp->nm_mtx);
121 /* We'll never get here for v4, because we always have fsinfo */
122 (void)ncl_fsinfo(nmp, vp, cred, td);
123 } else
124 mtx_unlock(&nmp->nm_mtx);
125
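	/* btoc() rounds the byte count of the request up to whole pages. */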
126 npages = btoc(count);
127
128 /*
129 * If the requested page is partially valid, just return it and
130 * allow the pager to zero-out the blanks. Partially valid pages
131 * can only occur at the file EOF.
132 */
133 VM_OBJECT_LOCK(object);
134 if (pages[ap->a_reqpage]->valid != 0) {
135 vm_page_lock_queues();
136 for (i = 0; i < npages; ++i) {
137 if (i != ap->a_reqpage)
138 vm_page_free(pages[i]);
139 }
140 vm_page_unlock_queues();
141 VM_OBJECT_UNLOCK(object);
142 return (0);
143 }
144 VM_OBJECT_UNLOCK(object);
145
146 /*
147 * We use only the kva address for the buffer, but this is extremely
148  * convenient and fast.
149 */
150 bp = getpbuf(&ncl_pbuf_freecnt);
151
152 kva = (vm_offset_t) bp->b_data;
153 pmap_qenter(kva, pages, npages);
154 PCPU_INC(cnt.v_vnodein);
155 PCPU_ADD(cnt.v_vnodepgsin, npages);
156
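	/*
	 * Build a uio describing one contiguous read of the entire request
	 * into the pbuf's kernel VA, starting at the file offset of the
	 * first page.
	 */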
157 iov.iov_base = (caddr_t) kva;
158 iov.iov_len = count;
159 uio.uio_iov = &iov;
160 uio.uio_iovcnt = 1;
161 uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
162 uio.uio_resid = count;
163 uio.uio_segflg = UIO_SYSSPACE;
164 uio.uio_rw = UIO_READ;
165 uio.uio_td = td;
166
167 error = ncl_readrpc(vp, &uio, cred);
168 pmap_qremove(kva, npages);
169
170 relpbuf(bp, &ncl_pbuf_freecnt);
171
172 if (error && (uio.uio_resid == count)) {
173 ncl_printf("nfs_getpages: error %d\n", error);
174 VM_OBJECT_LOCK(object);
175 vm_page_lock_queues();
176 for (i = 0; i < npages; ++i) {
177 if (i != ap->a_reqpage)
178 vm_page_free(pages[i]);
179 }
180 vm_page_unlock_queues();
181 VM_OBJECT_UNLOCK(object);
182 return (VM_PAGER_ERROR);
183 }
184
185 /*
186 * Calculate the number of bytes read and validate only that number
187 * of bytes. Note that due to pending writes, size may be 0. This
188 * does not mean that the remaining data is invalid!
189 */
190
191 size = count - uio.uio_resid;
192 VM_OBJECT_LOCK(object);
193 vm_page_lock_queues();
194 for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
195 vm_page_t m;
196 nextoff = toff + PAGE_SIZE;
197 m = pages[i];
198
199 if (nextoff <= size) {
200 /*
201 * Read operation filled an entire page
202 */
203 m->valid = VM_PAGE_BITS_ALL;
204 KASSERT(m->dirty == 0,
205 ("nfs_getpages: page %p is dirty", m));
206 } else if (size > toff) {
207 /*
208 * Read operation filled a partial page.
209 */
210 m->valid = 0;
211 vm_page_set_valid(m, 0, size - toff);
212 KASSERT(m->dirty == 0,
213 ("nfs_getpages: page %p is dirty", m));
214 } else {
215 /*
216 			 * Read operation was short.  If no error occurred
217 * we may have hit a zero-fill section. We simply
218 * leave valid set to 0.
219 */
220 ;
221 }
222 if (i != ap->a_reqpage) {
223 /*
224 * Whether or not to leave the page activated is up in
225 * the air, but we should put the page on a page queue
226 			 * somewhere (it already is in the object).
227 			 * Empirical results show that deactivating
228 			 * pages is best.
229 */
230
231 /*
232 * Just in case someone was asking for this page we
233 * now tell them that it is ok to use.
234 */
235 if (!error) {
236 if (m->oflags & VPO_WANTED)
237 vm_page_activate(m);
238 else
239 vm_page_deactivate(m);
240 vm_page_wakeup(m);
241 } else {
242 vm_page_free(m);
243 }
244 }
245 }
246 vm_page_unlock_queues();
247 VM_OBJECT_UNLOCK(object);
248 return (0);
249 }
250
251 /*
252 * Vnode op for VM putpages.
253 */
254 int
255 ncl_putpages(struct vop_putpages_args *ap)
256 {
257 struct uio uio;
258 struct iovec iov;
259 vm_offset_t kva;
260 struct buf *bp;
261 int iomode, must_commit, i, error, npages, count;
262 off_t offset;
263 int *rtvals;
264 struct vnode *vp;
265 struct thread *td;
266 struct ucred *cred;
267 struct nfsmount *nmp;
268 struct nfsnode *np;
269 vm_page_t *pages;
270
271 vp = ap->a_vp;
272 np = VTONFS(vp);
273 td = curthread; /* XXX */
274 /* Set the cred to n_writecred for the write rpcs. */
275 if (np->n_writecred != NULL)
276 cred = crhold(np->n_writecred);
277 else
278 cred = crhold(curthread->td_ucred); /* XXX */
279 nmp = VFSTONFS(vp->v_mount);
280 pages = ap->a_m;
281 count = ap->a_count;
282 rtvals = ap->a_rtvals;
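	/*
	 * npages is the span of the request in pages; offset is the file
	 * byte offset of the first page being pushed.
	 */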
283 npages = btoc(count);
284 offset = IDX_TO_OFF(pages[0]->pindex);
285
286 mtx_lock(&nmp->nm_mtx);
287 if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
288 (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
289 mtx_unlock(&nmp->nm_mtx);
290 (void)ncl_fsinfo(nmp, vp, cred, td);
291 } else
292 mtx_unlock(&nmp->nm_mtx);
293
294 mtx_lock(&np->n_mtx);
295 if (newnfs_directio_enable && !newnfs_directio_allow_mmap &&
296 (np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
297 mtx_unlock(&np->n_mtx);
298 		ncl_printf("ncl_putpages: called on non-cacheable vnode??\n");
299 mtx_lock(&np->n_mtx);
300 }
301
302 for (i = 0; i < npages; i++)
303 rtvals[i] = VM_PAGER_ERROR;
304
305 /*
306 * When putting pages, do not extend file past EOF.
307 */
308 if (offset + count > np->n_size) {
309 count = np->n_size - offset;
310 if (count < 0)
311 count = 0;
312 }
313 mtx_unlock(&np->n_mtx);
314
315 /*
316 * We use only the kva address for the buffer, but this is extremely
317 	 * convenient and fast.
318 */
319 bp = getpbuf(&ncl_pbuf_freecnt);
320
321 kva = (vm_offset_t) bp->b_data;
322 pmap_qenter(kva, pages, npages);
323 PCPU_INC(cnt.v_vnodeout);
324 PCPU_ADD(cnt.v_vnodepgsout, count);
325
326 iov.iov_base = (caddr_t) kva;
327 iov.iov_len = count;
328 uio.uio_iov = &iov;
329 uio.uio_iovcnt = 1;
330 uio.uio_offset = offset;
331 uio.uio_resid = count;
332 uio.uio_segflg = UIO_SYSSPACE;
333 uio.uio_rw = UIO_WRITE;
334 uio.uio_td = td;
335
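	/*
	 * An asynchronous pageout may go out as an UNSTABLE write and be
	 * committed to stable storage later; a sync pageout must be
	 * FILESYNC so the data is durable when the RPC completes.
	 */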
336 if ((ap->a_sync & VM_PAGER_PUT_SYNC) == 0)
337 iomode = NFSWRITE_UNSTABLE;
338 else
339 iomode = NFSWRITE_FILESYNC;
340
341 error = ncl_writerpc(vp, &uio, cred, &iomode, &must_commit, 0);
342 crfree(cred);
343
344 pmap_qremove(kva, npages);
345 relpbuf(bp, &ncl_pbuf_freecnt);
346
347 if (error == 0 || !newnfs_keep_dirty_on_error) {
348 vnode_pager_undirty_pages(pages, rtvals, count - uio.uio_resid);
349 if (must_commit)
350 ncl_clearcommit(vp->v_mount);
351 }
352 return rtvals[0];
353 }
354
355 /*
356 * For nfs, cache consistency can only be maintained approximately.
357 * Although RFC1094 does not specify the criteria, the following is
358 * believed to be compatible with the reference port.
359 * For nfs:
360 * If the file's modify time on the server has changed since the
361 * last read rpc or you have written to the file,
362 * you may have lost data cache consistency with the
363 * server, so flush all of the file's data out of the cache.
364 * Then force a getattr rpc to ensure that you have up to date
365 * attributes.
366 * NB: This implies that cache data can be read when up to
367 * NFS_ATTRTIMEO seconds out of date. If you find that you need current
368  * attributes, this can be forced by setting n_attrstamp to 0 before
369 * the VOP_GETATTR() call.
370 */
371 static inline int
372 nfs_bioread_check_cons(struct vnode *vp, struct thread *td, struct ucred *cred)
373 {
374 int error = 0;
375 struct vattr vattr;
376 struct nfsnode *np = VTONFS(vp);
377 int old_lock;
378
379 /*
380 * Grab the exclusive lock before checking whether the cache is
381 * consistent.
382 * XXX - We can make this cheaper later (by acquiring cheaper locks).
383 * But for now, this suffices.
384 */
385 old_lock = ncl_upgrade_vnlock(vp);
386 if (vp->v_iflag & VI_DOOMED) {
387 ncl_downgrade_vnlock(vp, old_lock);
388 return (EBADF);
389 }
390
391 mtx_lock(&np->n_mtx);
392 if (np->n_flag & NMODIFIED) {
393 mtx_unlock(&np->n_mtx);
394 if (vp->v_type != VREG) {
395 if (vp->v_type != VDIR)
396 panic("nfs: bioread, not dir");
397 ncl_invaldir(vp);
398 error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
399 if (error)
400 goto out;
401 }
402 np->n_attrstamp = 0;
403 error = VOP_GETATTR(vp, &vattr, cred);
404 if (error)
405 goto out;
406 mtx_lock(&np->n_mtx);
407 np->n_mtime = vattr.va_mtime;
408 mtx_unlock(&np->n_mtx);
409 } else {
410 mtx_unlock(&np->n_mtx);
411 error = VOP_GETATTR(vp, &vattr, cred);
412 if (error)
413 return (error);
414 mtx_lock(&np->n_mtx);
415 if ((np->n_flag & NSIZECHANGED)
416 || (NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime))) {
417 mtx_unlock(&np->n_mtx);
418 if (vp->v_type == VDIR)
419 ncl_invaldir(vp);
420 error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
421 if (error)
422 goto out;
423 mtx_lock(&np->n_mtx);
424 np->n_mtime = vattr.va_mtime;
425 np->n_flag &= ~NSIZECHANGED;
426 }
427 mtx_unlock(&np->n_mtx);
428 }
429 out:
430 ncl_downgrade_vnlock(vp, old_lock);
431 return error;
432 }
433
434 /*
435 * Vnode op for read using bio
436 */
437 int
438 ncl_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
439 {
440 struct nfsnode *np = VTONFS(vp);
441 int biosize, i;
442 struct buf *bp, *rabp;
443 struct thread *td;
444 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
445 daddr_t lbn, rabn;
446 int bcount;
447 int seqcount;
448 int nra, error = 0, n = 0, on = 0;
449 off_t tmp_off;
450
451 KASSERT(uio->uio_rw == UIO_READ, ("ncl_read mode"));
452 if (uio->uio_resid == 0)
453 return (0);
454 if (uio->uio_offset < 0) /* XXX VDIR cookies can be negative */
455 return (EINVAL);
456 td = uio->uio_td;
457
458 mtx_lock(&nmp->nm_mtx);
459 if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
460 (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
461 mtx_unlock(&nmp->nm_mtx);
462 (void)ncl_fsinfo(nmp, vp, cred, td);
463 mtx_lock(&nmp->nm_mtx);
464 }
465 if (nmp->nm_rsize == 0 || nmp->nm_readdirsize == 0)
466 (void) newnfs_iosize(nmp);
467
468 tmp_off = uio->uio_offset + uio->uio_resid;
469 if (vp->v_type != VDIR &&
470 (tmp_off > nmp->nm_maxfilesize || tmp_off < uio->uio_offset)) {
471 mtx_unlock(&nmp->nm_mtx);
472 return (EFBIG);
473 }
474 mtx_unlock(&nmp->nm_mtx);
475
476 if (newnfs_directio_enable && (ioflag & IO_DIRECT) && (vp->v_type == VREG))
477 		/* No caching, no readaheads.  Just read data into the user buffer. */
478 return ncl_readrpc(vp, uio, cred);
479
480 biosize = vp->v_bufobj.bo_bsize;
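	/*
	 * The sequential access hint carried in the upper bits of ioflag,
	 * scaled for the buffer size, bounds how many read-ahead blocks are
	 * issued below.
	 */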
481 seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / BKVASIZE);
482
483 error = nfs_bioread_check_cons(vp, td, cred);
484 if (error)
485 return error;
486
487 do {
488 u_quad_t nsize;
489
490 mtx_lock(&np->n_mtx);
491 nsize = np->n_size;
492 mtx_unlock(&np->n_mtx);
493
494 switch (vp->v_type) {
495 case VREG:
496 NFSINCRGLOBAL(newnfsstats.biocache_reads);
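		/*
		 * lbn is the logical (biosize) block containing the current
		 * offset and on is the offset within that block.
		 */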
497 lbn = uio->uio_offset / biosize;
498 on = uio->uio_offset & (biosize - 1);
499
500 /*
501 * Start the read ahead(s), as required.
502 */
503 if (nmp->nm_readahead > 0) {
504 for (nra = 0; nra < nmp->nm_readahead && nra < seqcount &&
505 (off_t)(lbn + 1 + nra) * biosize < nsize; nra++) {
506 rabn = lbn + 1 + nra;
507 if (incore(&vp->v_bufobj, rabn) == NULL) {
508 rabp = nfs_getcacheblk(vp, rabn, biosize, td);
509 if (!rabp) {
510 error = newnfs_sigintr(nmp, td);
511 return (error ? error : EINTR);
512 }
513 if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
514 rabp->b_flags |= B_ASYNC;
515 rabp->b_iocmd = BIO_READ;
516 vfs_busy_pages(rabp, 0);
517 if (ncl_asyncio(nmp, rabp, cred, td)) {
518 rabp->b_flags |= B_INVAL;
519 rabp->b_ioflags |= BIO_ERROR;
520 vfs_unbusy_pages(rabp);
521 brelse(rabp);
522 break;
523 }
524 } else {
525 brelse(rabp);
526 }
527 }
528 }
529 }
530
531 /* Note that bcount is *not* DEV_BSIZE aligned. */
532 bcount = biosize;
533 if ((off_t)lbn * biosize >= nsize) {
534 bcount = 0;
535 } else if ((off_t)(lbn + 1) * biosize > nsize) {
536 bcount = nsize - (off_t)lbn * biosize;
537 }
538 bp = nfs_getcacheblk(vp, lbn, bcount, td);
539
540 if (!bp) {
541 error = newnfs_sigintr(nmp, td);
542 return (error ? error : EINTR);
543 }
544
545 /*
546 * If B_CACHE is not set, we must issue the read. If this
547 * fails, we return an error.
548 */
549
550 if ((bp->b_flags & B_CACHE) == 0) {
551 bp->b_iocmd = BIO_READ;
552 vfs_busy_pages(bp, 0);
553 error = ncl_doio(vp, bp, cred, td, 0);
554 if (error) {
555 brelse(bp);
556 return (error);
557 }
558 }
559
560 /*
561 * on is the offset into the current bp. Figure out how many
562 * bytes we can copy out of the bp. Note that bcount is
563 * NOT DEV_BSIZE aligned.
564 *
565 * Then figure out how many bytes we can copy into the uio.
566 */
567
568 n = 0;
569 if (on < bcount)
570 n = min((unsigned)(bcount - on), uio->uio_resid);
571 break;
572 case VLNK:
573 NFSINCRGLOBAL(newnfsstats.biocache_readlinks);
574 bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, td);
575 if (!bp) {
576 error = newnfs_sigintr(nmp, td);
577 return (error ? error : EINTR);
578 }
579 if ((bp->b_flags & B_CACHE) == 0) {
580 bp->b_iocmd = BIO_READ;
581 vfs_busy_pages(bp, 0);
582 error = ncl_doio(vp, bp, cred, td, 0);
583 if (error) {
584 bp->b_ioflags |= BIO_ERROR;
585 brelse(bp);
586 return (error);
587 }
588 }
589 n = min(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
590 on = 0;
591 break;
592 case VDIR:
593 NFSINCRGLOBAL(newnfsstats.biocache_readdirs);
594 if (np->n_direofoffset
595 && uio->uio_offset >= np->n_direofoffset) {
596 return (0);
597 }
598 lbn = (uoff_t)uio->uio_offset / NFS_DIRBLKSIZ;
599 on = uio->uio_offset & (NFS_DIRBLKSIZ - 1);
600 bp = nfs_getcacheblk(vp, lbn, NFS_DIRBLKSIZ, td);
601 if (!bp) {
602 error = newnfs_sigintr(nmp, td);
603 return (error ? error : EINTR);
604 }
605 if ((bp->b_flags & B_CACHE) == 0) {
606 bp->b_iocmd = BIO_READ;
607 vfs_busy_pages(bp, 0);
608 error = ncl_doio(vp, bp, cred, td, 0);
609 if (error) {
610 brelse(bp);
611 }
612 while (error == NFSERR_BAD_COOKIE) {
613 ncl_invaldir(vp);
614 error = ncl_vinvalbuf(vp, 0, td, 1);
615 /*
616 * Yuck! The directory has been modified on the
617 * server. The only way to get the block is by
618 * reading from the beginning to get all the
619 * offset cookies.
620 *
621 * Leave the last bp intact unless there is an error.
622 * Loop back up to the while if the error is another
623 			 * NFSERR_BAD_COOKIE (double yuck!).
624 */
625 for (i = 0; i <= lbn && !error; i++) {
626 if (np->n_direofoffset
627 && (i * NFS_DIRBLKSIZ) >= np->n_direofoffset)
628 return (0);
629 bp = nfs_getcacheblk(vp, i, NFS_DIRBLKSIZ, td);
630 if (!bp) {
631 error = newnfs_sigintr(nmp, td);
632 return (error ? error : EINTR);
633 }
634 if ((bp->b_flags & B_CACHE) == 0) {
635 bp->b_iocmd = BIO_READ;
636 vfs_busy_pages(bp, 0);
637 error = ncl_doio(vp, bp, cred, td, 0);
638 /*
639 * no error + B_INVAL == directory EOF,
640 * use the block.
641 */
642 if (error == 0 && (bp->b_flags & B_INVAL))
643 break;
644 }
645 /*
646 * An error will throw away the block and the
647 * for loop will break out. If no error and this
648 * is not the block we want, we throw away the
649 * block and go for the next one via the for loop.
650 */
651 if (error || i < lbn)
652 brelse(bp);
653 }
654 }
655 /*
656 * The above while is repeated if we hit another cookie
657 * error. If we hit an error and it wasn't a cookie error,
658 * we give up.
659 */
660 if (error)
661 return (error);
662 }
663
664 /*
665 * If not eof and read aheads are enabled, start one.
666 * (You need the current block first, so that you have the
667 * directory offset cookie of the next block.)
668 */
669 if (nmp->nm_readahead > 0 &&
670 (bp->b_flags & B_INVAL) == 0 &&
671 (np->n_direofoffset == 0 ||
672 (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) &&
673 incore(&vp->v_bufobj, lbn + 1) == NULL) {
674 rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, td);
675 if (rabp) {
676 if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
677 rabp->b_flags |= B_ASYNC;
678 rabp->b_iocmd = BIO_READ;
679 vfs_busy_pages(rabp, 0);
680 if (ncl_asyncio(nmp, rabp, cred, td)) {
681 rabp->b_flags |= B_INVAL;
682 rabp->b_ioflags |= BIO_ERROR;
683 vfs_unbusy_pages(rabp);
684 brelse(rabp);
685 }
686 } else {
687 brelse(rabp);
688 }
689 }
690 }
691 /*
692 		 * Unlike VREG files, whose buffer size ( bp->b_bcount ) is
693 * chopped for the EOF condition, we cannot tell how large
694 * NFS directories are going to be until we hit EOF. So
695 * an NFS directory buffer is *not* chopped to its EOF. Now,
696 * it just so happens that b_resid will effectively chop it
697 * to EOF. *BUT* this information is lost if the buffer goes
698 * away and is reconstituted into a B_CACHE state ( due to
699 * being VMIO ) later. So we keep track of the directory eof
700 * in np->n_direofoffset and chop it off as an extra step
701 * right here.
702 */
703 n = lmin(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid - on);
704 if (np->n_direofoffset && n > np->n_direofoffset - uio->uio_offset)
705 n = np->n_direofoffset - uio->uio_offset;
706 break;
707 default:
708 ncl_printf(" ncl_bioread: type %x unexpected\n", vp->v_type);
709 bp = NULL;
710 break;
711 	}
712
713 if (n > 0) {
714 error = uiomove(bp->b_data + on, (int)n, uio);
715 }
716 if (vp->v_type == VLNK)
717 n = 0;
718 if (bp != NULL)
719 brelse(bp);
720 } while (error == 0 && uio->uio_resid > 0 && n > 0);
721 return (error);
722 }
723
724 /*
725 * The NFS write path cannot handle iovecs with len > 1. So we need to
726 * break up iovecs accordingly (restricting them to wsize).
727 * For the SYNC case, we can do this with 1 copy (user buffer -> mbuf).
728  * For the ASYNC case, 2 copies are needed.  The first is a copy from the
729  * user buffer to a staging buffer, and then a second copy from the staging
730 * buffer to mbufs. This can be optimized by copying from the user buffer
731 * directly into mbufs and passing the chain down, but that requires a
732 * fair amount of re-working of the relevant codepaths (and can be done
733 * later).
734 */
735 static int
736 nfs_directio_write(struct vnode *vp, struct uio *uiop, struct ucred *cred,
737     int ioflag)
741 {
742 int error;
743 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
744 struct thread *td = uiop->uio_td;
745 int size;
746 int wsize;
747
748 mtx_lock(&nmp->nm_mtx);
749 wsize = nmp->nm_wsize;
750 mtx_unlock(&nmp->nm_mtx);
751 if (ioflag & IO_SYNC) {
752 int iomode, must_commit;
753 struct uio uio;
754 struct iovec iov;
755 do_sync:
756 while (uiop->uio_resid > 0) {
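			/*
			 * Clamp each chunk to the mount's wsize and to the
			 * current iovec, so a single write RPC never crosses
			 * an iovec boundary or exceeds wsize.
			 */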
757 size = min(uiop->uio_resid, wsize);
758 size = min(uiop->uio_iov->iov_len, size);
759 iov.iov_base = uiop->uio_iov->iov_base;
760 iov.iov_len = size;
761 uio.uio_iov = &iov;
762 uio.uio_iovcnt = 1;
763 uio.uio_offset = uiop->uio_offset;
764 uio.uio_resid = size;
765 uio.uio_segflg = UIO_USERSPACE;
766 uio.uio_rw = UIO_WRITE;
767 uio.uio_td = td;
768 iomode = NFSWRITE_FILESYNC;
769 error = ncl_writerpc(vp, &uio, cred, &iomode,
770 &must_commit, 0);
771 KASSERT((must_commit == 0),
772 ("ncl_directio_write: Did not commit write"));
773 if (error)
774 return (error);
775 uiop->uio_offset += size;
776 uiop->uio_resid -= size;
777 if (uiop->uio_iov->iov_len <= size) {
778 uiop->uio_iovcnt--;
779 uiop->uio_iov++;
780 } else {
781 uiop->uio_iov->iov_base =
782 (char *)uiop->uio_iov->iov_base + size;
783 uiop->uio_iov->iov_len -= size;
784 }
785 }
786 } else {
787 struct uio *t_uio;
788 struct iovec *t_iov;
789 struct buf *bp;
790
791 /*
792 * Break up the write into blocksize chunks and hand these
793 		 * over to the nfsiods for write back.
794 		 * Unfortunately, this incurs a copy of the data, since
795 * the user could modify the buffer before the write is
796 * initiated.
797 *
798 * The obvious optimization here is that one of the 2 copies
799 * in the async write path can be eliminated by copying the
800 * data here directly into mbufs and passing the mbuf chain
801 * down. But that will require a fair amount of re-working
802 * of the code and can be done if there's enough interest
803 * in NFS directio access.
804 */
805 while (uiop->uio_resid > 0) {
806 size = min(uiop->uio_resid, wsize);
807 size = min(uiop->uio_iov->iov_len, size);
808 bp = getpbuf(&ncl_pbuf_freecnt);
809 t_uio = malloc(sizeof(struct uio), M_NFSDIRECTIO, M_WAITOK);
810 t_iov = malloc(sizeof(struct iovec), M_NFSDIRECTIO, M_WAITOK);
811 t_iov->iov_base = malloc(size, M_NFSDIRECTIO, M_WAITOK);
812 t_iov->iov_len = size;
813 t_uio->uio_iov = t_iov;
814 t_uio->uio_iovcnt = 1;
815 t_uio->uio_offset = uiop->uio_offset;
816 t_uio->uio_resid = size;
817 t_uio->uio_segflg = UIO_SYSSPACE;
818 t_uio->uio_rw = UIO_WRITE;
819 t_uio->uio_td = td;
820 KASSERT(uiop->uio_segflg == UIO_USERSPACE ||
821 uiop->uio_segflg == UIO_SYSSPACE,
822 ("nfs_directio_write: Bad uio_segflg"));
823 if (uiop->uio_segflg == UIO_USERSPACE) {
824 error = copyin(uiop->uio_iov->iov_base,
825 t_iov->iov_base, size);
826 if (error != 0)
827 goto err_free;
828 } else
829 /*
830 * UIO_SYSSPACE may never happen, but handle
831 * it just in case it does.
832 */
833 bcopy(uiop->uio_iov->iov_base, t_iov->iov_base,
834 size);
835 bp->b_flags |= B_DIRECT;
836 bp->b_iocmd = BIO_WRITE;
837 if (cred != NOCRED) {
838 crhold(cred);
839 bp->b_wcred = cred;
840 } else
841 bp->b_wcred = NOCRED;
842 bp->b_caller1 = (void *)t_uio;
843 bp->b_vp = vp;
844 error = ncl_asyncio(nmp, bp, NOCRED, td);
845 err_free:
846 if (error) {
847 free(t_iov->iov_base, M_NFSDIRECTIO);
848 free(t_iov, M_NFSDIRECTIO);
849 free(t_uio, M_NFSDIRECTIO);
850 bp->b_vp = NULL;
851 relpbuf(bp, &ncl_pbuf_freecnt);
852 if (error == EINTR)
853 return (error);
854 goto do_sync;
855 }
856 uiop->uio_offset += size;
857 uiop->uio_resid -= size;
858 if (uiop->uio_iov->iov_len <= size) {
859 uiop->uio_iovcnt--;
860 uiop->uio_iov++;
861 } else {
862 uiop->uio_iov->iov_base =
863 (char *)uiop->uio_iov->iov_base + size;
864 uiop->uio_iov->iov_len -= size;
865 }
866 }
867 }
868 return (0);
869 }
870
871 /*
872 * Vnode op for write using bio
873 */
874 int
875 ncl_write(struct vop_write_args *ap)
876 {
877 int biosize;
878 struct uio *uio = ap->a_uio;
879 struct thread *td = uio->uio_td;
880 struct vnode *vp = ap->a_vp;
881 struct nfsnode *np = VTONFS(vp);
882 struct ucred *cred = ap->a_cred;
883 int ioflag = ap->a_ioflag;
884 struct buf *bp;
885 struct vattr vattr;
886 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
887 daddr_t lbn;
888 int bcount;
889 int n, on, error = 0;
890 off_t tmp_off;
891
892 KASSERT(uio->uio_rw == UIO_WRITE, ("ncl_write mode"));
893 KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
894 ("ncl_write proc"));
895 if (vp->v_type != VREG)
896 return (EIO);
897 mtx_lock(&np->n_mtx);
898 if (np->n_flag & NWRITEERR) {
899 np->n_flag &= ~NWRITEERR;
900 mtx_unlock(&np->n_mtx);
901 return (np->n_error);
902 } else
903 mtx_unlock(&np->n_mtx);
904 mtx_lock(&nmp->nm_mtx);
905 if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
906 (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
907 mtx_unlock(&nmp->nm_mtx);
908 (void)ncl_fsinfo(nmp, vp, cred, td);
909 mtx_lock(&nmp->nm_mtx);
910 }
911 if (nmp->nm_wsize == 0)
912 (void) newnfs_iosize(nmp);
913 mtx_unlock(&nmp->nm_mtx);
914
915 /*
916 * Synchronously flush pending buffers if we are in synchronous
917 * mode or if we are appending.
918 */
919 if (ioflag & (IO_APPEND | IO_SYNC)) {
920 mtx_lock(&np->n_mtx);
921 if (np->n_flag & NMODIFIED) {
922 mtx_unlock(&np->n_mtx);
923 #ifdef notyet /* Needs matching nonblock semantics elsewhere, too. */
924 /*
925 * Require non-blocking, synchronous writes to
926 * dirty files to inform the program it needs
927 * to fsync(2) explicitly.
928 */
929 if (ioflag & IO_NDELAY)
930 return (EAGAIN);
931 #endif
932 flush_and_restart:
933 np->n_attrstamp = 0;
934 error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
935 if (error)
936 return (error);
937 } else
938 mtx_unlock(&np->n_mtx);
939 }
940
941 /*
942 * If IO_APPEND then load uio_offset. We restart here if we cannot
943 * get the append lock.
944 */
945 if (ioflag & IO_APPEND) {
946 np->n_attrstamp = 0;
947 error = VOP_GETATTR(vp, &vattr, cred);
948 if (error)
949 return (error);
950 mtx_lock(&np->n_mtx);
951 uio->uio_offset = np->n_size;
952 mtx_unlock(&np->n_mtx);
953 }
954
955 if (uio->uio_offset < 0)
956 return (EINVAL);
957 tmp_off = uio->uio_offset + uio->uio_resid;
958 if (tmp_off > nmp->nm_maxfilesize || tmp_off < uio->uio_offset)
959 return (EFBIG);
960 if (uio->uio_resid == 0)
961 return (0);
962
963 if (newnfs_directio_enable && (ioflag & IO_DIRECT) && vp->v_type == VREG)
964 return nfs_directio_write(vp, uio, cred, ioflag);
965
966 /*
967 * Maybe this should be above the vnode op call, but so long as
968 	 * file servers have no limits, I don't think it matters
969 */
970 if (vn_rlimit_fsize(vp, uio, td))
971 return (EFBIG);
972
973 biosize = vp->v_bufobj.bo_bsize;
974 /*
975 * Find all of this file's B_NEEDCOMMIT buffers. If our writes
976 * would exceed the local maximum per-file write commit size when
977 * combined with those, we must decide whether to flush,
978 * go synchronous, or return error. We don't bother checking
979 * IO_UNIT -- we just make all writes atomic anyway, as there's
980 * no point optimizing for something that really won't ever happen.
981 */
982 if (!(ioflag & IO_SYNC)) {
983 		int nflag;
984 		int needrestart = 0;
985 
986 		mtx_lock(&np->n_mtx);
987 		nflag = np->n_flag;
988 		mtx_unlock(&np->n_mtx);
989 if (nmp->nm_wcommitsize < uio->uio_resid) {
990 /*
991 * If this request could not possibly be completed
992 * without exceeding the maximum outstanding write
993 * commit size, see if we can convert it into a
994 * synchronous write operation.
995 */
996 if (ioflag & IO_NDELAY)
997 return (EAGAIN);
998 ioflag |= IO_SYNC;
999 if (nflag & NMODIFIED)
1000 needrestart = 1;
1001 } else if (nflag & NMODIFIED) {
1002 int wouldcommit = 0;
1003 BO_LOCK(&vp->v_bufobj);
1004 if (vp->v_bufobj.bo_dirty.bv_cnt != 0) {
1005 TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd,
1006 b_bobufs) {
1007 if (bp->b_flags & B_NEEDCOMMIT)
1008 wouldcommit += bp->b_bcount;
1009 }
1010 }
1011 BO_UNLOCK(&vp->v_bufobj);
1012 /*
1013 * Since we're not operating synchronously and
1014 * bypassing the buffer cache, we are in a commit
1015 * and holding all of these buffers whether
1016 * transmitted or not. If not limited, this
1017 * will lead to the buffer cache deadlocking,
1018 * as no one else can flush our uncommitted buffers.
1019 */
1020 wouldcommit += uio->uio_resid;
1021 /*
1022 * If we would initially exceed the maximum
1023 * outstanding write commit size, flush and restart.
1024 */
1025 if (wouldcommit > nmp->nm_wcommitsize)
1026 needrestart = 1;
1027 }
1028 if (needrestart)
1029 goto flush_and_restart;
1030 }
1031
1032 do {
1033 NFSINCRGLOBAL(newnfsstats.biocache_writes);
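		/*
		 * lbn/on locate the current offset within its biosize block;
		 * n is how much of that block this pass of the loop can fill.
		 */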
1034 lbn = uio->uio_offset / biosize;
1035 on = uio->uio_offset & (biosize-1);
1036 n = min((unsigned)(biosize - on), uio->uio_resid);
1037 again:
1038 /*
1039 * Handle direct append and file extension cases, calculate
1040 * unaligned buffer size.
1041 */
1042 mtx_lock(&np->n_mtx);
1043 if (uio->uio_offset == np->n_size && n) {
1044 mtx_unlock(&np->n_mtx);
1045 /*
1046 * Get the buffer (in its pre-append state to maintain
1047 * B_CACHE if it was previously set). Resize the
1048 * nfsnode after we have locked the buffer to prevent
1049 * readers from reading garbage.
1050 */
1051 bcount = on;
1052 bp = nfs_getcacheblk(vp, lbn, bcount, td);
1053
1054 if (bp != NULL) {
1055 long save;
1056
1057 mtx_lock(&np->n_mtx);
1058 np->n_size = uio->uio_offset + n;
1059 np->n_flag |= NMODIFIED;
1060 vnode_pager_setsize(vp, np->n_size);
1061 mtx_unlock(&np->n_mtx);
1062
1063 save = bp->b_flags & B_CACHE;
1064 bcount += n;
1065 allocbuf(bp, bcount);
1066 bp->b_flags |= save;
1067 }
1068 } else {
1069 /*
1070 * Obtain the locked cache block first, and then
1071 * adjust the file's size as appropriate.
1072 */
1073 bcount = on + n;
1074 if ((off_t)lbn * biosize + bcount < np->n_size) {
1075 if ((off_t)(lbn + 1) * biosize < np->n_size)
1076 bcount = biosize;
1077 else
1078 bcount = np->n_size - (off_t)lbn * biosize;
1079 }
1080 mtx_unlock(&np->n_mtx);
1081 bp = nfs_getcacheblk(vp, lbn, bcount, td);
1082 mtx_lock(&np->n_mtx);
1083 if (uio->uio_offset + n > np->n_size) {
1084 np->n_size = uio->uio_offset + n;
1085 np->n_flag |= NMODIFIED;
1086 vnode_pager_setsize(vp, np->n_size);
1087 }
1088 mtx_unlock(&np->n_mtx);
1089 }
1090
1091 if (!bp) {
1092 error = newnfs_sigintr(nmp, td);
1093 if (!error)
1094 error = EINTR;
1095 break;
1096 }
1097
1098 /*
1099 * Issue a READ if B_CACHE is not set. In special-append
1100 * mode, B_CACHE is based on the buffer prior to the write
1101 * op and is typically set, avoiding the read. If a read
1102 * is required in special append mode, the server will
1103 * probably send us a short-read since we extended the file
1104 		 * on our end, resulting in b_resid == 0 and, thus,
1105 * B_CACHE getting set.
1106 *
1107 * We can also avoid issuing the read if the write covers
1108 * the entire buffer. We have to make sure the buffer state
1109 * is reasonable in this case since we will not be initiating
1110 * I/O. See the comments in kern/vfs_bio.c's getblk() for
1111 * more information.
1112 *
1113 * B_CACHE may also be set due to the buffer being cached
1114 * normally.
1115 */
1116
1117 if (on == 0 && n == bcount) {
1118 bp->b_flags |= B_CACHE;
1119 bp->b_flags &= ~B_INVAL;
1120 bp->b_ioflags &= ~BIO_ERROR;
1121 }
1122
1123 if ((bp->b_flags & B_CACHE) == 0) {
1124 bp->b_iocmd = BIO_READ;
1125 vfs_busy_pages(bp, 0);
1126 error = ncl_doio(vp, bp, cred, td, 0);
1127 if (error) {
1128 brelse(bp);
1129 break;
1130 }
1131 }
1132 if (bp->b_wcred == NOCRED)
1133 bp->b_wcred = crhold(cred);
1134 mtx_lock(&np->n_mtx);
1135 np->n_flag |= NMODIFIED;
1136 mtx_unlock(&np->n_mtx);
1137
1138 /*
1139 * If dirtyend exceeds file size, chop it down. This should
1140 * not normally occur but there is an append race where it
1141 * might occur XXX, so we log it.
1142 *
1143 * If the chopping creates a reverse-indexed or degenerate
1144 * situation with dirtyoff/end, we 0 both of them.
1145 */
1146
1147 if (bp->b_dirtyend > bcount) {
1148 ncl_printf("NFS append race @%lx:%d\n",
1149 (long)bp->b_blkno * DEV_BSIZE,
1150 bp->b_dirtyend - bcount);
1151 bp->b_dirtyend = bcount;
1152 }
1153
1154 if (bp->b_dirtyoff >= bp->b_dirtyend)
1155 bp->b_dirtyoff = bp->b_dirtyend = 0;
1156
1157 /*
1158 * If the new write will leave a contiguous dirty
1159 * area, just update the b_dirtyoff and b_dirtyend,
1160 * otherwise force a write rpc of the old dirty area.
1161 *
1162 * While it is possible to merge discontiguous writes due to
1163 * our having a B_CACHE buffer ( and thus valid read data
1164 * for the hole), we don't because it could lead to
1165 * significant cache coherency problems with multiple clients,
1166 * especially if locking is implemented later on.
1167 *
1168 * As an optimization we could theoretically maintain
1169 * a linked list of discontinuous areas, but we would still
1170 * have to commit them separately so there isn't much
1171 * advantage to it except perhaps a bit of asynchronization.
1172 */
1173
1174 if (bp->b_dirtyend > 0 &&
1175 (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
1176 if (bwrite(bp) == EINTR) {
1177 error = EINTR;
1178 break;
1179 }
1180 goto again;
1181 }
1182
1183 error = uiomove((char *)bp->b_data + on, n, uio);
1184
1185 /*
1186 * Since this block is being modified, it must be written
1187 * again and not just committed. Since write clustering does
1188 * not work for the stage 1 data write, only the stage 2
1189 * commit rpc, we have to clear B_CLUSTEROK as well.
1190 */
1191 bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
1192
1193 if (error) {
1194 bp->b_ioflags |= BIO_ERROR;
1195 brelse(bp);
1196 break;
1197 }
1198
1199 /*
1200 * Only update dirtyoff/dirtyend if not a degenerate
1201 * condition.
1202 */
1203 if (n) {
1204 if (bp->b_dirtyend > 0) {
1205 bp->b_dirtyoff = min(on, bp->b_dirtyoff);
1206 bp->b_dirtyend = max((on + n), bp->b_dirtyend);
1207 } else {
1208 bp->b_dirtyoff = on;
1209 bp->b_dirtyend = on + n;
1210 }
1211 vfs_bio_set_valid(bp, on, n);
1212 }
1213
1214 /*
1215 * If IO_SYNC do bwrite().
1216 *
1217 * IO_INVAL appears to be unused. The idea appears to be
1218 * to turn off caching in this case. Very odd. XXX
1219 */
1220 if ((ioflag & IO_SYNC)) {
1221 if (ioflag & IO_INVAL)
1222 bp->b_flags |= B_NOCACHE;
1223 error = bwrite(bp);
1224 if (error)
1225 break;
1226 } else if ((n + on) == biosize) {
1227 bp->b_flags |= B_ASYNC;
1228 (void) ncl_writebp(bp, 0, NULL);
1229 } else {
1230 bdwrite(bp);
1231 }
1232 } while (uio->uio_resid > 0 && n > 0);
1233
1234 return (error);
1235 }
1236
1237 /*
1238 * Get an nfs cache block.
1239 *
1240 * Allocate a new one if the block isn't currently in the cache
1241 * and return the block marked busy. If the calling process is
1242 * interrupted by a signal for an interruptible mount point, return
1243 * NULL.
1244 *
1245 * The caller must carefully deal with the possible B_INVAL state of
1246 * the buffer. ncl_doio() clears B_INVAL (and ncl_asyncio() clears it
1247 * indirectly), so synchronous reads can be issued without worrying about
1248 * the B_INVAL state. We have to be a little more careful when dealing
1249 * with writes (see comments in nfs_write()) when extending a file past
1250 * its EOF.
1251 */
1252 static struct buf *
1253 nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct thread *td)
1254 {
1255 struct buf *bp;
1256 struct mount *mp;
1257 struct nfsmount *nmp;
1258
1259 mp = vp->v_mount;
1260 nmp = VFSTONFS(mp);
1261
1262 if (nmp->nm_flag & NFSMNT_INT) {
1263 sigset_t oldset;
1264
1265 newnfs_set_sigmask(td, &oldset);
1266 bp = getblk(vp, bn, size, NFS_PCATCH, 0, 0);
1267 newnfs_restore_sigmask(td, &oldset);
1268 while (bp == NULL) {
1269 if (newnfs_sigintr(nmp, td))
1270 return (NULL);
1271 bp = getblk(vp, bn, size, 0, 2 * hz, 0);
1272 }
1273 } else {
1274 bp = getblk(vp, bn, size, 0, 0, 0);
1275 }
1276
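	/*
	 * Translate the logical block number into DEV_BSIZE units, since
	 * b_blkno is what ncl_doio() uses to compute the byte offset of the
	 * RPC.
	 */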
1277 if (vp->v_type == VREG)
1278 bp->b_blkno = bn * (vp->v_bufobj.bo_bsize / DEV_BSIZE);
1279 return (bp);
1280 }
1281
1282 /*
1283 * Flush and invalidate all dirty buffers. If another process is already
1284 * doing the flush, just wait for completion.
1285 */
1286 int
1287 ncl_vinvalbuf(struct vnode *vp, int flags, struct thread *td, int intrflg)
1288 {
1289 struct nfsnode *np = VTONFS(vp);
1290 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
1291 int error = 0, slpflag, slptimeo;
1292 int old_lock = 0;
1293
1294 ASSERT_VOP_LOCKED(vp, "ncl_vinvalbuf");
1295
1296 if ((nmp->nm_flag & NFSMNT_INT) == 0)
1297 intrflg = 0;
1298 if ((nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF))
1299 intrflg = 1;
1300 if (intrflg) {
1301 slpflag = NFS_PCATCH;
1302 slptimeo = 2 * hz;
1303 } else {
1304 slpflag = 0;
1305 slptimeo = 0;
1306 }
1307
1308 old_lock = ncl_upgrade_vnlock(vp);
1309 if (vp->v_iflag & VI_DOOMED) {
1310 /*
1311 * Since vgonel() uses the generic vinvalbuf() to flush
1312 * dirty buffers and it does not call this function, it
1313 * is safe to just return OK when VI_DOOMED is set.
1314 */
1315 ncl_downgrade_vnlock(vp, old_lock);
1316 return (0);
1317 }
1318
1319 /*
1320 * Now, flush as required.
1321 */
1322 if ((flags & V_SAVE) && (vp->v_bufobj.bo_object != NULL)) {
1323 VM_OBJECT_LOCK(vp->v_bufobj.bo_object);
1324 vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
1325 VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object);
1326 /*
1327 * If the page clean was interrupted, fail the invalidation.
1328 * Not doing so, we run the risk of losing dirty pages in the
1329 * vinvalbuf() call below.
1330 */
1331 if (intrflg && (error = newnfs_sigintr(nmp, td)))
1332 goto out;
1333 }
1334
1335 error = vinvalbuf(vp, flags, slpflag, 0);
1336 while (error) {
1337 if (intrflg && (error = newnfs_sigintr(nmp, td)))
1338 goto out;
1339 error = vinvalbuf(vp, flags, 0, slptimeo);
1340 }
1341 mtx_lock(&np->n_mtx);
1342 if (np->n_directio_asyncwr == 0)
1343 np->n_flag &= ~NMODIFIED;
1344 mtx_unlock(&np->n_mtx);
1345 out:
1346 ncl_downgrade_vnlock(vp, old_lock);
1347 return error;
1348 }
1349
1350 /*
1351 * Initiate asynchronous I/O. Return an error if no nfsiods are available.
1352 * This is mainly to avoid queueing async I/O requests when the nfsiods
1353 * are all hung on a dead server.
1354 *
1355 * Note: ncl_asyncio() does not clear (BIO_ERROR|B_INVAL) but when the bp
1356 * is eventually dequeued by the async daemon, ncl_doio() *will*.
1357 */
1358 int
1359 ncl_asyncio(struct nfsmount *nmp, struct buf *bp, struct ucred *cred, struct thread *td)
1360 {
1361 int iod;
1362 int gotiod;
1363 int slpflag = 0;
1364 int slptimeo = 0;
1365 int error, error2;
1366
1367 /*
1368 	 * Commits are usually short and sweet, so let's save some CPU and
1369 * leave the async daemons for more important rpc's (such as reads
1370 * and writes).
1371 *
1372 * Readdirplus RPCs do vget()s to acquire the vnodes for entries
1373 * in the directory in order to update attributes. This can deadlock
1374 * with another thread that is waiting for async I/O to be done by
1375 * an nfsiod thread while holding a lock on one of these vnodes.
1376 * To avoid this deadlock, don't allow the async nfsiod threads to
1377 * perform Readdirplus RPCs.
1378 */
1379 mtx_lock(&ncl_iod_mutex);
1380 if ((bp->b_iocmd == BIO_WRITE && (bp->b_flags & B_NEEDCOMMIT) &&
1381 (nmp->nm_bufqiods > ncl_numasync / 2)) ||
1382 (bp->b_vp->v_type == VDIR && (nmp->nm_flag & NFSMNT_RDIRPLUS))) {
1383 mtx_unlock(&ncl_iod_mutex);
1384 return(EIO);
1385 }
1386 again:
1387 if (nmp->nm_flag & NFSMNT_INT)
1388 slpflag = NFS_PCATCH;
1389 gotiod = FALSE;
1390
1391 /*
1392 * Find a free iod to process this request.
1393 */
1394 for (iod = 0; iod < ncl_numasync; iod++)
1395 if (ncl_iodwant[iod] == NFSIOD_AVAILABLE) {
1396 gotiod = TRUE;
1397 break;
1398 }
1399
1400 /*
1401 * Try to create one if none are free.
1402 */
1403 if (!gotiod)
1404 ncl_nfsiodnew();
1405 else {
1406 /*
1407 * Found one, so wake it up and tell it which
1408 * mount to process.
1409 */
1410 NFS_DPF(ASYNCIO, ("ncl_asyncio: waking iod %d for mount %p\n",
1411 iod, nmp));
1412 ncl_iodwant[iod] = NFSIOD_NOT_AVAILABLE;
1413 ncl_iodmount[iod] = nmp;
1414 nmp->nm_bufqiods++;
1415 wakeup(&ncl_iodwant[iod]);
1416 }
1417
1418 /*
1419 * If none are free, we may already have an iod working on this mount
1420 * point. If so, it will process our request.
1421 */
1422 if (!gotiod) {
1423 if (nmp->nm_bufqiods > 0) {
1424 NFS_DPF(ASYNCIO,
1425 ("ncl_asyncio: %d iods are already processing mount %p\n",
1426 nmp->nm_bufqiods, nmp));
1427 gotiod = TRUE;
1428 }
1429 }
1430
1431 /*
1432 * If we have an iod which can process the request, then queue
1433 * the buffer.
1434 */
1435 if (gotiod) {
1436 /*
1437 * Ensure that the queue never grows too large. We still want
1438 		 * to asynchronize so we block rather than return EIO.
1439 */
1440 while (nmp->nm_bufqlen >= 2*ncl_numasync) {
1441 NFS_DPF(ASYNCIO,
1442 ("ncl_asyncio: waiting for mount %p queue to drain\n", nmp));
1443 nmp->nm_bufqwant = TRUE;
1444 error = newnfs_msleep(td, &nmp->nm_bufq,
1445 &ncl_iod_mutex, slpflag | PRIBIO, "nfsaio",
1446 slptimeo);
1447 if (error) {
1448 error2 = newnfs_sigintr(nmp, td);
1449 if (error2) {
1450 mtx_unlock(&ncl_iod_mutex);
1451 return (error2);
1452 }
1453 if (slpflag == NFS_PCATCH) {
1454 slpflag = 0;
1455 slptimeo = 2 * hz;
1456 }
1457 }
1458 /*
1459 * We might have lost our iod while sleeping,
1460 			 * so check and loop if necessary.
1461 */
1462 goto again;
1463 }
1464
1465 /* We might have lost our nfsiod */
1466 if (nmp->nm_bufqiods == 0) {
1467 NFS_DPF(ASYNCIO,
1468 ("ncl_asyncio: no iods after mount %p queue was drained, looping\n", nmp));
1469 goto again;
1470 }
1471
1472 if (bp->b_iocmd == BIO_READ) {
1473 if (bp->b_rcred == NOCRED && cred != NOCRED)
1474 bp->b_rcred = crhold(cred);
1475 } else {
1476 if (bp->b_wcred == NOCRED && cred != NOCRED)
1477 bp->b_wcred = crhold(cred);
1478 }
1479
1480 if (bp->b_flags & B_REMFREE)
1481 bremfreef(bp);
1482 BUF_KERNPROC(bp);
1483 TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
1484 nmp->nm_bufqlen++;
1485 if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
1486 mtx_lock(&(VTONFS(bp->b_vp))->n_mtx);
1487 VTONFS(bp->b_vp)->n_flag |= NMODIFIED;
1488 VTONFS(bp->b_vp)->n_directio_asyncwr++;
1489 mtx_unlock(&(VTONFS(bp->b_vp))->n_mtx);
1490 }
1491 mtx_unlock(&ncl_iod_mutex);
1492 return (0);
1493 }
1494
1495 mtx_unlock(&ncl_iod_mutex);
1496
1497 /*
1498 * All the iods are busy on other mounts, so return EIO to
1499 * force the caller to process the i/o synchronously.
1500 */
1501 NFS_DPF(ASYNCIO, ("ncl_asyncio: no iods available, i/o is synchronous\n"));
1502 return (EIO);
1503 }
1504
1505 void
1506 ncl_doio_directwrite(struct buf *bp)
1507 {
1508 int iomode, must_commit;
1509 struct uio *uiop = (struct uio *)bp->b_caller1;
1510 char *iov_base = uiop->uio_iov->iov_base;
1511
1512 iomode = NFSWRITE_FILESYNC;
1513 uiop->uio_td = NULL; /* NULL since we're in nfsiod */
1514 ncl_writerpc(bp->b_vp, uiop, bp->b_wcred, &iomode, &must_commit, 0);
1515 KASSERT((must_commit == 0), ("ncl_doio_directwrite: Did not commit write"));
1516 free(iov_base, M_NFSDIRECTIO);
1517 free(uiop->uio_iov, M_NFSDIRECTIO);
1518 free(uiop, M_NFSDIRECTIO);
1519 if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
1520 struct nfsnode *np = VTONFS(bp->b_vp);
1521 mtx_lock(&np->n_mtx);
1522 np->n_directio_asyncwr--;
1523 if (np->n_directio_asyncwr == 0) {
1524 np->n_flag &= ~NMODIFIED;
1525 if ((np->n_flag & NFSYNCWAIT)) {
1526 np->n_flag &= ~NFSYNCWAIT;
1527 wakeup((caddr_t)&np->n_directio_asyncwr);
1528 }
1529 }
1530 mtx_unlock(&np->n_mtx);
1531 }
1532 bp->b_vp = NULL;
1533 relpbuf(bp, &ncl_pbuf_freecnt);
1534 }
1535
1536 /*
1537 * Do an I/O operation to/from a cache block. This may be called
1538 * synchronously or from an nfsiod.
1539 */
1540 int
1541 ncl_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td,
1542 int called_from_strategy)
1543 {
1544 struct uio *uiop;
1545 struct nfsnode *np;
1546 struct nfsmount *nmp;
1547 int error = 0, iomode, must_commit = 0;
1548 struct uio uio;
1549 struct iovec io;
1550 struct proc *p = td ? td->td_proc : NULL;
1551 uint8_t iocmd;
1552
1553 np = VTONFS(vp);
1554 nmp = VFSTONFS(vp->v_mount);
1555 uiop = &uio;
1556 uiop->uio_iov = &io;
1557 uiop->uio_iovcnt = 1;
1558 uiop->uio_segflg = UIO_SYSSPACE;
1559 uiop->uio_td = td;
1560
1561 /*
1562 * clear BIO_ERROR and B_INVAL state prior to initiating the I/O. We
1563 * do this here so we do not have to do it in all the code that
1564 * calls us.
1565 */
1566 bp->b_flags &= ~B_INVAL;
1567 bp->b_ioflags &= ~BIO_ERROR;
1568
1569 KASSERT(!(bp->b_flags & B_DONE), ("ncl_doio: bp %p already marked done", bp));
1570 iocmd = bp->b_iocmd;
1571 if (iocmd == BIO_READ) {
1572 io.iov_len = uiop->uio_resid = bp->b_bcount;
1573 io.iov_base = bp->b_data;
1574 uiop->uio_rw = UIO_READ;
1575
1576 switch (vp->v_type) {
1577 case VREG:
1578 uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
1579 NFSINCRGLOBAL(newnfsstats.read_bios);
1580 error = ncl_readrpc(vp, uiop, cr);
1581
1582 if (!error) {
1583 if (uiop->uio_resid) {
1584 /*
1585 * If we had a short read with no error, we must have
1586 * hit a file hole. We should zero-fill the remainder.
1587 * This can also occur if the server hits the file EOF.
1588 *
1589 * Holes used to be able to occur due to pending
1590 * writes, but that is not possible any longer.
1591 */
1592 int nread = bp->b_bcount - uiop->uio_resid;
1593 int left = uiop->uio_resid;
1594
1595 if (left > 0)
1596 bzero((char *)bp->b_data + nread, left);
1597 uiop->uio_resid = 0;
1598 }
1599 }
1600 /* ASSERT_VOP_LOCKED(vp, "ncl_doio"); */
1601 if (p && (vp->v_vflag & VV_TEXT)) {
1602 mtx_lock(&np->n_mtx);
1603 if (NFS_TIMESPEC_COMPARE(&np->n_mtime, &np->n_vattr.na_mtime)) {
1604 mtx_unlock(&np->n_mtx);
1605 PROC_LOCK(p);
1606 killproc(p, "text file modification");
1607 PROC_UNLOCK(p);
1608 } else
1609 mtx_unlock(&np->n_mtx);
1610 }
1611 break;
1612 case VLNK:
1613 uiop->uio_offset = (off_t)0;
1614 NFSINCRGLOBAL(newnfsstats.readlink_bios);
1615 error = ncl_readlinkrpc(vp, uiop, cr);
1616 break;
1617 case VDIR:
1618 NFSINCRGLOBAL(newnfsstats.readdir_bios);
1619 uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ;
1620 if ((nmp->nm_flag & NFSMNT_RDIRPLUS) != 0) {
1621 error = ncl_readdirplusrpc(vp, uiop, cr, td);
1622 if (error == NFSERR_NOTSUPP)
1623 nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
1624 }
1625 if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
1626 error = ncl_readdirrpc(vp, uiop, cr, td);
1627 /*
1628 * end-of-directory sets B_INVAL but does not generate an
1629 * error.
1630 */
1631 if (error == 0 && uiop->uio_resid == bp->b_bcount)
1632 bp->b_flags |= B_INVAL;
1633 break;
1634 default:
1635 ncl_printf("ncl_doio: type %x unexpected\n", vp->v_type);
1636 break;
1637 	    }
1638 if (error) {
1639 bp->b_ioflags |= BIO_ERROR;
1640 bp->b_error = error;
1641 }
1642 } else {
1643 /*
1644 * If we only need to commit, try to commit
1645 */
1646 if (bp->b_flags & B_NEEDCOMMIT) {
1647 int retv;
1648 off_t off;
1649
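			/*
			 * Commit only the dirty region of the buffer;
			 * b_blkno is in DEV_BSIZE units, so this yields the
			 * region's byte offset within the file.
			 */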
1650 off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
1651 retv = ncl_commit(vp, off, bp->b_dirtyend-bp->b_dirtyoff,
1652 bp->b_wcred, td);
1653 if (retv == 0) {
1654 bp->b_dirtyoff = bp->b_dirtyend = 0;
1655 bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
1656 bp->b_resid = 0;
1657 bufdone(bp);
1658 return (0);
1659 }
1660 if (retv == NFSERR_STALEWRITEVERF) {
1661 ncl_clearcommit(vp->v_mount);
1662 }
1663 }
1664
1665 /*
1666 * Setup for actual write
1667 */
1668 mtx_lock(&np->n_mtx);
1669 if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > np->n_size)
1670 bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * DEV_BSIZE;
1671 mtx_unlock(&np->n_mtx);
1672
1673 if (bp->b_dirtyend > bp->b_dirtyoff) {
1674 io.iov_len = uiop->uio_resid = bp->b_dirtyend
1675 - bp->b_dirtyoff;
1676 uiop->uio_offset = (off_t)bp->b_blkno * DEV_BSIZE
1677 + bp->b_dirtyoff;
1678 io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
1679 uiop->uio_rw = UIO_WRITE;
1680 NFSINCRGLOBAL(newnfsstats.write_bios);
1681
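		/*
		 * A plain async write (not already awaiting a commit, not
		 * B_NOCACHE or part of a cluster) can go out UNSTABLE and be
		 * committed later; everything else is written FILESYNC.
		 */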
1682 if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == B_ASYNC)
1683 iomode = NFSWRITE_UNSTABLE;
1684 else
1685 iomode = NFSWRITE_FILESYNC;
1686
1687 error = ncl_writerpc(vp, uiop, cr, &iomode, &must_commit,
1688 called_from_strategy);
1689
1690 /*
1691 * When setting B_NEEDCOMMIT also set B_CLUSTEROK to try
1692 * to cluster the buffers needing commit. This will allow
1693 * the system to submit a single commit rpc for the whole
1694 * cluster. We can do this even if the buffer is not 100%
1695 * dirty (relative to the NFS blocksize), so we optimize the
1696 * append-to-file-case.
1697 *
1698 * (when clearing B_NEEDCOMMIT, B_CLUSTEROK must also be
1699 * cleared because write clustering only works for commit
1700 * rpc's, not for the data portion of the write).
1701 */
1702
1703 if (!error && iomode == NFSWRITE_UNSTABLE) {
1704 bp->b_flags |= B_NEEDCOMMIT;
1705 if (bp->b_dirtyoff == 0
1706 && bp->b_dirtyend == bp->b_bcount)
1707 bp->b_flags |= B_CLUSTEROK;
1708 } else {
1709 bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
1710 }
1711
1712 /*
1713 * For an interrupted write, the buffer is still valid
1714 * and the write hasn't been pushed to the server yet,
1715 * so we can't set BIO_ERROR and report the interruption
1716 * by setting B_EINTR. For the B_ASYNC case, B_EINTR
1717 * is not relevant, so the rpc attempt is essentially
1718 * a noop. For the case of a V3 write rpc not being
1719 * committed to stable storage, the block is still
1720 * dirty and requires either a commit rpc or another
1721 * write rpc with iomode == NFSV3WRITE_FILESYNC before
1722 * the block is reused. This is indicated by setting
1723 * the B_DELWRI and B_NEEDCOMMIT flags.
1724 *
1725 * EIO is returned by ncl_writerpc() to indicate a recoverable
1726 * write error and is handled as above, except that
1727 * B_EINTR isn't set. One cause of this is a stale stateid
1728 * error for the RPC that indicates recovery is required,
1729 * when called with called_from_strategy != 0.
1730 *
1731 * If the buffer is marked B_PAGING, it does not reside on
1732 * the vp's paging queues so we cannot call bdirty(). The
1733 * bp in this case is not an NFS cache block so we should
1734 * be safe. XXX
1735 *
1736 * The logic below breaks up errors into recoverable and
1737 * unrecoverable. For the former, we clear B_INVAL|B_NOCACHE
1738 * and keep the buffer around for potential write retries.
1739 * For the latter (eg ESTALE), we toss the buffer away (B_INVAL)
1740 * and save the error in the nfsnode. This is less than ideal
1741 * but necessary. Keeping such buffers around could potentially
1742 * cause buffer exhaustion eventually (they can never be written
1743 	     * out, so they will constantly be re-dirtied). It also causes
1744 * all sorts of vfs panics. For non-recoverable write errors,
1745 * also invalidate the attrcache, so we'll be forced to go over
1746 * the wire for this object, returning an error to user on next
1747 * call (most of the time).
1748 */
1749 if (error == EINTR || error == EIO || error == ETIMEDOUT
1750 || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
1751 int s;
1752
1753 s = splbio();
1754 bp->b_flags &= ~(B_INVAL|B_NOCACHE);
1755 if ((bp->b_flags & B_PAGING) == 0) {
1756 bdirty(bp);
1757 bp->b_flags &= ~B_DONE;
1758 }
1759 if ((error == EINTR || error == ETIMEDOUT) &&
1760 (bp->b_flags & B_ASYNC) == 0)
1761 bp->b_flags |= B_EINTR;
1762 splx(s);
1763 } else {
1764 if (error) {
1765 bp->b_ioflags |= BIO_ERROR;
1766 bp->b_flags |= B_INVAL;
1767 bp->b_error = np->n_error = error;
1768 mtx_lock(&np->n_mtx);
1769 np->n_flag |= NWRITEERR;
1770 np->n_attrstamp = 0;
1771 mtx_unlock(&np->n_mtx);
1772 }
1773 bp->b_dirtyoff = bp->b_dirtyend = 0;
1774 }
1775 } else {
1776 bp->b_resid = 0;
1777 bufdone(bp);
1778 return (0);
1779 }
1780 }
1781 bp->b_resid = uiop->uio_resid;
1782 if (must_commit)
1783 ncl_clearcommit(vp->v_mount);
1784 bufdone(bp);
1785 return (error);
1786 }
1787
1788 /*
1789 * Used to aid in handling ftruncate() operations on the NFS client side.
1790 * Truncation creates a number of special problems for NFS. We have to
1791 * throw away VM pages and buffer cache buffers that are beyond EOF, and
1792 * we have to properly handle VM pages or (potentially dirty) buffers
1793 * that straddle the truncation point.
1794 */
1795
1796 int
1797 ncl_meta_setsize(struct vnode *vp, struct ucred *cred, struct thread *td, u_quad_t nsize)
1798 {
1799 struct nfsnode *np = VTONFS(vp);
1800 u_quad_t tsize;
1801 int biosize = vp->v_bufobj.bo_bsize;
1802 int error = 0;
1803
1804 mtx_lock(&np->n_mtx);
1805 tsize = np->n_size;
1806 np->n_size = nsize;
1807 mtx_unlock(&np->n_mtx);
1808
1809 if (nsize < tsize) {
1810 struct buf *bp;
1811 daddr_t lbn;
1812 int bufsize;
1813
1814 /*
1815 * vtruncbuf() doesn't get the buffer overlapping the
1816 * truncation point. We may have a B_DELWRI and/or B_CACHE
1817 * buffer that now needs to be truncated.
1818 */
1819 error = vtruncbuf(vp, cred, td, nsize, biosize);
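 		/*
 		 * lbn/bufsize identify the buffer that straddles the new EOF
 		 * and how many bytes of it remain valid; its dirty range is
 		 * clipped to that below.
 		 */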
1820 lbn = nsize / biosize;
1821 bufsize = nsize & (biosize - 1);
1822 bp = nfs_getcacheblk(vp, lbn, bufsize, td);
1823 if (!bp)
1824 return EINTR;
1825 if (bp->b_dirtyoff > bp->b_bcount)
1826 bp->b_dirtyoff = bp->b_bcount;
1827 if (bp->b_dirtyend > bp->b_bcount)
1828 bp->b_dirtyend = bp->b_bcount;
1829 bp->b_flags |= B_RELBUF; /* don't leave garbage around */
1830 brelse(bp);
1831 } else {
1832 vnode_pager_setsize(vp, nsize);
1833 }
1834 return(error);
1835 }
1836