1 /*-
2 * Copyright (c) 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * Rick Macklem at The University of Guelph.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 4. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 * @(#)nfs_bio.c 8.9 (Berkeley) 3/30/95
33 */
34
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD: releng/8.2/sys/fs/nfsclient/nfs_clbio.c 212836 2010-09-19 02:07:30Z rmacklem $");
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/bio.h>
41 #include <sys/buf.h>
42 #include <sys/kernel.h>
43 #include <sys/mount.h>
44 #include <sys/proc.h>
45 #include <sys/resourcevar.h>
46 #include <sys/signalvar.h>
47 #include <sys/vmmeter.h>
48 #include <sys/vnode.h>
49
50 #include <vm/vm.h>
51 #include <vm/vm_extern.h>
52 #include <vm/vm_page.h>
53 #include <vm/vm_object.h>
54 #include <vm/vm_pager.h>
55 #include <vm/vnode_pager.h>
56
57 #include <fs/nfs/nfsport.h>
58 #include <fs/nfsclient/nfsmount.h>
59 #include <fs/nfsclient/nfs.h>
60 #include <fs/nfsclient/nfsnode.h>
61
62 extern int newnfs_directio_allow_mmap;
63 extern struct nfsstats newnfsstats;
64 extern struct mtx ncl_iod_mutex;
65 extern int ncl_numasync;
extern enum nfsiod_state ncl_iodwant[NFS_MAXASYNCDAEMON];
extern struct nfsmount *ncl_iodmount[NFS_MAXASYNCDAEMON];
68 extern int newnfs_directio_enable;
69
70 int ncl_pbuf_freecnt = -1; /* start out unlimited */
71
72 static struct buf *nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size,
73 struct thread *td);
74 static int nfs_directio_write(struct vnode *vp, struct uio *uiop,
75 struct ucred *cred, int ioflag);
76
77 /*
78 * Vnode op for VM getpages.
79 */
80 int
81 ncl_getpages(struct vop_getpages_args *ap)
82 {
83 int i, error, nextoff, size, toff, count, npages;
84 struct uio uio;
85 struct iovec iov;
86 vm_offset_t kva;
87 struct buf *bp;
88 struct vnode *vp;
89 struct thread *td;
90 struct ucred *cred;
91 struct nfsmount *nmp;
92 vm_object_t object;
93 vm_page_t *pages;
94 struct nfsnode *np;
95
96 vp = ap->a_vp;
97 np = VTONFS(vp);
98 td = curthread; /* XXX */
99 cred = curthread->td_ucred; /* XXX */
100 nmp = VFSTONFS(vp->v_mount);
101 pages = ap->a_m;
102 count = ap->a_count;
103
104 if ((object = vp->v_object) == NULL) {
105 ncl_printf("nfs_getpages: called with non-merged cache vnode??\n");
106 return (VM_PAGER_ERROR);
107 }
108
109 if (newnfs_directio_enable && !newnfs_directio_allow_mmap) {
110 mtx_lock(&np->n_mtx);
111 if ((np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
112 mtx_unlock(&np->n_mtx);
113 ncl_printf("nfs_getpages: called on non-cacheable vnode??\n");
114 return (VM_PAGER_ERROR);
115 } else
116 mtx_unlock(&np->n_mtx);
117 }
118
119 mtx_lock(&nmp->nm_mtx);
120 if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
121 (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
122 mtx_unlock(&nmp->nm_mtx);
123 /* We'll never get here for v4, because we always have fsinfo */
124 (void)ncl_fsinfo(nmp, vp, cred, td);
125 } else
126 mtx_unlock(&nmp->nm_mtx);
127
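	/* btoc() rounds the byte count up to a whole number of pages. */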
128 npages = btoc(count);
129
130 /*
131 * If the requested page is partially valid, just return it and
132 * allow the pager to zero-out the blanks. Partially valid pages
133 * can only occur at the file EOF.
134 */
135 VM_OBJECT_LOCK(object);
136 if (pages[ap->a_reqpage]->valid != 0) {
137 vm_page_lock_queues();
138 for (i = 0; i < npages; ++i) {
139 if (i != ap->a_reqpage)
140 vm_page_free(pages[i]);
141 }
142 vm_page_unlock_queues();
143 VM_OBJECT_UNLOCK(object);
144 return (0);
145 }
146 VM_OBJECT_UNLOCK(object);
147
148 /*
149 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
151 */
152 bp = getpbuf(&ncl_pbuf_freecnt);
153
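	/*
	 * Map the VM pages into the pbuf's KVA window so the read RPC
	 * below can treat them as one contiguous kernel buffer.
	 */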
154 kva = (vm_offset_t) bp->b_data;
155 pmap_qenter(kva, pages, npages);
156 PCPU_INC(cnt.v_vnodein);
157 PCPU_ADD(cnt.v_vnodepgsin, npages);
158
159 iov.iov_base = (caddr_t) kva;
160 iov.iov_len = count;
161 uio.uio_iov = &iov;
162 uio.uio_iovcnt = 1;
163 uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
164 uio.uio_resid = count;
165 uio.uio_segflg = UIO_SYSSPACE;
166 uio.uio_rw = UIO_READ;
167 uio.uio_td = td;
168
169 error = ncl_readrpc(vp, &uio, cred);
170 pmap_qremove(kva, npages);
171
172 relpbuf(bp, &ncl_pbuf_freecnt);
173
174 if (error && (uio.uio_resid == count)) {
175 ncl_printf("nfs_getpages: error %d\n", error);
176 VM_OBJECT_LOCK(object);
177 vm_page_lock_queues();
178 for (i = 0; i < npages; ++i) {
179 if (i != ap->a_reqpage)
180 vm_page_free(pages[i]);
181 }
182 vm_page_unlock_queues();
183 VM_OBJECT_UNLOCK(object);
184 return (VM_PAGER_ERROR);
185 }
186
187 /*
188 * Calculate the number of bytes read and validate only that number
189 * of bytes. Note that due to pending writes, size may be 0. This
190 * does not mean that the remaining data is invalid!
191 */
192
193 size = count - uio.uio_resid;
194 VM_OBJECT_LOCK(object);
195 vm_page_lock_queues();
196 for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
197 vm_page_t m;
198 nextoff = toff + PAGE_SIZE;
199 m = pages[i];
200
201 if (nextoff <= size) {
202 /*
203 * Read operation filled an entire page
204 */
205 m->valid = VM_PAGE_BITS_ALL;
206 KASSERT(m->dirty == 0,
207 ("nfs_getpages: page %p is dirty", m));
208 } else if (size > toff) {
209 /*
210 * Read operation filled a partial page.
211 */
212 m->valid = 0;
213 vm_page_set_valid(m, 0, size - toff);
214 KASSERT(m->dirty == 0,
215 ("nfs_getpages: page %p is dirty", m));
216 } else {
217 /*
			 * Read operation was short. If no error occurred
219 * we may have hit a zero-fill section. We simply
220 * leave valid set to 0.
221 */
222 ;
223 }
224 if (i != ap->a_reqpage) {
225 /*
226 * Whether or not to leave the page activated is up in
227 * the air, but we should put the page on a page queue
228 * somewhere (it already is in the object). Result:
			 * Empirical results suggest that deactivating
			 * pages is best.
231 */
232
233 /*
234 * Just in case someone was asking for this page we
235 * now tell them that it is ok to use.
236 */
237 if (!error) {
238 if (m->oflags & VPO_WANTED)
239 vm_page_activate(m);
240 else
241 vm_page_deactivate(m);
242 vm_page_wakeup(m);
243 } else {
244 vm_page_free(m);
245 }
246 }
247 }
248 vm_page_unlock_queues();
249 VM_OBJECT_UNLOCK(object);
250 return (0);
251 }
252
253 /*
254 * Vnode op for VM putpages.
255 */
256 int
257 ncl_putpages(struct vop_putpages_args *ap)
258 {
259 struct uio uio;
260 struct iovec iov;
261 vm_offset_t kva;
262 struct buf *bp;
263 int iomode, must_commit, i, error, npages, count;
264 off_t offset;
265 int *rtvals;
266 struct vnode *vp;
267 struct thread *td;
268 struct ucred *cred;
269 struct nfsmount *nmp;
270 struct nfsnode *np;
271 vm_page_t *pages;
272
273 vp = ap->a_vp;
274 np = VTONFS(vp);
275 td = curthread; /* XXX */
276 cred = curthread->td_ucred; /* XXX */
277 nmp = VFSTONFS(vp->v_mount);
278 pages = ap->a_m;
279 count = ap->a_count;
280 rtvals = ap->a_rtvals;
281 npages = btoc(count);
282 offset = IDX_TO_OFF(pages[0]->pindex);
283
284 mtx_lock(&nmp->nm_mtx);
285 if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
286 (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
287 mtx_unlock(&nmp->nm_mtx);
288 (void)ncl_fsinfo(nmp, vp, cred, td);
289 } else
290 mtx_unlock(&nmp->nm_mtx);
291
292 mtx_lock(&np->n_mtx);
293 if (newnfs_directio_enable && !newnfs_directio_allow_mmap &&
294 (np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
295 mtx_unlock(&np->n_mtx);
		ncl_printf("ncl_putpages: called on non-cacheable vnode??\n");
297 mtx_lock(&np->n_mtx);
298 }
299
300 for (i = 0; i < npages; i++)
301 rtvals[i] = VM_PAGER_AGAIN;
302
303 /*
304 * When putting pages, do not extend file past EOF.
305 */
306 if (offset + count > np->n_size) {
307 count = np->n_size - offset;
308 if (count < 0)
309 count = 0;
310 }
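	/*
	 * E.g. with n_size = 10000, offset = 8192 and count = 8192, the
	 * write is clamped to the 1808 bytes that actually exist.
	 */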
311 mtx_unlock(&np->n_mtx);
312
313 /*
314 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
316 */
317 bp = getpbuf(&ncl_pbuf_freecnt);
318
319 kva = (vm_offset_t) bp->b_data;
320 pmap_qenter(kva, pages, npages);
321 PCPU_INC(cnt.v_vnodeout);
322 PCPU_ADD(cnt.v_vnodepgsout, count);
323
324 iov.iov_base = (caddr_t) kva;
325 iov.iov_len = count;
326 uio.uio_iov = &iov;
327 uio.uio_iovcnt = 1;
328 uio.uio_offset = offset;
329 uio.uio_resid = count;
330 uio.uio_segflg = UIO_SYSSPACE;
331 uio.uio_rw = UIO_WRITE;
332 uio.uio_td = td;
333
334 if ((ap->a_sync & VM_PAGER_PUT_SYNC) == 0)
335 iomode = NFSWRITE_UNSTABLE;
336 else
337 iomode = NFSWRITE_FILESYNC;
338
339 error = ncl_writerpc(vp, &uio, cred, &iomode, &must_commit, 0);
340
341 pmap_qremove(kva, npages);
342 relpbuf(bp, &ncl_pbuf_freecnt);
343
344 if (!error) {
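		/*
		 * Every page the write RPC fully or partially covered is
		 * now clean; round the byte count up to whole pages.
		 */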
345 int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE;
346 for (i = 0; i < nwritten; i++) {
347 rtvals[i] = VM_PAGER_OK;
348 vm_page_undirty(pages[i]);
349 }
350 if (must_commit) {
351 ncl_clearcommit(vp->v_mount);
352 }
353 }
354 return rtvals[0];
355 }
356
357 /*
358 * For nfs, cache consistency can only be maintained approximately.
359 * Although RFC1094 does not specify the criteria, the following is
360 * believed to be compatible with the reference port.
361 * For nfs:
362 * If the file's modify time on the server has changed since the
363 * last read rpc or you have written to the file,
364 * you may have lost data cache consistency with the
365 * server, so flush all of the file's data out of the cache.
366 * Then force a getattr rpc to ensure that you have up to date
367 * attributes.
368 * NB: This implies that cache data can be read when up to
369 * NFS_ATTRTIMEO seconds out of date. If you find that you need current
370 * attributes this could be forced by setting n_attrstamp to 0 before
371 * the VOP_GETATTR() call.
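 * For example, current attributes can be forced as follows (a sketch,
 * not code from this file):
 *	np->n_attrstamp = 0;
 *	error = VOP_GETATTR(vp, &vattr, cred);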
372 */
373 static inline int
374 nfs_bioread_check_cons(struct vnode *vp, struct thread *td, struct ucred *cred)
375 {
376 int error = 0;
377 struct vattr vattr;
378 struct nfsnode *np = VTONFS(vp);
379 int old_lock;
380
381 /*
382 * Grab the exclusive lock before checking whether the cache is
383 * consistent.
384 * XXX - We can make this cheaper later (by acquiring cheaper locks).
385 * But for now, this suffices.
386 */
387 old_lock = ncl_upgrade_vnlock(vp);
388 if (vp->v_iflag & VI_DOOMED) {
389 ncl_downgrade_vnlock(vp, old_lock);
390 return (EBADF);
391 }
392
393 mtx_lock(&np->n_mtx);
394 if (np->n_flag & NMODIFIED) {
395 mtx_unlock(&np->n_mtx);
396 if (vp->v_type != VREG) {
397 if (vp->v_type != VDIR)
398 panic("nfs: bioread, not dir");
399 ncl_invaldir(vp);
400 error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
401 if (error)
402 goto out;
403 }
404 np->n_attrstamp = 0;
405 error = VOP_GETATTR(vp, &vattr, cred);
406 if (error)
407 goto out;
408 mtx_lock(&np->n_mtx);
409 np->n_mtime = vattr.va_mtime;
410 mtx_unlock(&np->n_mtx);
411 } else {
412 mtx_unlock(&np->n_mtx);
413 error = VOP_GETATTR(vp, &vattr, cred);
414 if (error)
415 return (error);
416 mtx_lock(&np->n_mtx);
417 if ((np->n_flag & NSIZECHANGED)
418 || (NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime))) {
419 mtx_unlock(&np->n_mtx);
420 if (vp->v_type == VDIR)
421 ncl_invaldir(vp);
422 error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
423 if (error)
424 goto out;
425 mtx_lock(&np->n_mtx);
426 np->n_mtime = vattr.va_mtime;
427 np->n_flag &= ~NSIZECHANGED;
428 }
429 mtx_unlock(&np->n_mtx);
430 }
431 out:
432 ncl_downgrade_vnlock(vp, old_lock);
433 return error;
434 }
435
436 /*
437 * Vnode op for read using bio
438 */
439 int
440 ncl_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
441 {
442 struct nfsnode *np = VTONFS(vp);
443 int biosize, i;
444 struct buf *bp, *rabp;
445 struct thread *td;
446 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
447 daddr_t lbn, rabn;
448 int bcount;
449 int seqcount;
450 int nra, error = 0, n = 0, on = 0;
451
452 KASSERT(uio->uio_rw == UIO_READ, ("ncl_read mode"));
453 if (uio->uio_resid == 0)
454 return (0);
455 if (uio->uio_offset < 0) /* XXX VDIR cookies can be negative */
456 return (EINVAL);
457 td = uio->uio_td;
458
459 mtx_lock(&nmp->nm_mtx);
460 if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
461 (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
462 mtx_unlock(&nmp->nm_mtx);
463 (void)ncl_fsinfo(nmp, vp, cred, td);
464 mtx_lock(&nmp->nm_mtx);
465 }
466 if (nmp->nm_rsize == 0 || nmp->nm_readdirsize == 0)
467 (void) newnfs_iosize(nmp);
468 mtx_unlock(&nmp->nm_mtx);
469
470 if (vp->v_type != VDIR &&
471 (uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
472 return (EFBIG);
473
474 if (newnfs_directio_enable && (ioflag & IO_DIRECT) && (vp->v_type == VREG))
		/* No caching/no readaheads. Just read data into the user buffer. */
476 return ncl_readrpc(vp, uio, cred);
477
478 biosize = vp->v_mount->mnt_stat.f_iosize;
479 seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / BKVASIZE);
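	/*
	 * The upper bits of ioflag carry the caller's sequential-access
	 * heuristic (see IO_SEQSHIFT); seqcount scales how many blocks of
	 * read-ahead we are willing to issue below.
	 */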
480
481 error = nfs_bioread_check_cons(vp, td, cred);
482 if (error)
483 return error;
484
485 do {
486 u_quad_t nsize;
487
488 mtx_lock(&np->n_mtx);
489 nsize = np->n_size;
490 mtx_unlock(&np->n_mtx);
491
492 switch (vp->v_type) {
493 case VREG:
494 NFSINCRGLOBAL(newnfsstats.biocache_reads);
495 lbn = uio->uio_offset / biosize;
496 on = uio->uio_offset & (biosize - 1);
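		/*
		 * lbn is the logical block and on the offset within it;
		 * e.g. biosize 32768 and uio_offset 40000 give lbn 1 and
		 * on 7232.
		 */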
497
498 /*
499 * Start the read ahead(s), as required.
500 */
501 if (nmp->nm_readahead > 0) {
502 for (nra = 0; nra < nmp->nm_readahead && nra < seqcount &&
503 (off_t)(lbn + 1 + nra) * biosize < nsize; nra++) {
504 rabn = lbn + 1 + nra;
505 if (incore(&vp->v_bufobj, rabn) == NULL) {
506 rabp = nfs_getcacheblk(vp, rabn, biosize, td);
507 if (!rabp) {
508 error = newnfs_sigintr(nmp, td);
509 return (error ? error : EINTR);
510 }
511 if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
512 rabp->b_flags |= B_ASYNC;
513 rabp->b_iocmd = BIO_READ;
514 vfs_busy_pages(rabp, 0);
515 if (ncl_asyncio(nmp, rabp, cred, td)) {
516 rabp->b_flags |= B_INVAL;
517 rabp->b_ioflags |= BIO_ERROR;
518 vfs_unbusy_pages(rabp);
519 brelse(rabp);
520 break;
521 }
522 } else {
523 brelse(rabp);
524 }
525 }
526 }
527 }
528
529 /* Note that bcount is *not* DEV_BSIZE aligned. */
530 bcount = biosize;
531 if ((off_t)lbn * biosize >= nsize) {
532 bcount = 0;
533 } else if ((off_t)(lbn + 1) * biosize > nsize) {
534 bcount = nsize - (off_t)lbn * biosize;
535 }
536 bp = nfs_getcacheblk(vp, lbn, bcount, td);
537
538 if (!bp) {
539 error = newnfs_sigintr(nmp, td);
540 return (error ? error : EINTR);
541 }
542
543 /*
544 * If B_CACHE is not set, we must issue the read. If this
545 * fails, we return an error.
546 */
547
548 if ((bp->b_flags & B_CACHE) == 0) {
549 bp->b_iocmd = BIO_READ;
550 vfs_busy_pages(bp, 0);
551 error = ncl_doio(vp, bp, cred, td, 0);
552 if (error) {
553 brelse(bp);
554 return (error);
555 }
556 }
557
558 /*
559 * on is the offset into the current bp. Figure out how many
560 * bytes we can copy out of the bp. Note that bcount is
561 * NOT DEV_BSIZE aligned.
562 *
563 * Then figure out how many bytes we can copy into the uio.
564 */
565
566 n = 0;
567 if (on < bcount)
568 n = min((unsigned)(bcount - on), uio->uio_resid);
569 break;
570 case VLNK:
571 NFSINCRGLOBAL(newnfsstats.biocache_readlinks);
572 bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, td);
573 if (!bp) {
574 error = newnfs_sigintr(nmp, td);
575 return (error ? error : EINTR);
576 }
577 if ((bp->b_flags & B_CACHE) == 0) {
578 bp->b_iocmd = BIO_READ;
579 vfs_busy_pages(bp, 0);
580 error = ncl_doio(vp, bp, cred, td, 0);
581 if (error) {
582 bp->b_ioflags |= BIO_ERROR;
583 brelse(bp);
584 return (error);
585 }
586 }
587 n = min(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
588 on = 0;
589 break;
590 case VDIR:
591 NFSINCRGLOBAL(newnfsstats.biocache_readdirs);
592 if (np->n_direofoffset
593 && uio->uio_offset >= np->n_direofoffset) {
594 return (0);
595 }
596 lbn = (uoff_t)uio->uio_offset / NFS_DIRBLKSIZ;
597 on = uio->uio_offset & (NFS_DIRBLKSIZ - 1);
598 bp = nfs_getcacheblk(vp, lbn, NFS_DIRBLKSIZ, td);
599 if (!bp) {
600 error = newnfs_sigintr(nmp, td);
601 return (error ? error : EINTR);
602 }
603 if ((bp->b_flags & B_CACHE) == 0) {
604 bp->b_iocmd = BIO_READ;
605 vfs_busy_pages(bp, 0);
606 error = ncl_doio(vp, bp, cred, td, 0);
607 if (error) {
608 brelse(bp);
609 }
610 while (error == NFSERR_BAD_COOKIE) {
611 ncl_invaldir(vp);
612 error = ncl_vinvalbuf(vp, 0, td, 1);
613 /*
614 * Yuck! The directory has been modified on the
615 * server. The only way to get the block is by
616 * reading from the beginning to get all the
617 * offset cookies.
618 *
619 * Leave the last bp intact unless there is an error.
620 * Loop back up to the while if the error is another
			 * NFSERR_BAD_COOKIE (double yuck!).
622 */
623 for (i = 0; i <= lbn && !error; i++) {
624 if (np->n_direofoffset
625 && (i * NFS_DIRBLKSIZ) >= np->n_direofoffset)
626 return (0);
627 bp = nfs_getcacheblk(vp, i, NFS_DIRBLKSIZ, td);
628 if (!bp) {
629 error = newnfs_sigintr(nmp, td);
630 return (error ? error : EINTR);
631 }
632 if ((bp->b_flags & B_CACHE) == 0) {
633 bp->b_iocmd = BIO_READ;
634 vfs_busy_pages(bp, 0);
635 error = ncl_doio(vp, bp, cred, td, 0);
636 /*
637 * no error + B_INVAL == directory EOF,
638 * use the block.
639 */
640 if (error == 0 && (bp->b_flags & B_INVAL))
641 break;
642 }
643 /*
644 * An error will throw away the block and the
645 * for loop will break out. If no error and this
646 * is not the block we want, we throw away the
647 * block and go for the next one via the for loop.
648 */
649 if (error || i < lbn)
650 brelse(bp);
651 }
652 }
653 /*
654 * The above while is repeated if we hit another cookie
655 * error. If we hit an error and it wasn't a cookie error,
656 * we give up.
657 */
658 if (error)
659 return (error);
660 }
661
662 /*
663 * If not eof and read aheads are enabled, start one.
664 * (You need the current block first, so that you have the
665 * directory offset cookie of the next block.)
666 */
667 if (nmp->nm_readahead > 0 &&
668 (bp->b_flags & B_INVAL) == 0 &&
669 (np->n_direofoffset == 0 ||
670 (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) &&
671 incore(&vp->v_bufobj, lbn + 1) == NULL) {
672 rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, td);
673 if (rabp) {
674 if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
675 rabp->b_flags |= B_ASYNC;
676 rabp->b_iocmd = BIO_READ;
677 vfs_busy_pages(rabp, 0);
678 if (ncl_asyncio(nmp, rabp, cred, td)) {
679 rabp->b_flags |= B_INVAL;
680 rabp->b_ioflags |= BIO_ERROR;
681 vfs_unbusy_pages(rabp);
682 brelse(rabp);
683 }
684 } else {
685 brelse(rabp);
686 }
687 }
688 }
689 /*
		 * Unlike VREG files, whose buffer size ( bp->b_bcount ) is
691 * chopped for the EOF condition, we cannot tell how large
692 * NFS directories are going to be until we hit EOF. So
693 * an NFS directory buffer is *not* chopped to its EOF. Now,
694 * it just so happens that b_resid will effectively chop it
695 * to EOF. *BUT* this information is lost if the buffer goes
696 * away and is reconstituted into a B_CACHE state ( due to
697 * being VMIO ) later. So we keep track of the directory eof
698 * in np->n_direofoffset and chop it off as an extra step
699 * right here.
700 */
701 n = lmin(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid - on);
702 if (np->n_direofoffset && n > np->n_direofoffset - uio->uio_offset)
703 n = np->n_direofoffset - uio->uio_offset;
704 break;
705 default:
706 ncl_printf(" ncl_bioread: type %x unexpected\n", vp->v_type);
707 bp = NULL;
708 break;
		}
710
711 if (n > 0) {
712 error = uiomove(bp->b_data + on, (int)n, uio);
713 }
714 if (vp->v_type == VLNK)
715 n = 0;
716 if (bp != NULL)
717 brelse(bp);
718 } while (error == 0 && uio->uio_resid > 0 && n > 0);
719 return (error);
720 }
721
722 /*
723 * The NFS write path cannot handle iovecs with len > 1. So we need to
724 * break up iovecs accordingly (restricting them to wsize).
725 * For the SYNC case, we can do this with 1 copy (user buffer -> mbuf).
726 * For the ASYNC case, 2 copies are needed. The first a copy from the
727 * user buffer to a staging buffer and then a second copy from the staging
728 * buffer to mbufs. This can be optimized by copying from the user buffer
729 * directly into mbufs and passing the chain down, but that requires a
730 * fair amount of re-working of the relevant codepaths (and can be done
731 * later).
732 */
static int
nfs_directio_write(struct vnode *vp, struct uio *uiop, struct ucred *cred,
    int ioflag)
{
740 int error;
741 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
742 struct thread *td = uiop->uio_td;
743 int size;
744 int wsize;
745
746 mtx_lock(&nmp->nm_mtx);
747 wsize = nmp->nm_wsize;
748 mtx_unlock(&nmp->nm_mtx);
749 if (ioflag & IO_SYNC) {
750 int iomode, must_commit;
751 struct uio uio;
752 struct iovec iov;
753 do_sync:
754 while (uiop->uio_resid > 0) {
755 size = min(uiop->uio_resid, wsize);
756 size = min(uiop->uio_iov->iov_len, size);
757 iov.iov_base = uiop->uio_iov->iov_base;
758 iov.iov_len = size;
759 uio.uio_iov = &iov;
760 uio.uio_iovcnt = 1;
761 uio.uio_offset = uiop->uio_offset;
762 uio.uio_resid = size;
763 uio.uio_segflg = UIO_USERSPACE;
764 uio.uio_rw = UIO_WRITE;
765 uio.uio_td = td;
766 iomode = NFSWRITE_FILESYNC;
767 error = ncl_writerpc(vp, &uio, cred, &iomode,
768 &must_commit, 0);
769 KASSERT((must_commit == 0),
770 ("ncl_directio_write: Did not commit write"));
771 if (error)
772 return (error);
773 uiop->uio_offset += size;
774 uiop->uio_resid -= size;
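			/*
			 * Step past the bytes just written: either retire
			 * the exhausted iovec or advance within the
			 * current one.
			 */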
775 if (uiop->uio_iov->iov_len <= size) {
776 uiop->uio_iovcnt--;
777 uiop->uio_iov++;
778 } else {
779 uiop->uio_iov->iov_base =
780 (char *)uiop->uio_iov->iov_base + size;
781 uiop->uio_iov->iov_len -= size;
782 }
783 }
784 } else {
785 struct uio *t_uio;
786 struct iovec *t_iov;
787 struct buf *bp;
788
789 /*
790 * Break up the write into blocksize chunks and hand these
791 * over to nfsiod's for write back.
		 * Unfortunately, this incurs a copy of the data, since
		 * the user could modify the buffer before the write is
		 * initiated.
795 *
796 * The obvious optimization here is that one of the 2 copies
797 * in the async write path can be eliminated by copying the
798 * data here directly into mbufs and passing the mbuf chain
799 * down. But that will require a fair amount of re-working
800 * of the code and can be done if there's enough interest
801 * in NFS directio access.
802 */
803 while (uiop->uio_resid > 0) {
804 size = min(uiop->uio_resid, wsize);
805 size = min(uiop->uio_iov->iov_len, size);
806 bp = getpbuf(&ncl_pbuf_freecnt);
807 t_uio = malloc(sizeof(struct uio), M_NFSDIRECTIO, M_WAITOK);
808 t_iov = malloc(sizeof(struct iovec), M_NFSDIRECTIO, M_WAITOK);
809 t_iov->iov_base = malloc(size, M_NFSDIRECTIO, M_WAITOK);
810 t_iov->iov_len = size;
811 t_uio->uio_iov = t_iov;
812 t_uio->uio_iovcnt = 1;
813 t_uio->uio_offset = uiop->uio_offset;
814 t_uio->uio_resid = size;
815 t_uio->uio_segflg = UIO_SYSSPACE;
816 t_uio->uio_rw = UIO_WRITE;
817 t_uio->uio_td = td;
818 bcopy(uiop->uio_iov->iov_base, t_iov->iov_base, size);
819 bp->b_flags |= B_DIRECT;
820 bp->b_iocmd = BIO_WRITE;
821 if (cred != NOCRED) {
822 crhold(cred);
823 bp->b_wcred = cred;
824 } else
825 bp->b_wcred = NOCRED;
826 bp->b_caller1 = (void *)t_uio;
827 bp->b_vp = vp;
828 error = ncl_asyncio(nmp, bp, NOCRED, td);
829 if (error) {
830 free(t_iov->iov_base, M_NFSDIRECTIO);
831 free(t_iov, M_NFSDIRECTIO);
832 free(t_uio, M_NFSDIRECTIO);
833 bp->b_vp = NULL;
834 relpbuf(bp, &ncl_pbuf_freecnt);
835 if (error == EINTR)
836 return (error);
837 goto do_sync;
838 }
839 uiop->uio_offset += size;
840 uiop->uio_resid -= size;
841 if (uiop->uio_iov->iov_len <= size) {
842 uiop->uio_iovcnt--;
843 uiop->uio_iov++;
844 } else {
845 uiop->uio_iov->iov_base =
846 (char *)uiop->uio_iov->iov_base + size;
847 uiop->uio_iov->iov_len -= size;
848 }
849 }
850 }
851 return (0);
852 }
853
854 /*
855 * Vnode op for write using bio
856 */
857 int
858 ncl_write(struct vop_write_args *ap)
859 {
860 int biosize;
861 struct uio *uio = ap->a_uio;
862 struct thread *td = uio->uio_td;
863 struct vnode *vp = ap->a_vp;
864 struct nfsnode *np = VTONFS(vp);
865 struct ucred *cred = ap->a_cred;
866 int ioflag = ap->a_ioflag;
867 struct buf *bp;
868 struct vattr vattr;
869 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
870 daddr_t lbn;
871 int bcount;
872 int n, on, error = 0;
	struct proc *p = td ? td->td_proc : NULL;
874
875 KASSERT(uio->uio_rw == UIO_WRITE, ("ncl_write mode"));
876 KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
877 ("ncl_write proc"));
878 if (vp->v_type != VREG)
879 return (EIO);
880 mtx_lock(&np->n_mtx);
881 if (np->n_flag & NWRITEERR) {
882 np->n_flag &= ~NWRITEERR;
883 mtx_unlock(&np->n_mtx);
884 return (np->n_error);
885 } else
886 mtx_unlock(&np->n_mtx);
887 mtx_lock(&nmp->nm_mtx);
888 if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
889 (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
890 mtx_unlock(&nmp->nm_mtx);
891 (void)ncl_fsinfo(nmp, vp, cred, td);
892 mtx_lock(&nmp->nm_mtx);
893 }
894 if (nmp->nm_wsize == 0)
895 (void) newnfs_iosize(nmp);
896 mtx_unlock(&nmp->nm_mtx);
897
898 /*
899 * Synchronously flush pending buffers if we are in synchronous
900 * mode or if we are appending.
901 */
902 if (ioflag & (IO_APPEND | IO_SYNC)) {
903 mtx_lock(&np->n_mtx);
904 if (np->n_flag & NMODIFIED) {
905 mtx_unlock(&np->n_mtx);
906 #ifdef notyet /* Needs matching nonblock semantics elsewhere, too. */
907 /*
908 * Require non-blocking, synchronous writes to
909 * dirty files to inform the program it needs
910 * to fsync(2) explicitly.
911 */
912 if (ioflag & IO_NDELAY)
913 return (EAGAIN);
914 #endif
915 flush_and_restart:
916 np->n_attrstamp = 0;
917 error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
918 if (error)
919 return (error);
920 } else
921 mtx_unlock(&np->n_mtx);
922 }
923
924 /*
925 * If IO_APPEND then load uio_offset. We restart here if we cannot
926 * get the append lock.
927 */
928 if (ioflag & IO_APPEND) {
929 np->n_attrstamp = 0;
930 error = VOP_GETATTR(vp, &vattr, cred);
931 if (error)
932 return (error);
933 mtx_lock(&np->n_mtx);
934 uio->uio_offset = np->n_size;
935 mtx_unlock(&np->n_mtx);
936 }
937
938 if (uio->uio_offset < 0)
939 return (EINVAL);
940 if ((uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
941 return (EFBIG);
942 if (uio->uio_resid == 0)
943 return (0);
944
945 if (newnfs_directio_enable && (ioflag & IO_DIRECT) && vp->v_type == VREG)
946 return nfs_directio_write(vp, uio, cred, ioflag);
947
948 /*
949 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
951 */
952 if (p != NULL) {
953 PROC_LOCK(p);
954 if (uio->uio_offset + uio->uio_resid >
955 lim_cur(p, RLIMIT_FSIZE)) {
956 psignal(p, SIGXFSZ);
957 PROC_UNLOCK(p);
958 return (EFBIG);
959 }
960 PROC_UNLOCK(p);
961 }
962
963 biosize = vp->v_mount->mnt_stat.f_iosize;
964 /*
965 * Find all of this file's B_NEEDCOMMIT buffers. If our writes
966 * would exceed the local maximum per-file write commit size when
967 * combined with those, we must decide whether to flush,
968 * go synchronous, or return error. We don't bother checking
969 * IO_UNIT -- we just make all writes atomic anyway, as there's
970 * no point optimizing for something that really won't ever happen.
971 */
972 if (!(ioflag & IO_SYNC)) {
		int nflag;
		int needrestart = 0;

		mtx_lock(&np->n_mtx);
		nflag = np->n_flag;
		mtx_unlock(&np->n_mtx);
979 if (nmp->nm_wcommitsize < uio->uio_resid) {
980 /*
981 * If this request could not possibly be completed
982 * without exceeding the maximum outstanding write
983 * commit size, see if we can convert it into a
984 * synchronous write operation.
985 */
986 if (ioflag & IO_NDELAY)
987 return (EAGAIN);
988 ioflag |= IO_SYNC;
989 if (nflag & NMODIFIED)
990 needrestart = 1;
991 } else if (nflag & NMODIFIED) {
992 int wouldcommit = 0;
993 BO_LOCK(&vp->v_bufobj);
994 if (vp->v_bufobj.bo_dirty.bv_cnt != 0) {
995 TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd,
996 b_bobufs) {
997 if (bp->b_flags & B_NEEDCOMMIT)
998 wouldcommit += bp->b_bcount;
999 }
1000 }
1001 BO_UNLOCK(&vp->v_bufobj);
			/*
			 * Since we are not operating synchronously, we
			 * hold all of these buffers, whether transmitted
			 * or not, until they have been committed.  If not
			 * limited, this will lead to the buffer cache
			 * deadlocking, as no one else can flush our
			 * uncommitted buffers.
			 */
1010 wouldcommit += uio->uio_resid;
1011 /*
1012 * If we would initially exceed the maximum
1013 * outstanding write commit size, flush and restart.
1014 */
1015 if (wouldcommit > nmp->nm_wcommitsize)
1016 needrestart = 1;
1017 }
1018 if (needrestart)
1019 goto flush_and_restart;
1020 }
1021
1022 do {
1023 NFSINCRGLOBAL(newnfsstats.biocache_writes);
1024 lbn = uio->uio_offset / biosize;
1025 on = uio->uio_offset & (biosize-1);
1026 n = min((unsigned)(biosize - on), uio->uio_resid);
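		/*
		 * n is the number of bytes of this write that land in the
		 * current block: everything from offset "on" to the end of
		 * the block, capped by the bytes remaining in the uio.
		 */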
1027 again:
1028 /*
1029 * Handle direct append and file extension cases, calculate
1030 * unaligned buffer size.
1031 */
1032 mtx_lock(&np->n_mtx);
1033 if (uio->uio_offset == np->n_size && n) {
1034 mtx_unlock(&np->n_mtx);
1035 /*
1036 * Get the buffer (in its pre-append state to maintain
1037 * B_CACHE if it was previously set). Resize the
1038 * nfsnode after we have locked the buffer to prevent
1039 * readers from reading garbage.
1040 */
1041 bcount = on;
1042 bp = nfs_getcacheblk(vp, lbn, bcount, td);
1043
1044 if (bp != NULL) {
1045 long save;
1046
1047 mtx_lock(&np->n_mtx);
1048 np->n_size = uio->uio_offset + n;
1049 np->n_flag |= NMODIFIED;
1050 vnode_pager_setsize(vp, np->n_size);
1051 mtx_unlock(&np->n_mtx);
1052
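				/*
				 * allocbuf() can change B_CACHE while
				 * resizing, so preserve the pre-append
				 * state across the call.
				 */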
1053 save = bp->b_flags & B_CACHE;
1054 bcount += n;
1055 allocbuf(bp, bcount);
1056 bp->b_flags |= save;
1057 }
1058 } else {
1059 /*
1060 * Obtain the locked cache block first, and then
1061 * adjust the file's size as appropriate.
1062 */
1063 bcount = on + n;
1064 if ((off_t)lbn * biosize + bcount < np->n_size) {
1065 if ((off_t)(lbn + 1) * biosize < np->n_size)
1066 bcount = biosize;
1067 else
1068 bcount = np->n_size - (off_t)lbn * biosize;
1069 }
1070 mtx_unlock(&np->n_mtx);
1071 bp = nfs_getcacheblk(vp, lbn, bcount, td);
1072 mtx_lock(&np->n_mtx);
1073 if (uio->uio_offset + n > np->n_size) {
1074 np->n_size = uio->uio_offset + n;
1075 np->n_flag |= NMODIFIED;
1076 vnode_pager_setsize(vp, np->n_size);
1077 }
1078 mtx_unlock(&np->n_mtx);
1079 }
1080
1081 if (!bp) {
1082 error = newnfs_sigintr(nmp, td);
1083 if (!error)
1084 error = EINTR;
1085 break;
1086 }
1087
1088 /*
1089 * Issue a READ if B_CACHE is not set. In special-append
1090 * mode, B_CACHE is based on the buffer prior to the write
1091 * op and is typically set, avoiding the read. If a read
1092 * is required in special append mode, the server will
1093 * probably send us a short-read since we extended the file
1094 * on our end, resulting in b_resid == 0 and, thusly,
1095 * B_CACHE getting set.
1096 *
1097 * We can also avoid issuing the read if the write covers
1098 * the entire buffer. We have to make sure the buffer state
1099 * is reasonable in this case since we will not be initiating
1100 * I/O. See the comments in kern/vfs_bio.c's getblk() for
1101 * more information.
1102 *
1103 * B_CACHE may also be set due to the buffer being cached
1104 * normally.
1105 */
1106
1107 if (on == 0 && n == bcount) {
1108 bp->b_flags |= B_CACHE;
1109 bp->b_flags &= ~B_INVAL;
1110 bp->b_ioflags &= ~BIO_ERROR;
1111 }
1112
1113 if ((bp->b_flags & B_CACHE) == 0) {
1114 bp->b_iocmd = BIO_READ;
1115 vfs_busy_pages(bp, 0);
1116 error = ncl_doio(vp, bp, cred, td, 0);
1117 if (error) {
1118 brelse(bp);
1119 break;
1120 }
1121 }
1122 if (bp->b_wcred == NOCRED)
1123 bp->b_wcred = crhold(cred);
1124 mtx_lock(&np->n_mtx);
1125 np->n_flag |= NMODIFIED;
1126 mtx_unlock(&np->n_mtx);
1127
1128 /*
1129 * If dirtyend exceeds file size, chop it down. This should
1130 * not normally occur but there is an append race where it
1131 * might occur XXX, so we log it.
1132 *
1133 * If the chopping creates a reverse-indexed or degenerate
1134 * situation with dirtyoff/end, we 0 both of them.
1135 */
1136
1137 if (bp->b_dirtyend > bcount) {
1138 ncl_printf("NFS append race @%lx:%d\n",
1139 (long)bp->b_blkno * DEV_BSIZE,
1140 bp->b_dirtyend - bcount);
1141 bp->b_dirtyend = bcount;
1142 }
1143
1144 if (bp->b_dirtyoff >= bp->b_dirtyend)
1145 bp->b_dirtyoff = bp->b_dirtyend = 0;
1146
1147 /*
1148 * If the new write will leave a contiguous dirty
1149 * area, just update the b_dirtyoff and b_dirtyend,
1150 * otherwise force a write rpc of the old dirty area.
1151 *
1152 * While it is possible to merge discontiguous writes due to
1153 * our having a B_CACHE buffer ( and thus valid read data
1154 * for the hole), we don't because it could lead to
1155 * significant cache coherency problems with multiple clients,
1156 * especially if locking is implemented later on.
1157 *
	 * As an optimization we could theoretically maintain
1159 * a linked list of discontinuous areas, but we would still
1160 * have to commit them separately so there isn't much
1161 * advantage to it except perhaps a bit of asynchronization.
1162 */
1163
1164 if (bp->b_dirtyend > 0 &&
1165 (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
1166 if (bwrite(bp) == EINTR) {
1167 error = EINTR;
1168 break;
1169 }
1170 goto again;
1171 }
1172
1173 error = uiomove((char *)bp->b_data + on, n, uio);
1174
1175 /*
1176 * Since this block is being modified, it must be written
1177 * again and not just committed. Since write clustering does
1178 * not work for the stage 1 data write, only the stage 2
1179 * commit rpc, we have to clear B_CLUSTEROK as well.
1180 */
1181 bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
1182
1183 if (error) {
1184 bp->b_ioflags |= BIO_ERROR;
1185 brelse(bp);
1186 break;
1187 }
1188
1189 /*
1190 * Only update dirtyoff/dirtyend if not a degenerate
1191 * condition.
1192 */
1193 if (n) {
1194 if (bp->b_dirtyend > 0) {
1195 bp->b_dirtyoff = min(on, bp->b_dirtyoff);
1196 bp->b_dirtyend = max((on + n), bp->b_dirtyend);
1197 } else {
1198 bp->b_dirtyoff = on;
1199 bp->b_dirtyend = on + n;
1200 }
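			/*
			 * Mark the bytes we just copied in as valid so a
			 * later read of this range is served from the
			 * cache instead of going over the wire.
			 */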
1201 vfs_bio_set_valid(bp, on, n);
1202 }
1203
1204 /*
1205 * If IO_SYNC do bwrite().
1206 *
1207 * IO_INVAL appears to be unused. The idea appears to be
1208 * to turn off caching in this case. Very odd. XXX
1209 */
1210 if ((ioflag & IO_SYNC)) {
1211 if (ioflag & IO_INVAL)
1212 bp->b_flags |= B_NOCACHE;
1213 error = bwrite(bp);
1214 if (error)
1215 break;
1216 } else if ((n + on) == biosize) {
1217 bp->b_flags |= B_ASYNC;
1218 (void) ncl_writebp(bp, 0, NULL);
1219 } else {
1220 bdwrite(bp);
1221 }
1222 } while (uio->uio_resid > 0 && n > 0);
1223
1224 return (error);
1225 }
1226
1227 /*
1228 * Get an nfs cache block.
1229 *
1230 * Allocate a new one if the block isn't currently in the cache
1231 * and return the block marked busy. If the calling process is
1232 * interrupted by a signal for an interruptible mount point, return
1233 * NULL.
1234 *
1235 * The caller must carefully deal with the possible B_INVAL state of
1236 * the buffer. ncl_doio() clears B_INVAL (and ncl_asyncio() clears it
1237 * indirectly), so synchronous reads can be issued without worrying about
1238 * the B_INVAL state. We have to be a little more careful when dealing
1239 * with writes (see comments in nfs_write()) when extending a file past
1240 * its EOF.
1241 */
1242 static struct buf *
1243 nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct thread *td)
1244 {
1245 struct buf *bp;
1246 struct mount *mp;
1247 struct nfsmount *nmp;
1248
1249 mp = vp->v_mount;
1250 nmp = VFSTONFS(mp);
1251
1252 if (nmp->nm_flag & NFSMNT_INT) {
1253 sigset_t oldset;
1254
1255 newnfs_set_sigmask(td, &oldset);
1256 bp = getblk(vp, bn, size, NFS_PCATCH, 0, 0);
1257 newnfs_restore_sigmask(td, &oldset);
1258 while (bp == NULL) {
1259 if (newnfs_sigintr(nmp, td))
1260 return (NULL);
1261 bp = getblk(vp, bn, size, 0, 2 * hz, 0);
1262 }
1263 } else {
1264 bp = getblk(vp, bn, size, 0, 0, 0);
1265 }
1266
1267 if (vp->v_type == VREG) {
1268 int biosize;
1269
1270 biosize = mp->mnt_stat.f_iosize;
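		/* Express the logical block number in DEV_BSIZE units. */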
1271 bp->b_blkno = bn * (biosize / DEV_BSIZE);
1272 }
1273 return (bp);
1274 }
1275
1276 /*
1277 * Flush and invalidate all dirty buffers. If another process is already
1278 * doing the flush, just wait for completion.
1279 */
1280 int
1281 ncl_vinvalbuf(struct vnode *vp, int flags, struct thread *td, int intrflg)
1282 {
1283 struct nfsnode *np = VTONFS(vp);
1284 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
1285 int error = 0, slpflag, slptimeo;
1286 int old_lock = 0;
1287
1288 ASSERT_VOP_LOCKED(vp, "ncl_vinvalbuf");
1289
1290 if ((nmp->nm_flag & NFSMNT_INT) == 0)
1291 intrflg = 0;
1292 if ((nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF))
1293 intrflg = 1;
1294 if (intrflg) {
1295 slpflag = NFS_PCATCH;
1296 slptimeo = 2 * hz;
1297 } else {
1298 slpflag = 0;
1299 slptimeo = 0;
1300 }
1301
1302 old_lock = ncl_upgrade_vnlock(vp);
1303 if (vp->v_iflag & VI_DOOMED) {
1304 /*
1305 * Since vgonel() uses the generic vinvalbuf() to flush
1306 * dirty buffers and it does not call this function, it
1307 * is safe to just return OK when VI_DOOMED is set.
1308 */
1309 ncl_downgrade_vnlock(vp, old_lock);
1310 return (0);
1311 }
1312
1313 /*
1314 * Now, flush as required.
1315 */
1316 if ((flags & V_SAVE) && (vp->v_bufobj.bo_object != NULL)) {
1317 VM_OBJECT_LOCK(vp->v_bufobj.bo_object);
1318 vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
1319 VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object);
1320 /*
1321 * If the page clean was interrupted, fail the invalidation.
1322 * Not doing so, we run the risk of losing dirty pages in the
1323 * vinvalbuf() call below.
1324 */
1325 if (intrflg && (error = newnfs_sigintr(nmp, td)))
1326 goto out;
1327 }
1328
1329 error = vinvalbuf(vp, flags, slpflag, 0);
1330 while (error) {
1331 if (intrflg && (error = newnfs_sigintr(nmp, td)))
1332 goto out;
1333 error = vinvalbuf(vp, flags, 0, slptimeo);
1334 }
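	/*
	 * NMODIFIED can only be cleared once no direct async writes are
	 * still outstanding; ncl_doio_directwrite() clears it for us when
	 * the last one drains.
	 */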
1335 mtx_lock(&np->n_mtx);
1336 if (np->n_directio_asyncwr == 0)
1337 np->n_flag &= ~NMODIFIED;
1338 mtx_unlock(&np->n_mtx);
1339 out:
1340 ncl_downgrade_vnlock(vp, old_lock);
1341 return error;
1342 }
1343
1344 /*
1345 * Initiate asynchronous I/O. Return an error if no nfsiods are available.
1346 * This is mainly to avoid queueing async I/O requests when the nfsiods
1347 * are all hung on a dead server.
1348 *
1349 * Note: ncl_asyncio() does not clear (BIO_ERROR|B_INVAL) but when the bp
1350 * is eventually dequeued by the async daemon, ncl_doio() *will*.
1351 */
1352 int
1353 ncl_asyncio(struct nfsmount *nmp, struct buf *bp, struct ucred *cred, struct thread *td)
1354 {
1355 int iod;
1356 int gotiod;
1357 int slpflag = 0;
1358 int slptimeo = 0;
1359 int error, error2;
1360
1361 /*
1362 * Unless iothreadcnt is set > 0, don't bother with async I/O
1363 * threads. For LAN environments, they don't buy any significant
1364 * performance improvement that you can't get with large block
1365 * sizes.
1366 */
1367 if (nmp->nm_readahead == 0)
1368 return (EPERM);
1369
1370 /*
	 * Commits are usually short and sweet so let's save some CPU and
1372 * leave the async daemons for more important rpc's (such as reads
1373 * and writes).
1374 */
1375 mtx_lock(&ncl_iod_mutex);
1376 if (bp->b_iocmd == BIO_WRITE && (bp->b_flags & B_NEEDCOMMIT) &&
1377 (nmp->nm_bufqiods > ncl_numasync / 2)) {
1378 mtx_unlock(&ncl_iod_mutex);
1379 return(EIO);
1380 }
1381 again:
1382 if (nmp->nm_flag & NFSMNT_INT)
1383 slpflag = NFS_PCATCH;
1384 gotiod = FALSE;
1385
1386 /*
1387 * Find a free iod to process this request.
1388 */
1389 for (iod = 0; iod < ncl_numasync; iod++)
1390 if (ncl_iodwant[iod] == NFSIOD_AVAILABLE) {
1391 gotiod = TRUE;
1392 break;
1393 }
1394
1395 /*
1396 * Try to create one if none are free.
1397 */
1398 if (!gotiod) {
1399 iod = ncl_nfsiodnew(1);
1400 if (iod != -1)
1401 gotiod = TRUE;
1402 }
1403
1404 if (gotiod) {
1405 /*
1406 * Found one, so wake it up and tell it which
1407 * mount to process.
1408 */
1409 NFS_DPF(ASYNCIO, ("ncl_asyncio: waking iod %d for mount %p\n",
1410 iod, nmp));
1411 ncl_iodwant[iod] = NFSIOD_NOT_AVAILABLE;
1412 ncl_iodmount[iod] = nmp;
1413 nmp->nm_bufqiods++;
1414 wakeup(&ncl_iodwant[iod]);
1415 }
1416
1417 /*
1418 * If none are free, we may already have an iod working on this mount
1419 * point. If so, it will process our request.
1420 */
1421 if (!gotiod) {
1422 if (nmp->nm_bufqiods > 0) {
1423 NFS_DPF(ASYNCIO,
1424 ("ncl_asyncio: %d iods are already processing mount %p\n",
1425 nmp->nm_bufqiods, nmp));
1426 gotiod = TRUE;
1427 }
1428 }
1429
1430 /*
1431 * If we have an iod which can process the request, then queue
1432 * the buffer.
1433 */
1434 if (gotiod) {
1435 /*
1436 * Ensure that the queue never grows too large. We still want
		 * to asynchronize so we block rather than return EIO.
1438 */
1439 while (nmp->nm_bufqlen >= 2*ncl_numasync) {
1440 NFS_DPF(ASYNCIO,
1441 ("ncl_asyncio: waiting for mount %p queue to drain\n", nmp));
1442 nmp->nm_bufqwant = TRUE;
1443 error = newnfs_msleep(td, &nmp->nm_bufq,
1444 &ncl_iod_mutex, slpflag | PRIBIO, "nfsaio",
1445 slptimeo);
1446 if (error) {
1447 error2 = newnfs_sigintr(nmp, td);
1448 if (error2) {
1449 mtx_unlock(&ncl_iod_mutex);
1450 return (error2);
1451 }
1452 if (slpflag == NFS_PCATCH) {
1453 slpflag = 0;
1454 slptimeo = 2 * hz;
1455 }
1456 }
1457 /*
1458 * We might have lost our iod while sleeping,
			 * so check and loop if necessary.
1460 */
1461 if (nmp->nm_bufqiods == 0) {
1462 NFS_DPF(ASYNCIO,
1463 ("ncl_asyncio: no iods after mount %p queue was drained, looping\n", nmp));
1464 goto again;
1465 }
1466 }
1467
1468 /* We might have lost our nfsiod */
1469 if (nmp->nm_bufqiods == 0) {
1470 NFS_DPF(ASYNCIO,
1471 ("ncl_asyncio: no iods after mount %p queue was drained, looping\n", nmp));
1472 goto again;
1473 }
1474
1475 if (bp->b_iocmd == BIO_READ) {
1476 if (bp->b_rcred == NOCRED && cred != NOCRED)
1477 bp->b_rcred = crhold(cred);
1478 } else {
1479 if (bp->b_wcred == NOCRED && cred != NOCRED)
1480 bp->b_wcred = crhold(cred);
1481 }
1482
1483 if (bp->b_flags & B_REMFREE)
1484 bremfreef(bp);
1485 BUF_KERNPROC(bp);
1486 TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
1487 nmp->nm_bufqlen++;
1488 if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
1489 mtx_lock(&(VTONFS(bp->b_vp))->n_mtx);
1490 VTONFS(bp->b_vp)->n_flag |= NMODIFIED;
1491 VTONFS(bp->b_vp)->n_directio_asyncwr++;
1492 mtx_unlock(&(VTONFS(bp->b_vp))->n_mtx);
1493 }
1494 mtx_unlock(&ncl_iod_mutex);
1495 return (0);
1496 }
1497
1498 mtx_unlock(&ncl_iod_mutex);
1499
1500 /*
1501 * All the iods are busy on other mounts, so return EIO to
1502 * force the caller to process the i/o synchronously.
1503 */
1504 NFS_DPF(ASYNCIO, ("ncl_asyncio: no iods available, i/o is synchronous\n"));
1505 return (EIO);
1506 }
1507
1508 void
1509 ncl_doio_directwrite(struct buf *bp)
1510 {
1511 int iomode, must_commit;
1512 struct uio *uiop = (struct uio *)bp->b_caller1;
1513 char *iov_base = uiop->uio_iov->iov_base;
1514
1515 iomode = NFSWRITE_FILESYNC;
1516 uiop->uio_td = NULL; /* NULL since we're in nfsiod */
1517 ncl_writerpc(bp->b_vp, uiop, bp->b_wcred, &iomode, &must_commit, 0);
1518 KASSERT((must_commit == 0), ("ncl_doio_directwrite: Did not commit write"));
1519 free(iov_base, M_NFSDIRECTIO);
1520 free(uiop->uio_iov, M_NFSDIRECTIO);
1521 free(uiop, M_NFSDIRECTIO);
1522 if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
1523 struct nfsnode *np = VTONFS(bp->b_vp);
1524 mtx_lock(&np->n_mtx);
1525 np->n_directio_asyncwr--;
1526 if (np->n_directio_asyncwr == 0) {
1527 np->n_flag &= ~NMODIFIED;
1528 if ((np->n_flag & NFSYNCWAIT)) {
1529 np->n_flag &= ~NFSYNCWAIT;
1530 wakeup((caddr_t)&np->n_directio_asyncwr);
1531 }
1532 }
1533 mtx_unlock(&np->n_mtx);
1534 }
1535 bp->b_vp = NULL;
1536 relpbuf(bp, &ncl_pbuf_freecnt);
1537 }
1538
1539 /*
1540 * Do an I/O operation to/from a cache block. This may be called
1541 * synchronously or from an nfsiod.
1542 */
1543 int
1544 ncl_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td,
1545 int called_from_strategy)
1546 {
1547 struct uio *uiop;
1548 struct nfsnode *np;
1549 struct nfsmount *nmp;
1550 int error = 0, iomode, must_commit = 0;
1551 struct uio uio;
1552 struct iovec io;
1553 struct proc *p = td ? td->td_proc : NULL;
1554 uint8_t iocmd;
1555
1556 np = VTONFS(vp);
1557 nmp = VFSTONFS(vp->v_mount);
1558 uiop = &uio;
1559 uiop->uio_iov = &io;
1560 uiop->uio_iovcnt = 1;
1561 uiop->uio_segflg = UIO_SYSSPACE;
1562 uiop->uio_td = td;
1563
1564 /*
1565 * clear BIO_ERROR and B_INVAL state prior to initiating the I/O. We
1566 * do this here so we do not have to do it in all the code that
1567 * calls us.
1568 */
1569 bp->b_flags &= ~B_INVAL;
1570 bp->b_ioflags &= ~BIO_ERROR;
1571
1572 KASSERT(!(bp->b_flags & B_DONE), ("ncl_doio: bp %p already marked done", bp));
1573 iocmd = bp->b_iocmd;
1574 if (iocmd == BIO_READ) {
1575 io.iov_len = uiop->uio_resid = bp->b_bcount;
1576 io.iov_base = bp->b_data;
1577 uiop->uio_rw = UIO_READ;
1578
1579 switch (vp->v_type) {
1580 case VREG:
1581 uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
1582 NFSINCRGLOBAL(newnfsstats.read_bios);
1583 error = ncl_readrpc(vp, uiop, cr);
1584
1585 if (!error) {
1586 if (uiop->uio_resid) {
1587 /*
1588 * If we had a short read with no error, we must have
1589 * hit a file hole. We should zero-fill the remainder.
1590 * This can also occur if the server hits the file EOF.
1591 *
1592 * Holes used to be able to occur due to pending
1593 * writes, but that is not possible any longer.
1594 */
1595 int nread = bp->b_bcount - uiop->uio_resid;
1596 int left = uiop->uio_resid;
1597
1598 if (left > 0)
1599 bzero((char *)bp->b_data + nread, left);
1600 uiop->uio_resid = 0;
1601 }
1602 }
1603 /* ASSERT_VOP_LOCKED(vp, "ncl_doio"); */
1604 if (p && (vp->v_vflag & VV_TEXT)) {
1605 mtx_lock(&np->n_mtx);
1606 if (NFS_TIMESPEC_COMPARE(&np->n_mtime, &np->n_vattr.na_mtime)) {
1607 mtx_unlock(&np->n_mtx);
1608 PROC_LOCK(p);
1609 killproc(p, "text file modification");
1610 PROC_UNLOCK(p);
1611 } else
1612 mtx_unlock(&np->n_mtx);
1613 }
1614 break;
1615 case VLNK:
1616 uiop->uio_offset = (off_t)0;
1617 NFSINCRGLOBAL(newnfsstats.readlink_bios);
1618 error = ncl_readlinkrpc(vp, uiop, cr);
1619 break;
1620 case VDIR:
1621 NFSINCRGLOBAL(newnfsstats.readdir_bios);
1622 uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ;
1623 if ((nmp->nm_flag & NFSMNT_RDIRPLUS) != 0) {
1624 error = ncl_readdirplusrpc(vp, uiop, cr, td);
1625 if (error == NFSERR_NOTSUPP)
1626 nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
1627 }
1628 if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
1629 error = ncl_readdirrpc(vp, uiop, cr, td);
1630 /*
1631 * end-of-directory sets B_INVAL but does not generate an
1632 * error.
1633 */
1634 if (error == 0 && uiop->uio_resid == bp->b_bcount)
1635 bp->b_flags |= B_INVAL;
1636 break;
1637 default:
1638 ncl_printf("ncl_doio: type %x unexpected\n", vp->v_type);
1639 break;
		}
1641 if (error) {
1642 bp->b_ioflags |= BIO_ERROR;
1643 bp->b_error = error;
1644 }
1645 } else {
1646 /*
1647 * If we only need to commit, try to commit
1648 */
1649 if (bp->b_flags & B_NEEDCOMMIT) {
1650 int retv;
1651 off_t off;
1652
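			/*
			 * Commit exactly the dirty byte range: the buffer's
			 * base byte offset plus b_dirtyoff, for
			 * (b_dirtyend - b_dirtyoff) bytes.
			 */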
1653 off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
1654 retv = ncl_commit(vp, off, bp->b_dirtyend-bp->b_dirtyoff,
1655 bp->b_wcred, td);
1656 if (retv == 0) {
1657 bp->b_dirtyoff = bp->b_dirtyend = 0;
1658 bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
1659 bp->b_resid = 0;
1660 bufdone(bp);
1661 return (0);
1662 }
1663 if (retv == NFSERR_STALEWRITEVERF) {
1664 ncl_clearcommit(vp->v_mount);
1665 }
1666 }
1667
1668 /*
1669 * Setup for actual write
1670 */
1671 mtx_lock(&np->n_mtx);
1672 if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > np->n_size)
1673 bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * DEV_BSIZE;
1674 mtx_unlock(&np->n_mtx);
1675
1676 if (bp->b_dirtyend > bp->b_dirtyoff) {
1677 io.iov_len = uiop->uio_resid = bp->b_dirtyend
1678 - bp->b_dirtyoff;
1679 uiop->uio_offset = (off_t)bp->b_blkno * DEV_BSIZE
1680 + bp->b_dirtyoff;
1681 io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
1682 uiop->uio_rw = UIO_WRITE;
1683 NFSINCRGLOBAL(newnfsstats.write_bios);
1684
1685 if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == B_ASYNC)
1686 iomode = NFSWRITE_UNSTABLE;
1687 else
1688 iomode = NFSWRITE_FILESYNC;
1689
1690 error = ncl_writerpc(vp, uiop, cr, &iomode, &must_commit,
1691 called_from_strategy);
1692
1693 /*
1694 * When setting B_NEEDCOMMIT also set B_CLUSTEROK to try
1695 * to cluster the buffers needing commit. This will allow
1696 * the system to submit a single commit rpc for the whole
1697 * cluster. We can do this even if the buffer is not 100%
1698 * dirty (relative to the NFS blocksize), so we optimize the
1699 * append-to-file-case.
1700 *
1701 * (when clearing B_NEEDCOMMIT, B_CLUSTEROK must also be
1702 * cleared because write clustering only works for commit
1703 * rpc's, not for the data portion of the write).
1704 */
1705
1706 if (!error && iomode == NFSWRITE_UNSTABLE) {
1707 bp->b_flags |= B_NEEDCOMMIT;
1708 if (bp->b_dirtyoff == 0
1709 && bp->b_dirtyend == bp->b_bcount)
1710 bp->b_flags |= B_CLUSTEROK;
1711 } else {
1712 bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
1713 }
1714
1715 /*
1716 * For an interrupted write, the buffer is still valid
1717 * and the write hasn't been pushed to the server yet,
1718 * so we can't set BIO_ERROR and report the interruption
1719 * by setting B_EINTR. For the B_ASYNC case, B_EINTR
1720 * is not relevant, so the rpc attempt is essentially
1721 * a noop. For the case of a V3 write rpc not being
1722 * committed to stable storage, the block is still
1723 * dirty and requires either a commit rpc or another
1724 * write rpc with iomode == NFSV3WRITE_FILESYNC before
1725 * the block is reused. This is indicated by setting
1726 * the B_DELWRI and B_NEEDCOMMIT flags.
1727 *
1728 * EIO is returned by ncl_writerpc() to indicate a recoverable
1729 * write error and is handled as above, except that
1730 * B_EINTR isn't set. One cause of this is a stale stateid
1731 * error for the RPC that indicates recovery is required,
1732 * when called with called_from_strategy != 0.
1733 *
1734 * If the buffer is marked B_PAGING, it does not reside on
1735 * the vp's paging queues so we cannot call bdirty(). The
1736 * bp in this case is not an NFS cache block so we should
1737 * be safe. XXX
1738 *
1739 * The logic below breaks up errors into recoverable and
1740 * unrecoverable. For the former, we clear B_INVAL|B_NOCACHE
1741 * and keep the buffer around for potential write retries.
1742 * For the latter (eg ESTALE), we toss the buffer away (B_INVAL)
1743 * and save the error in the nfsnode. This is less than ideal
1744 * but necessary. Keeping such buffers around could potentially
1745 * cause buffer exhaustion eventually (they can never be written
			 * out, so they would constantly be re-dirtied).  It also causes
1747 * all sorts of vfs panics. For non-recoverable write errors,
1748 * also invalidate the attrcache, so we'll be forced to go over
1749 * the wire for this object, returning an error to user on next
1750 * call (most of the time).
1751 */
1752 if (error == EINTR || error == EIO || error == ETIMEDOUT
1753 || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
1754 int s;
1755
1756 s = splbio();
1757 bp->b_flags &= ~(B_INVAL|B_NOCACHE);
1758 if ((bp->b_flags & B_PAGING) == 0) {
1759 bdirty(bp);
1760 bp->b_flags &= ~B_DONE;
1761 }
1762 if ((error == EINTR || error == ETIMEDOUT) &&
1763 (bp->b_flags & B_ASYNC) == 0)
1764 bp->b_flags |= B_EINTR;
1765 splx(s);
1766 } else {
1767 if (error) {
1768 bp->b_ioflags |= BIO_ERROR;
1769 bp->b_flags |= B_INVAL;
1770 bp->b_error = np->n_error = error;
1771 mtx_lock(&np->n_mtx);
1772 np->n_flag |= NWRITEERR;
1773 np->n_attrstamp = 0;
1774 mtx_unlock(&np->n_mtx);
1775 }
1776 bp->b_dirtyoff = bp->b_dirtyend = 0;
1777 }
1778 } else {
1779 bp->b_resid = 0;
1780 bufdone(bp);
1781 return (0);
1782 }
1783 }
1784 bp->b_resid = uiop->uio_resid;
1785 if (must_commit)
1786 ncl_clearcommit(vp->v_mount);
1787 bufdone(bp);
1788 return (error);
1789 }
1790
1791 /*
1792 * Used to aid in handling ftruncate() operations on the NFS client side.
1793 * Truncation creates a number of special problems for NFS. We have to
1794 * throw away VM pages and buffer cache buffers that are beyond EOF, and
1795 * we have to properly handle VM pages or (potentially dirty) buffers
1796 * that straddle the truncation point.
1797 */
1798
1799 int
1800 ncl_meta_setsize(struct vnode *vp, struct ucred *cred, struct thread *td, u_quad_t nsize)
1801 {
1802 struct nfsnode *np = VTONFS(vp);
1803 u_quad_t tsize;
1804 int biosize = vp->v_mount->mnt_stat.f_iosize;
1805 int error = 0;
1806
1807 mtx_lock(&np->n_mtx);
1808 tsize = np->n_size;
1809 np->n_size = nsize;
1810 mtx_unlock(&np->n_mtx);
1811
1812 if (nsize < tsize) {
1813 struct buf *bp;
1814 daddr_t lbn;
1815 int bufsize;
1816
1817 /*
1818 * vtruncbuf() doesn't get the buffer overlapping the
1819 * truncation point. We may have a B_DELWRI and/or B_CACHE
1820 * buffer that now needs to be truncated.
1821 */
1822 error = vtruncbuf(vp, cred, td, nsize, biosize);
1823 lbn = nsize / biosize;
1824 bufsize = nsize & (biosize - 1);
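		/*
		 * E.g. nsize 10000 with biosize 8192 leaves lbn 1 and a
		 * 1808 byte partial block straddling the new EOF.
		 */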
1825 bp = nfs_getcacheblk(vp, lbn, bufsize, td);
1826 if (!bp)
1827 return EINTR;
1828 if (bp->b_dirtyoff > bp->b_bcount)
1829 bp->b_dirtyoff = bp->b_bcount;
1830 if (bp->b_dirtyend > bp->b_bcount)
1831 bp->b_dirtyend = bp->b_bcount;
1832 bp->b_flags |= B_RELBUF; /* don't leave garbage around */
1833 brelse(bp);
1834 } else {
1835 vnode_pager_setsize(vp, nsize);
1836 }
1837 return(error);
1838 }
1839