FreeBSD/Linux Kernel Cross Reference
sys/nfsclient/nfs_bio.c


    1 /*
    2  * Copyright (c) 1989, 1993
    3  *      The Regents of the University of California.  All rights reserved.
    4  *
    5  * This code is derived from software contributed to Berkeley by
    6  * Rick Macklem at The University of Guelph.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  * 4. Neither the name of the University nor the names of its contributors
   17  *    may be used to endorse or promote products derived from this software
   18  *    without specific prior written permission.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   30  * SUCH DAMAGE.
   31  *
   32  *      @(#)nfs_bio.c   8.9 (Berkeley) 3/30/95
   33  */
   34 
   35 #include <sys/cdefs.h>
   36 __FBSDID("$FreeBSD: releng/5.3/sys/nfsclient/nfs_bio.c 136330 2004-10-09 17:11:22Z das $");
   37 
   38 #include <sys/param.h>
   39 #include <sys/systm.h>
   40 #include <sys/bio.h>
   41 #include <sys/buf.h>
   42 #include <sys/kernel.h>
   43 #include <sys/mount.h>
   44 #include <sys/proc.h>
   45 #include <sys/resourcevar.h>
   46 #include <sys/signalvar.h>
   47 #include <sys/vmmeter.h>
   48 #include <sys/vnode.h>
   49 
   50 #include <vm/vm.h>
   51 #include <vm/vm_extern.h>
   52 #include <vm/vm_page.h>
   53 #include <vm/vm_object.h>
   54 #include <vm/vm_pager.h>
   55 #include <vm/vnode_pager.h>
   56 
   57 #include <rpc/rpcclnt.h>
   58 
   59 #include <nfs/rpcv2.h>
   60 #include <nfs/nfsproto.h>
   61 #include <nfsclient/nfs.h>
   62 #include <nfsclient/nfsmount.h>
   63 #include <nfsclient/nfsnode.h>
   64 
   65 #include <nfs4client/nfs4.h>
   66 
   67 /*
   68  * Just call nfs_writebp() with the force argument set to 1.
   69  *
   70  * NOTE: B_DONE may or may not be set in a_bp on call.
   71  */
   72 static int
   73 nfs4_bwrite(struct buf *bp)
   74 {
   75 
   76         return (nfs4_writebp(bp, 1, curthread));
   77 }
   78 
   79 static int
   80 nfs_bwrite(struct buf *bp)
   81 {
   82 
   83         return (nfs_writebp(bp, 1, curthread));
   84 }
   85 
   86 struct buf_ops buf_ops_nfs4 = {
   87         "buf_ops_nfs4",
   88         nfs4_bwrite
   89 };
   90 
   91 struct buf_ops buf_ops_nfs = {
   92         "buf_ops_nfs",
   93         nfs_bwrite
   94 };
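
/*
 * [Annotation, not part of the original file]  These buf_ops tables hook the
 * generic buffer-write path: a buffer carrying b_op = &buf_ops_nfs (or
 * &buf_ops_nfs4) has its writes dispatched through the table, so a generic
 * buffer write on an NFS buffer is routed to nfs_writebp()/nfs4_writebp()
 * with the force argument set to 1, as the comment above describes.  A rough
 * sketch of the dispatch, assuming the buf_ops layout of this vintage of the
 * tree (two members, a name and a write hook):
 *
 *	struct buf_ops {
 *		char	*bop_name;
 *		int	(*bop_write)(struct buf *);
 *	};
 *
 *	error = (*bp->b_op->bop_write)(bp);
 */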
   95 
   96 static struct buf *nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size,
   97                     struct thread *td);
   98 
   99 /*
  100  * Vnode op for VM getpages.
  101  */
  102 int
  103 nfs_getpages(struct vop_getpages_args *ap)
  104 {
  105         int i, error, nextoff, size, toff, count, npages;
  106         struct uio uio;
  107         struct iovec iov;
  108         vm_offset_t kva;
  109         struct buf *bp;
  110         struct vnode *vp;
  111         struct thread *td;
  112         struct ucred *cred;
  113         struct nfsmount *nmp;
  114         vm_object_t object;
  115         vm_page_t *pages;
  116 
  117         GIANT_REQUIRED;
  118 
  119         vp = ap->a_vp;
  120         td = curthread;                         /* XXX */
  121         cred = curthread->td_ucred;             /* XXX */
  122         nmp = VFSTONFS(vp->v_mount);
  123         pages = ap->a_m;
  124         count = ap->a_count;
  125 
  126         if ((object = vp->v_object) == NULL) {
  127                 printf("nfs_getpages: called with non-merged cache vnode??\n");
  128                 return VM_PAGER_ERROR;
  129         }
  130 
  131         if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
  132             (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
  133                 /* We'll never get here for v4, because we always have fsinfo */
  134                 (void)nfs_fsinfo(nmp, vp, cred, td);
  135         }
  136 
  137         npages = btoc(count);
  138 
  139         /*
  140          * If the requested page is partially valid, just return it and
  141          * allow the pager to zero-out the blanks.  Partially valid pages
  142          * can only occur at the file EOF.
  143          */
  144 
  145         {
  146                 vm_page_t m = pages[ap->a_reqpage];
  147 
  148                 VM_OBJECT_LOCK(object);
  149                 vm_page_lock_queues();
  150                 if (m->valid != 0) {
  151                         /* handled by vm_fault now        */
  152                         /* vm_page_zero_invalid(m, TRUE); */
  153                         for (i = 0; i < npages; ++i) {
  154                                 if (i != ap->a_reqpage)
  155                                         vm_page_free(pages[i]);
  156                         }
  157                         vm_page_unlock_queues();
  158                         VM_OBJECT_UNLOCK(object);
  159                         return(0);
  160                 }
  161                 vm_page_unlock_queues();
  162                 VM_OBJECT_UNLOCK(object);
  163         }
  164 
  165         /*
  166          * We use only the kva address for the buffer, but this is extremely
  167          * convenient and fast.
  168          */
  169         bp = getpbuf(&nfs_pbuf_freecnt);
  170 
  171         kva = (vm_offset_t) bp->b_data;
  172         pmap_qenter(kva, pages, npages);
  173         cnt.v_vnodein++;
  174         cnt.v_vnodepgsin += npages;
  175 
  176         iov.iov_base = (caddr_t) kva;
  177         iov.iov_len = count;
  178         uio.uio_iov = &iov;
  179         uio.uio_iovcnt = 1;
  180         uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
  181         uio.uio_resid = count;
  182         uio.uio_segflg = UIO_SYSSPACE;
  183         uio.uio_rw = UIO_READ;
  184         uio.uio_td = td;
  185 
  186         error = (nmp->nm_rpcops->nr_readrpc)(vp, &uio, cred);
  187         pmap_qremove(kva, npages);
  188 
  189         relpbuf(bp, &nfs_pbuf_freecnt);
  190 
  191         if (error && (uio.uio_resid == count)) {
  192                 printf("nfs_getpages: error %d\n", error);
  193                 VM_OBJECT_LOCK(object);
  194                 vm_page_lock_queues();
  195                 for (i = 0; i < npages; ++i) {
  196                         if (i != ap->a_reqpage)
  197                                 vm_page_free(pages[i]);
  198                 }
  199                 vm_page_unlock_queues();
  200                 VM_OBJECT_UNLOCK(object);
  201                 return VM_PAGER_ERROR;
  202         }
  203 
  204         /*
  205          * Calculate the number of bytes read and validate only that number
  206          * of bytes.  Note that due to pending writes, size may be 0.  This
  207          * does not mean that the remaining data is invalid!
  208          */
  209 
  210         size = count - uio.uio_resid;
  211         VM_OBJECT_LOCK(object);
  212         vm_page_lock_queues();
  213         for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
  214                 vm_page_t m;
  215                 nextoff = toff + PAGE_SIZE;
  216                 m = pages[i];
  217 
  218                 if (nextoff <= size) {
  219                         /*
  220                          * Read operation filled an entire page
  221                          */
  222                         m->valid = VM_PAGE_BITS_ALL;
  223                         vm_page_undirty(m);
  224                 } else if (size > toff) {
  225                         /*
  226                          * Read operation filled a partial page.
  227                          */
  228                         m->valid = 0;
  229                         vm_page_set_validclean(m, 0, size - toff);
  230                         /* handled by vm_fault now        */
  231                         /* vm_page_zero_invalid(m, TRUE); */
  232                 } else {
  233                         /*
  234                          * Read operation was short.  If no error occurred
  235                          * we may have hit a zero-fill section.   We simply
  236                          * leave valid set to 0.
  237                          */
  238                         ;
  239                 }
  240                 if (i != ap->a_reqpage) {
  241                         /*
  242                          * Whether or not to leave the page activated is up in
  243                          * the air, but we should put the page on a page queue
  244                          * somewhere (it already is in the object).  Result:
  245                          * It appears that empirical results show that
  246                          * deactivating pages is best.
  247                          */
  248 
  249                         /*
  250                          * Just in case someone was asking for this page we
  251                          * now tell them that it is ok to use.
  252                          */
  253                         if (!error) {
  254                                 if (m->flags & PG_WANTED)
  255                                         vm_page_activate(m);
  256                                 else
  257                                         vm_page_deactivate(m);
  258                                 vm_page_wakeup(m);
  259                         } else {
  260                                 vm_page_free(m);
  261                         }
  262                 }
  263         }
  264         vm_page_unlock_queues();
  265         VM_OBJECT_UNLOCK(object);
  266         return 0;
  267 }
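
/*
 * [Illustrative sketch, not part of the original file]  The loop above marks
 * pages valid based on how many bytes the read RPC actually returned.  The
 * per-page arithmetic can be isolated as follows; `size' is the byte count
 * transferred (count - uio.uio_resid) and `i' the page index in the request.
 */
static __inline int
nfs_getpages_valid_bytes(int size, int i)
{
	int toff = i * PAGE_SIZE;

	if (toff + PAGE_SIZE <= size)
		return (PAGE_SIZE);	/* RPC filled the entire page */
	else if (size > toff)
		return (size - toff);	/* partial page, only at EOF */
	else
		return (0);		/* short read: leave the page invalid */
}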
  268 
  269 /*
  270  * Vnode op for VM putpages.
  271  */
  272 int
  273 nfs_putpages(struct vop_putpages_args *ap)
  274 {
  275         struct uio uio;
  276         struct iovec iov;
  277         vm_offset_t kva;
  278         struct buf *bp;
  279         int iomode, must_commit, i, error, npages, count;
  280         off_t offset;
  281         int *rtvals;
  282         struct vnode *vp;
  283         struct thread *td;
  284         struct ucred *cred;
  285         struct nfsmount *nmp;
  286         struct nfsnode *np;
  287         vm_page_t *pages;
  288 
  289         GIANT_REQUIRED;
  290 
  291         vp = ap->a_vp;
  292         np = VTONFS(vp);
  293         td = curthread;                         /* XXX */
  294         cred = curthread->td_ucred;             /* XXX */
  295         nmp = VFSTONFS(vp->v_mount);
  296         pages = ap->a_m;
  297         count = ap->a_count;
  298         rtvals = ap->a_rtvals;
  299         npages = btoc(count);
  300         offset = IDX_TO_OFF(pages[0]->pindex);
  301 
  302         if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
  303             (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
  304                 (void)nfs_fsinfo(nmp, vp, cred, td);
  305         }
  306 
  307         for (i = 0; i < npages; i++)
  308                 rtvals[i] = VM_PAGER_AGAIN;
  309 
  310         /*
  311          * When putting pages, do not extend file past EOF.
  312          */
  313 
  314         if (offset + count > np->n_size) {
  315                 count = np->n_size - offset;
  316                 if (count < 0)
  317                         count = 0;
  318         }
  319 
  320         /*
  321          * We use only the kva address for the buffer, but this is extremely
  322          * convenient and fast.
  323          */
  324         bp = getpbuf(&nfs_pbuf_freecnt);
  325 
  326         kva = (vm_offset_t) bp->b_data;
  327         pmap_qenter(kva, pages, npages);
  328         cnt.v_vnodeout++;
  329         cnt.v_vnodepgsout += count;
  330 
  331         iov.iov_base = (caddr_t) kva;
  332         iov.iov_len = count;
  333         uio.uio_iov = &iov;
  334         uio.uio_iovcnt = 1;
  335         uio.uio_offset = offset;
  336         uio.uio_resid = count;
  337         uio.uio_segflg = UIO_SYSSPACE;
  338         uio.uio_rw = UIO_WRITE;
  339         uio.uio_td = td;
  340 
  341         if ((ap->a_sync & VM_PAGER_PUT_SYNC) == 0)
  342             iomode = NFSV3WRITE_UNSTABLE;
  343         else
  344             iomode = NFSV3WRITE_FILESYNC;
  345 
  346         error = (nmp->nm_rpcops->nr_writerpc)(vp, &uio, cred, &iomode, &must_commit);
  347 
  348         pmap_qremove(kva, npages);
  349         relpbuf(bp, &nfs_pbuf_freecnt);
  350 
  351         if (!error) {
  352                 int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE;
  353                 for (i = 0; i < nwritten; i++) {
  354                         rtvals[i] = VM_PAGER_OK;
  355                         vm_page_undirty(pages[i]);
  356                 }
  357                 if (must_commit) {
  358                         nfs_clearcommit(vp->v_mount);
  359                 }
  360         }
  361         return rtvals[0];
  362 }
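
/*
 * [Annotation, not part of the original file]  Note the iomode choice above:
 * asynchronous pageouts go out as NFSv3 UNSTABLE writes, which the server may
 * merely cache; the data is not durable until a later COMMIT RPC (driven by
 * the B_NEEDCOMMIT machinery elsewhere in the client).  When the write RPC
 * reports must_commit (typically because the server's write verifier changed),
 * nfs_clearcommit() drops the needs-commit state on the mount's buffers so
 * their data will be rewritten rather than merely committed.
 */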
  363 
  364 /*
  365  * Vnode op for read using bio
  366  */
  367 int
  368 nfs_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
  369 {
  370         struct nfsnode *np = VTONFS(vp);
  371         int biosize, i;
  372         struct buf *bp = 0, *rabp;
  373         struct vattr vattr;
  374         struct thread *td;
  375         struct nfsmount *nmp = VFSTONFS(vp->v_mount);
  376         daddr_t lbn, rabn;
  377         int bcount;
  378         int seqcount;
  379         int nra, error = 0, n = 0, on = 0;
  380 
  381 #ifdef DIAGNOSTIC
  382         if (uio->uio_rw != UIO_READ)
  383                 panic("nfs_read mode");
  384 #endif
  385         if (uio->uio_resid == 0)
  386                 return (0);
  387         if (uio->uio_offset < 0)        /* XXX VDIR cookies can be negative */
  388                 return (EINVAL);
  389         td = uio->uio_td;
  390 
  391         if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
  392             (nmp->nm_state & NFSSTA_GOTFSINFO) == 0)
  393                 (void)nfs_fsinfo(nmp, vp, cred, td);
  394         if (vp->v_type != VDIR &&
  395             (uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
  396                 return (EFBIG);
  397         biosize = vp->v_mount->mnt_stat.f_iosize;
  398         seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / BKVASIZE);
  399         /*
  400          * For nfs, cache consistency can only be maintained approximately.
  401          * Although RFC1094 does not specify the criteria, the following is
  402          * believed to be compatible with the reference port.
  403          * For nfs:
  404          * If the file's modify time on the server has changed since the
  405          * last read rpc or you have written to the file,
  406          * you may have lost data cache consistency with the
  407          * server, so flush all of the file's data out of the cache.
  408          * Then force a getattr rpc to ensure that you have up to date
  409          * attributes.
  410          * NB: This implies that cache data can be read when up to
  411          * NFS_ATTRTIMEO seconds out of date. If you find that you need current
  412  * attributes, this could be forced by setting n_attrstamp to 0 before
  413          * the VOP_GETATTR() call.
  414          */
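        /*
         * [Annotation, not part of the original file]  As the comment above
         * notes, a caller that must see completely fresh attributes can force
         * a GETATTR round trip instead of trusting the cached copy, roughly:
         *
         *      np->n_attrstamp = 0;
         *      (void) VOP_GETATTR(vp, &vattr, cred, td);
         *
         * at the cost of one extra RPC per read.
         */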
  415         if (np->n_flag & NMODIFIED) {
  416                 if (vp->v_type != VREG) {
  417                         if (vp->v_type != VDIR)
  418                                 panic("nfs: bioread, not dir");
  419                         (nmp->nm_rpcops->nr_invaldir)(vp);
  420                         error = nfs_vinvalbuf(vp, V_SAVE, cred, td, 1);
  421                         if (error)
  422                                 return (error);
  423                 }
  424                 np->n_attrstamp = 0;
  425                 error = VOP_GETATTR(vp, &vattr, cred, td);
  426                 if (error)
  427                         return (error);
  428                 np->n_mtime = vattr.va_mtime.tv_sec;
  429         } else {
  430                 error = VOP_GETATTR(vp, &vattr, cred, td);
  431                 if (error)
  432                         return (error);
  433                 if ((np->n_flag & NSIZECHANGED)
  434                     || (np->n_mtime != vattr.va_mtime.tv_sec)) {
  435                         if (vp->v_type == VDIR)
  436                                 (nmp->nm_rpcops->nr_invaldir)(vp);
  437                         error = nfs_vinvalbuf(vp, V_SAVE, cred, td, 1);
  438                         if (error)
  439                                 return (error);
  440                         np->n_mtime = vattr.va_mtime.tv_sec;
  441                         np->n_flag &= ~NSIZECHANGED;
  442                 }
  443         }
  444         do {
  445             switch (vp->v_type) {
  446             case VREG:
  447                 nfsstats.biocache_reads++;
  448                 lbn = uio->uio_offset / biosize;
  449                 on = uio->uio_offset & (biosize - 1);
  450 
  451                 /*
  452                  * Start the read ahead(s), as required.
  453                  */
  454                 if (nmp->nm_readahead > 0) {
  455                     for (nra = 0; nra < nmp->nm_readahead && nra < seqcount &&
  456                         (off_t)(lbn + 1 + nra) * biosize < np->n_size; nra++) {
  457                         rabn = lbn + 1 + nra;
  458                         if (incore(vp, rabn) == NULL) {
  459                             rabp = nfs_getcacheblk(vp, rabn, biosize, td);
  460                             if (!rabp) {
  461                                 error = nfs_sigintr(nmp, NULL, td);
  462                                 return (error ? error : EINTR);
  463                             }
  464                             if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
  465                                 rabp->b_flags |= B_ASYNC;
  466                                 rabp->b_iocmd = BIO_READ;
  467                                 vfs_busy_pages(rabp, 0);
  468                                 if (nfs_asyncio(rabp, cred, td)) {
  469                                     rabp->b_flags |= B_INVAL;
  470                                     rabp->b_ioflags |= BIO_ERROR;
  471                                     vfs_unbusy_pages(rabp);
  472                                     brelse(rabp);
  473                                     break;
  474                                 }
  475                             } else {
  476                                 brelse(rabp);
  477                             }
  478                         }
  479                     }
  480                 }
  481 
  482                 /*
  483                  * Obtain the buffer cache block.  Figure out the buffer size
  484                  * when we are at EOF.  If we are modifying the size of the
  485                  * buffer based on an EOF condition we need to hold
  486                  * nfs_rslock() through obtaining the buffer to prevent
  487                  * a potential writer-appender from messing with n_size.
  488                  * Otherwise we may accidentally truncate the buffer and
  489                  * lose dirty data.
  490                  *
  491                  * Note that bcount is *not* DEV_BSIZE aligned.
  492                  */
  493 
  494 again:
  495                 bcount = biosize;
  496                 if ((off_t)lbn * biosize >= np->n_size) {
  497                         bcount = 0;
  498                 } else if ((off_t)(lbn + 1) * biosize > np->n_size) {
  499                         bcount = np->n_size - (off_t)lbn * biosize;
  500                 }
  501                 if (bcount != biosize) {
  502                         switch(nfs_rslock(np, td)) {
  503                         case ENOLCK:
  504                                 goto again;
  505                                 /* not reached */
  506                         case EIO:
  507                                 return (EIO);
  508                         case EINTR:
  509                         case ERESTART:
  510                                 return(EINTR);
  511                                 /* not reached */
  512                         default:
  513                                 break;
  514                         }
  515                 }
  516 
  517                 bp = nfs_getcacheblk(vp, lbn, bcount, td);
  518 
  519                 if (bcount != biosize)
  520                         nfs_rsunlock(np, td);
  521                 if (!bp) {
  522                         error = nfs_sigintr(nmp, NULL, td);
  523                         return (error ? error : EINTR);
  524                 }
  525 
  526                 /*
  527                  * If B_CACHE is not set, we must issue the read.  If this
  528                  * fails, we return an error.
  529                  */
  530 
  531                 if ((bp->b_flags & B_CACHE) == 0) {
  532                     bp->b_iocmd = BIO_READ;
  533                     vfs_busy_pages(bp, 0);
  534                     error = nfs_doio(bp, cred, td);
  535                     if (error) {
  536                         brelse(bp);
  537                         return (error);
  538                     }
  539                 }
  540 
  541                 /*
  542                  * on is the offset into the current bp.  Figure out how many
  543                  * bytes we can copy out of the bp.  Note that bcount is
  544                  * NOT DEV_BSIZE aligned.
  545                  *
  546                  * Then figure out how many bytes we can copy into the uio.
  547                  */
  548 
  549                 n = 0;
  550                 if (on < bcount)
  551                         n = min((unsigned)(bcount - on), uio->uio_resid);
  552                 break;
  553             case VLNK:
  554                 nfsstats.biocache_readlinks++;
  555                 bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, td);
  556                 if (!bp) {
  557                         error = nfs_sigintr(nmp, NULL, td);
  558                         return (error ? error : EINTR);
  559                 }
  560                 if ((bp->b_flags & B_CACHE) == 0) {
  561                     bp->b_iocmd = BIO_READ;
  562                     vfs_busy_pages(bp, 0);
  563                     error = nfs_doio(bp, cred, td);
  564                     if (error) {
  565                         bp->b_ioflags |= BIO_ERROR;
  566                         brelse(bp);
  567                         return (error);
  568                     }
  569                 }
  570                 n = min(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
  571                 on = 0;
  572                 break;
  573             case VDIR:
  574                 nfsstats.biocache_readdirs++;
  575                 if (np->n_direofoffset
  576                     && uio->uio_offset >= np->n_direofoffset) {
  577                     return (0);
  578                 }
  579                 lbn = (uoff_t)uio->uio_offset / NFS_DIRBLKSIZ;
  580                 on = uio->uio_offset & (NFS_DIRBLKSIZ - 1);
  581                 bp = nfs_getcacheblk(vp, lbn, NFS_DIRBLKSIZ, td);
  582                 if (!bp) {
  583                     error = nfs_sigintr(nmp, NULL, td);
  584                     return (error ? error : EINTR);
  585                 }
  586                 if ((bp->b_flags & B_CACHE) == 0) {
  587                     bp->b_iocmd = BIO_READ;
  588                     vfs_busy_pages(bp, 0);
  589                     error = nfs_doio(bp, cred, td);
  590                     if (error) {
  591                             brelse(bp);
  592                     }
  593                     while (error == NFSERR_BAD_COOKIE) {
  594                         (nmp->nm_rpcops->nr_invaldir)(vp);
  595                         error = nfs_vinvalbuf(vp, 0, cred, td, 1);
  596                         /*
  597                          * Yuck! The directory has been modified on the
  598                          * server. The only way to get the block is by
  599                          * reading from the beginning to get all the
  600                          * offset cookies.
  601                          *
  602                          * Leave the last bp intact unless there is an error.
  603                          * Loop back up to the while if the error is another
  604                          * NFSERR_BAD_COOKIE (double yuck!).
  605                          */
  606                         for (i = 0; i <= lbn && !error; i++) {
  607                             if (np->n_direofoffset
  608                                 && (i * NFS_DIRBLKSIZ) >= np->n_direofoffset)
  609                                     return (0);
  610                             bp = nfs_getcacheblk(vp, i, NFS_DIRBLKSIZ, td);
  611                             if (!bp) {
  612                                 error = nfs_sigintr(nmp, NULL, td);
  613                                 return (error ? error : EINTR);
  614                             }
  615                             if ((bp->b_flags & B_CACHE) == 0) {
  616                                     bp->b_iocmd = BIO_READ;
  617                                     vfs_busy_pages(bp, 0);
  618                                     error = nfs_doio(bp, cred, td);
  619                                     /*
  620                                      * no error + B_INVAL == directory EOF,
  621                                      * use the block.
  622                                      */
  623                                     if (error == 0 && (bp->b_flags & B_INVAL))
  624                                             break;
  625                             }
  626                             /*
  627                              * An error will throw away the block and the
  628                              * for loop will break out.  If no error and this
  629                              * is not the block we want, we throw away the
  630                              * block and go for the next one via the for loop.
  631                              */
  632                             if (error || i < lbn)
  633                                     brelse(bp);
  634                         }
  635                     }
  636                     /*
  637                      * The above while is repeated if we hit another cookie
  638                      * error.  If we hit an error and it wasn't a cookie error,
  639                      * we give up.
  640                      */
  641                     if (error)
  642                             return (error);
  643                 }
  644 
  645                 /*
  646                  * If not eof and read aheads are enabled, start one.
  647                  * (You need the current block first, so that you have the
  648                  *  directory offset cookie of the next block.)
  649                  */
  650                 if (nmp->nm_readahead > 0 &&
  651                     (bp->b_flags & B_INVAL) == 0 &&
  652                     (np->n_direofoffset == 0 ||
  653                     (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) &&
  654                     incore(vp, lbn + 1) == NULL) {
  655                         rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, td);
  656                         if (rabp) {
  657                             if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
  658                                 rabp->b_flags |= B_ASYNC;
  659                                 rabp->b_iocmd = BIO_READ;
  660                                 vfs_busy_pages(rabp, 0);
  661                                 if (nfs_asyncio(rabp, cred, td)) {
  662                                     rabp->b_flags |= B_INVAL;
  663                                     rabp->b_ioflags |= BIO_ERROR;
  664                                     vfs_unbusy_pages(rabp);
  665                                     brelse(rabp);
  666                                 }
  667                             } else {
  668                                 brelse(rabp);
  669                             }
  670                         }
  671                 }
  672                 /*
  673                  * Unlike VREG files, whose buffer size ( bp->b_bcount ) is
  674                  * chopped for the EOF condition, we cannot tell how large
  675                  * NFS directories are going to be until we hit EOF.  So
  676                  * an NFS directory buffer is *not* chopped to its EOF.  Now,
  677                  * it just so happens that b_resid will effectively chop it
  678                  * to EOF.  *BUT* this information is lost if the buffer goes
  679                  * away and is reconstituted into a B_CACHE state ( due to
  680                  * being VMIO ) later.  So we keep track of the directory eof
  681                  * in np->n_direofoffset and chop it off as an extra step
  682                  * right here.
  683                  */
  684                 n = lmin(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid - on);
  685                 if (np->n_direofoffset && n > np->n_direofoffset - uio->uio_offset)
  686                         n = np->n_direofoffset - uio->uio_offset;
  687                 break;
  688             default:
  689                 printf(" nfs_bioread: type %x unexpected\n", vp->v_type);
  690                 break;
  691             };
  692 
  693             if (n > 0) {
  694                     error = uiomove(bp->b_data + on, (int)n, uio);
  695             }
  696             switch (vp->v_type) {
  697             case VREG:
  698                 break;
  699             case VLNK:
  700                 n = 0;
  701                 break;
  702             case VDIR:
  703                 break;
  704             default:
  705                 printf(" nfs_bioread: type %x unexpected\n", vp->v_type);
  706             }
  707             brelse(bp);
  708         } while (error == 0 && uio->uio_resid > 0 && n > 0);
  709         return (error);
  710 }
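
/*
 * [Illustrative helper, not part of the original file]  Both nfs_bioread()
 * above and nfs_write() below decompose a file offset into a logical block
 * number and an offset within that block.  Assuming biosize is a power of
 * two (it is taken from f_iosize), the split looks like this:
 */
static __inline void
nfs_blkoff(off_t offset, int biosize, daddr_t *lbnp, int *onp)
{
	*lbnp = offset / biosize;		/* logical block number */
	*onp = (int)(offset & (biosize - 1));	/* offset within that block */
}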
  711 
  712 /*
  713  * Vnode op for write using bio
  714  */
  715 int
  716 nfs_write(struct vop_write_args *ap)
  717 {
  718         int biosize;
  719         struct uio *uio = ap->a_uio;
  720         struct thread *td = uio->uio_td;
  721         struct vnode *vp = ap->a_vp;
  722         struct nfsnode *np = VTONFS(vp);
  723         struct ucred *cred = ap->a_cred;
  724         int ioflag = ap->a_ioflag;
  725         struct buf *bp;
  726         struct vattr vattr;
  727         struct nfsmount *nmp = VFSTONFS(vp->v_mount);
  728         daddr_t lbn;
  729         int bcount;
  730         int n, on, error = 0;
  731         int haverslock = 0;
  732         struct proc *p = td?td->td_proc:NULL;
  733 
  734         GIANT_REQUIRED;
  735 
  736 #ifdef DIAGNOSTIC
  737         if (uio->uio_rw != UIO_WRITE)
  738                 panic("nfs_write mode");
  739         if (uio->uio_segflg == UIO_USERSPACE && uio->uio_td != curthread)
  740                 panic("nfs_write proc");
  741 #endif
  742         if (vp->v_type != VREG)
  743                 return (EIO);
  744         if (np->n_flag & NWRITEERR) {
  745                 np->n_flag &= ~NWRITEERR;
  746                 return (np->n_error);
  747         }
  748         if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
  749             (nmp->nm_state & NFSSTA_GOTFSINFO) == 0)
  750                 (void)nfs_fsinfo(nmp, vp, cred, td);
  751 
  752         /*
  753          * Synchronously flush pending buffers if we are in synchronous
  754          * mode or if we are appending.
  755          */
  756         if (ioflag & (IO_APPEND | IO_SYNC)) {
  757                 if (np->n_flag & NMODIFIED) {
  758                         np->n_attrstamp = 0;
  759                         error = nfs_vinvalbuf(vp, V_SAVE, cred, td, 1);
  760                         if (error)
  761                                 return (error);
  762                 }
  763         }
  764 
  765         /*
  766          * If IO_APPEND then load uio_offset.  We restart here if we cannot
  767          * get the append lock.
  768          */
  769 restart:
  770         if (ioflag & IO_APPEND) {
  771                 np->n_attrstamp = 0;
  772                 error = VOP_GETATTR(vp, &vattr, cred, td);
  773                 if (error)
  774                         return (error);
  775                 uio->uio_offset = np->n_size;
  776         }
  777 
  778         if (uio->uio_offset < 0)
  779                 return (EINVAL);
  780         if ((uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
  781                 return (EFBIG);
  782         if (uio->uio_resid == 0)
  783                 return (0);
  784 
  785         /*
  786          * We need to obtain the rslock if we intend to modify np->n_size
  787          * in order to guarantee the append point with multiple contending
  788          * writers, to guarantee that no other appenders modify n_size
  789          * while we are trying to obtain a truncated buffer (i.e. to avoid
  790          * accidentally truncating data written by another appender due to
  791          * the race), and to ensure that the buffer is populated prior to
  792          * our extending of the file.  We hold rslock through the entire
  793          * operation.
  794          *
  795          * Note that we do not synchronize the case where someone truncates
  796          * the file while we are appending to it because attempting to lock
  797          * this case may deadlock other parts of the system unexpectedly.
  798          */
  799         if ((ioflag & IO_APPEND) ||
  800             uio->uio_offset + uio->uio_resid > np->n_size) {
  801                 switch(nfs_rslock(np, td)) {
  802                 case ENOLCK:
  803                         goto restart;
  804                         /* not reached */
  805                 case EIO:
  806                         return (EIO);
  807                 case EINTR:
  808                 case ERESTART:
  809                         return(EINTR);
  810                         /* not reached */
  811                 default:
  812                         break;
  813                 }
  814                 haverslock = 1;
  815         }
  816 
  817         /*
  818          * Maybe this should be above the vnode op call, but so long as
  819          * file servers have no limits, I don't think it matters
  820          */
  821         if (p != NULL) {
  822                 PROC_LOCK(p);
  823                 if (uio->uio_offset + uio->uio_resid >
  824                     lim_cur(p, RLIMIT_FSIZE)) {
  825                         psignal(p, SIGXFSZ);
  826                         PROC_UNLOCK(p);
  827                         if (haverslock)
  828                                 nfs_rsunlock(np, td);
  829                         return (EFBIG);
  830                 }
  831                 PROC_UNLOCK(p);
  832         }
  833 
  834         biosize = vp->v_mount->mnt_stat.f_iosize;
  835 
  836         do {
  837                 nfsstats.biocache_writes++;
  838                 lbn = uio->uio_offset / biosize;
  839                 on = uio->uio_offset & (biosize-1);
  840                 n = min((unsigned)(biosize - on), uio->uio_resid);
  841 again:
  842                 /*
  843                  * Handle direct append and file extension cases, calculate
  844                  * unaligned buffer size.
  845                  */
  846 
  847                 if (uio->uio_offset == np->n_size && n) {
  848                         /*
  849                          * Get the buffer (in its pre-append state to maintain
  850                          * B_CACHE if it was previously set).  Resize the
  851                          * nfsnode after we have locked the buffer to prevent
  852                          * readers from reading garbage.
  853                          */
  854                         bcount = on;
  855                         bp = nfs_getcacheblk(vp, lbn, bcount, td);
  856 
  857                         if (bp != NULL) {
  858                                 long save;
  859 
  860                                 np->n_size = uio->uio_offset + n;
  861                                 np->n_flag |= NMODIFIED;
  862                                 vnode_pager_setsize(vp, np->n_size);
  863 
  864                                 save = bp->b_flags & B_CACHE;
  865                                 bcount += n;
  866                                 allocbuf(bp, bcount);
  867                                 bp->b_flags |= save;
  868                                 bp->b_magic = B_MAGIC_NFS;
  869                                 if ((nmp->nm_flag & NFSMNT_NFSV4) != 0)
  870                                         bp->b_op = &buf_ops_nfs4;
  871                                 else
  872                                         bp->b_op = &buf_ops_nfs;
  873                         }
  874                 } else {
  875                         /*
  876                          * Obtain the locked cache block first, and then
  877                          * adjust the file's size as appropriate.
  878                          */
  879                         bcount = on + n;
  880                         if ((off_t)lbn * biosize + bcount < np->n_size) {
  881                                 if ((off_t)(lbn + 1) * biosize < np->n_size)
  882                                         bcount = biosize;
  883                                 else
  884                                         bcount = np->n_size - (off_t)lbn * biosize;
  885                         }
  886                         bp = nfs_getcacheblk(vp, lbn, bcount, td);
  887                         if (uio->uio_offset + n > np->n_size) {
  888                                 np->n_size = uio->uio_offset + n;
  889                                 np->n_flag |= NMODIFIED;
  890                                 vnode_pager_setsize(vp, np->n_size);
  891                         }
  892                 }
  893 
  894                 if (!bp) {
  895                         error = nfs_sigintr(nmp, NULL, td);
  896                         if (!error)
  897                                 error = EINTR;
  898                         break;
  899                 }
  900 
  901                 /*
  902                  * Issue a READ if B_CACHE is not set.  In special-append
  903                  * mode, B_CACHE is based on the buffer prior to the write
  904                  * op and is typically set, avoiding the read.  If a read
  905                  * is required in special append mode, the server will
  906                  * probably send us a short-read since we extended the file
  907                  * on our end, resulting in b_resid == 0 and, thus,
  908                  * B_CACHE getting set.
  909                  *
  910                  * We can also avoid issuing the read if the write covers
  911                  * the entire buffer.  We have to make sure the buffer state
  912                  * is reasonable in this case since we will not be initiating
  913                  * I/O.  See the comments in kern/vfs_bio.c's getblk() for
  914                  * more information.
  915                  *
  916                  * B_CACHE may also be set due to the buffer being cached
  917                  * normally.
  918                  */
  919 
  920                 if (on == 0 && n == bcount) {
  921                         bp->b_flags |= B_CACHE;
  922                         bp->b_flags &= ~B_INVAL;
  923                         bp->b_ioflags &= ~BIO_ERROR;
  924                 }
  925 
  926                 if ((bp->b_flags & B_CACHE) == 0) {
  927                         bp->b_iocmd = BIO_READ;
  928                         vfs_busy_pages(bp, 0);
  929                         error = nfs_doio(bp, cred, td);
  930                         if (error) {
  931                                 brelse(bp);
  932                                 break;
  933                         }
  934                 }
  935                 if (!bp) {
  936                         error = nfs_sigintr(nmp, NULL, td);
  937                         if (!error)
  938                                 error = EINTR;
  939                         break;
  940                 }
  941                 if (bp->b_wcred == NOCRED)
  942                         bp->b_wcred = crhold(cred);
  943                 np->n_flag |= NMODIFIED;
  944 
  945                 /*
  946                  * If dirtyend exceeds file size, chop it down.  This should
  947                  * not normally occur but there is an append race where it
  948                  * might occur XXX, so we log it.
  949                  *
  950                  * If the chopping creates a reverse-indexed or degenerate
  951                  * situation with dirtyoff/end, we 0 both of them.
  952                  */
  953 
  954                 if (bp->b_dirtyend > bcount) {
  955                         printf("NFS append race @%lx:%d\n",
  956                             (long)bp->b_blkno * DEV_BSIZE,
  957                             bp->b_dirtyend - bcount);
  958                         bp->b_dirtyend = bcount;
  959                 }
  960 
  961                 if (bp->b_dirtyoff >= bp->b_dirtyend)
  962                         bp->b_dirtyoff = bp->b_dirtyend = 0;
  963 
  964                 /*
  965                  * If the new write will leave a contiguous dirty
  966                  * area, just update the b_dirtyoff and b_dirtyend,
  967                  * otherwise force a write rpc of the old dirty area.
  968                  *
  969                  * While it is possible to merge discontiguous writes due to
  970                  * our having a B_CACHE buffer ( and thus valid read data
  971                  * for the hole), we don't because it could lead to
  972                  * significant cache coherency problems with multiple clients,
  973                  * especially if locking is implemented later on.
  974                  *
  975                  * as an optimization we could theoretically maintain
  976                  * a linked list of discontinuous areas, but we would still
  977                  * have to commit them separately so there isn't much
  978                  * advantage to it except perhaps a bit of asynchronization.
  979                  */
  980 
  981                 if (bp->b_dirtyend > 0 &&
  982                     (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
  983                         if (bwrite(bp) == EINTR) {
  984                                 error = EINTR;
  985                                 break;
  986                         }
  987                         goto again;
  988                 }
  989 
  990                 error = uiomove((char *)bp->b_data + on, n, uio);
  991 
  992                 /*
  993                  * Since this block is being modified, it must be written
  994                  * again and not just committed.  Since write clustering does
  995                  * not work for the stage 1 data write, only the stage 2
  996                  * commit rpc, we have to clear B_CLUSTEROK as well.
  997                  */
  998                 bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
  999 
 1000                 if (error) {
 1001                         bp->b_ioflags |= BIO_ERROR;
 1002                         brelse(bp);
 1003                         break;
 1004                 }
 1005 
 1006                 /*
 1007                  * Only update dirtyoff/dirtyend if not a degenerate
 1008                  * condition.
 1009                  */
 1010                 if (n) {
 1011                         if (bp->b_dirtyend > 0) {
 1012                                 bp->b_dirtyoff = min(on, bp->b_dirtyoff);
 1013                                 bp->b_dirtyend = max((on + n), bp->b_dirtyend);
 1014                         } else {
 1015                                 bp->b_dirtyoff = on;
 1016                                 bp->b_dirtyend = on + n;
 1017                         }
 1018                         vfs_bio_set_validclean(bp, on, n);
 1019                 }
 1020 
 1021                 /*
 1022                  * If IO_SYNC do bwrite().
 1023                  *
 1024                  * IO_INVAL appears to be unused.  The idea appears to be
 1025                  * to turn off caching in this case.  Very odd.  XXX
 1026                  */
 1027                 if ((ioflag & IO_SYNC)) {
 1028                         if (ioflag & IO_INVAL)
 1029                                 bp->b_flags |= B_NOCACHE;
 1030                         error = bwrite(bp);
 1031                         if (error)
 1032                                 break;
 1033                 } else if ((n + on) == biosize) {
 1034                         bp->b_flags |= B_ASYNC;
 1035                         (void) (nmp->nm_rpcops->nr_writebp)(bp, 0, 0);
 1036                 } else {
 1037                         bdwrite(bp);
 1038                 }
 1039         } while (uio->uio_resid > 0 && n > 0);
 1040 
 1041         if (haverslock)
 1042                 nfs_rsunlock(np, td);
 1043 
 1044         return (error);
 1045 }
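
/*
 * [Illustrative sketch, not part of the original file]  The dirty-region
 * bookkeeping in nfs_write() above only extends b_dirtyoff/b_dirtyend when
 * the incoming write [on, on + n) touches or overlaps the existing dirty
 * range; a disjoint write forces the old range out first.  The contiguity
 * test, restated:
 */
static __inline int
nfs_dirty_mergeable(struct buf *bp, int on, int n)
{
	if (bp->b_dirtyend <= 0)
		return (1);	/* nothing dirty yet, trivially mergeable */
	return (on <= bp->b_dirtyend && on + n >= bp->b_dirtyoff);
}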
 1046 
 1047 /*
 1048  * Get an nfs cache block.
 1049  *
 1050  * Allocate a new one if the block isn't currently in the cache
 1051  * and return the block marked busy. If the calling process is
 1052  * interrupted by a signal for an interruptible mount point, return
 1053  * NULL.
 1054  *
 1055  * The caller must carefully deal with the possible B_INVAL state of
 1056  * the buffer.  nfs_doio() clears B_INVAL (and nfs_asyncio() clears it
 1057  * indirectly), so synchronous reads can be issued without worrying about
 1058  * the B_INVAL state.  We have to be a little more careful when dealing
 1059  * with writes (see comments in nfs_write()) when extending a file past
 1060  * its EOF.
 1061  */
 1062 static struct buf *
 1063 nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct thread *td)
 1064 {
 1065         struct buf *bp;
 1066         struct mount *mp;
 1067         struct nfsmount *nmp;
 1068 
 1069         mp = vp->v_mount;
 1070         nmp = VFSTONFS(mp);
 1071 
 1072         if (nmp->nm_flag & NFSMNT_INT) {
 1073                 bp = getblk(vp, bn, size, PCATCH, 0, 0);
 1074                 while (bp == NULL) {
 1075                         if (nfs_sigintr(nmp, NULL, td))
 1076                                 return (NULL);
 1077                         bp = getblk(vp, bn, size, 0, 2 * hz, 0);
 1078                 }
 1079         } else {
 1080                 bp = getblk(vp, bn, size, 0, 0, 0);
 1081         }
 1082 
 1083         if (vp->v_type == VREG) {
 1084                 int biosize;
 1085 
 1086                 biosize = mp->mnt_stat.f_iosize;
 1087                 bp->b_blkno = bn * (biosize / DEV_BSIZE);
 1088         }
 1089         return (bp);
 1090 }
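
/*
 * [Annotation, not part of the original file]  The b_blkno computation above
 * converts an NFS logical block number into DEV_BSIZE (512-byte) units so the
 * rest of the buffer cache can treat it uniformly.  For example, with a
 * biosize of 8192, block 3 maps to b_blkno = 3 * (8192 / 512) = 48.
 */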
 1091 
 1092 /*
 1093  * Flush and invalidate all dirty buffers. If another process is already
 1094  * doing the flush, just wait for completion.
 1095  */
 1096 int
 1097 nfs_vinvalbuf(struct vnode *vp, int flags, struct ucred *cred,
 1098     struct thread *td, int intrflg)
 1099 {
 1100         struct nfsnode *np = VTONFS(vp);
 1101         struct nfsmount *nmp = VFSTONFS(vp->v_mount);
 1102         int error = 0, slpflag, slptimeo;
 1103 
 1104         ASSERT_VOP_LOCKED(vp, "nfs_vinvalbuf");
 1105 
 1106         /*
 1107          * XXX This check stops us from needlessly doing a vinvalbuf when
 1108          * being called through vclean().  It is not clear that this is
 1109          * unsafe.
 1110          */
 1111         if (vp->v_iflag & VI_XLOCK)
 1112                 return (0);
 1113 
 1114         if ((nmp->nm_flag & NFSMNT_INT) == 0)
 1115                 intrflg = 0;
 1116         if (intrflg) {
 1117                 slpflag = PCATCH;
 1118                 slptimeo = 2 * hz;
 1119         } else {
 1120                 slpflag = 0;
 1121                 slptimeo = 0;
 1122         }
 1123         /*
 1124          * First wait for any other process doing a flush to complete.
 1125          */
 1126         while (np->n_flag & NFLUSHINPROG) {
 1127                 np->n_flag |= NFLUSHWANT;
 1128                 error = tsleep(&np->n_flag, PRIBIO + 2, "nfsvinval",
 1129                         slptimeo);
 1130                 if (error && intrflg &&
 1131                     nfs_sigintr(nmp, NULL, td))
 1132                         return (EINTR);
 1133         }
 1134 
 1135         /*
 1136          * Now, flush as required.
 1137          */
 1138         np->n_flag |= NFLUSHINPROG;
 1139         error = vinvalbuf(vp, flags, cred, td, slpflag, 0);
 1140         while (error) {
 1141                 if (intrflg && (error = nfs_sigintr(nmp, NULL, td))) {
 1142                         np->n_flag &= ~NFLUSHINPROG;
 1143                         if (np->n_flag & NFLUSHWANT) {
 1144                                 np->n_flag &= ~NFLUSHWANT;
 1145                                 wakeup(&np->n_flag);
 1146                         }
 1147                         return (error);
 1148                 }
 1149                 error = vinvalbuf(vp, flags, cred, td, 0, slptimeo);
 1150         }
 1151         np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
 1152         if (np->n_flag & NFLUSHWANT) {
 1153                 np->n_flag &= ~NFLUSHWANT;
 1154                 wakeup(&np->n_flag);
 1155         }
 1156         return (0);
 1157 }
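
/*
 * [Annotation, not part of the original file]  nfs_vinvalbuf() above uses a
 * simple flag-based handshake: NFLUSHINPROG marks a flush in progress, and a
 * second flusher sets NFLUSHWANT and tsleep()s on &np->n_flag until the first
 * one clears the flags and calls wakeup().  The same flag-plus-sleep-channel
 * pattern appears throughout the 4.4BSD-derived kernel.
 */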
 1158 
 1159 /*
 1160  * Initiate asynchronous I/O. Return an error if no nfsiods are available.
 1161  * This is mainly to avoid queueing async I/O requests when the nfsiods
 1162  * are all hung on a dead server.
 1163  *
 1164  * Note: nfs_asyncio() does not clear (BIO_ERROR|B_INVAL) but when the bp
 1165  * is eventually dequeued by the async daemon, nfs_doio() *will*.
 1166  */
 1167 int
 1168 nfs_asyncio(struct buf *bp, struct ucred *cred, struct thread *td)
 1169 {
 1170         struct nfsmount *nmp;
 1171         int iod;
 1172         int gotiod;
 1173         int slpflag = 0;
 1174         int slptimeo = 0;
 1175         int error, error2;
 1176 
 1177         nmp = VFSTONFS(bp->b_vp->v_mount);
 1178 
 1179         /*
 1180          * Commits are usually short and sweet so let's save some CPU and
 1181          * leave the async daemons for more important rpc's (such as reads
 1182          * and writes).
 1183          */
 1184         if (bp->b_iocmd == BIO_WRITE && (bp->b_flags & B_NEEDCOMMIT) &&
 1185             (nmp->nm_bufqiods > nfs_numasync / 2)) {
 1186                 return(EIO);
 1187         }
 1188 
 1189 again:
 1190         if (nmp->nm_flag & NFSMNT_INT)
 1191                 slpflag = PCATCH;
 1192         gotiod = FALSE;
 1193 
 1194         /*
 1195          * Find a free iod to process this request.
 1196          */
 1197         for (iod = 0; iod < nfs_numasync; iod++)
 1198                 if (nfs_iodwant[iod]) {
 1199                         gotiod = TRUE;
 1200                         break;
 1201                 }
 1202 
 1203         /*
 1204          * Try to create one if none are free.
 1205          */
 1206         if (!gotiod) {
 1207                 iod = nfs_nfsiodnew();
 1208                 if (iod != -1)
 1209                         gotiod = TRUE;
 1210         }
 1211 
 1212         if (gotiod) {
 1213                 /*
 1214                  * Found one, so wake it up and tell it which
 1215                  * mount to process.
 1216                  */
 1217                 NFS_DPF(ASYNCIO, ("nfs_asyncio: waking iod %d for mount %p\n",
 1218                     iod, nmp));
 1219                 nfs_iodwant[iod] = NULL;
 1220                 nfs_iodmount[iod] = nmp;
 1221                 nmp->nm_bufqiods++;
 1222                 wakeup(&nfs_iodwant[iod]);
 1223         }
 1224 
 1225         /*
 1226          * If none are free, we may already have an iod working on this mount
 1227          * point.  If so, it will process our request.
 1228          */
 1229         if (!gotiod) {
 1230                 if (nmp->nm_bufqiods > 0) {
 1231                         NFS_DPF(ASYNCIO,
 1232                                 ("nfs_asyncio: %d iods are already processing mount %p\n",
 1233                                  nmp->nm_bufqiods, nmp));
 1234                         gotiod = TRUE;
 1235                 }
 1236         }
 1237 
 1238         /*
 1239          * If we have an iod which can process the request, then queue
 1240          * the buffer.
 1241          */
 1242         if (gotiod) {
 1243                 /*
 1244                  * Ensure that the queue never grows too large.  We still want
 1245                  * to asynchronize so we block rather then return EIO.
 1246                  */
 1247                 while (nmp->nm_bufqlen >= 2*nfs_numasync) {
 1248                         NFS_DPF(ASYNCIO,
 1249                                 ("nfs_asyncio: waiting for mount %p queue to drain\n", nmp));
 1250                         nmp->nm_bufqwant = TRUE;
 1251                         error = tsleep(&nmp->nm_bufq, slpflag | PRIBIO,
 1252                                        "nfsaio", slptimeo);
 1253                         if (error) {
 1254                                 error2 = nfs_sigintr(nmp, NULL, td);
 1255                                 if (error2)
 1256                                         return (error2);
 1257                                 if (slpflag == PCATCH) {
 1258                                         slpflag = 0;
 1259                                         slptimeo = 2 * hz;
 1260                                 }
 1261                         }
 1262                         /*
 1263                          * We might have lost our iod while sleeping,
 1264                          * so check and loop if necessary.
 1265                          */
 1266                         if (nmp->nm_bufqiods == 0) {
 1267                                 NFS_DPF(ASYNCIO,
 1268                                         ("nfs_asyncio: no iods after mount %p queue was drained, looping\n", nmp));
 1269                                 goto again;
 1270                         }
 1271                 }
 1272 
 1273                 if (bp->b_iocmd == BIO_READ) {
 1274                         if (bp->b_rcred == NOCRED && cred != NOCRED)
 1275                                 bp->b_rcred = crhold(cred);
 1276                 } else {
 1277                         bp->b_flags |= B_WRITEINPROG;
 1278                         if (bp->b_wcred == NOCRED && cred != NOCRED)
 1279                                 bp->b_wcred = crhold(cred);
 1280                 }
 1281 
 1282                 BUF_KERNPROC(bp);
 1283                 TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
 1284                 nmp->nm_bufqlen++;
 1285                 return (0);
 1286         }
 1287 
 1288         /*
 1289          * All the iods are busy on other mounts, so return EIO to
 1290          * force the caller to process the i/o synchronously.
 1291          */
 1292         NFS_DPF(ASYNCIO, ("nfs_asyncio: no iods available, i/o is synchronous\n"));
 1293         return (EIO);
 1294 }
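
      /*
       * Editorial sketch, not part of the original nfs_bio.c: the consumer
       * side of the nm_bufq producer/consumer hand-off above lives in the
       * nfsiod kernel threads (nfssvc_iod() in sys/nfsclient/nfs_nfsiod.c).
       * Each iod sleeps on &nfs_iodwant[myiod] until nfs_asyncio() assigns
       * it a mount and wakes it, then drains the queue by handing every
       * buffer to nfs_doio().  Simplified, with details that may differ
       * from the real loop:
       *
       *      nmp = nfs_iodmount[myiod];
       *      while ((bp = TAILQ_FIRST(&nmp->nm_bufq)) != NULL) {
       *              TAILQ_REMOVE(&nmp->nm_bufq, bp, b_freelist);
       *              nmp->nm_bufqlen--;
       *              if (nmp->nm_bufqwant && nmp->nm_bufqlen <= nfs_numasync) {
       *                      nmp->nm_bufqwant = FALSE;
       *                      wakeup(&nmp->nm_bufq);    (unblocks nfs_asyncio)
       *              }
       *              if (bp->b_iocmd == BIO_READ)
       *                      (void) nfs_doio(bp, bp->b_rcred, NULL);
       *              else
       *                      (void) nfs_doio(bp, bp->b_wcred, NULL);
       *      }
       */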
 1295 
 1296 /*
 1297  * Do an I/O operation to/from a cache block. This may be called
 1298  * synchronously or from an nfsiod.
 1299  */
 1300 int
 1301 nfs_doio(struct buf *bp, struct ucred *cr, struct thread *td)
 1302 {
 1303         struct uio *uiop;
 1304         struct vnode *vp;
 1305         struct nfsnode *np;
 1306         struct nfsmount *nmp;
 1307         int error = 0, iomode, must_commit = 0;
 1308         struct uio uio;
 1309         struct iovec io;
 1310         struct proc *p = td ? td->td_proc : NULL;
 1311 
 1312         vp = bp->b_vp;
 1313         np = VTONFS(vp);
 1314         nmp = VFSTONFS(vp->v_mount);
 1315         uiop = &uio;
 1316         uiop->uio_iov = &io;
 1317         uiop->uio_iovcnt = 1;
 1318         uiop->uio_segflg = UIO_SYSSPACE;
 1319         uiop->uio_td = td;
 1320 
 1321         /*
 1322          * clear BIO_ERROR and B_INVAL state prior to initiating the I/O.  We
 1323          * do this here so we do not have to do it in all the code that
 1324          * calls us.
 1325          */
 1326         bp->b_flags &= ~B_INVAL;
 1327         bp->b_ioflags &= ~BIO_ERROR;
 1328 
 1329         KASSERT(!(bp->b_flags & B_DONE), ("nfs_doio: bp %p already marked done", bp));
 1330 
 1331         if (bp->b_iocmd == BIO_READ) {
 1332             io.iov_len = uiop->uio_resid = bp->b_bcount;
 1333             io.iov_base = bp->b_data;
 1334             uiop->uio_rw = UIO_READ;
 1335 
 1336             switch (vp->v_type) {
 1337             case VREG:
 1338                 uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
 1339                 nfsstats.read_bios++;
 1340                 error = (nmp->nm_rpcops->nr_readrpc)(vp, uiop, cr);
 1341 
 1342                 if (!error) {
 1343                     if (uiop->uio_resid) {
 1344                         /*
 1345                          * If we had a short read with no error, we must have
 1346                          * hit a file hole.  We should zero-fill the remainder.
 1347                          * This can also occur if the server hits the file EOF.
 1348                          *
 1349                          * Holes used to be able to occur due to pending
 1350                          * writes, but that is not possible any longer.
 1351                          */
 1352                         int nread = bp->b_bcount - uiop->uio_resid;
 1353                         int left  = uiop->uio_resid;
 1354 
 1355                         if (left > 0)
 1356                                 bzero((char *)bp->b_data + nread, left);
 1357                         uiop->uio_resid = 0;
 1358                     }
 1359                 }
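                      /*
                       * Editorial note, not in the original source: a worked
                       * example of the short-read arithmetic above.  With
                       * b_bcount == 8192 and a server reply of only 4096
                       * bytes, uio_resid is left at 4096, so nread == 4096,
                       * left == 4096, and the final 4096 bytes of b_data are
                       * bzero()'d.  The caller therefore always sees a fully
                       * initialized buffer.
                       */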
 1360                 /* ASSERT_VOP_LOCKED(vp, "nfs_doio"); */
 1361                 if (p && (vp->v_vflag & VV_TEXT) &&
 1362                         (np->n_mtime != np->n_vattr.va_mtime.tv_sec)) {
 1363                         PROC_LOCK(p);
 1364                         killproc(p, "text file modification");
 1365                         PROC_UNLOCK(p);
 1366                 }
 1367                 break;
 1368             case VLNK:
 1369                 uiop->uio_offset = (off_t)0;
 1370                 nfsstats.readlink_bios++;
 1371                 error = (nmp->nm_rpcops->nr_readlinkrpc)(vp, uiop, cr);
 1372                 break;
 1373             case VDIR:
 1374                 nfsstats.readdir_bios++;
 1375                 uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ;
 1376                 if ((nmp->nm_flag & NFSMNT_NFSV4) != 0)
 1377                         error = nfs4_readdirrpc(vp, uiop, cr);
 1378                 else {
 1379                         if ((nmp->nm_flag & NFSMNT_RDIRPLUS) != 0) {
 1380                                 error = nfs_readdirplusrpc(vp, uiop, cr);
 1381                                 if (error == NFSERR_NOTSUPP)
 1382                                         nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
 1383                         }
 1384                         if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
 1385                                 error = nfs_readdirrpc(vp, uiop, cr);
 1386                 }
 1387                 /*
 1388                  * end-of-directory sets B_INVAL but does not generate an
 1389                  * error.
 1390                  */
 1391                 if (error == 0 && uiop->uio_resid == bp->b_bcount)
 1392                         bp->b_flags |= B_INVAL;
 1393                 break;
 1394             default:
 1395                 printf("nfs_doio:  type %x unexpected\n", vp->v_type);
 1396                 break;
 1397             }
 1398             if (error) {
 1399                 bp->b_ioflags |= BIO_ERROR;
 1400                 bp->b_error = error;
 1401             }
 1402         } else {
 1403             /*
 1404              * If we only need to commit, try to commit
 1405              */
 1406             if (bp->b_flags & B_NEEDCOMMIT) {
 1407                     int retv;
 1408                     off_t off;
 1409 
 1410                     off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
 1411                     bp->b_flags |= B_WRITEINPROG;
 1412                     retv = (nmp->nm_rpcops->nr_commit)(
 1413                                 bp->b_vp, off, bp->b_dirtyend-bp->b_dirtyoff,
 1414                                 bp->b_wcred, td);
 1415                     bp->b_flags &= ~B_WRITEINPROG;
 1416                     if (retv == 0) {
 1417                             bp->b_dirtyoff = bp->b_dirtyend = 0;
 1418                             bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
 1419                             bp->b_resid = 0;
 1420                             bufdone(bp);
 1421                             return (0);
 1422                     }
 1423                     if (retv == NFSERR_STALEWRITEVERF) {
 1424                             nfs_clearcommit(bp->b_vp->v_mount);
 1425                     }
 1426             }
 1427 
 1428             /*
 1429              * Setup for actual write
 1430              */
 1431 
 1432             if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > np->n_size)
 1433                 bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * DEV_BSIZE;
 1434 
 1435             if (bp->b_dirtyend > bp->b_dirtyoff) {
 1436                 io.iov_len = uiop->uio_resid = bp->b_dirtyend
 1437                     - bp->b_dirtyoff;
 1438                 uiop->uio_offset = (off_t)bp->b_blkno * DEV_BSIZE
 1439                     + bp->b_dirtyoff;
 1440                 io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
 1441                 uiop->uio_rw = UIO_WRITE;
 1442                 nfsstats.write_bios++;
 1443 
 1444                 if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == B_ASYNC)
 1445                     iomode = NFSV3WRITE_UNSTABLE;
 1446                 else
 1447                     iomode = NFSV3WRITE_FILESYNC;
 1448 
 1449                 bp->b_flags |= B_WRITEINPROG;
 1450                 error = (nmp->nm_rpcops->nr_writerpc)(vp, uiop, cr, &iomode, &must_commit);
 1451 
 1452                 /*
 1453                  * When setting B_NEEDCOMMIT also set B_CLUSTEROK to try
 1454                  * to cluster the buffers needing commit.  This will allow
 1455                  * the system to submit a single commit rpc for the whole
 1456                  * cluster.  We can do this even if the buffer is not 100%
 1457                  * dirty (relative to the NFS blocksize), so we optimize the
 1458                  * append-to-file case.
 1459                  *
 1460                  * (when clearing B_NEEDCOMMIT, B_CLUSTEROK must also be
 1461                  * cleared because write clustering only works for commit
 1462                  * rpc's, not for the data portion of the write).
 1463                  */
 1464 
 1465                 if (!error && iomode == NFSV3WRITE_UNSTABLE) {
 1466                     bp->b_flags |= B_NEEDCOMMIT;
 1467                     if (bp->b_dirtyoff == 0
 1468                         && bp->b_dirtyend == bp->b_bcount)
 1469                         bp->b_flags |= B_CLUSTEROK;
 1470                 } else {
 1471                     bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
 1472                 }
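                      /*
                       * Editorial example, not in the original source: if two
                       * adjacent, fully dirty buffers of an append workload
                       * both complete UNSTABLE writes, each ends up with
                       * B_NEEDCOMMIT and B_CLUSTEROK set, so a later flush can
                       * cover both with a single commit RPC spanning their
                       * combined byte range instead of issuing one commit per
                       * buffer.
                       */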
 1473                 bp->b_flags &= ~B_WRITEINPROG;
 1474 
 1475                 /*
 1476                  * For an interrupted write, the buffer is still valid
 1477                  * and the write hasn't been pushed to the server yet,
 1478                  * so we can't set BIO_ERROR; instead we report the
 1479                  * interruption by setting B_EINTR.  For the B_ASYNC case,
 1480                  * is not relevant, so the rpc attempt is essentially
 1481                  * a noop.  For the case of a V3 write rpc not being
 1482                  * committed to stable storage, the block is still
 1483                  * dirty and requires either a commit rpc or another
 1484                  * write rpc with iomode == NFSV3WRITE_FILESYNC before
 1485                  * the block is reused. This is indicated by setting
 1486                  * the B_DELWRI and B_NEEDCOMMIT flags.
 1487                  *
 1488                  * If the buffer is marked B_PAGING, it does not reside on
 1489                  * the vp's paging queues so we cannot call bdirty().  The
 1490                  * bp in this case is not an NFS cache block so we should
 1491                  * be safe. XXX
 1492                  */
 1493                 if (error == EINTR || error == EIO
 1494                     || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
 1495                         int s;
 1496 
 1497                         s = splbio();
 1498                         bp->b_flags &= ~(B_INVAL|B_NOCACHE);
 1499                         if ((bp->b_flags & B_PAGING) == 0) {
 1500                             bdirty(bp);
 1501                             bp->b_flags &= ~B_DONE;
 1502                         }
 1503                         if (error && (bp->b_flags & B_ASYNC) == 0)
 1504                             bp->b_flags |= B_EINTR;
 1505                         splx(s);
 1506                 } else {
 1507                     if (error) {
 1508                         bp->b_ioflags |= BIO_ERROR;
 1509                         bp->b_error = np->n_error = error;
 1510                         np->n_flag |= NWRITEERR;
 1511                     }
 1512                     bp->b_dirtyoff = bp->b_dirtyend = 0;
 1513                 }
 1514             } else {
 1515                 bp->b_resid = 0;
 1516                 bufdone(bp);
 1517                 return (0);
 1518             }
 1519         }
 1520         bp->b_resid = uiop->uio_resid;
 1521         if (must_commit)
 1522             nfs_clearcommit(vp->v_mount);
 1523         bufdone(bp);
 1524         return (error);
 1525 }
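
      /*
       * Editorial sketch, not part of the original nfs_bio.c: nfs_doio() is
       * reached either from an nfsiod (see the sketch after nfs_asyncio()
       * above) or synchronously when no iod can take the buffer.  The
       * strategy routine in sys/nfsclient/nfs_vnops.c does, roughly:
       *
       *      if ((bp->b_flags & B_ASYNC) == 0 ||
       *          nfs_asyncio(bp, NOCRED, curthread))
       *              error = nfs_doio(bp, cr, curthread);
       *
       * i.e. when nfs_asyncio() returns EIO because all iods are busy on
       * other mounts, the caller simply performs the I/O itself.
       */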
 1526 
 1527 /*
 1528  * Used to aid in handling ftruncate() operations on the NFS client side.
 1529  * Truncation creates a number of special problems for NFS.  We have to
 1530  * throw away VM pages and buffer cache buffers that are beyond EOF, and
 1531  * we have to properly handle VM pages or (potentially dirty) buffers
 1532  * that straddle the truncation point.
 1533  */
 1534 
 1535 int
 1536 nfs_meta_setsize(struct vnode *vp, struct ucred *cred, struct thread *td, u_quad_t nsize)
 1537 {
 1538         struct nfsnode *np = VTONFS(vp);
 1539         u_quad_t tsize = np->n_size;
 1540         int biosize = vp->v_mount->mnt_stat.f_iosize;
 1541         int error = 0;
 1542 
 1543         np->n_size = nsize;
 1544 
 1545         if (np->n_size < tsize) {
 1546                 struct buf *bp;
 1547                 daddr_t lbn;
 1548                 int bufsize;
 1549 
 1550                 /*
 1551                  * vtruncbuf() doesn't get the buffer overlapping the 
 1552                  * truncation point.  We may have a B_DELWRI and/or B_CACHE
 1553                  * buffer that now needs to be truncated.
 1554                  */
 1555                 error = vtruncbuf(vp, cred, td, nsize, biosize);
 1556                 lbn = nsize / biosize;
 1557                 bufsize = nsize & (biosize - 1);
 1558                 bp = nfs_getcacheblk(vp, lbn, bufsize, td);
 1559                 if (bp->b_dirtyoff > bp->b_bcount)
 1560                         bp->b_dirtyoff = bp->b_bcount;
 1561                 if (bp->b_dirtyend > bp->b_bcount)
 1562                         bp->b_dirtyend = bp->b_bcount;
 1563                 bp->b_flags |= B_RELBUF;  /* don't leave garbage around */
 1564                 brelse(bp);
 1565         } else {
 1566                 vnode_pager_setsize(vp, nsize);
 1567         }
 1568         return(error);
 1569 }
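
      /*
       * Editorial example, not in the original source: with the typical
       * biosize of 8192 and a truncation to nsize == 20580, the arithmetic
       * above gives
       *
       *      lbn     = 20580 / 8192          = 2
       *      bufsize = 20580 & (8192 - 1)    = 4196
       *
       * so nfs_getcacheblk() re-fetches logical block 2 at its new length of
       * 4196 bytes, and any dirty region is clipped to b_bcount, discarding
       * the part of the block that now lies beyond end of file.
       */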
 1570 
