FreeBSD/Linux Kernel Cross Reference
sys/nfsclient/nfs_vnops.c


    1 /*-
    2  * Copyright (c) 1989, 1993
    3  *      The Regents of the University of California.  All rights reserved.
    4  *
    5  * This code is derived from software contributed to Berkeley by
    6  * Rick Macklem at The University of Guelph.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  * 4. Neither the name of the University nor the names of its contributors
   17  *    may be used to endorse or promote products derived from this software
   18  *    without specific prior written permission.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   30  * SUCH DAMAGE.
   31  *
   32  *      @(#)nfs_vnops.c 8.16 (Berkeley) 5/27/95
   33  */
   34 
   35 #include <sys/cdefs.h>
   36 __FBSDID("$FreeBSD: releng/9.0/sys/nfsclient/nfs_vnops.c 224733 2011-08-09 15:29:58Z jhb $");
   37 
   38 /*
   39  * vnode op calls for Sun NFS version 2 and 3
   40  */
   41 
   42 #include "opt_inet.h"
   43 #include "opt_kdtrace.h"
   44 
   45 #include <sys/param.h>
   46 #include <sys/kernel.h>
   47 #include <sys/systm.h>
   48 #include <sys/resourcevar.h>
   49 #include <sys/proc.h>
   50 #include <sys/mount.h>
   51 #include <sys/bio.h>
   52 #include <sys/buf.h>
   53 #include <sys/jail.h>
   54 #include <sys/malloc.h>
   55 #include <sys/mbuf.h>
   56 #include <sys/namei.h>
   57 #include <sys/socket.h>
   58 #include <sys/vnode.h>
   59 #include <sys/dirent.h>
   60 #include <sys/fcntl.h>
   61 #include <sys/lockf.h>
   62 #include <sys/stat.h>
   63 #include <sys/sysctl.h>
   64 #include <sys/signalvar.h>
   65 
   66 #include <vm/vm.h>
   67 #include <vm/vm_extern.h>
   68 #include <vm/vm_object.h>
   69 
   70 #include <fs/fifofs/fifo.h>
   71 
   72 #include <nfs/nfsproto.h>
   73 #include <nfsclient/nfs.h>
   74 #include <nfsclient/nfsnode.h>
   75 #include <nfsclient/nfsmount.h>
   76 #include <nfs/nfs_kdtrace.h>
   77 #include <nfs/nfs_lock.h>
   78 #include <nfs/xdr_subs.h>
   79 #include <nfsclient/nfsm_subs.h>
   80 
   81 #include <net/if.h>
   82 #include <netinet/in.h>
   83 #include <netinet/in_var.h>
   84 
   85 #include <machine/stdarg.h>
   86 
   87 #ifdef KDTRACE_HOOKS
   88 #include <sys/dtrace_bsd.h>
   89 
   90 dtrace_nfsclient_accesscache_flush_probe_func_t
   91     dtrace_nfsclient_accesscache_flush_done_probe;
   92 uint32_t nfsclient_accesscache_flush_done_id;
   93 
   94 dtrace_nfsclient_accesscache_get_probe_func_t
   95     dtrace_nfsclient_accesscache_get_hit_probe,
   96     dtrace_nfsclient_accesscache_get_miss_probe;
   97 uint32_t nfsclient_accesscache_get_hit_id;
   98 uint32_t nfsclient_accesscache_get_miss_id;
   99 
  100 dtrace_nfsclient_accesscache_load_probe_func_t
  101     dtrace_nfsclient_accesscache_load_done_probe;
  102 uint32_t nfsclient_accesscache_load_done_id;
  103 #endif /* KDTRACE_HOOKS */
  104 
  105 /* Defs */
  106 #define TRUE    1
  107 #define FALSE   0
  108 
  109 /*
  110  * Ifdef for FreeBSD-current merged buffer cache. It is unfortunate that these
  111  * calls are not in getblk() and brelse() so that they would not be necessary
  112  * here.
  113  */
  114 #ifndef B_VMIO
  115 #define vfs_busy_pages(bp, f)
  116 #endif
  117 
  118 static vop_read_t       nfsfifo_read;
  119 static vop_write_t      nfsfifo_write;
  120 static vop_close_t      nfsfifo_close;
  121 static int      nfs_flush(struct vnode *, int, int);
  122 static int      nfs_setattrrpc(struct vnode *, struct vattr *, struct ucred *);
  123 static vop_lookup_t     nfs_lookup;
  124 static vop_create_t     nfs_create;
  125 static vop_mknod_t      nfs_mknod;
  126 static vop_open_t       nfs_open;
  127 static vop_close_t      nfs_close;
  128 static vop_access_t     nfs_access;
  129 static vop_getattr_t    nfs_getattr;
  130 static vop_setattr_t    nfs_setattr;
  131 static vop_read_t       nfs_read;
  132 static vop_fsync_t      nfs_fsync;
  133 static vop_remove_t     nfs_remove;
  134 static vop_link_t       nfs_link;
  135 static vop_rename_t     nfs_rename;
  136 static vop_mkdir_t      nfs_mkdir;
  137 static vop_rmdir_t      nfs_rmdir;
  138 static vop_symlink_t    nfs_symlink;
  139 static vop_readdir_t    nfs_readdir;
  140 static vop_strategy_t   nfs_strategy;
  141 static  int     nfs_lookitup(struct vnode *, const char *, int,
  142                     struct ucred *, struct thread *, struct nfsnode **);
  143 static  int     nfs_sillyrename(struct vnode *, struct vnode *,
  144                     struct componentname *);
  145 static vop_access_t     nfsspec_access;
  146 static vop_readlink_t   nfs_readlink;
  147 static vop_print_t      nfs_print;
  148 static vop_advlock_t    nfs_advlock;
  149 static vop_advlockasync_t nfs_advlockasync;
  150 
  151 /*
  152  * Global vfs data structures for nfs
  153  */
  154 struct vop_vector nfs_vnodeops = {
  155         .vop_default =          &default_vnodeops,
  156         .vop_access =           nfs_access,
  157         .vop_advlock =          nfs_advlock,
  158         .vop_advlockasync =     nfs_advlockasync,
  159         .vop_close =            nfs_close,
  160         .vop_create =           nfs_create,
  161         .vop_fsync =            nfs_fsync,
  162         .vop_getattr =          nfs_getattr,
  163         .vop_getpages =         nfs_getpages,
  164         .vop_putpages =         nfs_putpages,
  165         .vop_inactive =         nfs_inactive,
  166         .vop_link =             nfs_link,
  167         .vop_lookup =           nfs_lookup,
  168         .vop_mkdir =            nfs_mkdir,
  169         .vop_mknod =            nfs_mknod,
  170         .vop_open =             nfs_open,
  171         .vop_print =            nfs_print,
  172         .vop_read =             nfs_read,
  173         .vop_readdir =          nfs_readdir,
  174         .vop_readlink =         nfs_readlink,
  175         .vop_reclaim =          nfs_reclaim,
  176         .vop_remove =           nfs_remove,
  177         .vop_rename =           nfs_rename,
  178         .vop_rmdir =            nfs_rmdir,
  179         .vop_setattr =          nfs_setattr,
  180         .vop_strategy =         nfs_strategy,
  181         .vop_symlink =          nfs_symlink,
  182         .vop_write =            nfs_write,
  183 };
  184 
  185 struct vop_vector nfs_fifoops = {
  186         .vop_default =          &fifo_specops,
  187         .vop_access =           nfsspec_access,
  188         .vop_close =            nfsfifo_close,
  189         .vop_fsync =            nfs_fsync,
  190         .vop_getattr =          nfs_getattr,
  191         .vop_inactive =         nfs_inactive,
  192         .vop_print =            nfs_print,
  193         .vop_read =             nfsfifo_read,
  194         .vop_reclaim =          nfs_reclaim,
  195         .vop_setattr =          nfs_setattr,
  196         .vop_write =            nfsfifo_write,
  197 };
  198 
  199 static int      nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp,
  200                              struct componentname *cnp, struct vattr *vap);
  201 static int      nfs_removerpc(struct vnode *dvp, const char *name, int namelen,
  202                               struct ucred *cred, struct thread *td);
  203 static int      nfs_renamerpc(struct vnode *fdvp, const char *fnameptr,
  204                               int fnamelen, struct vnode *tdvp,
  205                               const char *tnameptr, int tnamelen,
  206                               struct ucred *cred, struct thread *td);
  207 static int      nfs_renameit(struct vnode *sdvp, struct componentname *scnp,
  208                              struct sillyrename *sp);
  209 
  210 /*
  211  * Global variables
  212  */
  213 struct mtx      nfs_iod_mtx;
  214 enum nfsiod_state nfs_iodwant[NFS_MAXASYNCDAEMON];
  215 struct nfsmount *nfs_iodmount[NFS_MAXASYNCDAEMON];
  216 int              nfs_numasync = 0;
  217 #define DIRHDSIZ        (sizeof (struct dirent) - (MAXNAMLEN + 1))
  218 
  219 SYSCTL_DECL(_vfs_oldnfs);
  220 
  221 static int      nfsaccess_cache_timeout = NFS_MAXATTRTIMO;
  222 SYSCTL_INT(_vfs_oldnfs, OID_AUTO, access_cache_timeout, CTLFLAG_RW,
  223            &nfsaccess_cache_timeout, 0, "NFS ACCESS cache timeout");
  224 
  225 static int      nfs_prime_access_cache = 0;
  226 SYSCTL_INT(_vfs_oldnfs, OID_AUTO, prime_access_cache, CTLFLAG_RW,
  227            &nfs_prime_access_cache, 0,
  228            "Prime NFS ACCESS cache when fetching attributes");
  229 
  230 static int      nfsv3_commit_on_close = 0;
  231 SYSCTL_INT(_vfs_oldnfs, OID_AUTO, nfsv3_commit_on_close, CTLFLAG_RW,
  232            &nfsv3_commit_on_close, 0, "write+commit on close, else only write");
  233 
  234 static int      nfs_clean_pages_on_close = 1;
  235 SYSCTL_INT(_vfs_oldnfs, OID_AUTO, clean_pages_on_close, CTLFLAG_RW,
  236            &nfs_clean_pages_on_close, 0, "NFS clean dirty pages on close");
  237 
  238 int nfs_directio_enable = 0;
  239 SYSCTL_INT(_vfs_oldnfs, OID_AUTO, nfs_directio_enable, CTLFLAG_RW,
  240            &nfs_directio_enable, 0, "Enable NFS directio");
  241 
  242 /*
  243  * This sysctl allows other processes to mmap a file that has been opened
  244  * O_DIRECT by a process.  In general, having processes mmap the file while
  245  * direct I/O is in progress can lead to data inconsistencies.  We allow
  246  * it by default anyway to avoid a denial of service: otherwise a
  247  * malicious user could open files O_DIRECT and thereby prevent other
  248  * users from mmap'ing them.  "Protected" environments where stricter
  249  * consistency guarantees are required can disable this knob.  The
  250  * process that opened the file O_DIRECT cannot mmap() the file, because
  251  * mmap'ed I/O on an O_DIRECT open() is not meaningful.
  252  */
  253 int nfs_directio_allow_mmap = 1;
  254 SYSCTL_INT(_vfs_oldnfs, OID_AUTO, nfs_directio_allow_mmap, CTLFLAG_RW,
  255            &nfs_directio_allow_mmap, 0, "Enable mmap'ed IO on files with O_DIRECT opens");
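      /*
       * Editorial note (not part of the original source): the tunables above
       * hang off SYSCTL_DECL(_vfs_oldnfs), so they are expected to appear to
       * sysctl(8) as vfs.oldnfs.<oid>, for example:
       *
       *      # sysctl vfs.oldnfs.access_cache_timeout=60
       *      # sysctl vfs.oldnfs.nfsv3_commit_on_close=1
       *      # sysctl vfs.oldnfs.nfs_directio_enable=1
       *
       * Verify the exact node names on a running kernel (sysctl vfs.oldnfs)
       * before relying on them.
       */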
  256 
  257 #if 0
  258 SYSCTL_INT(_vfs_oldnfs, OID_AUTO, access_cache_hits, CTLFLAG_RD,
  259            &nfsstats.accesscache_hits, 0, "NFS ACCESS cache hit count");
  260 
  261 SYSCTL_INT(_vfs_oldnfs, OID_AUTO, access_cache_misses, CTLFLAG_RD,
  262            &nfsstats.accesscache_misses, 0, "NFS ACCESS cache miss count");
  263 #endif
  264 
  265 #define NFSV3ACCESS_ALL (NFSV3ACCESS_READ | NFSV3ACCESS_MODIFY          \
  266                          | NFSV3ACCESS_EXTEND | NFSV3ACCESS_EXECUTE     \
  267                          | NFSV3ACCESS_DELETE | NFSV3ACCESS_LOOKUP)
  268 
  269 /*
  270  * SMP Locking Note:
  271  * The list of locks after the description of the lock is the ordering
  272  * of other locks acquired with the lock held.
  273  * np->n_mtx : Protects the fields in the nfsnode.
  274  *      VM Object Lock
  275  *      VI_MTX (acquired indirectly)
  276  * nmp->nm_mtx : Protects the fields in the nfsmount.
  277  *      rep->r_mtx
  278  * nfs_iod_mtx : Global lock, protects shared nfsiod state.
  279  * nfs_reqq_mtx : Global lock, protects the nfs_reqq list.
  280  *      nmp->nm_mtx
  281  *      rep->r_mtx
  282  * rep->r_mtx : Protects the fields in an nfsreq.
  283  */
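      /*
       * Editorial sketch (not in the original source): the ordering above
       * means a thread already holding np->n_mtx may go on to acquire the
       * VM object lock, but never the reverse, e.g.:
       *
       *      mtx_lock(&np->n_mtx);                   (nfsnode fields)
       *      VM_OBJECT_LOCK(vp->v_object);           (nests inside n_mtx)
       *      ...
       *      VM_OBJECT_UNLOCK(vp->v_object);
       *      mtx_unlock(&np->n_mtx);
       *
       * Acquiring them in the opposite order risks a lock-order reversal.
       */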
  284 
  285 static int
  286 nfs3_access_otw(struct vnode *vp, int wmode, struct thread *td,
  287     struct ucred *cred, uint32_t *retmode)
  288 {
  289         const int v3 = 1;
  290         u_int32_t *tl;
  291         int error = 0, attrflag, i, lrupos;
  292 
  293         struct mbuf *mreq, *mrep, *md, *mb;
  294         caddr_t bpos, dpos;
  295         u_int32_t rmode;
  296         struct nfsnode *np = VTONFS(vp);
  297 
  298         nfsstats.rpccnt[NFSPROC_ACCESS]++;
  299         mreq = nfsm_reqhead(vp, NFSPROC_ACCESS, NFSX_FH(v3) + NFSX_UNSIGNED);
  300         mb = mreq;
  301         bpos = mtod(mb, caddr_t);
  302         nfsm_fhtom(vp, v3);
  303         tl = nfsm_build(u_int32_t *, NFSX_UNSIGNED);
  304         *tl = txdr_unsigned(wmode);
  305         nfsm_request(vp, NFSPROC_ACCESS, td, cred);
  306         nfsm_postop_attr(vp, attrflag);
  307         if (!error) {
  308                 lrupos = 0;
  309                 tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
  310                 rmode = fxdr_unsigned(u_int32_t, *tl);
  311                 mtx_lock(&np->n_mtx);
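                      /*
                       * Editorial comment: update the per-uid access cache.
                       * If an entry for this credential exists, refresh its
                       * mode and timestamp; otherwise track the least
                       * recently stamped slot (lrupos) and, when the scan
                       * finds no match, overwrite that slot with the new
                       * result.
                       */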
  312                 for (i = 0; i < NFS_ACCESSCACHESIZE; i++) {
  313                         if (np->n_accesscache[i].uid == cred->cr_uid) {
  314                                 np->n_accesscache[i].mode = rmode;
  315                                 np->n_accesscache[i].stamp = time_second;
  316                                 break;
  317                         }
  318                         if (i > 0 && np->n_accesscache[i].stamp <
  319                             np->n_accesscache[lrupos].stamp)
  320                                 lrupos = i;
  321                 }
  322                 if (i == NFS_ACCESSCACHESIZE) {
  323                         np->n_accesscache[lrupos].uid = cred->cr_uid;
  324                         np->n_accesscache[lrupos].mode = rmode;
  325                         np->n_accesscache[lrupos].stamp = time_second;
  326                 }
  327                 mtx_unlock(&np->n_mtx);
  328                 if (retmode != NULL)
  329                         *retmode = rmode;
  330                 KDTRACE_NFS_ACCESSCACHE_LOAD_DONE(vp, cred->cr_uid, rmode, 0);
  331         }
  332         m_freem(mrep);
  333 nfsmout:
  334 #ifdef KDTRACE_HOOKS
  335         if (error) {
  336                 KDTRACE_NFS_ACCESSCACHE_LOAD_DONE(vp, cred->cr_uid, 0,
  337                     error);
  338         }
  339 #endif
  340         return (error);
  341 }
  342 
  343 /*
  344  * nfs access vnode op.
  345  * For nfs version 2, just return ok. File accesses may fail later.
  346  * For nfs version 3, use the access rpc to check accessibility. If file modes
  347  * are changed on the server, accesses might still fail later.
  348  */
  349 static int
  350 nfs_access(struct vop_access_args *ap)
  351 {
  352         struct vnode *vp = ap->a_vp;
  353         int error = 0, i, gotahit;
  354         u_int32_t mode, rmode, wmode;
  355         int v3 = NFS_ISV3(vp);
  356         struct nfsnode *np = VTONFS(vp);
  357 
  358         /*
  359          * Disallow write attempts on filesystems mounted read-only;
  360          * unless the file is a socket, fifo, or a block or character
  361          * device resident on the filesystem.
  362          */
  363         if ((ap->a_accmode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
  364                 switch (vp->v_type) {
  365                 case VREG:
  366                 case VDIR:
  367                 case VLNK:
  368                         return (EROFS);
  369                 default:
  370                         break;
  371                 }
  372         }
  373         /*
  374          * For nfs v3, check to see if we have done this recently, and if
  375          * so return our cached result instead of making an ACCESS call.
  376          * If not, do an access rpc.  For nfs v2 we are instead stuck
  377          * emulating ufs_access() locally using the vattr.  This may not
  378          * be correct, since the server may apply other access criteria
  379          * such as client uid-->server uid mapping that we do not know about.
  380          */
  381         if (v3) {
  382                 if (ap->a_accmode & VREAD)
  383                         mode = NFSV3ACCESS_READ;
  384                 else
  385                         mode = 0;
  386                 if (vp->v_type != VDIR) {
  387                         if (ap->a_accmode & VWRITE)
  388                                 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND);
  389                         if (ap->a_accmode & VEXEC)
  390                                 mode |= NFSV3ACCESS_EXECUTE;
  391                 } else {
  392                         if (ap->a_accmode & VWRITE)
  393                                 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND |
  394                                          NFSV3ACCESS_DELETE);
  395                         if (ap->a_accmode & VEXEC)
  396                                 mode |= NFSV3ACCESS_LOOKUP;
  397                 }
  398                 /* XXX safety belt, only make blanket request if caching */
  399                 if (nfsaccess_cache_timeout > 0) {
  400                         wmode = NFSV3ACCESS_READ | NFSV3ACCESS_MODIFY |
  401                                 NFSV3ACCESS_EXTEND | NFSV3ACCESS_EXECUTE |
  402                                 NFSV3ACCESS_DELETE | NFSV3ACCESS_LOOKUP;
  403                 } else {
  404                         wmode = mode;
  405                 }
  406 
  407                 /*
  408                  * Does our cached result allow us to give a definite yes to
  409                  * this request?
  410                  */
  411                 gotahit = 0;
  412                 mtx_lock(&np->n_mtx);
  413                 for (i = 0; i < NFS_ACCESSCACHESIZE; i++) {
  414                         if (ap->a_cred->cr_uid == np->n_accesscache[i].uid) {
  415                                 if (time_second < (np->n_accesscache[i].stamp +
  416                                     nfsaccess_cache_timeout) &&
  417                                     (np->n_accesscache[i].mode & mode) == mode) {
  418                                         nfsstats.accesscache_hits++;
  419                                         gotahit = 1;
  420                                 }
  421                                 break;
  422                         }
  423                 }
  424                 mtx_unlock(&np->n_mtx);
  425 #ifdef KDTRACE_HOOKS
  426                 if (gotahit)
  427                         KDTRACE_NFS_ACCESSCACHE_GET_HIT(vp,
  428                             ap->a_cred->cr_uid, mode);
  429                 else
  430                         KDTRACE_NFS_ACCESSCACHE_GET_MISS(vp,
  431                             ap->a_cred->cr_uid, mode);
  432 #endif
  433                 if (gotahit == 0) {
  434                         /*
  435                          * Either a no, or a don't know.  Go to the wire.
  436                          */
  437                         nfsstats.accesscache_misses++;
  438                         error = nfs3_access_otw(vp, wmode, ap->a_td, ap->a_cred,
  439                             &rmode);
  440                         if (!error) {
  441                                 if ((rmode & mode) != mode)
  442                                         error = EACCES;
  443                         }
  444                 }
  445                 return (error);
  446         } else {
  447                 if ((error = nfsspec_access(ap)) != 0) {
  448                         return (error);
  449                 }
  450                 /*
  451                  * Attempt to prevent a mapped root from accessing a file
  452                  * which it shouldn't.  We try to read a byte from the file
  453                  * if the user is root and the file is not zero length.
  454                  * After calling nfsspec_access, we should have the correct
  455                  * file size cached.
  456                  */
  457                 mtx_lock(&np->n_mtx);
  458                 if (ap->a_cred->cr_uid == 0 && (ap->a_accmode & VREAD)
  459                     && VTONFS(vp)->n_size > 0) {
  460                         struct iovec aiov;
  461                         struct uio auio;
  462                         char buf[1];
  463 
  464                         mtx_unlock(&np->n_mtx);
  465                         aiov.iov_base = buf;
  466                         aiov.iov_len = 1;
  467                         auio.uio_iov = &aiov;
  468                         auio.uio_iovcnt = 1;
  469                         auio.uio_offset = 0;
  470                         auio.uio_resid = 1;
  471                         auio.uio_segflg = UIO_SYSSPACE;
  472                         auio.uio_rw = UIO_READ;
  473                         auio.uio_td = ap->a_td;
  474 
  475                         if (vp->v_type == VREG)
  476                                 error = nfs_readrpc(vp, &auio, ap->a_cred);
  477                         else if (vp->v_type == VDIR) {
  478                                 char* bp;
  479                                 bp = malloc(NFS_DIRBLKSIZ, M_TEMP, M_WAITOK);
  480                                 aiov.iov_base = bp;
  481                                 aiov.iov_len = auio.uio_resid = NFS_DIRBLKSIZ;
  482                                 error = nfs_readdirrpc(vp, &auio, ap->a_cred);
  483                                 free(bp, M_TEMP);
  484                         } else if (vp->v_type == VLNK)
  485                                 error = nfs_readlinkrpc(vp, &auio, ap->a_cred);
  486                         else
  487                                 error = EACCES;
  488                 } else
  489                         mtx_unlock(&np->n_mtx);
  490                 return (error);
  491         }
  492 }
  493 
  494 int nfs_otw_getattr_avoid = 0;
  495 
  496 /*
  497  * nfs open vnode op
  498  * Check to see if the type is ok
  499  * and that deletion is not in progress.
  500  * For paged in text files, you will need to flush the page cache
  501  * if consistency is lost.
  502  */
  503 /* ARGSUSED */
  504 static int
  505 nfs_open(struct vop_open_args *ap)
  506 {
  507         struct vnode *vp = ap->a_vp;
  508         struct nfsnode *np = VTONFS(vp);
  509         struct vattr vattr;
  510         int error;
  511         int fmode = ap->a_mode;
  512 
  513         if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK)
  514                 return (EOPNOTSUPP);
  515 
  516         /*
  517          * If the cached data is stale, flush it.
  518          */
  519         mtx_lock(&np->n_mtx);
  520         if (np->n_flag & NMODIFIED) {
  521                 mtx_unlock(&np->n_mtx);
  522                 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
  523                 if (error == EINTR || error == EIO)
  524                         return (error);
  525                 mtx_lock(&np->n_mtx);
  526                 np->n_attrstamp = 0;
  527                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
  528                 if (vp->v_type == VDIR)
  529                         np->n_direofoffset = 0;
  530                 mtx_unlock(&np->n_mtx);
  531                 error = VOP_GETATTR(vp, &vattr, ap->a_cred);
  532                 if (error)
  533                         return (error);
  534                 mtx_lock(&np->n_mtx);
  535                 np->n_mtime = vattr.va_mtime;
  536         } else {
  537                 mtx_unlock(&np->n_mtx);
  538                 error = VOP_GETATTR(vp, &vattr, ap->a_cred);
  539                 if (error)
  540                         return (error);
  541                 mtx_lock(&np->n_mtx);
  542                 if (NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime)) {
  543                         if (vp->v_type == VDIR)
  544                                 np->n_direofoffset = 0;
  545                         mtx_unlock(&np->n_mtx);
  546                         error = nfs_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
  547                         if (error == EINTR || error == EIO) {
  548                                 return (error);
  549                         }
  550                         mtx_lock(&np->n_mtx);
  551                         np->n_mtime = vattr.va_mtime;
  552                 }
  553         }
  554         /*
  555          * If the object has >= 1 O_DIRECT active opens, we disable caching.
  556          */
  557         if (nfs_directio_enable && (fmode & O_DIRECT) && (vp->v_type == VREG)) {
  558                 if (np->n_directio_opens == 0) {
  559                         mtx_unlock(&np->n_mtx);
  560                         error = nfs_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
  561                         if (error)
  562                                 return (error);
  563                         mtx_lock(&np->n_mtx);
  564                         np->n_flag |= NNONCACHE;
  565                 }
  566                 np->n_directio_opens++;
  567         }
  568         mtx_unlock(&np->n_mtx);
  569         vnode_create_vobject(vp, vattr.va_size, ap->a_td);
  570         return (0);
  571 }
  572 
  573 /*
  574  * nfs close vnode op
  575  * What an NFS client should do upon close after writing is a debatable issue.
  576  * Most NFS clients push delayed writes to the server upon close, basically for
  577  * two reasons:
  578  * 1 - So that any write errors may be reported back to the client process
  579  *     doing the close system call. By far the two most likely errors are
  580  *     NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure.
  581  * 2 - To put a worst case upper bound on cache inconsistency between
  582  *     multiple clients for the file.
  583  * There is also a consistency problem for Version 2 of the protocol w.r.t.
  584  * not being able to tell if other clients are writing a file concurrently,
  585  * since there is no way of knowing if the changed modify time in the reply
  586  * is only due to the write for this client.
  587  * (NFS Version 3 provides weak cache consistency data in the reply that
  588  *  should be sufficient to detect and handle this case.)
  589  *
  590  * The current code does the following:
  591  * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers
  592  * for NFS Version 3 - flush dirty buffers to the server but don't invalidate
  593  *                     or commit them (this satisfies 1 and 2 except for the
  594  *                     case where the server crashes after this close but
  595  *                     before the commit RPC, which is felt to be "good
  596  *                     enough".) Changing the last argument to nfs_flush() to
  597  *                     a 1 would force a commit operation, if it is felt a
  598  *                     commit is necessary now.
  599  */
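      /*
       * Editorial note: in the function below that choice is made at run
       * time via the nfsv3_commit_on_close sysctl declared above (it selects
       * the "cm" argument passed to nfs_flush()), so forcing a commit does
       * not require a source change.
       */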
  600 /* ARGSUSED */
  601 static int
  602 nfs_close(struct vop_close_args *ap)
  603 {
  604         struct vnode *vp = ap->a_vp;
  605         struct nfsnode *np = VTONFS(vp);
  606         int error = 0;
  607         int fmode = ap->a_fflag;
  608 
  609         if (vp->v_type == VREG) {
  610             /*
  611              * Examine and clean dirty pages, regardless of NMODIFIED.
  612              * This closes a major hole in close-to-open consistency.
  613              * We want to push out all dirty pages (and buffers) on
  614              * close, regardless of whether they were dirtied by
  615              * mmap'ed writes or via write().
  616              */
  617             if (nfs_clean_pages_on_close && vp->v_object) {
  618                 VM_OBJECT_LOCK(vp->v_object);
  619                 vm_object_page_clean(vp->v_object, 0, 0, 0);
  620                 VM_OBJECT_UNLOCK(vp->v_object);
  621             }
  622             mtx_lock(&np->n_mtx);
  623             if (np->n_flag & NMODIFIED) {
  624                 mtx_unlock(&np->n_mtx);
  625                 if (NFS_ISV3(vp)) {
  626                     /*
  627                      * Under NFSv3 we have dirty buffers to dispose of.  We
  628                      * must flush them to the NFS server.  We have the option
  629                      * of waiting all the way through the commit rpc or just
  630                      * waiting for the initial write.  The default is to only
  631                      * wait through the initial write so the data is in the
  632                      * server's cache, which is roughly similar to the state
  633                      * a standard disk subsystem leaves the file in on close().
  634                      *
  635                      * We cannot clear the NMODIFIED bit in np->n_flag due to
  636                      * potential races with other processes, and certainly
  637                      * cannot clear it if we don't commit.
  638                      */
  639                     int cm = nfsv3_commit_on_close ? 1 : 0;
  640                     error = nfs_flush(vp, MNT_WAIT, cm);
  641                     /* np->n_flag &= ~NMODIFIED; */
  642                 } else
  643                     error = nfs_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
  644                 mtx_lock(&np->n_mtx);
  645             }
  646             if (np->n_flag & NWRITEERR) {
  647                 np->n_flag &= ~NWRITEERR;
  648                 error = np->n_error;
  649             }
  650             mtx_unlock(&np->n_mtx);
  651         }
  652         if (nfs_directio_enable)
  653                 KASSERT((np->n_directio_asyncwr == 0),
  654                         ("nfs_close: dirty unflushed (%d) directio buffers\n",
  655                          np->n_directio_asyncwr));
  656         if (nfs_directio_enable && (fmode & O_DIRECT) && (vp->v_type == VREG)) {
  657                 mtx_lock(&np->n_mtx);
  658                 KASSERT((np->n_directio_opens > 0), 
  659                         ("nfs_close: unexpected value (0) of n_directio_opens\n"));
  660                 np->n_directio_opens--;
  661                 if (np->n_directio_opens == 0)
  662                         np->n_flag &= ~NNONCACHE;
  663                 mtx_unlock(&np->n_mtx);
  664         }
  665         return (error);
  666 }
  667 
  668 /*
  669  * nfs getattr call from vfs.
  670  */
  671 static int
  672 nfs_getattr(struct vop_getattr_args *ap)
  673 {
  674         struct vnode *vp = ap->a_vp;
  675         struct nfsnode *np = VTONFS(vp);
  676         struct thread *td = curthread;
  677         struct vattr *vap = ap->a_vap;
  678         struct vattr vattr;
  679         caddr_t bpos, dpos;
  680         int error = 0;
  681         struct mbuf *mreq, *mrep, *md, *mb;
  682         int v3 = NFS_ISV3(vp);
  683 
  684         /*
  685          * Update local times for special files.
  686          */
  687         mtx_lock(&np->n_mtx);
  688         if (np->n_flag & (NACC | NUPD))
  689                 np->n_flag |= NCHG;
  690         mtx_unlock(&np->n_mtx);
  691         /*
  692          * First look in the cache.
  693          */
  694         if (nfs_getattrcache(vp, &vattr) == 0)
  695                 goto nfsmout;
  696         if (v3 && nfs_prime_access_cache && nfsaccess_cache_timeout > 0) {
  697                 nfsstats.accesscache_misses++;
  698                 nfs3_access_otw(vp, NFSV3ACCESS_ALL, td, ap->a_cred, NULL);
  699                 if (nfs_getattrcache(vp, &vattr) == 0)
  700                         goto nfsmout;
  701         }
  702         nfsstats.rpccnt[NFSPROC_GETATTR]++;
  703         mreq = nfsm_reqhead(vp, NFSPROC_GETATTR, NFSX_FH(v3));
  704         mb = mreq;
  705         bpos = mtod(mb, caddr_t);
  706         nfsm_fhtom(vp, v3);
  707         nfsm_request(vp, NFSPROC_GETATTR, td, ap->a_cred);
  708         if (!error) {
  709                 nfsm_loadattr(vp, &vattr);
  710         }
  711         m_freem(mrep);
  712 nfsmout:
  713         vap->va_type = vattr.va_type;
  714         vap->va_mode = vattr.va_mode;
  715         vap->va_nlink = vattr.va_nlink;
  716         vap->va_uid = vattr.va_uid;
  717         vap->va_gid = vattr.va_gid;
  718         vap->va_fsid = vattr.va_fsid;
  719         vap->va_fileid = vattr.va_fileid;
  720         vap->va_size = vattr.va_size;
  721         vap->va_blocksize = vattr.va_blocksize;
  722         vap->va_atime = vattr.va_atime;
  723         vap->va_mtime = vattr.va_mtime;
  724         vap->va_ctime = vattr.va_ctime;
  725         vap->va_gen = vattr.va_gen;
  726         vap->va_flags = vattr.va_flags;
  727         vap->va_rdev = vattr.va_rdev;
  728         vap->va_bytes = vattr.va_bytes;
  729         vap->va_filerev = vattr.va_filerev;
  730 
  731         return (error);
  732 }
  733 
  734 /*
  735  * nfs setattr call.
  736  */
  737 static int
  738 nfs_setattr(struct vop_setattr_args *ap)
  739 {
  740         struct vnode *vp = ap->a_vp;
  741         struct nfsnode *np = VTONFS(vp);
  742         struct vattr *vap = ap->a_vap;
  743         struct thread *td = curthread;
  744         int error = 0;
  745         u_quad_t tsize;
  746 
  747 #ifndef nolint
  748         tsize = (u_quad_t)0;
  749 #endif
  750 
  751         /*
  752          * Setting of flags is not supported.
  753          */
  754         if (vap->va_flags != VNOVAL)
  755                 return (EOPNOTSUPP);
  756 
  757         /*
  758          * Disallow write attempts if the filesystem is mounted read-only.
  759          */
  760         if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
  761             vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
  762             vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
  763             (vp->v_mount->mnt_flag & MNT_RDONLY)) {
  764                 error = EROFS;
  765                 goto out;
  766         }
  767         if (vap->va_size != VNOVAL) {
  768                 switch (vp->v_type) {
  769                 case VDIR:
  770                         return (EISDIR);
  771                 case VCHR:
  772                 case VBLK:
  773                 case VSOCK:
  774                 case VFIFO:
  775                         if (vap->va_mtime.tv_sec == VNOVAL &&
  776                             vap->va_atime.tv_sec == VNOVAL &&
  777                             vap->va_mode == (mode_t)VNOVAL &&
  778                             vap->va_uid == (uid_t)VNOVAL &&
  779                             vap->va_gid == (gid_t)VNOVAL)
  780                                 return (0);             
  781                         vap->va_size = VNOVAL;
  782                         break;
  783                 default:
  784                         /*
  785                          * Disallow write attempts if the filesystem is
  786                          * mounted read-only.
  787                          */
  788                         if (vp->v_mount->mnt_flag & MNT_RDONLY)
  789                                 return (EROFS);
  790                         /*
  791                          * We run vnode_pager_setsize() early (why?), so
  792                          * we must set np->n_size now to avoid vinvalbuf
  793                          * V_SAVE races that might setsize a lower
  794                          * value.
  795                          */
  796                         mtx_lock(&np->n_mtx);
  797                         tsize = np->n_size;
  798                         mtx_unlock(&np->n_mtx);
  799                         error = nfs_meta_setsize(vp, ap->a_cred, td,
  800                             vap->va_size);
  801                         mtx_lock(&np->n_mtx);
  802                         if (np->n_flag & NMODIFIED) {
  803                             tsize = np->n_size;
  804                             mtx_unlock(&np->n_mtx);
  805                             if (vap->va_size == 0)
  806                                 error = nfs_vinvalbuf(vp, 0, td, 1);
  807                             else
  808                                 error = nfs_vinvalbuf(vp, V_SAVE, td, 1);
  809                             if (error) {
  810                                 vnode_pager_setsize(vp, tsize);
  811                                 goto out;
  812                             }
  813                         } else
  814                             mtx_unlock(&np->n_mtx);
  815                         /*
  816                          * np->n_size has already been set to vap->va_size
  817                          * in nfs_meta_setsize(). We must set it again since
  818                          * nfs_loadattrcache() could be called through
  819                          * nfs_meta_setsize() and could modify np->n_size.
  820                          */
  821                         mtx_lock(&np->n_mtx);
  822                         np->n_vattr.va_size = np->n_size = vap->va_size;
  823                         mtx_unlock(&np->n_mtx);
  824                 }
  825         } else {
  826                 mtx_lock(&np->n_mtx);
  827                 if ((vap->va_mtime.tv_sec != VNOVAL || vap->va_atime.tv_sec != VNOVAL) && 
  828                     (np->n_flag & NMODIFIED) && vp->v_type == VREG) {
  829                         mtx_unlock(&np->n_mtx);
  830                         if ((error = nfs_vinvalbuf(vp, V_SAVE, td, 1)) != 0 &&
  831                             (error == EINTR || error == EIO))
  832                                 return error;
  833                 } else
  834                         mtx_unlock(&np->n_mtx);
  835         }
  836         error = nfs_setattrrpc(vp, vap, ap->a_cred);
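              /*
               * Editorial comment: if the SETATTR RPC failed after the local
               * size was already changed above, roll np->n_size and the
               * vnode pager back to the size saved in tsize.
               */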
  837         if (error && vap->va_size != VNOVAL) {
  838                 mtx_lock(&np->n_mtx);
  839                 np->n_size = np->n_vattr.va_size = tsize;
  840                 vnode_pager_setsize(vp, tsize);
  841                 mtx_unlock(&np->n_mtx);
  842         }
  843 out:
  844         return (error);
  845 }
  846 
  847 /*
  848  * Do an nfs setattr rpc.
  849  */
  850 static int
  851 nfs_setattrrpc(struct vnode *vp, struct vattr *vap, struct ucred *cred)
  852 {
  853         struct nfsv2_sattr *sp;
  854         struct nfsnode *np = VTONFS(vp);
  855         caddr_t bpos, dpos;
  856         u_int32_t *tl;
  857         int error = 0, i, wccflag = NFSV3_WCCRATTR;
  858         struct mbuf *mreq, *mrep, *md, *mb;
  859         int v3 = NFS_ISV3(vp);
  860 
  861         nfsstats.rpccnt[NFSPROC_SETATTR]++;
  862         mreq = nfsm_reqhead(vp, NFSPROC_SETATTR, NFSX_FH(v3) + NFSX_SATTR(v3));
  863         mb = mreq;
  864         bpos = mtod(mb, caddr_t);
  865         nfsm_fhtom(vp, v3);
  866         if (v3) {
  867                 nfsm_v3attrbuild(vap, TRUE);
  868                 tl = nfsm_build(u_int32_t *, NFSX_UNSIGNED);
  869                 *tl = nfs_false;
  870         } else {
  871                 sp = nfsm_build(struct nfsv2_sattr *, NFSX_V2SATTR);
  872                 if (vap->va_mode == (mode_t)VNOVAL)
  873                         sp->sa_mode = nfs_xdrneg1;
  874                 else
  875                         sp->sa_mode = vtonfsv2_mode(vp->v_type, vap->va_mode);
  876                 if (vap->va_uid == (uid_t)VNOVAL)
  877                         sp->sa_uid = nfs_xdrneg1;
  878                 else
  879                         sp->sa_uid = txdr_unsigned(vap->va_uid);
  880                 if (vap->va_gid == (gid_t)VNOVAL)
  881                         sp->sa_gid = nfs_xdrneg1;
  882                 else
  883                         sp->sa_gid = txdr_unsigned(vap->va_gid);
  884                 sp->sa_size = txdr_unsigned(vap->va_size);
  885                 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
  886                 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
  887         }
  888         nfsm_request(vp, NFSPROC_SETATTR, curthread, cred);
  889         if (v3) {
  890                 mtx_lock(&np->n_mtx);
  891                 for (i = 0; i < NFS_ACCESSCACHESIZE; i++)
  892                         np->n_accesscache[i].stamp = 0;
  893                 mtx_unlock(&np->n_mtx);
  894                 KDTRACE_NFS_ACCESSCACHE_FLUSH_DONE(vp);
  895                 nfsm_wcc_data(vp, wccflag);
  896         } else
  897                 nfsm_loadattr(vp, NULL);
  898         m_freem(mrep);
  899 nfsmout:
  900         return (error);
  901 }
  902 
  903 /*
  904  * nfs lookup call, one step at a time...
  905  * First look in cache
  906  * If not found, unlock the directory nfsnode and do the rpc
  907  */
  908 static int
  909 nfs_lookup(struct vop_lookup_args *ap)
  910 {
  911         struct componentname *cnp = ap->a_cnp;
  912         struct vnode *dvp = ap->a_dvp;
  913         struct vnode **vpp = ap->a_vpp;
  914         struct mount *mp = dvp->v_mount;
  915         struct vattr vattr;
  916         struct timespec dmtime;
  917         int flags = cnp->cn_flags;
  918         struct vnode *newvp;
  919         struct nfsmount *nmp;
  920         caddr_t bpos, dpos;
  921         struct mbuf *mreq, *mrep, *md, *mb;
  922         long len;
  923         nfsfh_t *fhp;
  924         struct nfsnode *np, *newnp;
  925         int error = 0, attrflag, fhsize, ltype;
  926         int v3 = NFS_ISV3(dvp);
  927         struct thread *td = cnp->cn_thread;
  928 
  929         *vpp = NULLVP;
  930         if ((flags & ISLASTCN) && (mp->mnt_flag & MNT_RDONLY) &&
  931             (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
  932                 return (EROFS);
  933         if (dvp->v_type != VDIR)
  934                 return (ENOTDIR);
  935         nmp = VFSTONFS(mp);
  936         np = VTONFS(dvp);
  937         if ((error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, td)) != 0) {
  938                 *vpp = NULLVP;
  939                 return (error);
  940         }
  941         error = cache_lookup(dvp, vpp, cnp);
  942         if (error > 0 && error != ENOENT)
  943                 return (error);
  944         if (error == -1) {
  945                 /*
  946                  * We only accept a positive hit in the cache if the
  947                  * change time of the file matches our cached copy.
  948                  * Otherwise, we discard the cache entry and fallback
  949                  * to doing a lookup RPC.
  950                  *
  951                  * To better handle stale file handles and attributes,
  952                  * clear the attribute cache of this node if it is a
  953                  * leaf component, part of an open() call, and not
  954                  * locally modified before fetching the attributes.
  955                  * This should allow stale file handles to be detected
  956                  * here where we can fall back to a LOOKUP RPC to
  957                  * recover rather than having nfs_open() detect the
  958                  * stale file handle and failing open(2) with ESTALE.
  959                  */
  960                 newvp = *vpp;
  961                 newnp = VTONFS(newvp);
  962                 if (!(nmp->nm_flag & NFSMNT_NOCTO) &&
  963                     (flags & (ISLASTCN | ISOPEN)) == (ISLASTCN | ISOPEN) &&
  964                     !(newnp->n_flag & NMODIFIED)) {
  965                         mtx_lock(&newnp->n_mtx);
  966                         newnp->n_attrstamp = 0;
  967                         KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(newvp);
  968                         mtx_unlock(&newnp->n_mtx);
  969                 }
  970                 if (VOP_GETATTR(newvp, &vattr, cnp->cn_cred) == 0 &&
  971                     timespeccmp(&vattr.va_ctime, &newnp->n_ctime, ==)) {
  972                         nfsstats.lookupcache_hits++;
  973                         if (cnp->cn_nameiop != LOOKUP &&
  974                             (flags & ISLASTCN))
  975                                 cnp->cn_flags |= SAVENAME;
  976                         return (0);
  977                 }
  978                 cache_purge(newvp);
  979                 if (dvp != newvp)
  980                         vput(newvp);
  981                 else 
  982                         vrele(newvp);
  983                 *vpp = NULLVP;
  984         } else if (error == ENOENT) {
  985                 if (dvp->v_iflag & VI_DOOMED)
  986                         return (ENOENT);
  987                 /*
  988                  * We only accept a negative hit in the cache if the
  989                  * modification time of the parent directory matches
  990                  * our cached copy.  Otherwise, we discard all of the
  991                  * negative cache entries for this directory. We also
  992                  * only trust -ve cache entries for less than
  993                  * nm_negnametimeo seconds.
  994                  */
  995                 if ((u_int)(ticks - np->n_dmtime_ticks) <
  996                     (nmp->nm_negnametimeo * hz) &&
  997                     VOP_GETATTR(dvp, &vattr, cnp->cn_cred) == 0 &&
  998                     timespeccmp(&vattr.va_mtime, &np->n_dmtime, ==)) {
  999                         nfsstats.lookupcache_hits++;
 1000                         return (ENOENT);
 1001                 }
 1002                 cache_purge_negative(dvp);
 1003                 mtx_lock(&np->n_mtx);
 1004                 timespecclear(&np->n_dmtime);
 1005                 mtx_unlock(&np->n_mtx);
 1006         }
 1007 
 1008         /*
 1009          * Cache the modification time of the parent directory in case
 1010          * the lookup fails and results in adding the first negative
 1011          * name cache entry for the directory.  Since this is reading
 1012          * a single time_t, don't bother with locking.  The
 1013          * modification time may be a bit stale, but it must be read
 1014          * before performing the lookup RPC to prevent a race where
 1015          * another lookup updates the timestamp on the directory after
 1016          * the lookup RPC has been performed on the server but before
 1017          * n_dmtime is set at the end of this function.
 1018          */
 1019         dmtime = np->n_vattr.va_mtime;
 1020         error = 0;
 1021         newvp = NULLVP;
 1022         nfsstats.lookupcache_misses++;
 1023         nfsstats.rpccnt[NFSPROC_LOOKUP]++;
 1024         len = cnp->cn_namelen;
 1025         mreq = nfsm_reqhead(dvp, NFSPROC_LOOKUP,
 1026                 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
 1027         mb = mreq;
 1028         bpos = mtod(mb, caddr_t);
 1029         nfsm_fhtom(dvp, v3);
 1030         nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
 1031         nfsm_request(dvp, NFSPROC_LOOKUP, cnp->cn_thread, cnp->cn_cred);
 1032         if (error) {
 1033                 if (v3) {
 1034                         nfsm_postop_attr(dvp, attrflag);
 1035                         m_freem(mrep);
 1036                 }
 1037                 goto nfsmout;
 1038         }
 1039         nfsm_getfh(fhp, fhsize, v3);
 1040 
 1041         /*
 1042          * Handle RENAME case...
 1043          */
 1044         if (cnp->cn_nameiop == RENAME && (flags & ISLASTCN)) {
 1045                 if (NFS_CMPFH(np, fhp, fhsize)) {
 1046                         m_freem(mrep);
 1047                         return (EISDIR);
 1048                 }
 1049                 error = nfs_nget(mp, fhp, fhsize, &np, LK_EXCLUSIVE);
 1050                 if (error) {
 1051                         m_freem(mrep);
 1052                         return (error);
 1053                 }
 1054                 newvp = NFSTOV(np);
 1055                 if (v3) {
 1056                         nfsm_postop_attr(newvp, attrflag);
 1057                         nfsm_postop_attr(dvp, attrflag);
 1058                 } else
 1059                         nfsm_loadattr(newvp, NULL);
 1060                 *vpp = newvp;
 1061                 m_freem(mrep);
 1062                 cnp->cn_flags |= SAVENAME;
 1063                 return (0);
 1064         }
 1065 
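              /*
               * Editorial comment: for ".." the result is dvp's parent, which
               * must not be locked while dvp itself is held (that would
               * invert the usual top-down locking order).  The code below
               * therefore unlocks dvp around nfs_nget(), holds the mount
               * busy via vfs_busy() so it cannot be unmounted meanwhile, and
               * re-checks VI_DOOMED in case dvp was recycled while unlocked.
               */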
 1066         if (flags & ISDOTDOT) {
 1067                 ltype = VOP_ISLOCKED(dvp);
 1068                 error = vfs_busy(mp, MBF_NOWAIT);
 1069                 if (error != 0) {
 1070                         vfs_ref(mp);
 1071                         VOP_UNLOCK(dvp, 0);
 1072                         error = vfs_busy(mp, 0);
 1073                         vn_lock(dvp, ltype | LK_RETRY);
 1074                         vfs_rel(mp);
 1075                         if (error == 0 && (dvp->v_iflag & VI_DOOMED)) {
 1076                                 vfs_unbusy(mp);
 1077                                 error = ENOENT;
 1078                         }
 1079                         if (error != 0) {
 1080                                 m_freem(mrep);
 1081                                 return (error);
 1082                         }
 1083                 }
 1084                 VOP_UNLOCK(dvp, 0);
 1085                 error = nfs_nget(mp, fhp, fhsize, &np, cnp->cn_lkflags);
 1086                 if (error == 0)
 1087                         newvp = NFSTOV(np);
 1088                 vfs_unbusy(mp);
 1089                 if (newvp != dvp)
 1090                         vn_lock(dvp, ltype | LK_RETRY);
 1091                 if (dvp->v_iflag & VI_DOOMED) {
 1092                         if (error == 0) {
 1093                                 if (newvp == dvp)
 1094                                         vrele(newvp);
 1095                                 else
 1096                                         vput(newvp);
 1097                         }
 1098                         error = ENOENT;
 1099                 }
 1100                 if (error) {
 1101                         m_freem(mrep);
 1102                         return (error);
 1103                 }
 1104         } else if (NFS_CMPFH(np, fhp, fhsize)) {
 1105                 VREF(dvp);
 1106                 newvp = dvp;
 1107         } else {
 1108                 error = nfs_nget(mp, fhp, fhsize, &np, cnp->cn_lkflags);
 1109                 if (error) {
 1110                         m_freem(mrep);
 1111                         return (error);
 1112                 }
 1113                 newvp = NFSTOV(np);
 1114 
 1115                 /*
 1116                  * Flush the attribute cache when opening a leaf node
 1117                  * to ensure that fresh attributes are fetched in
 1118                  * nfs_open() if we are unable to fetch attributes
 1119                  * from the LOOKUP reply.
 1120                  */
 1121                 if ((flags & (ISLASTCN | ISOPEN)) == (ISLASTCN | ISOPEN) &&
 1122                     !(np->n_flag & NMODIFIED)) {
 1123                         mtx_lock(&np->n_mtx);
 1124                         np->n_attrstamp = 0;
 1125                         KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(newvp);
 1126                         mtx_unlock(&np->n_mtx);
 1127                 }
 1128         }
 1129         if (v3) {
 1130                 nfsm_postop_attr(newvp, attrflag);
 1131                 nfsm_postop_attr(dvp, attrflag);
 1132         } else
 1133                 nfsm_loadattr(newvp, NULL);
 1134         if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
 1135                 cnp->cn_flags |= SAVENAME;
 1136         if ((cnp->cn_flags & MAKEENTRY) &&
 1137             (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN))) {
 1138                 np->n_ctime = np->n_vattr.va_ctime;
 1139                 cache_enter(dvp, newvp, cnp);
 1140         }
 1141         *vpp = newvp;
 1142         m_freem(mrep);
 1143 nfsmout:
 1144         if (error) {
 1145                 if (newvp != NULLVP) {
 1146                         vput(newvp);
 1147                         *vpp = NULLVP;
 1148                 }
 1149 
 1150                 if (error != ENOENT)
 1151                         goto done;
 1152 
 1153                 /* The requested file was not found. */
 1154                 if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) &&
 1155                     (flags & ISLASTCN)) {
 1156                         /*
 1157                          * XXX: UFS does a full VOP_ACCESS(dvp,
 1158                          * VWRITE) here instead of just checking
 1159                          * MNT_RDONLY.
 1160                          */
 1161                         if (mp->mnt_flag & MNT_RDONLY)
 1162                                 return (EROFS);
 1163                         cnp->cn_flags |= SAVENAME;
 1164                         return (EJUSTRETURN);
 1165                 }
 1166 
 1167                 if ((cnp->cn_flags & MAKEENTRY) && cnp->cn_nameiop != CREATE) {
 1168                         /*
 1169                          * Maintain n_dmtime as the modification time
 1170                          * of the parent directory when the oldest -ve
 1171                          * name cache entry for this directory was
 1172                          * added.  If a -ve cache entry has already
 1173                          * been added with a newer modification time
 1174                          * by a concurrent lookup, then don't bother
 1175                          * adding a cache entry.  The modification
 1176                          * time of the directory might have changed
 1177                          * due to the file this lookup failed to find
 1178                          * being created.  In that case a subsequent
 1179                          * lookup would incorrectly use the entry
 1180                          * added here instead of doing an extra
 1181                          * lookup.
 1182                          */
 1183                         mtx_lock(&np->n_mtx);
 1184                         if (timespeccmp(&np->n_dmtime, &dmtime, <=)) {
 1185                                 if (!timespecisset(&np->n_dmtime)) {
 1186                                         np->n_dmtime = dmtime;
 1187                                         np->n_dmtime_ticks = ticks;
 1188                                 }
 1189                                 mtx_unlock(&np->n_mtx);
 1190                                 cache_enter(dvp, NULL, cnp);
 1191                         } else
 1192                                 mtx_unlock(&np->n_mtx);
 1193                 }
 1194                 return (ENOENT);
 1195         }
 1196 done:
 1197         return (error);
 1198 }
 1199 
 1200 /*
 1201  * nfs read call.
 1202  * Just call nfs_bioread() to do the work.
 1203  */
 1204 static int
 1205 nfs_read(struct vop_read_args *ap)
 1206 {
 1207         struct vnode *vp = ap->a_vp;
 1208 
 1209         switch (vp->v_type) {
 1210         case VREG:
 1211                 return (nfs_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred));
 1212         case VDIR:
 1213                 return (EISDIR);
 1214         default:
 1215                 return (EOPNOTSUPP);
 1216         }
 1217 }
 1218 
 1219 /*
 1220  * nfs readlink call
 1221  */
 1222 static int
 1223 nfs_readlink(struct vop_readlink_args *ap)
 1224 {
 1225         struct vnode *vp = ap->a_vp;
 1226 
 1227         if (vp->v_type != VLNK)
 1228                 return (EINVAL);
 1229         return (nfs_bioread(vp, ap->a_uio, 0, ap->a_cred));
 1230 }
 1231 
 1232 /*
 1233  * Do a readlink rpc.
 1234  * Called by nfs_doio() from below the buffer cache.
 1235  */
 1236 int
 1237 nfs_readlinkrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred)
 1238 {
 1239         caddr_t bpos, dpos;
 1240         int error = 0, len, attrflag;
 1241         struct mbuf *mreq, *mrep, *md, *mb;
 1242         int v3 = NFS_ISV3(vp);
 1243 
 1244         nfsstats.rpccnt[NFSPROC_READLINK]++;
 1245         mreq = nfsm_reqhead(vp, NFSPROC_READLINK, NFSX_FH(v3));
 1246         mb = mreq;
 1247         bpos = mtod(mb, caddr_t);
 1248         nfsm_fhtom(vp, v3);
 1249         nfsm_request(vp, NFSPROC_READLINK, uiop->uio_td, cred);
 1250         if (v3)
 1251                 nfsm_postop_attr(vp, attrflag);
 1252         if (!error) {
 1253                 nfsm_strsiz(len, NFS_MAXPATHLEN);
 1254                 if (len == NFS_MAXPATHLEN) {
 1255                         struct nfsnode *np = VTONFS(vp);
 1256                         mtx_lock(&np->n_mtx);
 1257                         if (np->n_size && np->n_size < NFS_MAXPATHLEN)
 1258                                 len = np->n_size;
 1259                         mtx_unlock(&np->n_mtx);
 1260                 }
 1261                 nfsm_mtouio(uiop, len);
 1262         }
 1263         m_freem(mrep);
 1264 nfsmout:
 1265         return (error);
 1266 }
 1267 
 1268 /*
 1269  * nfs read rpc call
 1270  * Called by nfs_doio() from below the buffer cache, like nfs_readlinkrpc() above.
 1271  */
 1272 int
 1273 nfs_readrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred)
 1274 {
 1275         u_int32_t *tl;
 1276         caddr_t bpos, dpos;
 1277         struct mbuf *mreq, *mrep, *md, *mb;
 1278         struct nfsmount *nmp;
 1279         off_t end;
 1280         int error = 0, len, retlen, tsiz, eof, attrflag;
 1281         int v3 = NFS_ISV3(vp);
 1282         int rsize;
 1283 
 1284 #ifndef nolint
 1285         eof = 0;
 1286 #endif
 1287         nmp = VFSTONFS(vp->v_mount);
 1288         tsiz = uiop->uio_resid;
 1289         mtx_lock(&nmp->nm_mtx);
 1290         end = uiop->uio_offset + tsiz;
 1291         if (end > nmp->nm_maxfilesize || end < uiop->uio_offset) {
 1292                 mtx_unlock(&nmp->nm_mtx);
 1293                 return (EFBIG);
 1294         }
 1295         rsize = nmp->nm_rsize;
 1296         mtx_unlock(&nmp->nm_mtx);
 1297         while (tsiz > 0) {
 1298                 nfsstats.rpccnt[NFSPROC_READ]++;
 1299                 len = (tsiz > rsize) ? rsize : tsiz;
 1300                 mreq = nfsm_reqhead(vp, NFSPROC_READ, NFSX_FH(v3) + NFSX_UNSIGNED * 3);
 1301                 mb = mreq;
 1302                 bpos = mtod(mb, caddr_t);
 1303                 nfsm_fhtom(vp, v3);
 1304                 tl = nfsm_build(u_int32_t *, NFSX_UNSIGNED * 3);
 1305                 if (v3) {
 1306                         txdr_hyper(uiop->uio_offset, tl);
 1307                         *(tl + 2) = txdr_unsigned(len);
 1308                 } else {
 1309                         *tl++ = txdr_unsigned(uiop->uio_offset);
 1310                         *tl++ = txdr_unsigned(len);
 1311                         *tl = 0;
 1312                 }
 1313                 nfsm_request(vp, NFSPROC_READ, uiop->uio_td, cred);
 1314                 if (v3) {
 1315                         nfsm_postop_attr(vp, attrflag);
 1316                         if (error) {
 1317                                 m_freem(mrep);
 1318                                 goto nfsmout;
 1319                         }
 1320                         tl = nfsm_dissect(u_int32_t *, 2 * NFSX_UNSIGNED);
 1321                         eof = fxdr_unsigned(int, *(tl + 1));
 1322                 } else {
 1323                         nfsm_loadattr(vp, NULL);
 1324                 }
 1325                 nfsm_strsiz(retlen, rsize);
 1326                 nfsm_mtouio(uiop, retlen);
 1327                 m_freem(mrep);
 1328                 tsiz -= retlen;
 1329                 if (v3) {
 1330                         if (eof || retlen == 0) {
 1331                                 tsiz = 0;
 1332                         }
 1333                 } else if (retlen < len) {
 1334                         tsiz = 0;
 1335                 }
 1336         }
 1337 nfsmout:
 1338         return (error);
 1339 }
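
/*
 * Editor's sketch (not kernel code): nfs_readrpc() above splits a read
 * into chunks of at most nm_rsize bytes and stops early on EOF (v3) or a
 * short reply (v2).  The standalone program below mirrors that control
 * flow; read_chunk() and RSIZE are hypothetical stand-ins for the READ
 * RPC and the mount's rsize.
 */
#include <stddef.h>
#include <stdio.h>

#define RSIZE 8192                      /* stand-in for nmp->nm_rsize */

/* Pretend READ RPC: returns bytes "read" and sets *eof at end of file. */
static size_t read_chunk(size_t off, size_t len, int *eof)
{
        const size_t filesize = 20000;  /* hypothetical file length */

        if (off >= filesize) {
                *eof = 1;
                return (0);
        }
        if (len > filesize - off)
                len = filesize - off;
        *eof = (off + len == filesize);
        return (len);
}

int main(void)
{
        size_t off = 0, resid = 65536;  /* stand-in for uio_resid */
        int eof = 0;

        while (resid > 0) {
                size_t len = resid > RSIZE ? RSIZE : resid;
                size_t got = read_chunk(off, len, &eof);

                off += got;
                resid -= got;
                if (eof || got == 0)    /* v3-style termination */
                        break;
        }
        printf("read %zu bytes, eof=%d\n", off, eof);
        return (0);
}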
 1340 
 1341 /*
 1342  * nfs write call
 1343  */
 1344 int
 1345 nfs_writerpc(struct vnode *vp, struct uio *uiop, struct ucred *cred,
 1346              int *iomode, int *must_commit)
 1347 {
 1348         u_int32_t *tl;
 1349         int32_t backup;
 1350         caddr_t bpos, dpos;
 1351         struct mbuf *mreq, *mrep, *md, *mb;
 1352         struct nfsmount *nmp = VFSTONFS(vp->v_mount);
 1353         off_t end;
 1354         int error = 0, len, tsiz, wccflag = NFSV3_WCCRATTR, rlen, commit;
 1355         int v3 = NFS_ISV3(vp), committed = NFSV3WRITE_FILESYNC;
 1356         int wsize;
 1357         
 1358         KASSERT(uiop->uio_iovcnt == 1, ("nfs: writerpc iovcnt > 1"));
 1359         *must_commit = 0;
 1360         tsiz = uiop->uio_resid;
 1361         mtx_lock(&nmp->nm_mtx);
 1362         end = uiop->uio_offset + tsiz;
 1363         if (end > nmp->nm_maxfilesize || end < uiop->uio_offset) {
 1364                 mtx_unlock(&nmp->nm_mtx);               
 1365                 return (EFBIG);
 1366         }
 1367         wsize = nmp->nm_wsize;
 1368         mtx_unlock(&nmp->nm_mtx);
 1369         while (tsiz > 0) {
 1370                 nfsstats.rpccnt[NFSPROC_WRITE]++;
 1371                 len = (tsiz > wsize) ? wsize : tsiz;
 1372                 mreq = nfsm_reqhead(vp, NFSPROC_WRITE,
 1373                         NFSX_FH(v3) + 5 * NFSX_UNSIGNED + nfsm_rndup(len));
 1374                 mb = mreq;
 1375                 bpos = mtod(mb, caddr_t);
 1376                 nfsm_fhtom(vp, v3);
 1377                 if (v3) {
 1378                         tl = nfsm_build(u_int32_t *, 5 * NFSX_UNSIGNED);
 1379                         txdr_hyper(uiop->uio_offset, tl);
 1380                         tl += 2;
 1381                         *tl++ = txdr_unsigned(len);
 1382                         *tl++ = txdr_unsigned(*iomode);
 1383                         *tl = txdr_unsigned(len);
 1384                 } else {
 1385                         u_int32_t x;
 1386 
 1387                         tl = nfsm_build(u_int32_t *, 4 * NFSX_UNSIGNED);
 1388                         /* Set both "begin" and "current" to non-garbage. */
 1389                         x = txdr_unsigned((u_int32_t)uiop->uio_offset);
 1390                         *tl++ = x;      /* "begin offset" */
 1391                         *tl++ = x;      /* "current offset" */
 1392                         x = txdr_unsigned(len);
 1393                         *tl++ = x;      /* total to this offset */
 1394                         *tl = x;        /* size of this write */
 1395                 }
 1396                 nfsm_uiotom(uiop, len);
 1397                 nfsm_request(vp, NFSPROC_WRITE, uiop->uio_td, cred);
 1398                 if (v3) {
 1399                         wccflag = NFSV3_WCCCHK;
 1400                         nfsm_wcc_data(vp, wccflag);
 1401                         if (!error) {
 1402                                 tl = nfsm_dissect(u_int32_t *, 2 * NFSX_UNSIGNED
 1403                                         + NFSX_V3WRITEVERF);
 1404                                 rlen = fxdr_unsigned(int, *tl++);
 1405                                 if (rlen == 0) {
 1406                                         error = NFSERR_IO;
 1407                                         m_freem(mrep);
 1408                                         break;
 1409                                 } else if (rlen < len) {
 1410                                         backup = len - rlen;
 1411                                         uiop->uio_iov->iov_base =
 1412                                             (char *)uiop->uio_iov->iov_base -
 1413                                             backup;
 1414                                         uiop->uio_iov->iov_len += backup;
 1415                                         uiop->uio_offset -= backup;
 1416                                         uiop->uio_resid += backup;
 1417                                         len = rlen;
 1418                                 }
 1419                                 commit = fxdr_unsigned(int, *tl++);
 1420 
 1421                                 /*
 1422                                  * Return the lowest commitment level
 1423                                  * obtained by any of the RPCs.
 1424                                  */
 1425                                 if (committed == NFSV3WRITE_FILESYNC)
 1426                                         committed = commit;
 1427                                 else if (committed == NFSV3WRITE_DATASYNC &&
 1428                                         commit == NFSV3WRITE_UNSTABLE)
 1429                                         committed = commit;
 1430                                 mtx_lock(&nmp->nm_mtx);
 1431                                 if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0){
 1432                                     bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
 1433                                         NFSX_V3WRITEVERF);
 1434                                     nmp->nm_state |= NFSSTA_HASWRITEVERF;
 1435                                 } else if (bcmp((caddr_t)tl,
 1436                                     (caddr_t)nmp->nm_verf, NFSX_V3WRITEVERF)) {
 1437                                     *must_commit = 1;
 1438                                     bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
 1439                                         NFSX_V3WRITEVERF);
 1440                                 }
 1441                                 mtx_unlock(&nmp->nm_mtx);
 1442                         }
 1443                 } else {
 1444                         nfsm_loadattr(vp, NULL);
 1445                 }
 1446                 if (wccflag) {
 1447                         mtx_lock(&(VTONFS(vp))->n_mtx);
 1448                         VTONFS(vp)->n_mtime = VTONFS(vp)->n_vattr.va_mtime;
 1449                         mtx_unlock(&(VTONFS(vp))->n_mtx);
 1450                 }
 1451                 m_freem(mrep);
 1452                 if (error)
 1453                         break;
 1454                 tsiz -= len;
 1455         }
 1456 nfsmout:
 1457         if (vp->v_mount->mnt_kern_flag & MNTK_ASYNC)
 1458                 committed = NFSV3WRITE_FILESYNC;
 1459         *iomode = committed;
 1460         if (error)
 1461                 uiop->uio_resid = tsiz;
 1462         return (error);
 1463 }
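
/*
 * Editor's sketch (not kernel code): the v3 branch of nfs_writerpc()
 * above keeps two pieces of bookkeeping: it reports the weakest
 * commitment level any WRITE reply claimed, and it forces a commit when
 * the server's write verifier changes (a sign the server rebooted and
 * may have lost unstable data).  The constants and names below are
 * assumptions chosen to mirror the NFSv3 values.
 */
#include <stdio.h>
#include <string.h>

enum { WRITE_UNSTABLE = 0, WRITE_DATASYNC = 1, WRITE_FILESYNC = 2 };

static int committed = WRITE_FILESYNC; /* best case until proven worse */
static unsigned char verf[8];
static int have_verf;

static int note_write_reply(int commit, const unsigned char reply_verf[8])
{
        int must_commit = 0;

        if (commit < committed)         /* keep the weakest guarantee */
                committed = commit;
        if (!have_verf) {
                memcpy(verf, reply_verf, 8);
                have_verf = 1;
        } else if (memcmp(verf, reply_verf, 8) != 0) {
                must_commit = 1;        /* verifier changed: re-commit */
                memcpy(verf, reply_verf, 8);
        }
        return (must_commit);
}

int main(void)
{
        unsigned char v1[8] = "boot-0", v2[8] = "boot-1";
        int must;

        note_write_reply(WRITE_DATASYNC, v1);
        must = note_write_reply(WRITE_UNSTABLE, v2);
        printf("must_commit=%d committed=%d\n", must, committed);
        return (0);
}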
 1464 
 1465 /*
 1466  * nfs mknod rpc
 1467  * For NFS v2 this is a kludge: use a create rpc, with the IFMT bits of the
 1468  * mode specifying the file type and the size field carrying rdev.
 1469  */
 1470 static int
 1471 nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
 1472     struct vattr *vap)
 1473 {
 1474         struct nfsv2_sattr *sp;
 1475         u_int32_t *tl;
 1476         struct vnode *newvp = NULL;
 1477         struct nfsnode *np = NULL;
 1478         struct vattr vattr;
 1479         caddr_t bpos, dpos;
 1480         int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0;
 1481         struct mbuf *mreq, *mrep, *md, *mb;
 1482         u_int32_t rdev;
 1483         int v3 = NFS_ISV3(dvp);
 1484 
 1485         if (vap->va_type == VCHR || vap->va_type == VBLK)
 1486                 rdev = txdr_unsigned(vap->va_rdev);
 1487         else if (vap->va_type == VFIFO || vap->va_type == VSOCK)
 1488                 rdev = nfs_xdrneg1;
 1489         else {
 1490                 return (EOPNOTSUPP);
 1491         }
 1492         if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred)) != 0)
 1493                 return (error);
 1494         nfsstats.rpccnt[NFSPROC_MKNOD]++;
 1495         mreq = nfsm_reqhead(dvp, NFSPROC_MKNOD, NFSX_FH(v3) + 4 * NFSX_UNSIGNED +
 1496                 + nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
 1497         mb = mreq;
 1498         bpos = mtod(mb, caddr_t);
 1499         nfsm_fhtom(dvp, v3);
 1500         nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
 1501         if (v3) {
 1502                 tl = nfsm_build(u_int32_t *, NFSX_UNSIGNED);
 1503                 *tl++ = vtonfsv3_type(vap->va_type);
 1504                 nfsm_v3attrbuild(vap, FALSE);
 1505                 if (vap->va_type == VCHR || vap->va_type == VBLK) {
 1506                         tl = nfsm_build(u_int32_t *, 2 * NFSX_UNSIGNED);
 1507                         *tl++ = txdr_unsigned(major(vap->va_rdev));
 1508                         *tl = txdr_unsigned(minor(vap->va_rdev));
 1509                 }
 1510         } else {
 1511                 sp = nfsm_build(struct nfsv2_sattr *, NFSX_V2SATTR);
 1512                 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
 1513                 sp->sa_uid = nfs_xdrneg1;
 1514                 sp->sa_gid = nfs_xdrneg1;
 1515                 sp->sa_size = rdev;
 1516                 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
 1517                 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
 1518         }
 1519         nfsm_request(dvp, NFSPROC_MKNOD, cnp->cn_thread, cnp->cn_cred);
 1520         if (!error) {
 1521                 nfsm_mtofh(dvp, newvp, v3, gotvp);
 1522                 if (!gotvp) {
 1523                         if (newvp) {
 1524                                 vput(newvp);
 1525                                 newvp = NULL;
 1526                         }
 1527                         error = nfs_lookitup(dvp, cnp->cn_nameptr,
 1528                             cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread, &np);
 1529                         if (!error)
 1530                                 newvp = NFSTOV(np);
 1531                 }
 1532         }
 1533         if (v3)
 1534                 nfsm_wcc_data(dvp, wccflag);
 1535         m_freem(mrep);
 1536 nfsmout:
 1537         if (error) {
 1538                 if (newvp)
 1539                         vput(newvp);
 1540         } else {
 1541                 if (cnp->cn_flags & MAKEENTRY)
 1542                         cache_enter(dvp, newvp, cnp);
 1543                 *vpp = newvp;
 1544         }
 1545         mtx_lock(&(VTONFS(dvp))->n_mtx);
 1546         VTONFS(dvp)->n_flag |= NMODIFIED;
 1547         if (!wccflag) {
 1548                 VTONFS(dvp)->n_attrstamp = 0;
 1549                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
 1550         }
 1551         mtx_unlock(&(VTONFS(dvp))->n_mtx);
 1552         return (error);
 1553 }
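
/*
 * Editor's sketch (not kernel code): as the comment above notes, NFSv2
 * has no MKNOD procedure, so the client issues a CREATE with the file
 * type folded into the IFMT bits of sa_mode and the device number carried
 * in sa_size.  The trimmed structure below is a stand-in for nfsv2_sattr;
 * the mode macros come from <sys/stat.h>.
 */
#include <stdint.h>
#include <stdio.h>
#include <sys/stat.h>

struct v2_sattr {                       /* stand-in for struct nfsv2_sattr */
        uint32_t sa_mode;
        uint32_t sa_size;
};

static void fill_mknod_sattr(struct v2_sattr *sp, mode_t type, mode_t perm,
    uint32_t rdev)
{
        sp->sa_mode = (uint32_t)(type | perm);  /* e.g. S_IFCHR | 0644 */
        sp->sa_size = rdev;                     /* device number in "size" */
}

int main(void)
{
        struct v2_sattr sa;

        fill_mknod_sattr(&sa, S_IFCHR, 0644, 0x0501);
        printf("mode=%o size=%u\n", (unsigned)sa.sa_mode, (unsigned)sa.sa_size);
        return (0);
}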
 1554 
 1555 /*
 1556  * nfs mknod vop
 1557  * just call nfs_mknodrpc() to do the work.
 1558  */
 1559 /* ARGSUSED */
 1560 static int
 1561 nfs_mknod(struct vop_mknod_args *ap)
 1562 {
 1563         return (nfs_mknodrpc(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap));
 1564 }
 1565 
 1566 static u_long create_verf;
 1567 /*
 1568  * nfs file create call
 1569  */
 1570 static int
 1571 nfs_create(struct vop_create_args *ap)
 1572 {
 1573         struct vnode *dvp = ap->a_dvp;
 1574         struct vattr *vap = ap->a_vap;
 1575         struct componentname *cnp = ap->a_cnp;
 1576         struct nfsv2_sattr *sp;
 1577         u_int32_t *tl;
 1578         struct nfsnode *np = NULL;
 1579         struct vnode *newvp = NULL;
 1580         caddr_t bpos, dpos;
 1581         int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0, fmode = 0;
 1582         struct mbuf *mreq, *mrep, *md, *mb;
 1583         struct vattr vattr;
 1584         int v3 = NFS_ISV3(dvp);
 1585 
 1586         /*
 1587          * Sockets cannot be created with the create rpc; punt to nfs_mknodrpc().
 1588          */
 1589         if (vap->va_type == VSOCK) {
 1590                 error = nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap);
 1591                 return (error);
 1592         }
 1593 
 1594         if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred)) != 0) {
 1595                 return (error);
 1596         }
 1597         if (vap->va_vaflags & VA_EXCLUSIVE)
 1598                 fmode |= O_EXCL;
 1599 again:
 1600         nfsstats.rpccnt[NFSPROC_CREATE]++;
 1601         mreq = nfsm_reqhead(dvp, NFSPROC_CREATE, NFSX_FH(v3) + 2 * NFSX_UNSIGNED +
 1602                 nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
 1603         mb = mreq;
 1604         bpos = mtod(mb, caddr_t);
 1605         nfsm_fhtom(dvp, v3);
 1606         nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
 1607         if (v3) {
 1608                 tl = nfsm_build(u_int32_t *, NFSX_UNSIGNED);
 1609                 if (fmode & O_EXCL) {
 1610                         *tl = txdr_unsigned(NFSV3CREATE_EXCLUSIVE);
 1611                         tl = nfsm_build(u_int32_t *, NFSX_V3CREATEVERF);
 1612 #ifdef INET
 1613                         CURVNET_SET(CRED_TO_VNET(cnp->cn_cred));
 1614                         IN_IFADDR_RLOCK();
 1615                         if (!TAILQ_EMPTY(&V_in_ifaddrhead))
 1616                                 *tl++ = IA_SIN(TAILQ_FIRST(&V_in_ifaddrhead))->sin_addr.s_addr;
 1617                         else
 1618 #endif
 1619                                 *tl++ = create_verf;
 1620 #ifdef INET
 1621                         IN_IFADDR_RUNLOCK();
 1622                         CURVNET_RESTORE();
 1623 #endif
 1624                         *tl = ++create_verf;
 1625                 } else {
 1626                         *tl = txdr_unsigned(NFSV3CREATE_UNCHECKED);
 1627                         nfsm_v3attrbuild(vap, FALSE);
 1628                 }
 1629         } else {
 1630                 sp = nfsm_build(struct nfsv2_sattr *, NFSX_V2SATTR);
 1631                 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
 1632                 sp->sa_uid = nfs_xdrneg1;
 1633                 sp->sa_gid = nfs_xdrneg1;
 1634                 sp->sa_size = 0;
 1635                 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
 1636                 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
 1637         }
 1638         nfsm_request(dvp, NFSPROC_CREATE, cnp->cn_thread, cnp->cn_cred);
 1639         if (!error) {
 1640                 nfsm_mtofh(dvp, newvp, v3, gotvp);
 1641                 if (!gotvp) {
 1642                         if (newvp) {
 1643                                 vput(newvp);
 1644                                 newvp = NULL;
 1645                         }
 1646                         error = nfs_lookitup(dvp, cnp->cn_nameptr,
 1647                             cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread, &np);
 1648                         if (!error)
 1649                                 newvp = NFSTOV(np);
 1650                 }
 1651         }
 1652         if (v3)
 1653                 nfsm_wcc_data(dvp, wccflag);
 1654         m_freem(mrep);
 1655 nfsmout:
 1656         if (error) {
 1657                 if (v3 && (fmode & O_EXCL) && error == NFSERR_NOTSUPP) {
 1658                         fmode &= ~O_EXCL;
 1659                         goto again;
 1660                 }
 1661                 if (newvp)
 1662                         vput(newvp);
 1663         } else if (v3 && (fmode & O_EXCL)) {
 1664                 /*
 1665                  * We are normally called with only a partially initialized
 1666                  * VAP.  Since the NFSv3 spec says that the server may use the
 1667                  * file attributes to store the verifier, it requires
 1668                  * us to do a SETATTR RPC. FreeBSD servers store the verifier
 1669                  * in atime, but we can't really assume that all servers will,
 1670                  * so we ensure that our SETATTR sets both atime and mtime.
 1671                  */
 1672                 if (vap->va_mtime.tv_sec == VNOVAL)
 1673                         vfs_timestamp(&vap->va_mtime);
 1674                 if (vap->va_atime.tv_sec == VNOVAL)
 1675                         vap->va_atime = vap->va_mtime;
 1676                 error = nfs_setattrrpc(newvp, vap, cnp->cn_cred);
 1677                 if (error)
 1678                         vput(newvp);
 1679         }
 1680         if (!error) {
 1681                 if (cnp->cn_flags & MAKEENTRY)
 1682                         cache_enter(dvp, newvp, cnp);
 1683                 *ap->a_vpp = newvp;
 1684         }
 1685         mtx_lock(&(VTONFS(dvp))->n_mtx);
 1686         VTONFS(dvp)->n_flag |= NMODIFIED;
 1687         if (!wccflag) {
 1688                 VTONFS(dvp)->n_attrstamp = 0;
 1689                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
 1690         }
 1691         mtx_unlock(&(VTONFS(dvp))->n_mtx);
 1692         return (error);
 1693 }
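
/*
 * Editor's sketch (not kernel code): for an NFSv3 exclusive create,
 * nfs_create() above builds an 8-byte verifier from the host's primary
 * IPv4 address plus an incrementing counter, so the server can tell a
 * retransmitted CREATE from a new one.  get_primary_addr() below is a
 * hypothetical stand-in for walking V_in_ifaddrhead.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t create_counter;

/* Assumption: returns the host's primary IPv4 address, or 0 if none. */
static uint32_t get_primary_addr(void)
{
        return (0xc0a80001);            /* 192.168.0.1, made up */
}

static void build_create_verf(uint32_t verf[2])
{
        uint32_t addr = get_primary_addr();

        verf[0] = (addr != 0) ? addr : create_counter;
        verf[1] = ++create_counter;     /* differs for every attempt */
}

int main(void)
{
        uint32_t v[2];

        build_create_verf(v);
        printf("verifier %08x%08x\n", (unsigned)v[0], (unsigned)v[1]);
        return (0);
}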
 1694 
 1695 /*
 1696  * nfs file remove call
 1697  * To try to make nfs semantics closer to ufs semantics, a file that has
 1698  * other processes using the vnode is renamed instead of removed and then
 1699  * removed later on the last close.
 1700  * - If v_usecount > 1
 1701  *       if a rename is not already in the works
 1702  *          call nfs_sillyrename() to set it up
 1703  *   else
 1704  *       do the remove rpc
 1705  */
 1706 static int
 1707 nfs_remove(struct vop_remove_args *ap)
 1708 {
 1709         struct vnode *vp = ap->a_vp;
 1710         struct vnode *dvp = ap->a_dvp;
 1711         struct componentname *cnp = ap->a_cnp;
 1712         struct nfsnode *np = VTONFS(vp);
 1713         int error = 0;
 1714         struct vattr vattr;
 1715 
 1716         KASSERT((cnp->cn_flags & HASBUF) != 0, ("nfs_remove: no name"));
 1717         KASSERT(vrefcnt(vp) > 0, ("nfs_remove: bad v_usecount"));
 1718         if (vp->v_type == VDIR)
 1719                 error = EPERM;
 1720         else if (vrefcnt(vp) == 1 || (np->n_sillyrename &&
 1721             !VOP_GETATTR(vp, &vattr, cnp->cn_cred) && vattr.va_nlink > 1)) {
 1722                 /*
 1723                  * Purge the name cache so that the chance of a lookup for
 1724                  * the name succeeding while the remove is in progress is
 1725                  * minimized. Without node locking it can still happen, such
 1726                  * that an I/O op returns ESTALE, but since the same thing
 1727                  * happens when another host removes the file, that is acceptable.
 1728                  */
 1729                 cache_purge(vp);
 1730                 /*
 1731                  * throw away biocache buffers, mainly to avoid
 1732                  * unnecessary delayed writes later.
 1733                  */
 1734                 error = nfs_vinvalbuf(vp, 0, cnp->cn_thread, 1);
 1735                 /* Do the rpc */
 1736                 if (error != EINTR && error != EIO)
 1737                         error = nfs_removerpc(dvp, cnp->cn_nameptr,
 1738                                 cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread);
 1739                 /*
 1740                  * Kludge City: If the first reply to the remove rpc is lost,
 1741                  *   the reply to the retransmitted request will be ENOENT
 1742                  *   since the file was in fact removed.
 1743                  *   Therefore, we cheat and return success.
 1744                  */
 1745                 if (error == ENOENT)
 1746                         error = 0;
 1747         } else if (!np->n_sillyrename)
 1748                 error = nfs_sillyrename(dvp, vp, cnp);
 1749         mtx_lock(&np->n_mtx);
 1750         np->n_attrstamp = 0;
 1751         mtx_unlock(&np->n_mtx);
 1752         KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
 1753         return (error);
 1754 }
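
/*
 * Editor's sketch (not kernel code): it restates the common-case decision
 * made in nfs_remove() above: only the last reference may issue the
 * REMOVE RPC; otherwise the file is "sillyrenamed" to a hidden .nfsXXXX
 * name and removed on the final close.  (The in-kernel test also handles
 * a file that is already sillyrenamed and still hard-linked; the sketch
 * keeps only the common case, and its names are hypothetical.)
 */
#include <stdbool.h>
#include <stdio.h>

struct file_state {
        int  usecount;                  /* counterpart of vrefcnt(vp) */
        bool sillyrenamed;              /* counterpart of n_sillyrename */
};

/* Returns true if the REMOVE RPC should be sent now. */
static bool remove_now(struct file_state *f)
{
        if (f->usecount == 1)
                return (true);          /* last user: remove for real */
        if (!f->sillyrenamed)
                f->sillyrenamed = true; /* rename to .nfsXXXX instead */
        return (false);                 /* defer removal to last close */
}

int main(void)
{
        struct file_state busy = { .usecount = 3, .sillyrenamed = false };
        struct file_state idle = { .usecount = 1, .sillyrenamed = false };

        printf("busy file: remove now? %d\n", remove_now(&busy));
        printf("idle file: remove now? %d\n", remove_now(&idle));
        return (0);
}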
 1755 
 1756 /*
 1757  * nfs file remove rpc called from nfs_inactive
 1758  */
 1759 int
 1760 nfs_removeit(struct sillyrename *sp)
 1761 {
 1762         /*
 1763          * Make sure that the directory vnode is still valid.
 1764          * XXX we should lock sp->s_dvp here.
 1765          */
 1766         if (sp->s_dvp->v_type == VBAD)
 1767                 return (0);
 1768         return (nfs_removerpc(sp->s_dvp, sp->s_name, sp->s_namlen, sp->s_cred,
 1769                 NULL));
 1770 }
 1771 
 1772 /*
 1773  * Nfs remove rpc, called from nfs_remove() and nfs_removeit().
 1774  */
 1775 static int
 1776 nfs_removerpc(struct vnode *dvp, const char *name, int namelen,
 1777     struct ucred *cred, struct thread *td)
 1778 {
 1779         caddr_t bpos, dpos;
 1780         int error = 0, wccflag = NFSV3_WCCRATTR;
 1781         struct mbuf *mreq, *mrep, *md, *mb;
 1782         int v3 = NFS_ISV3(dvp);
 1783 
 1784         nfsstats.rpccnt[NFSPROC_REMOVE]++;
 1785         mreq = nfsm_reqhead(dvp, NFSPROC_REMOVE,
 1786                 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(namelen));
 1787         mb = mreq;
 1788         bpos = mtod(mb, caddr_t);
 1789         nfsm_fhtom(dvp, v3);
 1790         nfsm_strtom(name, namelen, NFS_MAXNAMLEN);
 1791         nfsm_request(dvp, NFSPROC_REMOVE, td, cred);
 1792         if (v3)
 1793                 nfsm_wcc_data(dvp, wccflag);
 1794         m_freem(mrep);
 1795 nfsmout:
 1796         mtx_lock(&(VTONFS(dvp))->n_mtx);
 1797         VTONFS(dvp)->n_flag |= NMODIFIED;
 1798         if (!wccflag) {
 1799                 VTONFS(dvp)->n_attrstamp = 0;
 1800                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
 1801         }
 1802         mtx_unlock(&(VTONFS(dvp))->n_mtx);
 1803         return (error);
 1804 }
 1805 
 1806 /*
 1807  * nfs file rename call
 1808  */
 1809 static int
 1810 nfs_rename(struct vop_rename_args *ap)
 1811 {
 1812         struct vnode *fvp = ap->a_fvp;
 1813         struct vnode *tvp = ap->a_tvp;
 1814         struct vnode *fdvp = ap->a_fdvp;
 1815         struct vnode *tdvp = ap->a_tdvp;
 1816         struct componentname *tcnp = ap->a_tcnp;
 1817         struct componentname *fcnp = ap->a_fcnp;
 1818         int error;
 1819 
 1820         KASSERT((tcnp->cn_flags & HASBUF) != 0 &&
 1821             (fcnp->cn_flags & HASBUF) != 0, ("nfs_rename: no name"));
 1822         /* Check for cross-device rename */
 1823         if ((fvp->v_mount != tdvp->v_mount) ||
 1824             (tvp && (fvp->v_mount != tvp->v_mount))) {
 1825                 error = EXDEV;
 1826                 goto out;
 1827         }
 1828 
 1829         if (fvp == tvp) {
 1830                 nfs_printf("nfs_rename: fvp == tvp (can't happen)\n");
 1831                 error = 0;
 1832                 goto out;
 1833         }
 1834         if ((error = vn_lock(fvp, LK_EXCLUSIVE)) != 0)
 1835                 goto out;
 1836 
 1837         /*
 1838          * We have to flush B_DELWRI data prior to renaming
 1839          * the file.  If we don't, the delayed-write buffers
 1840          * can be flushed out later after the file has gone stale
 1841          * under NFSV3.  NFSV2 does not have this problem because
 1842                  * (as far as I can tell) it flushes dirty buffers more
 1843          * often.
 1844          * 
 1845          * Skip the rename operation if the fsync fails, this can happen
 1846          * due to the server's volume being full, when we pushed out data
 1847          * that was written back to our cache earlier. Not checking for
 1848          * this condition can result in potential (silent) data loss.
 1849          */
 1850         error = VOP_FSYNC(fvp, MNT_WAIT, fcnp->cn_thread);
 1851         VOP_UNLOCK(fvp, 0);
 1852         if (!error && tvp)
 1853                 error = VOP_FSYNC(tvp, MNT_WAIT, tcnp->cn_thread);
 1854         if (error)
 1855                 goto out;
 1856 
 1857         /*
 1858          * If the tvp exists and is in use, sillyrename it before doing the
 1859          * rename of the new file over it.
 1860          * XXX Can't sillyrename a directory.
 1861          */
 1862         if (tvp && vrefcnt(tvp) > 1 && !VTONFS(tvp)->n_sillyrename &&
 1863                 tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp)) {
 1864                 vput(tvp);
 1865                 tvp = NULL;
 1866         }
 1867 
 1868         error = nfs_renamerpc(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen,
 1869                 tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred,
 1870                 tcnp->cn_thread);
 1871 
 1872         if (fvp->v_type == VDIR) {
 1873                 if (tvp != NULL && tvp->v_type == VDIR)
 1874                         cache_purge(tdvp);
 1875                 cache_purge(fdvp);
 1876         }
 1877 
 1878 out:
 1879         if (tdvp == tvp)
 1880                 vrele(tdvp);
 1881         else
 1882                 vput(tdvp);
 1883         if (tvp)
 1884                 vput(tvp);
 1885         vrele(fdvp);
 1886         vrele(fvp);
 1887         /*
 1888          * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
 1889          */
 1890         if (error == ENOENT)
 1891                 error = 0;
 1892         return (error);
 1893 }
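
/*
 * Editor's sketch (not kernel code): nfs_remove(), nfs_rename() and
 * nfs_rmdir() all map ENOENT to success because those RPCs are not
 * idempotent: if the first reply is lost, the retransmission reports
 * ENOENT even though the operation already succeeded.
 * send_rpc_with_retry() below is a hypothetical stand-in for that
 * situation.
 */
#include <errno.h>
#include <stdio.h>

/* Pretend transport: the first reply was lost, the retry sees ENOENT. */
static int send_rpc_with_retry(void)
{
        return (ENOENT);
}

int main(void)
{
        int error = send_rpc_with_retry();

        if (error == ENOENT)            /* assume it was our own retry */
                error = 0;
        printf("error=%d\n", error);
        return (0);
}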
 1894 
 1895 /*
 1896  * nfs file rename rpc called from nfs_sillyrename()
 1897  */
 1898 static int
 1899 nfs_renameit(struct vnode *sdvp, struct componentname *scnp,
 1900     struct sillyrename *sp)
 1901 {
 1902 
 1903         return (nfs_renamerpc(sdvp, scnp->cn_nameptr, scnp->cn_namelen, sdvp,
 1904             sp->s_name, sp->s_namlen, scnp->cn_cred, scnp->cn_thread));
 1905 }
 1906 
 1907 /*
 1908  * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit().
 1909  */
 1910 static int
 1911 nfs_renamerpc(struct vnode *fdvp, const char *fnameptr, int fnamelen,
 1912     struct vnode *tdvp, const char *tnameptr, int tnamelen, struct ucred *cred,
 1913     struct thread *td)
 1914 {
 1915         caddr_t bpos, dpos;
 1916         int error = 0, fwccflag = NFSV3_WCCRATTR, twccflag = NFSV3_WCCRATTR;
 1917         struct mbuf *mreq, *mrep, *md, *mb;
 1918         int v3 = NFS_ISV3(fdvp);
 1919 
 1920         nfsstats.rpccnt[NFSPROC_RENAME]++;
 1921         mreq = nfsm_reqhead(fdvp, NFSPROC_RENAME,
 1922                 (NFSX_FH(v3) + NFSX_UNSIGNED)*2 + nfsm_rndup(fnamelen) +
 1923                 nfsm_rndup(tnamelen));
 1924         mb = mreq;
 1925         bpos = mtod(mb, caddr_t);
 1926         nfsm_fhtom(fdvp, v3);
 1927         nfsm_strtom(fnameptr, fnamelen, NFS_MAXNAMLEN);
 1928         nfsm_fhtom(tdvp, v3);
 1929         nfsm_strtom(tnameptr, tnamelen, NFS_MAXNAMLEN);
 1930         nfsm_request(fdvp, NFSPROC_RENAME, td, cred);
 1931         if (v3) {
 1932                 nfsm_wcc_data(fdvp, fwccflag);
 1933                 nfsm_wcc_data(tdvp, twccflag);
 1934         }
 1935         m_freem(mrep);
 1936 nfsmout:
 1937         mtx_lock(&(VTONFS(fdvp))->n_mtx);
 1938         VTONFS(fdvp)->n_flag |= NMODIFIED;
 1939         mtx_unlock(&(VTONFS(fdvp))->n_mtx);
 1940         mtx_lock(&(VTONFS(tdvp))->n_mtx);
 1941         VTONFS(tdvp)->n_flag |= NMODIFIED;
 1942         mtx_unlock(&(VTONFS(tdvp))->n_mtx);
 1943         if (!fwccflag) {
 1944                 VTONFS(fdvp)->n_attrstamp = 0;
 1945                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(fdvp);
 1946         }
 1947         if (!twccflag) {
 1948                 VTONFS(tdvp)->n_attrstamp = 0;
 1949                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(tdvp);
 1950         }
 1951         return (error);
 1952 }
 1953 
 1954 /*
 1955  * nfs hard link create call
 1956  */
 1957 static int
 1958 nfs_link(struct vop_link_args *ap)
 1959 {
 1960         struct vnode *vp = ap->a_vp;
 1961         struct vnode *tdvp = ap->a_tdvp;
 1962         struct componentname *cnp = ap->a_cnp;
 1963         caddr_t bpos, dpos;
 1964         int error = 0, wccflag = NFSV3_WCCRATTR, attrflag = 0;
 1965         struct mbuf *mreq, *mrep, *md, *mb;
 1966         int v3;
 1967 
 1968         if (vp->v_mount != tdvp->v_mount) {
 1969                 return (EXDEV);
 1970         }
 1971 
 1972         /*
 1973          * Push all writes to the server, so that the attribute cache
 1974          * doesn't get "out of sync" with the server.
 1975          * XXX There should be a better way!
 1976          */
 1977         VOP_FSYNC(vp, MNT_WAIT, cnp->cn_thread);
 1978 
 1979         v3 = NFS_ISV3(vp);
 1980         nfsstats.rpccnt[NFSPROC_LINK]++;
 1981         mreq = nfsm_reqhead(vp, NFSPROC_LINK,
 1982                 NFSX_FH(v3)*2 + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
 1983         mb = mreq;
 1984         bpos = mtod(mb, caddr_t);
 1985         nfsm_fhtom(vp, v3);
 1986         nfsm_fhtom(tdvp, v3);
 1987         nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
 1988         nfsm_request(vp, NFSPROC_LINK, cnp->cn_thread, cnp->cn_cred);
 1989         if (v3) {
 1990                 nfsm_postop_attr(vp, attrflag);
 1991                 nfsm_wcc_data(tdvp, wccflag);
 1992         }
 1993         m_freem(mrep);
 1994 nfsmout:
 1995         mtx_lock(&(VTONFS(tdvp))->n_mtx);
 1996         VTONFS(tdvp)->n_flag |= NMODIFIED;
 1997         mtx_unlock(&(VTONFS(tdvp))->n_mtx);
 1998         if (!attrflag) {
 1999                 VTONFS(vp)->n_attrstamp = 0;
 2000                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
 2001         }
 2002         if (!wccflag) {
 2003                 VTONFS(tdvp)->n_attrstamp = 0;
 2004                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(tdvp);
 2005         }
 2006         return (error);
 2007 }
 2008 
 2009 /*
 2010  * nfs symbolic link create call
 2011  */
 2012 static int
 2013 nfs_symlink(struct vop_symlink_args *ap)
 2014 {
 2015         struct vnode *dvp = ap->a_dvp;
 2016         struct vattr *vap = ap->a_vap;
 2017         struct componentname *cnp = ap->a_cnp;
 2018         struct nfsv2_sattr *sp;
 2019         caddr_t bpos, dpos;
 2020         int slen, error = 0, wccflag = NFSV3_WCCRATTR, gotvp;
 2021         struct mbuf *mreq, *mrep, *md, *mb;
 2022         struct vnode *newvp = NULL;
 2023         int v3 = NFS_ISV3(dvp);
 2024 
 2025         nfsstats.rpccnt[NFSPROC_SYMLINK]++;
 2026         slen = strlen(ap->a_target);
 2027         mreq = nfsm_reqhead(dvp, NFSPROC_SYMLINK, NFSX_FH(v3) + 2*NFSX_UNSIGNED +
 2028             nfsm_rndup(cnp->cn_namelen) + nfsm_rndup(slen) + NFSX_SATTR(v3));
 2029         mb = mreq;
 2030         bpos = mtod(mb, caddr_t);
 2031         nfsm_fhtom(dvp, v3);
 2032         nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
 2033         if (v3) {
 2034                 nfsm_v3attrbuild(vap, FALSE);
 2035         }
 2036         nfsm_strtom(ap->a_target, slen, NFS_MAXPATHLEN);
 2037         if (!v3) {
 2038                 sp = nfsm_build(struct nfsv2_sattr *, NFSX_V2SATTR);
 2039                 sp->sa_mode = vtonfsv2_mode(VLNK, vap->va_mode);
 2040                 sp->sa_uid = nfs_xdrneg1;
 2041                 sp->sa_gid = nfs_xdrneg1;
 2042                 sp->sa_size = nfs_xdrneg1;
 2043                 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
 2044                 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
 2045         }
 2046 
 2047         /*
 2048          * Issue the NFS request and get the rpc response.
 2049          *
 2050          * Only NFSv3 responses returning an error of 0 actually return
 2051          * a file handle that can be converted into newvp without having
 2052          * to do an extra lookup rpc.
 2053          */
 2054         nfsm_request(dvp, NFSPROC_SYMLINK, cnp->cn_thread, cnp->cn_cred);
 2055         if (v3) {
 2056                 if (error == 0)
 2057                         nfsm_mtofh(dvp, newvp, v3, gotvp);
 2058                 nfsm_wcc_data(dvp, wccflag);
 2059         }
 2060 
 2061         /*
 2062          * Error exits in the nfsm_* macros above jump to nfsmout below; mrep is freed on those paths as well.
 2063          */
 2064 
 2065         m_freem(mrep);
 2066 nfsmout:
 2067 
 2068         /*
 2069          * If we do not have an error and we could not extract the newvp from
 2070          * the response due to the request being NFSv2, we have to do a
 2071          * lookup in order to obtain a newvp to return.
 2072          */
 2073         if (error == 0 && newvp == NULL) {
 2074                 struct nfsnode *np = NULL;
 2075 
 2076                 error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
 2077                     cnp->cn_cred, cnp->cn_thread, &np);
 2078                 if (!error)
 2079                         newvp = NFSTOV(np);
 2080         }
 2081         if (error) {
 2082                 if (newvp)
 2083                         vput(newvp);
 2084         } else {
 2085                 *ap->a_vpp = newvp;
 2086         }
 2087         mtx_lock(&(VTONFS(dvp))->n_mtx);
 2088         VTONFS(dvp)->n_flag |= NMODIFIED;
 2089         mtx_unlock(&(VTONFS(dvp))->n_mtx);
 2090         if (!wccflag) {
 2091                 VTONFS(dvp)->n_attrstamp = 0;
 2092                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
 2093         }
 2094         return (error);
 2095 }
 2096 
 2097 /*
 2098  * nfs make dir call
 2099  */
 2100 static int
 2101 nfs_mkdir(struct vop_mkdir_args *ap)
 2102 {
 2103         struct vnode *dvp = ap->a_dvp;
 2104         struct vattr *vap = ap->a_vap;
 2105         struct componentname *cnp = ap->a_cnp;
 2106         struct nfsv2_sattr *sp;
 2107         int len;
 2108         struct nfsnode *np = NULL;
 2109         struct vnode *newvp = NULL;
 2110         caddr_t bpos, dpos;
 2111         int error = 0, wccflag = NFSV3_WCCRATTR;
 2112         int gotvp = 0;
 2113         struct mbuf *mreq, *mrep, *md, *mb;
 2114         struct vattr vattr;
 2115         int v3 = NFS_ISV3(dvp);
 2116 
 2117         if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred)) != 0)
 2118                 return (error);
 2119         len = cnp->cn_namelen;
 2120         nfsstats.rpccnt[NFSPROC_MKDIR]++;
 2121         mreq = nfsm_reqhead(dvp, NFSPROC_MKDIR,
 2122           NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len) + NFSX_SATTR(v3));
 2123         mb = mreq;
 2124         bpos = mtod(mb, caddr_t);
 2125         nfsm_fhtom(dvp, v3);
 2126         nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
 2127         if (v3) {
 2128                 nfsm_v3attrbuild(vap, FALSE);
 2129         } else {
 2130                 sp = nfsm_build(struct nfsv2_sattr *, NFSX_V2SATTR);
 2131                 sp->sa_mode = vtonfsv2_mode(VDIR, vap->va_mode);
 2132                 sp->sa_uid = nfs_xdrneg1;
 2133                 sp->sa_gid = nfs_xdrneg1;
 2134                 sp->sa_size = nfs_xdrneg1;
 2135                 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
 2136                 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
 2137         }
 2138         nfsm_request(dvp, NFSPROC_MKDIR, cnp->cn_thread, cnp->cn_cred);
 2139         if (!error)
 2140                 nfsm_mtofh(dvp, newvp, v3, gotvp);
 2141         if (v3)
 2142                 nfsm_wcc_data(dvp, wccflag);
 2143         m_freem(mrep);
 2144 nfsmout:
 2145         mtx_lock(&(VTONFS(dvp))->n_mtx);
 2146         VTONFS(dvp)->n_flag |= NMODIFIED;
 2147         mtx_unlock(&(VTONFS(dvp))->n_mtx);
 2148         if (!wccflag) {
 2149                 VTONFS(dvp)->n_attrstamp = 0;
 2150                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
 2151         }
 2152         if (error == 0 && newvp == NULL) {
 2153                 error = nfs_lookitup(dvp, cnp->cn_nameptr, len, cnp->cn_cred,
 2154                         cnp->cn_thread, &np);
 2155                 if (!error) {
 2156                         newvp = NFSTOV(np);
 2157                         if (newvp->v_type != VDIR)
 2158                                 error = EEXIST;
 2159                 }
 2160         }
 2161         if (error) {
 2162                 if (newvp)
 2163                         vput(newvp);
 2164         } else
 2165                 *ap->a_vpp = newvp;
 2166         return (error);
 2167 }
 2168 
 2169 /*
 2170  * nfs remove directory call
 2171  */
 2172 static int
 2173 nfs_rmdir(struct vop_rmdir_args *ap)
 2174 {
 2175         struct vnode *vp = ap->a_vp;
 2176         struct vnode *dvp = ap->a_dvp;
 2177         struct componentname *cnp = ap->a_cnp;
 2178         caddr_t bpos, dpos;
 2179         int error = 0, wccflag = NFSV3_WCCRATTR;
 2180         struct mbuf *mreq, *mrep, *md, *mb;
 2181         int v3 = NFS_ISV3(dvp);
 2182 
 2183         if (dvp == vp)
 2184                 return (EINVAL);
 2185         nfsstats.rpccnt[NFSPROC_RMDIR]++;
 2186         mreq = nfsm_reqhead(dvp, NFSPROC_RMDIR,
 2187                 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
 2188         mb = mreq;
 2189         bpos = mtod(mb, caddr_t);
 2190         nfsm_fhtom(dvp, v3);
 2191         nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
 2192         nfsm_request(dvp, NFSPROC_RMDIR, cnp->cn_thread, cnp->cn_cred);
 2193         if (v3)
 2194                 nfsm_wcc_data(dvp, wccflag);
 2195         m_freem(mrep);
 2196 nfsmout:
 2197         mtx_lock(&(VTONFS(dvp))->n_mtx);
 2198         VTONFS(dvp)->n_flag |= NMODIFIED;
 2199         mtx_unlock(&(VTONFS(dvp))->n_mtx);
 2200         if (!wccflag) {
 2201                 VTONFS(dvp)->n_attrstamp = 0;
 2202                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
 2203         }
 2204         cache_purge(dvp);
 2205         cache_purge(vp);
 2206         /*
 2207          * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
 2208          */
 2209         if (error == ENOENT)
 2210                 error = 0;
 2211         return (error);
 2212 }
 2213 
 2214 /*
 2215  * nfs readdir call
 2216  */
 2217 static int
 2218 nfs_readdir(struct vop_readdir_args *ap)
 2219 {
 2220         struct vnode *vp = ap->a_vp;
 2221         struct nfsnode *np = VTONFS(vp);
 2222         struct uio *uio = ap->a_uio;
 2223         int tresid, error = 0;
 2224         struct vattr vattr;
 2225         
 2226         if (vp->v_type != VDIR) 
 2227                 return(EPERM);
 2228 
 2229         /*
 2230          * First, check for hit on the EOF offset cache
 2231          */
 2232         if (np->n_direofoffset > 0 && uio->uio_offset >= np->n_direofoffset &&
 2233             (np->n_flag & NMODIFIED) == 0) {
 2234                 if (VOP_GETATTR(vp, &vattr, ap->a_cred) == 0) {
 2235                         mtx_lock(&np->n_mtx);
 2236                         if (!NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime)) {
 2237                                 mtx_unlock(&np->n_mtx);
 2238                                 nfsstats.direofcache_hits++;
 2239                                 goto out;
 2240                         } else
 2241                                 mtx_unlock(&np->n_mtx);
 2242                 }
 2243         }
 2244 
 2245         /*
 2246          * Call nfs_bioread() to do the real work.
 2247          */
 2248         tresid = uio->uio_resid;
 2249         error = nfs_bioread(vp, uio, 0, ap->a_cred);
 2250 
 2251         if (!error && uio->uio_resid == tresid) {
 2252                 nfsstats.direofcache_misses++;
 2253         }
 2254 out:
 2255         return (error);
 2256 }
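
/*
 * Editor's sketch (not kernel code): nfs_readdir() above answers a read
 * that starts at or beyond the cached end-of-directory offset locally,
 * provided the directory has no local modifications and its cached mtime
 * still matches the server's.  The standalone version below uses
 * hypothetical names (dir_state, eof_cache_hit).
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct dir_state {
        long            eof_offset;     /* counterpart of n_direofoffset */
        bool            modified;       /* counterpart of NMODIFIED */
        struct timespec mtime;          /* mtime cached at the last read */
};

static bool ts_equal(const struct timespec *a, const struct timespec *b)
{
        return (a->tv_sec == b->tv_sec && a->tv_nsec == b->tv_nsec);
}

/* True if the readdir at 'offset' can be satisfied without an RPC. */
static bool eof_cache_hit(const struct dir_state *d, long offset,
    const struct timespec *cur_mtime)
{
        return (d->eof_offset > 0 && offset >= d->eof_offset &&
            !d->modified && ts_equal(&d->mtime, cur_mtime));
}

int main(void)
{
        struct dir_state d = { .eof_offset = 512, .modified = false,
            .mtime = { .tv_sec = 10, .tv_nsec = 0 } };
        struct timespec now = { .tv_sec = 10, .tv_nsec = 0 };

        printf("hit at 512: %d\n", eof_cache_hit(&d, 512, &now));
        printf("hit at 0:   %d\n", eof_cache_hit(&d, 0, &now));
        return (0);
}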
 2257 
 2258 /*
 2259  * Readdir rpc call.
 2260  * Called from below the buffer cache by nfs_doio().
 2261  */
 2262 int
 2263 nfs_readdirrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred)
 2264 {
 2265         int len, left;
 2266         struct dirent *dp = NULL;
 2267         u_int32_t *tl;
 2268         caddr_t cp;
 2269         nfsuint64 *cookiep;
 2270         caddr_t bpos, dpos;
 2271         struct mbuf *mreq, *mrep, *md, *mb;
 2272         nfsuint64 cookie;
 2273         struct nfsmount *nmp = VFSTONFS(vp->v_mount);
 2274         struct nfsnode *dnp = VTONFS(vp);
 2275         u_quad_t fileno;
 2276         int error = 0, tlen, more_dirs = 1, blksiz = 0, bigenough = 1;
 2277         int attrflag;
 2278         int v3 = NFS_ISV3(vp);
 2279 
 2280         KASSERT(uiop->uio_iovcnt == 1 &&
 2281             (uiop->uio_offset & (DIRBLKSIZ - 1)) == 0 &&
 2282             (uiop->uio_resid & (DIRBLKSIZ - 1)) == 0,
 2283             ("nfs readdirrpc bad uio"));
 2284 
 2285         /*
 2286          * If there is no cookie, assume directory was stale.
 2287          */
 2288         nfs_dircookie_lock(dnp);
 2289         cookiep = nfs_getcookie(dnp, uiop->uio_offset, 0);
 2290         if (cookiep) {
 2291                 cookie = *cookiep;
 2292                 nfs_dircookie_unlock(dnp);
 2293         } else {
 2294                 nfs_dircookie_unlock(dnp);              
 2295                 return (NFSERR_BAD_COOKIE);
 2296         }
 2297 
 2298         /*
 2299          * Loop around doing readdir rpc's of size nm_readdirsize
 2300          * truncated to a multiple of DIRBLKSIZ.
 2301          * The stopping criterion is EOF or a full buffer.
 2302          */
 2303         while (more_dirs && bigenough) {
 2304                 nfsstats.rpccnt[NFSPROC_READDIR]++;
 2305                 mreq = nfsm_reqhead(vp, NFSPROC_READDIR, NFSX_FH(v3) +
 2306                         NFSX_READDIR(v3));
 2307                 mb = mreq;
 2308                 bpos = mtod(mb, caddr_t);
 2309                 nfsm_fhtom(vp, v3);
 2310                 if (v3) {
 2311                         tl = nfsm_build(u_int32_t *, 5 * NFSX_UNSIGNED);
 2312                         *tl++ = cookie.nfsuquad[0];
 2313                         *tl++ = cookie.nfsuquad[1];
 2314                         mtx_lock(&dnp->n_mtx);
 2315                         *tl++ = dnp->n_cookieverf.nfsuquad[0];
 2316                         *tl++ = dnp->n_cookieverf.nfsuquad[1];
 2317                         mtx_unlock(&dnp->n_mtx);
 2318                 } else {
 2319                         tl = nfsm_build(u_int32_t *, 2 * NFSX_UNSIGNED);
 2320                         *tl++ = cookie.nfsuquad[0];
 2321                 }
 2322                 *tl = txdr_unsigned(nmp->nm_readdirsize);
 2323                 nfsm_request(vp, NFSPROC_READDIR, uiop->uio_td, cred);
 2324                 if (v3) {
 2325                         nfsm_postop_attr(vp, attrflag);
 2326                         if (!error) {
 2327                                 tl = nfsm_dissect(u_int32_t *,
 2328                                     2 * NFSX_UNSIGNED);
 2329                                 mtx_lock(&dnp->n_mtx);
 2330                                 dnp->n_cookieverf.nfsuquad[0] = *tl++;
 2331                                 dnp->n_cookieverf.nfsuquad[1] = *tl;
 2332                                 mtx_unlock(&dnp->n_mtx);
 2333                         } else {
 2334                                 m_freem(mrep);
 2335                                 goto nfsmout;
 2336                         }
 2337                 }
 2338                 tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
 2339                 more_dirs = fxdr_unsigned(int, *tl);
 2340 
 2341                 /* loop thru the dir entries, doctoring them to 4bsd form */
 2342                 while (more_dirs && bigenough) {
 2343                         if (v3) {
 2344                                 tl = nfsm_dissect(u_int32_t *,
 2345                                     3 * NFSX_UNSIGNED);
 2346                                 fileno = fxdr_hyper(tl);
 2347                                 len = fxdr_unsigned(int, *(tl + 2));
 2348                         } else {
 2349                                 tl = nfsm_dissect(u_int32_t *,
 2350                                     2 * NFSX_UNSIGNED);
 2351                                 fileno = fxdr_unsigned(u_quad_t, *tl++);
 2352                                 len = fxdr_unsigned(int, *tl);
 2353                         }
 2354                         if (len <= 0 || len > NFS_MAXNAMLEN) {
 2355                                 error = EBADRPC;
 2356                                 m_freem(mrep);
 2357                                 goto nfsmout;
 2358                         }
 2359                         tlen = nfsm_rndup(len);
 2360                         if (tlen == len)
 2361                                 tlen += 4;      /* To ensure null termination */
 2362                         left = DIRBLKSIZ - blksiz;
 2363                         if ((tlen + DIRHDSIZ) > left) {
 2364                                 dp->d_reclen += left;
 2365                                 uiop->uio_iov->iov_base =
 2366                                     (char *)uiop->uio_iov->iov_base + left;
 2367                                 uiop->uio_iov->iov_len -= left;
 2368                                 uiop->uio_offset += left;
 2369                                 uiop->uio_resid -= left;
 2370                                 blksiz = 0;
 2371                         }
 2372                         if ((tlen + DIRHDSIZ) > uiop->uio_resid)
 2373                                 bigenough = 0;
 2374                         if (bigenough) {
 2375                                 dp = (struct dirent *)uiop->uio_iov->iov_base;
 2376                                 dp->d_fileno = (int)fileno;
 2377                                 dp->d_namlen = len;
 2378                                 dp->d_reclen = tlen + DIRHDSIZ;
 2379                                 dp->d_type = DT_UNKNOWN;
 2380                                 blksiz += dp->d_reclen;
 2381                                 if (blksiz == DIRBLKSIZ)
 2382                                         blksiz = 0;
 2383                                 uiop->uio_offset += DIRHDSIZ;
 2384                                 uiop->uio_resid -= DIRHDSIZ;
 2385                                 uiop->uio_iov->iov_base =
 2386                                     (char *)uiop->uio_iov->iov_base + DIRHDSIZ;
 2387                                 uiop->uio_iov->iov_len -= DIRHDSIZ;
 2388                                 nfsm_mtouio(uiop, len);
 2389                                 cp = uiop->uio_iov->iov_base;
 2390                                 tlen -= len;
 2391                                 *cp = '\0';     /* null terminate */
 2392                                 uiop->uio_iov->iov_base =
 2393                                     (char *)uiop->uio_iov->iov_base + tlen;
 2394                                 uiop->uio_iov->iov_len -= tlen;
 2395                                 uiop->uio_offset += tlen;
 2396                                 uiop->uio_resid -= tlen;
 2397                         } else
 2398                                 nfsm_adv(nfsm_rndup(len));
 2399                         if (v3) {
 2400                                 tl = nfsm_dissect(u_int32_t *,
 2401                                     3 * NFSX_UNSIGNED);
 2402                         } else {
 2403                                 tl = nfsm_dissect(u_int32_t *,
 2404                                     2 * NFSX_UNSIGNED);
 2405                         }
 2406                         if (bigenough) {
 2407                                 cookie.nfsuquad[0] = *tl++;
 2408                                 if (v3)
 2409                                         cookie.nfsuquad[1] = *tl++;
 2410                         } else if (v3)
 2411                                 tl += 2;
 2412                         else
 2413                                 tl++;
 2414                         more_dirs = fxdr_unsigned(int, *tl);
 2415                 }
 2416                 /*
 2417                  * If at end of rpc data, get the eof boolean
 2418                  */
 2419                 if (!more_dirs) {
 2420                         tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
 2421                         more_dirs = (fxdr_unsigned(int, *tl) == 0);
 2422                 }
 2423                 m_freem(mrep);
 2424         }
 2425         /*
 2426          * Fill the last record, if any, out to a multiple of DIRBLKSIZ
 2427          * by increasing d_reclen for the last record.
 2428          */
 2429         if (blksiz > 0) {
 2430                 left = DIRBLKSIZ - blksiz;
 2431                 dp->d_reclen += left;
 2432                 uiop->uio_iov->iov_base =
 2433                     (char *)uiop->uio_iov->iov_base + left;
 2434                 uiop->uio_iov->iov_len -= left;
 2435                 uiop->uio_offset += left;
 2436                 uiop->uio_resid -= left;
 2437         }
 2438 
 2439         /*
 2440          * We are now either at the end of the directory or have filled the
 2441          * block.
 2442          */
 2443         if (bigenough)
 2444                 dnp->n_direofoffset = uiop->uio_offset;
 2445         else {
 2446                 if (uiop->uio_resid > 0)
 2447                         nfs_printf("EEK! readdirrpc resid > 0\n");
 2448                 nfs_dircookie_lock(dnp);
 2449                 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 1);
 2450                 *cookiep = cookie;
 2451                 nfs_dircookie_unlock(dnp);
 2452         }
 2453 nfsmout:
 2454         return (error);
 2455 }
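
/*
 * Editor's sketch (not kernel code): nfs_readdirrpc() above packs
 * variable-length entries into fixed DIRBLKSIZ blocks.  When the next
 * entry would cross a block boundary, the previous entry's record length
 * absorbs the leftover bytes, and the final record is padded out the same
 * way, so every block is exactly DIRBLKSIZ long.  DIRHDSIZ and the name
 * lengths below are illustrative values.
 */
#include <stdio.h>

#define DIRBLKSIZ 512
#define DIRHDSIZ  12                    /* assumed fixed header size */

int main(void)
{
        int namelen[4] = { 100, 200, 180, 90 };
        int reclen[4];
        int blksiz = 0, prev = -1, total = 0;

        for (int i = 0; i < 4; i++) {
                int need = DIRHDSIZ + namelen[i];
                int left = DIRBLKSIZ - blksiz;

                if (need > left && prev >= 0) { /* pad the previous record */
                        reclen[prev] += left;
                        total += left;
                        blksiz = 0;
                }
                reclen[i] = need;
                blksiz += need;
                total += need;
                if (blksiz == DIRBLKSIZ)
                        blksiz = 0;
                prev = i;
        }
        if (blksiz > 0 && prev >= 0) {          /* pad the final block */
                reclen[prev] += DIRBLKSIZ - blksiz;
                total += DIRBLKSIZ - blksiz;
        }
        printf("last reclen=%d, total=%d bytes (%d blocks)\n",
            reclen[prev], total, total / DIRBLKSIZ);
        return (0);
}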
 2456 
 2457 /*
 2458  * NFS V3 readdir plus RPC. Used in place of nfs_readdirrpc().
 2459  */
 2460 int
 2461 nfs_readdirplusrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred)
 2462 {
 2463         int len, left;
 2464         struct dirent *dp;
 2465         u_int32_t *tl;
 2466         caddr_t cp;
 2467         struct vnode *newvp;
 2468         nfsuint64 *cookiep;
 2469         caddr_t bpos, dpos, dpossav1, dpossav2;
 2470         struct mbuf *mreq, *mrep, *md, *mb, *mdsav1, *mdsav2;
 2471         struct nameidata nami, *ndp = &nami;
 2472         struct componentname *cnp = &ndp->ni_cnd;
 2473         nfsuint64 cookie;
 2474         struct nfsmount *nmp = VFSTONFS(vp->v_mount);
 2475         struct nfsnode *dnp = VTONFS(vp), *np;
 2476         nfsfh_t *fhp;
 2477         u_quad_t fileno;
 2478         int error = 0, tlen, more_dirs = 1, blksiz = 0, doit, bigenough = 1, i;
 2479         int attrflag, fhsize;
 2480 
 2481 #ifndef nolint
 2482         dp = NULL;
 2483 #endif
 2484         KASSERT(uiop->uio_iovcnt == 1 &&
 2485             (uiop->uio_offset & (DIRBLKSIZ - 1)) == 0 &&
 2486             (uiop->uio_resid & (DIRBLKSIZ - 1)) == 0,
 2487             ("nfs readdirplusrpc bad uio"));
 2488         ndp->ni_dvp = vp;
 2489         newvp = NULLVP;
 2490 
 2491         /*
 2492          * If there is no cookie, assume directory was stale.
 2493          */
 2494         nfs_dircookie_lock(dnp);
 2495         cookiep = nfs_getcookie(dnp, uiop->uio_offset, 0);
 2496         if (cookiep) {
 2497                 cookie = *cookiep;
 2498                 nfs_dircookie_unlock(dnp);
 2499         } else {
 2500                 nfs_dircookie_unlock(dnp);
 2501                 return (NFSERR_BAD_COOKIE);
 2502         }
 2503         /*
 2504          * Loop around doing readdir rpc's of size nm_readdirsize
 2505          * truncated to a multiple of DIRBLKSIZ.
 2506          * The stopping criterion is EOF or a full buffer.
 2507          */
 2508         while (more_dirs && bigenough) {
 2509                 nfsstats.rpccnt[NFSPROC_READDIRPLUS]++;
 2510                 mreq = nfsm_reqhead(vp, NFSPROC_READDIRPLUS,
 2511                         NFSX_FH(1) + 6 * NFSX_UNSIGNED);
 2512                 mb = mreq;
 2513                 bpos = mtod(mb, caddr_t);
 2514                 nfsm_fhtom(vp, 1);
 2515                 tl = nfsm_build(u_int32_t *, 6 * NFSX_UNSIGNED);
 2516                 *tl++ = cookie.nfsuquad[0];
 2517                 *tl++ = cookie.nfsuquad[1];
 2518                 mtx_lock(&dnp->n_mtx);
 2519                 *tl++ = dnp->n_cookieverf.nfsuquad[0];
 2520                 *tl++ = dnp->n_cookieverf.nfsuquad[1];
 2521                 mtx_unlock(&dnp->n_mtx);
 2522                 *tl++ = txdr_unsigned(nmp->nm_readdirsize);
 2523                 *tl = txdr_unsigned(nmp->nm_rsize);
 2524                 nfsm_request(vp, NFSPROC_READDIRPLUS, uiop->uio_td, cred);
 2525                 nfsm_postop_attr(vp, attrflag);
 2526                 if (error) {
 2527                         m_freem(mrep);
 2528                         goto nfsmout;
 2529                 }
 2530                 tl = nfsm_dissect(u_int32_t *, 3 * NFSX_UNSIGNED);
 2531                 mtx_lock(&dnp->n_mtx);
 2532                 dnp->n_cookieverf.nfsuquad[0] = *tl++;
 2533                 dnp->n_cookieverf.nfsuquad[1] = *tl++;
 2534                 mtx_unlock(&dnp->n_mtx);
 2535                 more_dirs = fxdr_unsigned(int, *tl);
 2536 
 2537                 /* loop thru the dir entries, doctoring them to 4bsd form */
 2538                 while (more_dirs && bigenough) {
 2539                         tl = nfsm_dissect(u_int32_t *, 3 * NFSX_UNSIGNED);
 2540                         fileno = fxdr_hyper(tl);
 2541                         len = fxdr_unsigned(int, *(tl + 2));
 2542                         if (len <= 0 || len > NFS_MAXNAMLEN) {
 2543                                 error = EBADRPC;
 2544                                 m_freem(mrep);
 2545                                 goto nfsmout;
 2546                         }
 2547                         tlen = nfsm_rndup(len);
 2548                         if (tlen == len)
 2549                                 tlen += 4;      /* To ensure null termination*/
 2550                         left = DIRBLKSIZ - blksiz;
 2551                         if ((tlen + DIRHDSIZ) > left) {
 2552                                 dp->d_reclen += left;
 2553                                 uiop->uio_iov->iov_base =
 2554                                     (char *)uiop->uio_iov->iov_base + left;
 2555                                 uiop->uio_iov->iov_len -= left;
 2556                                 uiop->uio_offset += left;
 2557                                 uiop->uio_resid -= left;
 2558                                 blksiz = 0;
 2559                         }
 2560                         if ((tlen + DIRHDSIZ) > uiop->uio_resid)
 2561                                 bigenough = 0;
 2562                         if (bigenough) {
 2563                                 dp = (struct dirent *)uiop->uio_iov->iov_base;
 2564                                 dp->d_fileno = (int)fileno;
 2565                                 dp->d_namlen = len;
 2566                                 dp->d_reclen = tlen + DIRHDSIZ;
 2567                                 dp->d_type = DT_UNKNOWN;
 2568                                 blksiz += dp->d_reclen;
 2569                                 if (blksiz == DIRBLKSIZ)
 2570                                         blksiz = 0;
 2571                                 uiop->uio_offset += DIRHDSIZ;
 2572                                 uiop->uio_resid -= DIRHDSIZ;
 2573                                 uiop->uio_iov->iov_base =
 2574                                     (char *)uiop->uio_iov->iov_base + DIRHDSIZ;
 2575                                 uiop->uio_iov->iov_len -= DIRHDSIZ;
 2576                                 cnp->cn_nameptr = uiop->uio_iov->iov_base;
 2577                                 cnp->cn_namelen = len;
 2578                                 nfsm_mtouio(uiop, len);
 2579                                 cp = uiop->uio_iov->iov_base;
 2580                                 tlen -= len;
 2581                                 *cp = '\0';
 2582                                 uiop->uio_iov->iov_base =
 2583                                     (char *)uiop->uio_iov->iov_base + tlen;
 2584                                 uiop->uio_iov->iov_len -= tlen;
 2585                                 uiop->uio_offset += tlen;
 2586                                 uiop->uio_resid -= tlen;
 2587                         } else
 2588                                 nfsm_adv(nfsm_rndup(len));
 2589                         tl = nfsm_dissect(u_int32_t *, 3 * NFSX_UNSIGNED);
 2590                         if (bigenough) {
 2591                                 cookie.nfsuquad[0] = *tl++;
 2592                                 cookie.nfsuquad[1] = *tl++;
 2593                         } else
 2594                                 tl += 2;
 2595 
 2596                         /*
 2597                          * Since the attributes are before the file handle
 2598                          * (sigh), we must skip over the attributes and then
 2599                          * come back and get them.
 2600                          */
 2601                         attrflag = fxdr_unsigned(int, *tl);
 2602                         if (attrflag) {
 2603                             dpossav1 = dpos;
 2604                             mdsav1 = md;
 2605                             nfsm_adv(NFSX_V3FATTR);
 2606                             tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
 2607                             doit = fxdr_unsigned(int, *tl);
 2608                             /*
 2609                              * Skip loading the attrs for "..". There is a
 2610                              * potential deadlock between loading the attrs
 2611                              * here and lookups of the directory currently
 2612                              * being read (in the parent): we would acquire
 2613                              * the exclusive lock on ".." here while owning
 2614                              * the lock on the directory being read, whereas
 2615                              * lookup holds the lock on ".." and tries to
 2616                              * acquire the lock on the directory being read.
 2617                              * 
 2618                              * There are other ways of fixing this; one would
 2619                              * be to do a trylock on the ".." vnode and skip
 2620                              * loading the attrs on ".." if it happens to be 
 2621                              * locked by another process. But skipping the
 2622                              * attrload on ".." seems the easiest option.
 2623                              */
 2624                             if (strcmp(dp->d_name, "..") == 0) {
 2625                                     doit = 0;
 2626                                     /*
 2627                                      * We've already skipped over the attrs;
 2628                                      * skip over the filehandle too, and store
 2629                                      * d_type as VDIR.
 2630                                      */
 2631                                     tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
 2632                                     i = fxdr_unsigned(int, *tl);
 2633                                     nfsm_adv(nfsm_rndup(i));
 2634                                     dp->d_type = IFTODT(VTTOIF(VDIR));
 2635                             }       
 2636                             if (doit) {
 2637                                 nfsm_getfh(fhp, fhsize, 1);
 2638                                 if (NFS_CMPFH(dnp, fhp, fhsize)) {
 2639                                     VREF(vp);
 2640                                     newvp = vp;
 2641                                     np = dnp;
 2642                                 } else {
 2643                                     error = nfs_nget(vp->v_mount, fhp,
 2644                                         fhsize, &np, LK_EXCLUSIVE);
 2645                                     if (error)
 2646                                         doit = 0;
 2647                                     else
 2648                                         newvp = NFSTOV(np);
 2649                                 }
 2650                             }
 2651                             if (doit && bigenough) {
 2652                                 dpossav2 = dpos;
 2653                                 dpos = dpossav1;
 2654                                 mdsav2 = md;
 2655                                 md = mdsav1;
 2656                                 nfsm_loadattr(newvp, NULL);
 2657                                 dpos = dpossav2;
 2658                                 md = mdsav2;
 2659                                 dp->d_type =
 2660                                     IFTODT(VTTOIF(np->n_vattr.va_type));
 2661                                 ndp->ni_vp = newvp;
 2662                                 /*
 2663                                  * Update n_ctime so subsequent lookup
 2664                                  * doesn't purge entry.
 2665                                  */
 2666                                 np->n_ctime = np->n_vattr.va_ctime;
 2667                                 cache_enter(ndp->ni_dvp, ndp->ni_vp, cnp);
 2668                             }
 2669                         } else {
 2670                             /* Just skip over the file handle */
 2671                             tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
 2672                             i = fxdr_unsigned(int, *tl);
 2673                             if (i) {
 2674                                     tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
 2675                                     fhsize = fxdr_unsigned(int, *tl);
 2676                                     nfsm_adv(nfsm_rndup(fhsize));
 2677                             }
 2678                         }
 2679                         if (newvp != NULLVP) {
 2680                             if (newvp == vp)
 2681                                 vrele(newvp);
 2682                             else
 2683                                 vput(newvp);
 2684                             newvp = NULLVP;
 2685                         }
 2686                         tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
 2687                         more_dirs = fxdr_unsigned(int, *tl);
 2688                 }
 2689                 /*
 2690                  * If at end of rpc data, get the eof boolean
 2691                  */
 2692                 if (!more_dirs) {
 2693                         tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
 2694                         more_dirs = (fxdr_unsigned(int, *tl) == 0);
 2695                 }
 2696                 m_freem(mrep);
 2697         }
 2698         /*
 2699          * Fill the last record, if any, out to a multiple of DIRBLKSIZ
 2700          * by increasing d_reclen for the last record.
 2701          */
 2702         if (blksiz > 0) {
 2703                 left = DIRBLKSIZ - blksiz;
 2704                 dp->d_reclen += left;
 2705                 uiop->uio_iov->iov_base =
 2706                     (char *)uiop->uio_iov->iov_base + left;
 2707                 uiop->uio_iov->iov_len -= left;
 2708                 uiop->uio_offset += left;
 2709                 uiop->uio_resid -= left;
 2710         }
 2711 
 2712         /*
 2713          * We are now either at the end of the directory or have filled the
 2714          * block.
 2715          */
 2716         if (bigenough)
 2717                 dnp->n_direofoffset = uiop->uio_offset;
 2718         else {
 2719                 if (uiop->uio_resid > 0)
 2720                         nfs_printf("EEK! readdirplusrpc resid > 0\n");
 2721                 nfs_dircookie_lock(dnp);
 2722                 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 1);
 2723                 *cookiep = cookie;
 2724                 nfs_dircookie_unlock(dnp);
 2725         }
 2726 nfsmout:
 2727         if (newvp != NULLVP) {
 2728                 if (newvp == vp)
 2729                         vrele(newvp);
 2730                 else
 2731                         vput(newvp);
 2732                 newvp = NULLVP;
 2733         }
 2734         return (error);
 2735 }
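
/*
 * A minimal userland sketch of the record sizing used in the loop above:
 * the name length is rounded up to a 4-byte boundary (as nfsm_rndup() does),
 * an extra 4 bytes are added when the name is already aligned so that a NUL
 * terminator always fits, a fixed header size is added on top, and the last
 * record in a block is padded out to a DIRBLKSIZ multiple.  The EX_DIRHDSIZ
 * and EX_DIRBLKSIZ values below are assumptions for illustration only, not
 * the kernel's definitions.
 */
#include <stdio.h>

#define EX_DIRBLKSIZ    512     /* assumed directory block size */
#define EX_DIRHDSIZ     12      /* assumed fixed dirent header size */

/* Round up to a multiple of 4, like nfsm_rndup(). */
static int
ex_rndup(int len)
{
        return ((len + 3) & ~3);
}

/* d_reclen that the loop above would assign for a name of 'len' bytes. */
static int
ex_reclen(int len)
{
        int tlen = ex_rndup(len);

        if (tlen == len)
                tlen += 4;      /* to ensure null termination */
        return (tlen + EX_DIRHDSIZ);
}

/* Padding added to the final record so the block fills out to DIRBLKSIZ. */
static int
ex_blockpad(int blksiz)
{
        return (EX_DIRBLKSIZ - blksiz);
}

int
main(void)
{
        int len;

        for (len = 1; len <= 8; len++)
                printf("namelen %d -> d_reclen %d\n", len, ex_reclen(len));
        printf("pad last record by %d bytes\n", ex_blockpad(3 * ex_reclen(8)));
        return (0);
}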
 2736 
 2737 /*
 2738  * Silly rename. To make the stateless NFS filesystem look a little more
 2739  * like "ufs", a remove of an active vnode is translated to a rename to a
 2740  * funny looking filename that is removed by nfs_inactive on the nfsnode.
 2741  * There is the potential for another process on a different client to
 2742  * create the same funny name between the time nfs_lookitup() fails and
 2743  * nfs_rename() completes, but...
 2744  */
 2745 static int
 2746 nfs_sillyrename(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
 2747 {
 2748         struct sillyrename *sp;
 2749         struct nfsnode *np;
 2750         int error;
 2751         short pid;
 2752         unsigned int lticks;
 2753 
 2754         cache_purge(dvp);
 2755         np = VTONFS(vp);
 2756         KASSERT(vp->v_type != VDIR, ("nfs: sillyrename dir"));
 2757         sp = malloc(sizeof (struct sillyrename),
 2758                 M_NFSREQ, M_WAITOK);
 2759         sp->s_cred = crhold(cnp->cn_cred);
 2760         sp->s_dvp = dvp;
 2761         sp->s_removeit = nfs_removeit;
 2762         VREF(dvp);
 2763 
 2764         /* 
 2765          * Fudge together a funny name.
 2766          * Changing the format of the funny name to accommodate more
 2767          * sillynames per directory.
 2768          * The name is now changed to .nfs.<ticks>.<pid>.4, where ticks is 
 2769          * CPU ticks since boot.
 2770          */
 2771         pid = cnp->cn_thread->td_proc->p_pid;
 2772         lticks = (unsigned int)ticks;
 2773         for ( ; ; ) {
 2774                 sp->s_namlen = sprintf(sp->s_name, 
 2775                                        ".nfs.%08x.%04x4.4", lticks, 
 2776                                        pid);
 2777                 if (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
 2778                                  cnp->cn_thread, NULL))
 2779                         break;
 2780                 lticks++;
 2781         }
 2782         error = nfs_renameit(dvp, cnp, sp);
 2783         if (error)
 2784                 goto bad;
 2785         error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
 2786                 cnp->cn_thread, &np);
 2787         np->n_sillyrename = sp;
 2788         return (0);
 2789 bad:
 2790         vrele(sp->s_dvp);
 2791         crfree(sp->s_cred);
 2792         free((caddr_t)sp, M_NFSREQ);
 2793         return (error);
 2794 }
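
/*
 * A minimal userland sketch of the silly-name generation above, assuming
 * only libc: the candidate name is built from the tick count and the pid
 * using the same format string, and the tick value is bumped until a free
 * name is found.  ex_name_exists() is a hypothetical stand-in for the
 * nfs_lookitup() probe; it is not a kernel or library function.
 */
#include <stdbool.h>
#include <stdio.h>

static bool
ex_name_exists(const char *name)
{
        (void)name;
        return (false);         /* pretend the first candidate is unused */
}

static int
ex_pick_sillyname(char *buf, size_t bufsz, unsigned int lticks,
    unsigned int pid)
{
        int namlen;

        for (;;) {
                namlen = snprintf(buf, bufsz, ".nfs.%08x.%04x4.4",
                    lticks, pid);
                if (!ex_name_exists(buf))       /* like nfs_lookitup() failing */
                        return (namlen);
                lticks++;                       /* collision: try the next tick */
        }
}

int
main(void)
{
        char name[64];
        int n = ex_pick_sillyname(name, sizeof(name), 0x1234abcdU, 0x1f3);

        printf("%.*s\n", n, name);
        return (0);
}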
 2795 
 2796 /*
 2797  * Look up a file name and optionally either update the file handle or
 2798  * allocate an nfsnode, depending on the value of npp.
 2799  * npp == NULL  --> just do the lookup
 2800  * *npp == NULL --> allocate a new nfsnode and make sure attributes are
 2801  *                      handled too
 2802  * *npp != NULL --> update the file handle in the vnode
 2803  */
 2804 static int
 2805 nfs_lookitup(struct vnode *dvp, const char *name, int len, struct ucred *cred,
 2806     struct thread *td, struct nfsnode **npp)
 2807 {
 2808         struct vnode *newvp = NULL;
 2809         struct nfsnode *np, *dnp = VTONFS(dvp);
 2810         caddr_t bpos, dpos;
 2811         int error = 0, fhlen, attrflag;
 2812         struct mbuf *mreq, *mrep, *md, *mb;
 2813         nfsfh_t *nfhp;
 2814         int v3 = NFS_ISV3(dvp);
 2815 
 2816         nfsstats.rpccnt[NFSPROC_LOOKUP]++;
 2817         mreq = nfsm_reqhead(dvp, NFSPROC_LOOKUP,
 2818                 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
 2819         mb = mreq;
 2820         bpos = mtod(mb, caddr_t);
 2821         nfsm_fhtom(dvp, v3);
 2822         nfsm_strtom(name, len, NFS_MAXNAMLEN);
 2823         nfsm_request(dvp, NFSPROC_LOOKUP, td, cred);
 2824         if (npp && !error) {
 2825                 nfsm_getfh(nfhp, fhlen, v3);
 2826                 if (*npp) {
 2827                     np = *npp;
 2828                     if (np->n_fhsize > NFS_SMALLFH && fhlen <= NFS_SMALLFH) {
 2829                         free((caddr_t)np->n_fhp, M_NFSBIGFH);
 2830                         np->n_fhp = &np->n_fh;
 2831                     } else if (np->n_fhsize <= NFS_SMALLFH && fhlen>NFS_SMALLFH)
 2832                         np->n_fhp =(nfsfh_t *)malloc(fhlen, M_NFSBIGFH, M_WAITOK);
 2833                     bcopy((caddr_t)nfhp, (caddr_t)np->n_fhp, fhlen);
 2834                     np->n_fhsize = fhlen;
 2835                     newvp = NFSTOV(np);
 2836                 } else if (NFS_CMPFH(dnp, nfhp, fhlen)) {
 2837                     VREF(dvp);
 2838                     newvp = dvp;
 2839                 } else {
 2840                     error = nfs_nget(dvp->v_mount, nfhp, fhlen, &np, LK_EXCLUSIVE);
 2841                     if (error) {
 2842                         m_freem(mrep);
 2843                         return (error);
 2844                     }
 2845                     newvp = NFSTOV(np);
 2846                 }
 2847                 if (v3) {
 2848                         nfsm_postop_attr(newvp, attrflag);
 2849                         if (!attrflag && *npp == NULL) {
 2850                                 m_freem(mrep);
 2851                                 if (newvp == dvp)
 2852                                         vrele(newvp);
 2853                                 else
 2854                                         vput(newvp);
 2855                                 return (ENOENT);
 2856                         }
 2857                 } else
 2858                         nfsm_loadattr(newvp, NULL);
 2859         }
 2860         m_freem(mrep);
 2861 nfsmout:
 2862         if (npp && *npp == NULL) {
 2863                 if (error) {
 2864                         if (newvp) {
 2865                                 if (newvp == dvp)
 2866                                         vrele(newvp);
 2867                                 else
 2868                                         vput(newvp);
 2869                         }
 2870                 } else
 2871                         *npp = np;
 2872         }
 2873         return (error);
 2874 }
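
/*
 * A standalone sketch of the file-handle storage swap done above, with
 * generic names: a node keeps a small inline buffer and only uses a heap
 * allocation when a handle is larger than that.  EX_SMALLFH and the field
 * names are stand-ins for NFS_SMALLFH and the nfsnode fields, chosen for
 * illustration; it mirrors the branch structure above with simplified
 * error handling, and assumes n_fhp was initialized to point at n_fh.
 */
#include <stdlib.h>
#include <string.h>

#define EX_SMALLFH      64      /* assumed inline file-handle capacity */

struct ex_node {
        unsigned char    n_fh[EX_SMALLFH];      /* inline buffer */
        unsigned char   *n_fhp;                 /* points at n_fh or heap */
        int              n_fhsize;
};

int
ex_update_fh(struct ex_node *np, const unsigned char *fh, int fhlen)
{
        if (np->n_fhsize > EX_SMALLFH && fhlen <= EX_SMALLFH) {
                free(np->n_fhp);                /* shrink back to inline buffer */
                np->n_fhp = np->n_fh;
        } else if (np->n_fhsize <= EX_SMALLFH && fhlen > EX_SMALLFH) {
                np->n_fhp = malloc(fhlen);      /* grow onto the heap */
                if (np->n_fhp == NULL)
                        return (-1);
        }
        memcpy(np->n_fhp, fh, fhlen);
        np->n_fhsize = fhlen;
        return (0);
}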
 2875 
 2876 /*
 2877  * NFS Version 3 commit rpc
 2878  */
 2879 int
 2880 nfs_commit(struct vnode *vp, u_quad_t offset, int cnt, struct ucred *cred,
 2881            struct thread *td)
 2882 {
 2883         u_int32_t *tl;
 2884         struct nfsmount *nmp = VFSTONFS(vp->v_mount);
 2885         caddr_t bpos, dpos;
 2886         int error = 0, wccflag = NFSV3_WCCRATTR;
 2887         struct mbuf *mreq, *mrep, *md, *mb;
 2888 
 2889         mtx_lock(&nmp->nm_mtx);
 2890         if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0) {
 2891                 mtx_unlock(&nmp->nm_mtx);
 2892                 return (0);
 2893         }
 2894         mtx_unlock(&nmp->nm_mtx);
 2895         nfsstats.rpccnt[NFSPROC_COMMIT]++;
 2896         mreq = nfsm_reqhead(vp, NFSPROC_COMMIT, NFSX_FH(1));
 2897         mb = mreq;
 2898         bpos = mtod(mb, caddr_t);
 2899         nfsm_fhtom(vp, 1);
 2900         tl = nfsm_build(u_int32_t *, 3 * NFSX_UNSIGNED);
 2901         txdr_hyper(offset, tl);
 2902         tl += 2;
 2903         *tl = txdr_unsigned(cnt);
 2904         nfsm_request(vp, NFSPROC_COMMIT, td, cred);
 2905         nfsm_wcc_data(vp, wccflag);
 2906         if (!error) {
 2907                 tl = nfsm_dissect(u_int32_t *, NFSX_V3WRITEVERF);
 2908                 if (bcmp((caddr_t)nmp->nm_verf, (caddr_t)tl,
 2909                         NFSX_V3WRITEVERF)) {
 2910                         bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
 2911                                 NFSX_V3WRITEVERF);
 2912                         error = NFSERR_STALEWRITEVERF;
 2913                 }
 2914         }
 2915         m_freem(mrep);
 2916 nfsmout:
 2917         return (error);
 2918 }
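
/*
 * A standalone sketch of the verifier check in nfs_commit() above: the
 * 8-byte write verifier returned by the server is compared with the copy
 * cached for the mount; if it differs, the new value is recorded and the
 * caller is told that previously written, uncommitted data must be resent.
 * The size constant mirrors NFSX_V3WRITEVERF, but the function and type
 * names are illustrative, not the kernel API.
 */
#include <stdbool.h>
#include <string.h>

#define EX_WRITEVERFSZ  8       /* size of an NFSv3 write verifier */

/* Returns true when the verifier changed (i.e. NFSERR_STALEWRITEVERF). */
bool
ex_writeverf_stale(unsigned char cached[EX_WRITEVERFSZ],
    const unsigned char fresh[EX_WRITEVERFSZ])
{
        if (memcmp(cached, fresh, EX_WRITEVERFSZ) != 0) {
                memcpy(cached, fresh, EX_WRITEVERFSZ);
                return (true);
        }
        return (false);
}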
 2919 
 2920 /*
 2921  * Strategy routine.
 2922  * For async requests when nfsiod(s) are running, queue the request by
 2923  * calling nfs_asyncio(); otherwise just call nfs_doio() to do the
 2924  * request.
 2925  */
 2926 static int
 2927 nfs_strategy(struct vop_strategy_args *ap)
 2928 {
 2929         struct buf *bp = ap->a_bp;
 2930         struct ucred *cr;
 2931 
 2932         KASSERT(!(bp->b_flags & B_DONE),
 2933             ("nfs_strategy: buffer %p unexpectedly marked B_DONE", bp));
 2934         BUF_ASSERT_HELD(bp);
 2935 
 2936         if (bp->b_iocmd == BIO_READ)
 2937                 cr = bp->b_rcred;
 2938         else
 2939                 cr = bp->b_wcred;
 2940 
 2941         /*
 2942          * If the op is asynchronous and an i/o daemon is waiting,
 2943          * queue the request, wake the daemon up and wait for completion;
 2944          * otherwise just do it ourselves.
 2945          */
 2946         if ((bp->b_flags & B_ASYNC) == 0 ||
 2947             nfs_asyncio(VFSTONFS(ap->a_vp->v_mount), bp, NOCRED, curthread))
 2948                 (void)nfs_doio(ap->a_vp, bp, cr, curthread);
 2949         return (0);
 2950 }
 2951 
 2952 /*
 2953  * fsync vnode op. Just call nfs_flush() with commit == 1.
 2954  */
 2955 /* ARGSUSED */
 2956 static int
 2957 nfs_fsync(struct vop_fsync_args *ap)
 2958 {
 2959 
 2960         return (nfs_flush(ap->a_vp, ap->a_waitfor, 1));
 2961 }
 2962 
 2963 /*
 2964  * Flush all the blocks associated with a vnode.
 2965  *      Walk through the buffer pool and push any dirty pages
 2966  *      associated with the vnode.
 2967  */
 2968 static int
 2969 nfs_flush(struct vnode *vp, int waitfor, int commit)
 2970 {
 2971         struct nfsnode *np = VTONFS(vp);
 2972         struct buf *bp;
 2973         int i;
 2974         struct buf *nbp;
 2975         struct nfsmount *nmp = VFSTONFS(vp->v_mount);
 2976         int error = 0, slptimeo = 0, slpflag = 0, retv, bvecpos;
 2977         int passone = 1;
 2978         u_quad_t off, endoff, toff;
 2979         struct ucred* wcred = NULL;
 2980         struct buf **bvec = NULL;
 2981         struct bufobj *bo;
 2982         struct thread *td = curthread;
 2983 #ifndef NFS_COMMITBVECSIZ
 2984 #define NFS_COMMITBVECSIZ       20
 2985 #endif
 2986         struct buf *bvec_on_stack[NFS_COMMITBVECSIZ];
 2987         int bvecsize = 0, bveccount;
 2988 
 2989         if (nmp->nm_flag & NFSMNT_INT)
 2990                 slpflag = NFS_PCATCH;
 2991         if (!commit)
 2992                 passone = 0;
 2993         bo = &vp->v_bufobj;
 2994         /*
 2995          * A b_flags == (B_DELWRI | B_NEEDCOMMIT) block has been written to the
 2996          * server, but has not been committed to stable storage on the server
 2997          * yet. On the first pass, the byte range is worked out and the commit
 2998          * rpc is done. On the second pass, nfs_writebp() is called to do the
 2999          * job.
 3000          */
 3001 again:
 3002         off = (u_quad_t)-1;
 3003         endoff = 0;
 3004         bvecpos = 0;
 3005         if (NFS_ISV3(vp) && commit) {
 3006                 if (bvec != NULL && bvec != bvec_on_stack)
 3007                         free(bvec, M_TEMP);
 3008                 /*
 3009                  * Count up how many buffers waiting for a commit.
 3010                  * Count up how many buffers are waiting for a commit.
 3011                 bveccount = 0;
 3012                 BO_LOCK(bo);
 3013                 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
 3014                         if (!BUF_ISLOCKED(bp) &&
 3015                             (bp->b_flags & (B_DELWRI | B_NEEDCOMMIT))
 3016                                 == (B_DELWRI | B_NEEDCOMMIT))
 3017                                 bveccount++;
 3018                 }
 3019                 /*
 3020                  * Allocate space to remember the list of bufs to commit.  It is
 3021                  * important to use M_NOWAIT here to avoid a race with nfs_write.
 3022                  * If we can't get memory (for whatever reason), we will end up
 3023                  * committing the buffers one-by-one in the loop below.
 3024                  */
 3025                 if (bveccount > NFS_COMMITBVECSIZ) {
 3026                         /*
 3027                          * Release the vnode interlock to avoid a lock
 3028                          * order reversal.
 3029                          */
 3030                         BO_UNLOCK(bo);
 3031                         bvec = (struct buf **)
 3032                                 malloc(bveccount * sizeof(struct buf *),
 3033                                        M_TEMP, M_NOWAIT);
 3034                         BO_LOCK(bo);
 3035                         if (bvec == NULL) {
 3036                                 bvec = bvec_on_stack;
 3037                                 bvecsize = NFS_COMMITBVECSIZ;
 3038                         } else
 3039                                 bvecsize = bveccount;
 3040                 } else {
 3041                         bvec = bvec_on_stack;
 3042                         bvecsize = NFS_COMMITBVECSIZ;
 3043                 }
 3044                 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
 3045                         if (bvecpos >= bvecsize)
 3046                                 break;
 3047                         if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) {
 3048                                 nbp = TAILQ_NEXT(bp, b_bobufs);
 3049                                 continue;
 3050                         }
 3051                         if ((bp->b_flags & (B_DELWRI | B_NEEDCOMMIT)) !=
 3052                             (B_DELWRI | B_NEEDCOMMIT)) {
 3053                                 BUF_UNLOCK(bp);
 3054                                 nbp = TAILQ_NEXT(bp, b_bobufs);
 3055                                 continue;
 3056                         }
 3057                         BO_UNLOCK(bo);
 3058                         bremfree(bp);
 3059                         /*
 3060                          * Work out if all buffers are using the same cred
 3061                          * so we can deal with them all with one commit.
 3062                          *
 3063                          * NOTE: we are not clearing B_DONE here, so we have
 3064                          * to do it later on in this routine if we intend to
 3065                          * initiate I/O on the bp.
 3066                          *
 3067                          * Note: to avoid loopback deadlocks, we do not
 3068                          * assign b_runningbufspace.
 3069                          */
 3070                         if (wcred == NULL)
 3071                                 wcred = bp->b_wcred;
 3072                         else if (wcred != bp->b_wcred)
 3073                                 wcred = NOCRED;
 3074                         vfs_busy_pages(bp, 1);
 3075 
 3076                         BO_LOCK(bo);
 3077                         /*
 3078                          * bp is protected by being locked, but nbp is not
 3079                          * and vfs_busy_pages() may sleep.  We have to
 3080                          * recalculate nbp.
 3081                          */
 3082                         nbp = TAILQ_NEXT(bp, b_bobufs);
 3083 
 3084                         /*
 3085                          * A list of these buffers is kept so that the
 3086                          * second loop knows which buffers have actually
 3087                          * been committed. This is necessary, since there
 3088                          * may be a race between the commit rpc and new
 3089                          * uncommitted writes on the file.
 3090                          */
 3091                         bvec[bvecpos++] = bp;
 3092                         toff = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
 3093                                 bp->b_dirtyoff;
 3094                         if (toff < off)
 3095                                 off = toff;
 3096                         toff += (u_quad_t)(bp->b_dirtyend - bp->b_dirtyoff);
 3097                         if (toff > endoff)
 3098                                 endoff = toff;
 3099                 }
 3100                 BO_UNLOCK(bo);
 3101         }
 3102         if (bvecpos > 0) {
 3103                 /*
 3104                  * Commit data on the server, as required.
 3105                  * If all bufs are using the same wcred, then use that with
 3106                  * one call for all of them, otherwise commit each one
 3107                  * separately.
 3108                  */
 3109                 if (wcred != NOCRED)
 3110                         retv = nfs_commit(vp, off, (int)(endoff - off),
 3111                                           wcred, td);
 3112                 else {
 3113                         retv = 0;
 3114                         for (i = 0; i < bvecpos; i++) {
 3115                                 off_t off, size;
 3116                                 bp = bvec[i];
 3117                                 off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
 3118                                         bp->b_dirtyoff;
 3119                                 size = (u_quad_t)(bp->b_dirtyend
 3120                                                   - bp->b_dirtyoff);
 3121                                 retv = nfs_commit(vp, off, (int)size,
 3122                                                   bp->b_wcred, td);
 3123                                 if (retv) break;
 3124                         }
 3125                 }
 3126 
 3127                 if (retv == NFSERR_STALEWRITEVERF)
 3128                         nfs_clearcommit(vp->v_mount);
 3129 
 3130                 /*
 3131                  * Now, either mark the blocks I/O done or mark the
 3132                  * blocks dirty, depending on whether the commit
 3133                  * succeeded.
 3134                  */
 3135                 for (i = 0; i < bvecpos; i++) {
 3136                         bp = bvec[i];
 3137                         bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
 3138                         if (retv) {
 3139                                 /*
 3140                                  * Error, leave B_DELWRI intact
 3141                                  */
 3142                                 vfs_unbusy_pages(bp);
 3143                                 brelse(bp);
 3144                         } else {
 3145                                 /*
 3146                                  * Success, remove B_DELWRI ( bundirty() ).
 3147                                  *
 3148                                  * b_dirtyoff/b_dirtyend seem to be NFS
 3149                                  * specific.  We should probably move that
 3150                                  * into bundirty(). XXX
 3151                                  */
 3152                                 bufobj_wref(bo);
 3153                                 bp->b_flags |= B_ASYNC;
 3154                                 bundirty(bp);
 3155                                 bp->b_flags &= ~B_DONE;
 3156                                 bp->b_ioflags &= ~BIO_ERROR;
 3157                                 bp->b_dirtyoff = bp->b_dirtyend = 0;
 3158                                 bufdone(bp);
 3159                         }
 3160                 }
 3161         }
 3162 
 3163         /*
 3164          * Start/do any write(s) that are required.
 3165          */
 3166 loop:
 3167         BO_LOCK(bo);
 3168         TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
 3169                 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) {
 3170                         if (waitfor != MNT_WAIT || passone)
 3171                                 continue;
 3172 
 3173                         error = BUF_TIMELOCK(bp,
 3174                             LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
 3175                             BO_MTX(bo), "nfsfsync", slpflag, slptimeo);
 3176                         if (error == 0) {
 3177                                 BUF_UNLOCK(bp);
 3178                                 goto loop;
 3179                         }
 3180                         if (error == ENOLCK) {
 3181                                 error = 0;
 3182                                 goto loop;
 3183                         }
 3184                         if (nfs_sigintr(nmp, td)) {
 3185                                 error = EINTR;
 3186                                 goto done;
 3187                         }
 3188                         if (slpflag & PCATCH) {
 3189                                 slpflag = 0;
 3190                                 slptimeo = 2 * hz;
 3191                         }
 3192                         goto loop;
 3193                 }
 3194                 if ((bp->b_flags & B_DELWRI) == 0)
 3195                         panic("nfs_fsync: not dirty");
 3196                 if ((passone || !commit) && (bp->b_flags & B_NEEDCOMMIT)) {
 3197                         BUF_UNLOCK(bp);
 3198                         continue;
 3199                 }
 3200                 BO_UNLOCK(bo);
 3201                 bremfree(bp);
 3202                 bp->b_flags |= B_ASYNC;
 3206                 bwrite(bp);
 3207                 if (nfs_sigintr(nmp, td)) {
 3208                         error = EINTR;
 3209                         goto done;
 3210                 }
 3211                 goto loop;
 3212         }
 3213         if (passone) {
 3214                 passone = 0;
 3215                 BO_UNLOCK(bo);
 3216                 goto again;
 3217         }
 3218         if (waitfor == MNT_WAIT) {
 3219                 while (bo->bo_numoutput) {
 3220                         error = bufobj_wwait(bo, slpflag, slptimeo);
 3221                         if (error) {
 3222                             BO_UNLOCK(bo);
 3223                             error = nfs_sigintr(nmp, td);
 3224                             if (error)
 3225                                 goto done;
 3226                             if (slpflag & PCATCH) {
 3227                                 slpflag = 0;
 3228                                 slptimeo = 2 * hz;
 3229                             }
 3230                             BO_LOCK(bo);
 3231                         }
 3232                 }
 3233                 if (bo->bo_dirty.bv_cnt != 0 && commit) {
 3234                         BO_UNLOCK(bo);
 3235                         goto loop;
 3236                 }
 3237                 /*
 3238                  * Wait for all the async IO requests to drain
 3239                  */
 3240                 BO_UNLOCK(bo);
 3241                 mtx_lock(&np->n_mtx);
 3242                 while (np->n_directio_asyncwr > 0) {
 3243                         np->n_flag |= NFSYNCWAIT;
 3244                         error = nfs_msleep(td, (caddr_t)&np->n_directio_asyncwr,
 3245                                            &np->n_mtx, slpflag | (PRIBIO + 1), 
 3246                                            "nfsfsync", 0);
 3247                         if (error) {
 3248                                 if (nfs_sigintr(nmp, td)) {
 3249                                         mtx_unlock(&np->n_mtx);
 3250                                         error = EINTR;  
 3251                                         goto done;
 3252                                 }
 3253                         }
 3254                 }
 3255                 mtx_unlock(&np->n_mtx);
 3256         } else
 3257                 BO_UNLOCK(bo);
 3258         mtx_lock(&np->n_mtx);
 3259         if (np->n_flag & NWRITEERR) {
 3260                 error = np->n_error;
 3261                 np->n_flag &= ~NWRITEERR;
 3262         }
 3263         if (commit && bo->bo_dirty.bv_cnt == 0 &&
 3264             bo->bo_numoutput == 0 && np->n_directio_asyncwr == 0)
 3265                 np->n_flag &= ~NMODIFIED;
 3266         mtx_unlock(&np->n_mtx);
 3267 done:
 3268         if (bvec != NULL && bvec != bvec_on_stack)
 3269                 free(bvec, M_TEMP);
 3270         return (error);
 3271 }
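
/*
 * An illustrative sketch of the commit-range computation in the first pass
 * of nfs_flush() above: each buffer contributes [start, start + len), and a
 * single COMMIT is issued for the lowest start and highest end seen.  The
 * structure and function names are generic stand-ins, not the kernel's.
 */
#include <stdint.h>
#include <stdio.h>

struct ex_dirty {
        uint64_t start;         /* byte offset of the dirty region */
        uint64_t len;           /* length of the dirty region */
};

static void
ex_commit_range(const struct ex_dirty *d, int n,
    uint64_t *off, uint64_t *endoff)
{
        int i;

        *off = UINT64_MAX;
        *endoff = 0;
        for (i = 0; i < n; i++) {
                if (d[i].start < *off)
                        *off = d[i].start;
                if (d[i].start + d[i].len > *endoff)
                        *endoff = d[i].start + d[i].len;
        }
}

int
main(void)
{
        struct ex_dirty d[] = { { 4096, 512 }, { 0, 100 }, { 8192, 4096 } };
        uint64_t off, endoff;

        ex_commit_range(d, 3, &off, &endoff);
        printf("commit %ju bytes starting at offset %ju\n",
            (uintmax_t)(endoff - off), (uintmax_t)off);
        return (0);
}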
 3272 
 3273 /*
 3274  * NFS advisory byte-level locks.
 3275  */
 3276 static int
 3277 nfs_advlock(struct vop_advlock_args *ap)
 3278 {
 3279         struct vnode *vp = ap->a_vp;
 3280         u_quad_t size;
 3281         int error;
 3282 
 3283         error = vn_lock(vp, LK_SHARED);
 3284         if (error)
 3285                 return (error);
 3286         if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NOLOCKD) != 0) {
 3287                 size = VTONFS(vp)->n_size;
 3288                 VOP_UNLOCK(vp, 0);
 3289                 error = lf_advlock(ap, &(vp->v_lockf), size);
 3290         } else {
 3291                 if (nfs_advlock_p)
 3292                         error = nfs_advlock_p(ap);
 3293                 else
 3294                         error = ENOLCK;
 3295         }
 3296 
 3297         return (error);
 3298 }
 3299 
 3300 /*
 3301  * NFS advisory byte-level locks.
 3302  */
 3303 static int
 3304 nfs_advlockasync(struct vop_advlockasync_args *ap)
 3305 {
 3306         struct vnode *vp = ap->a_vp;
 3307         u_quad_t size;
 3308         int error;
 3309         
 3310         error = vn_lock(vp, LK_SHARED);
 3311         if (error)
 3312                 return (error);
 3313         if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NOLOCKD) != 0) {
 3314                 size = VTONFS(vp)->n_size;
 3315                 VOP_UNLOCK(vp, 0);
 3316                 error = lf_advlockasync(ap, &(vp->v_lockf), size);
 3317         } else {
 3318                 VOP_UNLOCK(vp, 0);
 3319                 error = EOPNOTSUPP;
 3320         }
 3321         return (error);
 3322 }
 3323 
 3324 /*
 3325  * Print out the contents of an nfsnode.
 3326  */
 3327 static int
 3328 nfs_print(struct vop_print_args *ap)
 3329 {
 3330         struct vnode *vp = ap->a_vp;
 3331         struct nfsnode *np = VTONFS(vp);
 3332 
 3333         nfs_printf("\tfileid %ld fsid 0x%x",
 3334            np->n_vattr.va_fileid, np->n_vattr.va_fsid);
 3335         if (vp->v_type == VFIFO)
 3336                 fifo_printinfo(vp);
 3337         printf("\n");
 3338         return (0);
 3339 }
 3340 
 3341 /*
 3342  * This is the "real" nfs::bwrite(struct buf*).
 3343  * We set B_CACHE if this is a VMIO buffer.
 3344  */
 3345 int
 3346 nfs_writebp(struct buf *bp, int force __unused, struct thread *td)
 3347 {
 3348         int s;
 3349         int oldflags = bp->b_flags;
 3350 #if 0
 3351         int retv = 1;
 3352         off_t off;
 3353 #endif
 3354 
 3355         BUF_ASSERT_HELD(bp);
 3356 
 3357         if (bp->b_flags & B_INVAL) {
 3358                 brelse(bp);
 3359                 return(0);
 3360         }
 3361 
 3362         bp->b_flags |= B_CACHE;
 3363 
 3364         /*
 3365          * Undirty the bp.  We will redirty it later if the I/O fails.
 3366          */
 3367 
 3368         s = splbio();
 3369         bundirty(bp);
 3370         bp->b_flags &= ~B_DONE;
 3371         bp->b_ioflags &= ~BIO_ERROR;
 3372         bp->b_iocmd = BIO_WRITE;
 3373 
 3374         bufobj_wref(bp->b_bufobj);
 3375         curthread->td_ru.ru_oublock++;
 3376         splx(s);
 3377 
 3378         /*
 3379          * Note: to avoid loopback deadlocks, we do not
 3380          * assign b_runningbufspace.
 3381          */
 3382         vfs_busy_pages(bp, 1);
 3383 
 3384         BUF_KERNPROC(bp);
 3385         bp->b_iooffset = dbtob(bp->b_blkno);
 3386         bstrategy(bp);
 3387 
 3388         if( (oldflags & B_ASYNC) == 0) {
 3389                 int rtval = bufwait(bp);
 3390 
 3391                 if (oldflags & B_DELWRI) {
 3392                         s = splbio();
 3393                         reassignbuf(bp);
 3394                         splx(s);
 3395                 }
 3396                 brelse(bp);
 3397                 return (rtval);
 3398         }
 3399 
 3400         return (0);
 3401 }
 3402 
 3403 /*
 3404  * nfs special file access vnode op.
 3405  * Essentially just get vattr and then imitate iaccess() since the device is
 3406  * local to the client.
 3407  */
 3408 static int
 3409 nfsspec_access(struct vop_access_args *ap)
 3410 {
 3411         struct vattr *vap;
 3412         struct ucred *cred = ap->a_cred;
 3413         struct vnode *vp = ap->a_vp;
 3414         accmode_t accmode = ap->a_accmode;
 3415         struct vattr vattr;
 3416         int error;
 3417 
 3418         /*
 3419          * Disallow write attempts on filesystems mounted read-only,
 3420          * unless the file is a socket, fifo, or a block or character
 3421          * device resident on the filesystem.
 3422          */
 3423         if ((accmode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
 3424                 switch (vp->v_type) {
 3425                 case VREG:
 3426                 case VDIR:
 3427                 case VLNK:
 3428                         return (EROFS);
 3429                 default:
 3430                         break;
 3431                 }
 3432         }
 3433         vap = &vattr;
 3434         error = VOP_GETATTR(vp, vap, cred);
 3435         if (error)
 3436                 goto out;
 3437         error  = vaccess(vp->v_type, vap->va_mode, vap->va_uid, vap->va_gid,
 3438                          accmode, cred, NULL);
 3439 out:
 3440         return error;
 3441 }
 3442 
 3443 /*
 3444  * Read wrapper for fifos.
 3445  */
 3446 static int
 3447 nfsfifo_read(struct vop_read_args *ap)
 3448 {
 3449         struct nfsnode *np = VTONFS(ap->a_vp);
 3450         int error;
 3451 
 3452         /*
 3453          * Set access flag.
 3454          */
 3455         mtx_lock(&np->n_mtx);
 3456         np->n_flag |= NACC;
 3457         getnanotime(&np->n_atim);
 3458         mtx_unlock(&np->n_mtx);
 3459         error = fifo_specops.vop_read(ap);
 3460         return error;   
 3461 }
 3462 
 3463 /*
 3464  * Write wrapper for fifos.
 3465  */
 3466 static int
 3467 nfsfifo_write(struct vop_write_args *ap)
 3468 {
 3469         struct nfsnode *np = VTONFS(ap->a_vp);
 3470 
 3471         /*
 3472          * Set update flag.
 3473          */
 3474         mtx_lock(&np->n_mtx);
 3475         np->n_flag |= NUPD;
 3476         getnanotime(&np->n_mtim);
 3477         mtx_unlock(&np->n_mtx);
 3478         return(fifo_specops.vop_write(ap));
 3479 }
 3480 
 3481 /*
 3482  * Close wrapper for fifos.
 3483  *
 3484  * Update the times on the nfsnode then do fifo close.
 3485  */
 3486 static int
 3487 nfsfifo_close(struct vop_close_args *ap)
 3488 {
 3489         struct vnode *vp = ap->a_vp;
 3490         struct nfsnode *np = VTONFS(vp);
 3491         struct vattr vattr;
 3492         struct timespec ts;
 3493 
 3494         mtx_lock(&np->n_mtx);
 3495         if (np->n_flag & (NACC | NUPD)) {
 3496                 getnanotime(&ts);
 3497                 if (np->n_flag & NACC)
 3498                         np->n_atim = ts;
 3499                 if (np->n_flag & NUPD)
 3500                         np->n_mtim = ts;
 3501                 np->n_flag |= NCHG;
 3502                 if (vrefcnt(vp) == 1 &&
 3503                     (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
 3504                         VATTR_NULL(&vattr);
 3505                         if (np->n_flag & NACC)
 3506                                 vattr.va_atime = np->n_atim;
 3507                         if (np->n_flag & NUPD)
 3508                                 vattr.va_mtime = np->n_mtim;
 3509                         mtx_unlock(&np->n_mtx);
 3510                         (void)VOP_SETATTR(vp, &vattr, ap->a_cred);
 3511                         goto out;
 3512                 }
 3513         }
 3514         mtx_unlock(&np->n_mtx);
 3515 out:
 3516         return (fifo_specops.vop_close(ap));
 3517 }
 3518 
 3519 /*
 3520  * Just call nfs_writebp() with the force argument set to 1.
 3521  *
 3522  * NOTE: B_DONE may or may not be set in a_bp on call.
 3523  */
 3524 static int
 3525 nfs_bwrite(struct buf *bp)
 3526 {
 3527 
 3528         return (nfs_writebp(bp, 1, curthread));
 3529 }
 3530 
 3531 struct buf_ops buf_ops_nfs = {
 3532         .bop_name       =       "buf_ops_nfs",
 3533         .bop_write      =       nfs_bwrite,
 3534         .bop_strategy   =       bufstrategy,
 3535         .bop_sync       =       bufsync,
 3536         .bop_bdflush    =       bufbdflush,
 3537 };
