FreeBSD/Linux Kernel Cross Reference
sys/fs/nfsclient/nfs_clvnops.c

    1 /*-
    2  * SPDX-License-Identifier: BSD-3-Clause
    3  *
    4  * Copyright (c) 1989, 1993
    5  *      The Regents of the University of California.  All rights reserved.
    6  *
    7  * This code is derived from software contributed to Berkeley by
    8  * Rick Macklem at The University of Guelph.
    9  *
   10  * Redistribution and use in source and binary forms, with or without
   11  * modification, are permitted provided that the following conditions
   12  * are met:
   13  * 1. Redistributions of source code must retain the above copyright
   14  *    notice, this list of conditions and the following disclaimer.
   15  * 2. Redistributions in binary form must reproduce the above copyright
   16  *    notice, this list of conditions and the following disclaimer in the
   17  *    documentation and/or other materials provided with the distribution.
   18  * 3. Neither the name of the University nor the names of its contributors
   19  *    may be used to endorse or promote products derived from this software
   20  *    without specific prior written permission.
   21  *
   22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   32  * SUCH DAMAGE.
   33  *
   34  *      from nfs_vnops.c        8.16 (Berkeley) 5/27/95
   35  */
   36 
   37 #include <sys/cdefs.h>
   38 __FBSDID("$FreeBSD$");
   39 
   40 /*
   41  * vnode op calls for Sun NFS version 2, 3 and 4
   42  */
   43 
   44 #include "opt_inet.h"
   45 
   46 #include <sys/param.h>
   47 #include <sys/kernel.h>
   48 #include <sys/systm.h>
   49 #include <sys/resourcevar.h>
   50 #include <sys/proc.h>
   51 #include <sys/mount.h>
   52 #include <sys/bio.h>
   53 #include <sys/buf.h>
   54 #include <sys/extattr.h>
   55 #include <sys/filio.h>
   56 #include <sys/jail.h>
   57 #include <sys/malloc.h>
   58 #include <sys/mbuf.h>
   59 #include <sys/namei.h>
   60 #include <sys/socket.h>
   61 #include <sys/vnode.h>
   62 #include <sys/dirent.h>
   63 #include <sys/fcntl.h>
   64 #include <sys/lockf.h>
   65 #include <sys/stat.h>
   66 #include <sys/sysctl.h>
   67 #include <sys/signalvar.h>
   68 
   69 #include <vm/vm.h>
   70 #include <vm/vm_extern.h>
   71 #include <vm/vm_object.h>
   72 
   73 #include <fs/nfs/nfsport.h>
   74 #include <fs/nfsclient/nfsnode.h>
   75 #include <fs/nfsclient/nfsmount.h>
   76 #include <fs/nfsclient/nfs.h>
   77 #include <fs/nfsclient/nfs_kdtrace.h>
   78 
   79 #include <net/if.h>
   80 #include <netinet/in.h>
   81 #include <netinet/in_var.h>
   82 
   83 #include <nfs/nfs_lock.h>
   84 
   85 #ifdef KDTRACE_HOOKS
   86 #include <sys/dtrace_bsd.h>
   87 
   88 dtrace_nfsclient_accesscache_flush_probe_func_t
   89                 dtrace_nfscl_accesscache_flush_done_probe;
   90 uint32_t        nfscl_accesscache_flush_done_id;
   91 
   92 dtrace_nfsclient_accesscache_get_probe_func_t
   93                 dtrace_nfscl_accesscache_get_hit_probe,
   94                 dtrace_nfscl_accesscache_get_miss_probe;
   95 uint32_t        nfscl_accesscache_get_hit_id;
   96 uint32_t        nfscl_accesscache_get_miss_id;
   97 
   98 dtrace_nfsclient_accesscache_load_probe_func_t
   99                 dtrace_nfscl_accesscache_load_done_probe;
  100 uint32_t        nfscl_accesscache_load_done_id;
   101 #endif /* KDTRACE_HOOKS */
  102 
  103 /* Defs */
  104 #define TRUE    1
  105 #define FALSE   0
  106 
  107 extern struct nfsstatsv1 nfsstatsv1;
  108 extern int nfsrv_useacl;
  109 extern int nfscl_debuglevel;
  110 MALLOC_DECLARE(M_NEWNFSREQ);
  111 
  112 static vop_read_t       nfsfifo_read;
  113 static vop_write_t      nfsfifo_write;
  114 static vop_close_t      nfsfifo_close;
  115 static int      nfs_setattrrpc(struct vnode *, struct vattr *, struct ucred *,
  116                     struct thread *);
  117 static vop_lookup_t     nfs_lookup;
  118 static vop_create_t     nfs_create;
  119 static vop_mknod_t      nfs_mknod;
  120 static vop_open_t       nfs_open;
  121 static vop_pathconf_t   nfs_pathconf;
  122 static vop_close_t      nfs_close;
  123 static vop_access_t     nfs_access;
  124 static vop_getattr_t    nfs_getattr;
  125 static vop_setattr_t    nfs_setattr;
  126 static vop_read_t       nfs_read;
  127 static vop_fsync_t      nfs_fsync;
  128 static vop_remove_t     nfs_remove;
  129 static vop_link_t       nfs_link;
  130 static vop_rename_t     nfs_rename;
  131 static vop_mkdir_t      nfs_mkdir;
  132 static vop_rmdir_t      nfs_rmdir;
  133 static vop_symlink_t    nfs_symlink;
  134 static vop_readdir_t    nfs_readdir;
  135 static vop_strategy_t   nfs_strategy;
  136 static  int     nfs_lookitup(struct vnode *, char *, int,
  137                     struct ucred *, struct thread *, struct nfsnode **);
  138 static  int     nfs_sillyrename(struct vnode *, struct vnode *,
  139                     struct componentname *);
  140 static vop_access_t     nfsspec_access;
  141 static vop_readlink_t   nfs_readlink;
  142 static vop_print_t      nfs_print;
  143 static vop_advlock_t    nfs_advlock;
  144 static vop_advlockasync_t nfs_advlockasync;
  145 static vop_getacl_t nfs_getacl;
  146 static vop_setacl_t nfs_setacl;
  147 static vop_advise_t nfs_advise;
  148 static vop_allocate_t nfs_allocate;
  149 static vop_deallocate_t nfs_deallocate;
  150 static vop_copy_file_range_t nfs_copy_file_range;
  151 static vop_ioctl_t nfs_ioctl;
  152 static vop_getextattr_t nfs_getextattr;
  153 static vop_setextattr_t nfs_setextattr;
  154 static vop_listextattr_t nfs_listextattr;
  155 static vop_deleteextattr_t nfs_deleteextattr;
  156 static vop_lock1_t      nfs_lock;
  157 
  158 /*
  159  * Global vfs data structures for nfs
  160  */
  161 
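       /*
        * The vnode operations are implemented in the "nosig" vector below;
        * newnfs_vnodeops (and newnfs_fifoops) route every call through a
        * bypass routine that uses vop_sigdefer() so that stop signals are
        * deferred for the duration of the vnode operation.
        */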
  162 static struct vop_vector newnfs_vnodeops_nosig = {
  163         .vop_default =          &default_vnodeops,
  164         .vop_access =           nfs_access,
  165         .vop_advlock =          nfs_advlock,
  166         .vop_advlockasync =     nfs_advlockasync,
  167         .vop_close =            nfs_close,
  168         .vop_create =           nfs_create,
  169         .vop_fsync =            nfs_fsync,
  170         .vop_getattr =          nfs_getattr,
  171         .vop_getpages =         ncl_getpages,
  172         .vop_putpages =         ncl_putpages,
  173         .vop_inactive =         ncl_inactive,
  174         .vop_link =             nfs_link,
  175         .vop_lock1 =            nfs_lock,
  176         .vop_lookup =           nfs_lookup,
  177         .vop_mkdir =            nfs_mkdir,
  178         .vop_mknod =            nfs_mknod,
  179         .vop_open =             nfs_open,
  180         .vop_pathconf =         nfs_pathconf,
  181         .vop_print =            nfs_print,
  182         .vop_read =             nfs_read,
  183         .vop_readdir =          nfs_readdir,
  184         .vop_readlink =         nfs_readlink,
  185         .vop_reclaim =          ncl_reclaim,
  186         .vop_remove =           nfs_remove,
  187         .vop_rename =           nfs_rename,
  188         .vop_rmdir =            nfs_rmdir,
  189         .vop_setattr =          nfs_setattr,
  190         .vop_strategy =         nfs_strategy,
  191         .vop_symlink =          nfs_symlink,
  192         .vop_write =            ncl_write,
  193         .vop_getacl =           nfs_getacl,
  194         .vop_setacl =           nfs_setacl,
  195         .vop_advise =           nfs_advise,
  196         .vop_allocate =         nfs_allocate,
  197         .vop_deallocate =       nfs_deallocate,
  198         .vop_copy_file_range =  nfs_copy_file_range,
  199         .vop_ioctl =            nfs_ioctl,
  200         .vop_getextattr =       nfs_getextattr,
  201         .vop_setextattr =       nfs_setextattr,
  202         .vop_listextattr =      nfs_listextattr,
  203         .vop_deleteextattr =    nfs_deleteextattr,
  204 };
  205 VFS_VOP_VECTOR_REGISTER(newnfs_vnodeops_nosig);
  206 
  207 static int
  208 nfs_vnodeops_bypass(struct vop_generic_args *a)
  209 {
  210 
  211         return (vop_sigdefer(&newnfs_vnodeops_nosig, a));
  212 }
  213 
  214 struct vop_vector newnfs_vnodeops = {
  215         .vop_default =          &default_vnodeops,
  216         .vop_bypass =           nfs_vnodeops_bypass,
  217 };
  218 VFS_VOP_VECTOR_REGISTER(newnfs_vnodeops);
  219 
  220 static struct vop_vector newnfs_fifoops_nosig = {
  221         .vop_default =          &fifo_specops,
  222         .vop_access =           nfsspec_access,
  223         .vop_close =            nfsfifo_close,
  224         .vop_fsync =            nfs_fsync,
  225         .vop_getattr =          nfs_getattr,
  226         .vop_inactive =         ncl_inactive,
  227         .vop_pathconf =         nfs_pathconf,
  228         .vop_print =            nfs_print,
  229         .vop_read =             nfsfifo_read,
  230         .vop_reclaim =          ncl_reclaim,
  231         .vop_setattr =          nfs_setattr,
  232         .vop_write =            nfsfifo_write,
  233 };
  234 VFS_VOP_VECTOR_REGISTER(newnfs_fifoops_nosig);
  235 
  236 static int
  237 nfs_fifoops_bypass(struct vop_generic_args *a)
  238 {
  239 
  240         return (vop_sigdefer(&newnfs_fifoops_nosig, a));
  241 }
  242 
  243 struct vop_vector newnfs_fifoops = {
  244         .vop_default =          &default_vnodeops,
  245         .vop_bypass =           nfs_fifoops_bypass,
  246 };
  247 VFS_VOP_VECTOR_REGISTER(newnfs_fifoops);
  248 
  249 static int nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp,
  250     struct componentname *cnp, struct vattr *vap);
  251 static int nfs_removerpc(struct vnode *dvp, struct vnode *vp, char *name,
  252     int namelen, struct ucred *cred, struct thread *td);
  253 static int nfs_renamerpc(struct vnode *fdvp, struct vnode *fvp,
  254     char *fnameptr, int fnamelen, struct vnode *tdvp, struct vnode *tvp,
  255     char *tnameptr, int tnamelen, struct ucred *cred, struct thread *td);
  256 static int nfs_renameit(struct vnode *sdvp, struct vnode *svp,
  257     struct componentname *scnp, struct sillyrename *sp);
  258 
  259 /*
  260  * Global variables
  261  */
  262 SYSCTL_DECL(_vfs_nfs);
  263 
  264 static int      nfsaccess_cache_timeout = NFS_MAXATTRTIMO;
  265 SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_timeout, CTLFLAG_RW,
  266            &nfsaccess_cache_timeout, 0, "NFS ACCESS cache timeout");
  267 
  268 static int      nfs_prime_access_cache = 0;
  269 SYSCTL_INT(_vfs_nfs, OID_AUTO, prime_access_cache, CTLFLAG_RW,
  270            &nfs_prime_access_cache, 0,
  271            "Prime NFS ACCESS cache when fetching attributes");
  272 
  273 static int      newnfs_commit_on_close = 0;
  274 SYSCTL_INT(_vfs_nfs, OID_AUTO, commit_on_close, CTLFLAG_RW,
  275     &newnfs_commit_on_close, 0, "write+commit on close, else only write");
  276 
  277 static int      nfs_clean_pages_on_close = 1;
  278 SYSCTL_INT(_vfs_nfs, OID_AUTO, clean_pages_on_close, CTLFLAG_RW,
  279            &nfs_clean_pages_on_close, 0, "NFS clean dirty pages on close");
  280 
  281 int newnfs_directio_enable = 0;
  282 SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs_directio_enable, CTLFLAG_RW,
  283            &newnfs_directio_enable, 0, "Enable NFS directio");
  284 
  285 int nfs_keep_dirty_on_error;
  286 SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs_keep_dirty_on_error, CTLFLAG_RW,
  287     &nfs_keep_dirty_on_error, 0, "Retry pageout if error returned");
  288 
  289 /*
  290  * This sysctl allows other processes to mmap a file that has been opened
  291  * O_DIRECT by a process.  In general, having processes mmap the file while
   292  * direct I/O is in progress can lead to data inconsistencies.  However, we
   293  * allow it by default to avoid a denial of service: otherwise a malicious
   294  * user could open files O_DIRECT and thereby prevent other users from
   295  * mmap'ing them.  "Protected" environments where stricter consistency
   296  * guarantees are required can disable this knob.  The process that opened
   297  * the file O_DIRECT cannot mmap() the file, because mmap'ed IO on an
   298  * O_DIRECT open() is not meaningful.
  299  */
  300 int newnfs_directio_allow_mmap = 1;
  301 SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs_directio_allow_mmap, CTLFLAG_RW,
   302            &newnfs_directio_allow_mmap, 0, "Enable mmapped IO on files with O_DIRECT opens");
  303 
  304 static uint64_t nfs_maxalloclen = 64 * 1024 * 1024;
  305 SYSCTL_U64(_vfs_nfs, OID_AUTO, maxalloclen, CTLFLAG_RW,
  306            &nfs_maxalloclen, 0, "NFS max allocate/deallocate length");
  307 
  308 #define NFSACCESS_ALL (NFSACCESS_READ | NFSACCESS_MODIFY                \
  309                          | NFSACCESS_EXTEND | NFSACCESS_EXECUTE \
  310                          | NFSACCESS_DELETE | NFSACCESS_LOOKUP)
  311 
  312 /*
   313  * SMP Locking Note:
   314  * The locks listed below each lock's description are those that may be
   315  * acquired while that lock is held (i.e., the lock ordering).
  316  * np->n_mtx : Protects the fields in the nfsnode.
  317        VM Object Lock
  318        VI_MTX (acquired indirectly)
  319  * nmp->nm_mtx : Protects the fields in the nfsmount.
  320        rep->r_mtx
  321  * ncl_iod_mutex : Global lock, protects shared nfsiod state.
  322  * nfs_reqq_mtx : Global lock, protects the nfs_reqq list.
  323        nmp->nm_mtx
  324        rep->r_mtx
  325  * rep->r_mtx : Protects the fields in an nfsreq.
  326  */
  327 
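       /*
        * nfs lock vnode op.
        * Wrap the default lock operation.  If NVNSETSZSKIP is set on the
        * nfsnode, a vnode_pager_setsize() call was deferred until a sleepable
        * context holding the vnode lock was available; perform it here,
        * temporarily upgrading a shared lock request to exclusive (and
        * downgrading afterwards) when needed.
        */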
  328 static int
  329 nfs_lock(struct vop_lock1_args *ap)
  330 {
  331         struct vnode *vp;
  332         struct nfsnode *np;
  333         u_quad_t nsize;
  334         int error, lktype;
  335         bool onfault;
  336 
  337         vp = ap->a_vp;
  338         lktype = ap->a_flags & LK_TYPE_MASK;
  339         error = VOP_LOCK1_APV(&default_vnodeops, ap);
  340         if (error != 0 || vp->v_op != &newnfs_vnodeops)
  341                 return (error);
  342         np = VTONFS(vp);
  343         if (np == NULL)
  344                 return (0);
  345         NFSLOCKNODE(np);
  346         if ((np->n_flag & NVNSETSZSKIP) == 0 || (lktype != LK_SHARED &&
  347             lktype != LK_EXCLUSIVE && lktype != LK_UPGRADE &&
  348             lktype != LK_TRYUPGRADE)) {
  349                 NFSUNLOCKNODE(np);
  350                 return (0);
  351         }
  352         onfault = (ap->a_flags & LK_EATTR_MASK) == LK_NOWAIT &&
  353             (ap->a_flags & LK_INIT_MASK) == LK_CANRECURSE &&
  354             (lktype == LK_SHARED || lktype == LK_EXCLUSIVE);
  355         if (onfault && vp->v_vnlock->lk_recurse == 0) {
  356                 /*
  357                  * Force retry in vm_fault(), to make the lock request
  358                  * sleepable, which allows us to piggy-back the
  359                  * sleepable call to vnode_pager_setsize().
  360                  */
  361                 NFSUNLOCKNODE(np);
  362                 VOP_UNLOCK(vp);
  363                 return (EBUSY);
  364         }
  365         if ((ap->a_flags & LK_NOWAIT) != 0 ||
  366             (lktype == LK_SHARED && vp->v_vnlock->lk_recurse > 0)) {
  367                 NFSUNLOCKNODE(np);
  368                 return (0);
  369         }
  370         if (lktype == LK_SHARED) {
  371                 NFSUNLOCKNODE(np);
  372                 VOP_UNLOCK(vp);
  373                 ap->a_flags &= ~(LK_TYPE_MASK | LK_INTERLOCK);
  374                 ap->a_flags |= LK_EXCLUSIVE;
  375                 error = VOP_LOCK1_APV(&default_vnodeops, ap);
  376                 if (error != 0 || vp->v_op != &newnfs_vnodeops)
  377                         return (error);
  378                 if (vp->v_data == NULL)
  379                         goto downgrade;
  380                 MPASS(vp->v_data == np);
  381                 NFSLOCKNODE(np);
  382                 if ((np->n_flag & NVNSETSZSKIP) == 0) {
  383                         NFSUNLOCKNODE(np);
  384                         goto downgrade;
  385                 }
  386         }
  387         np->n_flag &= ~NVNSETSZSKIP;
  388         nsize = np->n_size;
  389         NFSUNLOCKNODE(np);
  390         vnode_pager_setsize(vp, nsize);
  391 downgrade:
  392         if (lktype == LK_SHARED) {
  393                 ap->a_flags &= ~(LK_TYPE_MASK | LK_INTERLOCK);
  394                 ap->a_flags |= LK_DOWNGRADE;
  395                 (void)VOP_LOCK1_APV(&default_vnodeops, ap);
  396         }
  397         return (0);
  398 }
  399 
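       /*
        * Do an NFSv3/v4 ACCESS RPC over the wire and, on success, record the
        * returned access mode in the per-node access cache, replacing the
        * least recently used entry when no slot already exists for this uid.
        */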
  400 static int
  401 nfs34_access_otw(struct vnode *vp, int wmode, struct thread *td,
  402     struct ucred *cred, u_int32_t *retmode)
  403 {
  404         int error = 0, attrflag, i, lrupos;
  405         u_int32_t rmode;
  406         struct nfsnode *np = VTONFS(vp);
  407         struct nfsvattr nfsva;
  408 
  409         error = nfsrpc_accessrpc(vp, wmode, cred, td, &nfsva, &attrflag,
  410             &rmode);
  411         if (attrflag)
  412                 (void) nfscl_loadattrcache(&vp, &nfsva, NULL, 0, 1);
  413         if (!error) {
  414                 lrupos = 0;
  415                 NFSLOCKNODE(np);
  416                 for (i = 0; i < NFS_ACCESSCACHESIZE; i++) {
  417                         if (np->n_accesscache[i].uid == cred->cr_uid) {
  418                                 np->n_accesscache[i].mode = rmode;
  419                                 np->n_accesscache[i].stamp = time_second;
  420                                 break;
  421                         }
  422                         if (i > 0 && np->n_accesscache[i].stamp <
  423                             np->n_accesscache[lrupos].stamp)
  424                                 lrupos = i;
  425                 }
  426                 if (i == NFS_ACCESSCACHESIZE) {
  427                         np->n_accesscache[lrupos].uid = cred->cr_uid;
  428                         np->n_accesscache[lrupos].mode = rmode;
  429                         np->n_accesscache[lrupos].stamp = time_second;
  430                 }
  431                 NFSUNLOCKNODE(np);
  432                 if (retmode != NULL)
  433                         *retmode = rmode;
  434                 KDTRACE_NFS_ACCESSCACHE_LOAD_DONE(vp, cred->cr_uid, rmode, 0);
  435         } else if (NFS_ISV4(vp)) {
  436                 error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
  437         }
  438 #ifdef KDTRACE_HOOKS
  439         if (error != 0)
  440                 KDTRACE_NFS_ACCESSCACHE_LOAD_DONE(vp, cred->cr_uid, 0,
  441                     error);
  442 #endif
  443         return (error);
  444 }
  445 
  446 /*
  447  * nfs access vnode op.
  448  * For nfs version 2, just return ok. File accesses may fail later.
   449  * For nfs version 3 or 4, use the access rpc to check accessibility. If file
   450  * modes are changed on the server, accesses might still fail later.
  451  */
  452 static int
  453 nfs_access(struct vop_access_args *ap)
  454 {
  455         struct vnode *vp = ap->a_vp;
  456         int error = 0, i, gotahit;
  457         u_int32_t mode, wmode, rmode;
  458         int v34 = NFS_ISV34(vp);
  459         struct nfsnode *np = VTONFS(vp);
  460 
  461         /*
  462          * Disallow write attempts on filesystems mounted read-only;
  463          * unless the file is a socket, fifo, or a block or character
  464          * device resident on the filesystem.
  465          */
  466         if ((ap->a_accmode & (VWRITE | VAPPEND | VWRITE_NAMED_ATTRS |
  467             VDELETE_CHILD | VWRITE_ATTRIBUTES | VDELETE | VWRITE_ACL |
  468             VWRITE_OWNER)) != 0 && (vp->v_mount->mnt_flag & MNT_RDONLY) != 0) {
  469                 switch (vp->v_type) {
  470                 case VREG:
  471                 case VDIR:
  472                 case VLNK:
  473                         return (EROFS);
  474                 default:
  475                         break;
  476                 }
  477         }
  478         /*
  479          * For nfs v3 or v4, check to see if we have done this recently, and if
  480          * so return our cached result instead of making an ACCESS call.
   481          * If not, do an access rpc.  For nfs v2 we are stuck emulating
   482          * ufs_access() locally using the vattr, which may not be correct
   483          * since the server may apply other access criteria such as
   484          * client uid-->server uid mapping that we do not know about.
  485          */
  486         if (v34) {
  487                 if (ap->a_accmode & VREAD)
  488                         mode = NFSACCESS_READ;
  489                 else
  490                         mode = 0;
  491                 if (vp->v_type != VDIR) {
  492                         if (ap->a_accmode & VWRITE)
  493                                 mode |= (NFSACCESS_MODIFY | NFSACCESS_EXTEND);
  494                         if (ap->a_accmode & VAPPEND)
  495                                 mode |= NFSACCESS_EXTEND;
  496                         if (ap->a_accmode & VEXEC)
  497                                 mode |= NFSACCESS_EXECUTE;
  498                         if (ap->a_accmode & VDELETE)
  499                                 mode |= NFSACCESS_DELETE;
  500                 } else {
  501                         if (ap->a_accmode & VWRITE)
  502                                 mode |= (NFSACCESS_MODIFY | NFSACCESS_EXTEND);
  503                         if (ap->a_accmode & VAPPEND)
  504                                 mode |= NFSACCESS_EXTEND;
  505                         if (ap->a_accmode & VEXEC)
  506                                 mode |= NFSACCESS_LOOKUP;
  507                         if (ap->a_accmode & VDELETE)
  508                                 mode |= NFSACCESS_DELETE;
  509                         if (ap->a_accmode & VDELETE_CHILD)
  510                                 mode |= NFSACCESS_MODIFY;
  511                 }
  512                 /* XXX safety belt, only make blanket request if caching */
  513                 if (nfsaccess_cache_timeout > 0) {
  514                         wmode = NFSACCESS_READ | NFSACCESS_MODIFY |
  515                                 NFSACCESS_EXTEND | NFSACCESS_EXECUTE |
  516                                 NFSACCESS_DELETE | NFSACCESS_LOOKUP;
  517                 } else {
  518                         wmode = mode;
  519                 }
  520 
  521                 /*
  522                  * Does our cached result allow us to give a definite yes to
  523                  * this request?
  524                  */
  525                 gotahit = 0;
  526                 NFSLOCKNODE(np);
  527                 for (i = 0; i < NFS_ACCESSCACHESIZE; i++) {
  528                         if (ap->a_cred->cr_uid == np->n_accesscache[i].uid) {
  529                             if (time_second < (np->n_accesscache[i].stamp
  530                                 + nfsaccess_cache_timeout) &&
  531                                 (np->n_accesscache[i].mode & mode) == mode) {
  532                                 NFSINCRGLOBAL(nfsstatsv1.accesscache_hits);
  533                                 gotahit = 1;
  534                             }
  535                             break;
  536                         }
  537                 }
  538                 NFSUNLOCKNODE(np);
  539 #ifdef KDTRACE_HOOKS
  540                 if (gotahit != 0)
  541                         KDTRACE_NFS_ACCESSCACHE_GET_HIT(vp,
  542                             ap->a_cred->cr_uid, mode);
  543                 else
  544                         KDTRACE_NFS_ACCESSCACHE_GET_MISS(vp,
  545                             ap->a_cred->cr_uid, mode);
  546 #endif
  547                 if (gotahit == 0) {
  548                         /*
  549                          * Either a no, or a don't know.  Go to the wire.
  550                          */
  551                         NFSINCRGLOBAL(nfsstatsv1.accesscache_misses);
  552                         error = nfs34_access_otw(vp, wmode, ap->a_td,
  553                             ap->a_cred, &rmode);
  554                         if (!error &&
  555                             (rmode & mode) != mode)
  556                                 error = EACCES;
  557                 }
  558                 return (error);
  559         } else {
  560                 if ((error = nfsspec_access(ap)) != 0) {
  561                         return (error);
  562                 }
  563                 /*
  564                  * Attempt to prevent a mapped root from accessing a file
  565                  * which it shouldn't.  We try to read a byte from the file
  566                  * if the user is root and the file is not zero length.
  567                  * After calling nfsspec_access, we should have the correct
  568                  * file size cached.
  569                  */
  570                 NFSLOCKNODE(np);
  571                 if (ap->a_cred->cr_uid == 0 && (ap->a_accmode & VREAD)
  572                     && VTONFS(vp)->n_size > 0) {
  573                         struct iovec aiov;
  574                         struct uio auio;
  575                         char buf[1];
  576 
  577                         NFSUNLOCKNODE(np);
  578                         aiov.iov_base = buf;
  579                         aiov.iov_len = 1;
  580                         auio.uio_iov = &aiov;
  581                         auio.uio_iovcnt = 1;
  582                         auio.uio_offset = 0;
  583                         auio.uio_resid = 1;
  584                         auio.uio_segflg = UIO_SYSSPACE;
  585                         auio.uio_rw = UIO_READ;
  586                         auio.uio_td = ap->a_td;
  587 
  588                         if (vp->v_type == VREG)
  589                                 error = ncl_readrpc(vp, &auio, ap->a_cred);
  590                         else if (vp->v_type == VDIR) {
  591                                 char* bp;
  592                                 bp = malloc(NFS_DIRBLKSIZ, M_TEMP, M_WAITOK);
  593                                 aiov.iov_base = bp;
  594                                 aiov.iov_len = auio.uio_resid = NFS_DIRBLKSIZ;
  595                                 error = ncl_readdirrpc(vp, &auio, ap->a_cred,
  596                                     ap->a_td);
  597                                 free(bp, M_TEMP);
  598                         } else if (vp->v_type == VLNK)
  599                                 error = ncl_readlinkrpc(vp, &auio, ap->a_cred);
  600                         else
  601                                 error = EACCES;
  602                 } else
  603                         NFSUNLOCKNODE(np);
  604                 return (error);
  605         }
  606 }
  607 
  608 /*
  609  * nfs open vnode op
  610  * Check to see if the type is ok
  611  * and that deletion is not in progress.
  612  * For paged in text files, you will need to flush the page cache
  613  * if consistency is lost.
  614  */
  615 /* ARGSUSED */
  616 static int
  617 nfs_open(struct vop_open_args *ap)
  618 {
  619         struct vnode *vp = ap->a_vp;
  620         struct nfsnode *np = VTONFS(vp);
  621         struct vattr vattr;
  622         int error;
  623         int fmode = ap->a_mode;
  624         struct ucred *cred;
  625         vm_object_t obj;
  626 
  627         if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK)
  628                 return (EOPNOTSUPP);
  629 
  630         /*
  631          * For NFSv4, we need to do the Open Op before cache validation,
  632          * so that we conform to RFC3530 Sec. 9.3.1.
  633          */
  634         if (NFS_ISV4(vp)) {
  635                 error = nfsrpc_open(vp, fmode, ap->a_cred, ap->a_td);
  636                 if (error) {
  637                         error = nfscl_maperr(ap->a_td, error, (uid_t)0,
  638                             (gid_t)0);
  639                         return (error);
  640                 }
  641         }
  642 
  643         /*
  644          * Now, if this Open will be doing reading, re-validate/flush the
  645          * cache, so that Close/Open coherency is maintained.
  646          */
  647         NFSLOCKNODE(np);
  648         if (np->n_flag & NMODIFIED) {
  649                 NFSUNLOCKNODE(np);
  650                 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) {
  651                         NFSVOPLOCK(vp, LK_UPGRADE | LK_RETRY);
  652                         if (VN_IS_DOOMED(vp))
  653                                 return (EBADF);
  654                 }
  655                 error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
  656                 if (error == EINTR || error == EIO) {
  657                         if (NFS_ISV4(vp))
  658                                 (void) nfsrpc_close(vp, 0, ap->a_td);
  659                         return (error);
  660                 }
  661                 NFSLOCKNODE(np);
  662                 np->n_attrstamp = 0;
  663                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
  664                 if (vp->v_type == VDIR)
  665                         np->n_direofoffset = 0;
  666                 NFSUNLOCKNODE(np);
  667                 error = VOP_GETATTR(vp, &vattr, ap->a_cred);
  668                 if (error) {
  669                         if (NFS_ISV4(vp))
  670                                 (void) nfsrpc_close(vp, 0, ap->a_td);
  671                         return (error);
  672                 }
  673                 NFSLOCKNODE(np);
  674                 np->n_mtime = vattr.va_mtime;
  675                 if (NFS_ISV4(vp))
  676                         np->n_change = vattr.va_filerev;
  677         } else {
  678                 NFSUNLOCKNODE(np);
  679                 error = VOP_GETATTR(vp, &vattr, ap->a_cred);
  680                 if (error) {
  681                         if (NFS_ISV4(vp))
  682                                 (void) nfsrpc_close(vp, 0, ap->a_td);
  683                         return (error);
  684                 }
  685                 NFSLOCKNODE(np);
  686                 if ((NFS_ISV4(vp) && np->n_change != vattr.va_filerev) ||
  687                     NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime)) {
  688                         if (vp->v_type == VDIR)
  689                                 np->n_direofoffset = 0;
  690                         NFSUNLOCKNODE(np);
  691                         if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) {
  692                                 NFSVOPLOCK(vp, LK_UPGRADE | LK_RETRY);
  693                                 if (VN_IS_DOOMED(vp))
  694                                         return (EBADF);
  695                         }
  696                         error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
  697                         if (error == EINTR || error == EIO) {
  698                                 if (NFS_ISV4(vp))
  699                                         (void) nfsrpc_close(vp, 0, ap->a_td);
  700                                 return (error);
  701                         }
  702                         NFSLOCKNODE(np);
  703                         np->n_mtime = vattr.va_mtime;
  704                         if (NFS_ISV4(vp))
  705                                 np->n_change = vattr.va_filerev;
  706                 }
  707         }
  708 
  709         /*
  710          * If the object has >= 1 O_DIRECT active opens, we disable caching.
  711          */
  712         if (newnfs_directio_enable && (fmode & O_DIRECT) &&
  713             (vp->v_type == VREG)) {
  714                 if (np->n_directio_opens == 0) {
  715                         NFSUNLOCKNODE(np);
  716                         if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) {
  717                                 NFSVOPLOCK(vp, LK_UPGRADE | LK_RETRY);
  718                                 if (VN_IS_DOOMED(vp))
  719                                         return (EBADF);
  720                         }
  721                         error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
  722                         if (error) {
  723                                 if (NFS_ISV4(vp))
  724                                         (void) nfsrpc_close(vp, 0, ap->a_td);
  725                                 return (error);
  726                         }
  727                         NFSLOCKNODE(np);
  728                         np->n_flag |= NNONCACHE;
  729                 }
  730                 np->n_directio_opens++;
  731         }
  732 
  733         /* If opened for writing via NFSv4.1 or later, mark that for pNFS. */
  734         if (NFSHASPNFS(VFSTONFS(vp->v_mount)) && (fmode & FWRITE) != 0)
  735                 np->n_flag |= NWRITEOPENED;
  736 
  737         /*
  738          * If this is an open for writing, capture a reference to the
  739          * credentials, so they can be used by ncl_putpages(). Using
  740          * these write credentials is preferable to the credentials of
  741          * whatever thread happens to be doing the VOP_PUTPAGES() since
  742          * the write RPCs are less likely to fail with EACCES.
  743          */
  744         if ((fmode & FWRITE) != 0) {
  745                 cred = np->n_writecred;
  746                 np->n_writecred = crhold(ap->a_cred);
  747         } else
  748                 cred = NULL;
  749         NFSUNLOCKNODE(np);
  750 
  751         if (cred != NULL)
  752                 crfree(cred);
  753         vnode_create_vobject(vp, vattr.va_size, ap->a_td);
  754 
  755         /*
  756          * If the text file has been mmap'd, flush any dirty pages to the
   757          * buffer cache and then make sure all writes are pushed to the NFS
   758          * server.  If this is not
  759          * done, the modify time of the file can change while the text
  760          * file is being executed.  This will cause the process that is
  761          * executing the text file to be terminated.
  762          */
  763         if (vp->v_writecount <= -1) {
  764                 if ((obj = vp->v_object) != NULL &&
  765                     vm_object_mightbedirty(obj)) {
  766                         if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) {
  767                                 NFSVOPLOCK(vp, LK_UPGRADE | LK_RETRY);
  768                                 if (VN_IS_DOOMED(vp))
  769                                         return (EBADF);
  770                         }
  771                         VM_OBJECT_WLOCK(obj);
  772                         vm_object_page_clean(obj, 0, 0, OBJPC_SYNC);
  773                         VM_OBJECT_WUNLOCK(obj);
  774                 }
  775 
  776                 /* Now, flush the buffer cache. */
  777                 ncl_flush(vp, MNT_WAIT, curthread, 0, 0);
  778 
  779                 /* And, finally, make sure that n_mtime is up to date. */
  780                 np = VTONFS(vp);
  781                 NFSLOCKNODE(np);
  782                 np->n_mtime = np->n_vattr.na_mtime;
  783                 NFSUNLOCKNODE(np);
  784         }
  785         return (0);
  786 }
  787 
  788 /*
  789  * nfs close vnode op
  790  * What an NFS client should do upon close after writing is a debatable issue.
  791  * Most NFS clients push delayed writes to the server upon close, basically for
  792  * two reasons:
  793  * 1 - So that any write errors may be reported back to the client process
  794  *     doing the close system call. By far the two most likely errors are
  795  *     NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure.
  796  * 2 - To put a worst case upper bound on cache inconsistency between
  797  *     multiple clients for the file.
  798  * There is also a consistency problem for Version 2 of the protocol w.r.t.
  799  * not being able to tell if other clients are writing a file concurrently,
  800  * since there is no way of knowing if the changed modify time in the reply
  801  * is only due to the write for this client.
  802  * (NFS Version 3 provides weak cache consistency data in the reply that
  803  *  should be sufficient to detect and handle this case.)
  804  *
  805  * The current code does the following:
  806  * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers
  807  * for NFS Version 3 - flush dirty buffers to the server but don't invalidate
  808  *                     or commit them (this satisfies 1 and 2 except for the
  809  *                     case where the server crashes after this close but
  810  *                     before the commit RPC, which is felt to be "good
   811  *                     enough"). Changing the last argument to ncl_flush() to
  812  *                     a 1 would force a commit operation, if it is felt a
  813  *                     commit is necessary now.
  814  * for NFS Version 4 - flush the dirty buffers and commit them, if
  815  *                     nfscl_mustflush() says this is necessary.
  816  *                     It is necessary if there is no write delegation held,
  817  *                     in order to satisfy open/close coherency.
  818  *                     If the file isn't cached on local stable storage,
  819  *                     it may be necessary in order to detect "out of space"
  820  *                     errors from the server, if the write delegation
  821  *                     issued by the server doesn't allow the file to grow.
  822  */
  823 /* ARGSUSED */
  824 static int
  825 nfs_close(struct vop_close_args *ap)
  826 {
  827         struct vnode *vp = ap->a_vp;
  828         struct nfsnode *np = VTONFS(vp);
  829         struct nfsvattr nfsva;
  830         struct ucred *cred;
  831         int error = 0, ret, localcred = 0;
  832         int fmode = ap->a_fflag;
  833 
  834         if (NFSCL_FORCEDISM(vp->v_mount))
  835                 return (0);
  836         /*
  837          * During shutdown, a_cred isn't valid, so just use root.
  838          */
  839         if (ap->a_cred == NOCRED) {
  840                 cred = newnfs_getcred();
  841                 localcred = 1;
  842         } else {
  843                 cred = ap->a_cred;
  844         }
  845         if (vp->v_type == VREG) {
  846             /*
  847              * Examine and clean dirty pages, regardless of NMODIFIED.
  848              * This closes a major hole in close-to-open consistency.
  849              * We want to push out all dirty pages (and buffers) on
  850              * close, regardless of whether they were dirtied by
  851              * mmap'ed writes or via write().
  852              */
  853             if (nfs_clean_pages_on_close && vp->v_object) {
  854                 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) {
  855                         NFSVOPLOCK(vp, LK_UPGRADE | LK_RETRY);
  856                         if (VN_IS_DOOMED(vp) && ap->a_fflag != FNONBLOCK)
  857                                 return (EBADF);
  858                 }
  859                 VM_OBJECT_WLOCK(vp->v_object);
  860                 vm_object_page_clean(vp->v_object, 0, 0, 0);
  861                 VM_OBJECT_WUNLOCK(vp->v_object);
  862             }
  863             NFSLOCKNODE(np);
  864             if (np->n_flag & NMODIFIED) {
  865                 NFSUNLOCKNODE(np);
  866                 if (NFS_ISV3(vp)) {
  867                     /*
  868                      * Under NFSv3 we have dirty buffers to dispose of.  We
  869                      * must flush them to the NFS server.  We have the option
  870                      * of waiting all the way through the commit rpc or just
  871                      * waiting for the initial write.  The default is to only
  872                      * wait through the initial write so the data is in the
  873                      * server's cache, which is roughly similar to the state
  874                      * a standard disk subsystem leaves the file in on close().
  875                      *
  876                      * We cannot clear the NMODIFIED bit in np->n_flag due to
  877                      * potential races with other processes, and certainly
  878                      * cannot clear it if we don't commit.
  879                      * These races occur when there is no longer the old
  880                      * traditional vnode locking implemented for Vnode Ops.
  881                      */
  882                     int cm = newnfs_commit_on_close ? 1 : 0;
  883                     if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) {
  884                             NFSVOPLOCK(vp, LK_UPGRADE | LK_RETRY);
  885                             if (VN_IS_DOOMED(vp) && ap->a_fflag != FNONBLOCK)
  886                                     return (EBADF);
  887                     }
  888                     error = ncl_flush(vp, MNT_WAIT, ap->a_td, cm, 0);
  889                     /* np->n_flag &= ~NMODIFIED; */
  890                 } else if (NFS_ISV4(vp)) { 
  891                         if (nfscl_mustflush(vp) != 0) {
  892                                 int cm = newnfs_commit_on_close ? 1 : 0;
  893                                 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) {
  894                                         NFSVOPLOCK(vp, LK_UPGRADE | LK_RETRY);
  895                                         if (VN_IS_DOOMED(vp) && ap->a_fflag !=
  896                                             FNONBLOCK)
  897                                                 return (EBADF);
  898                                 }
  899                                 error = ncl_flush(vp, MNT_WAIT, ap->a_td,
  900                                     cm, 0);
  901                                 /*
  902                                  * as above w.r.t races when clearing
  903                                  * NMODIFIED.
  904                                  * np->n_flag &= ~NMODIFIED;
  905                                  */
  906                         }
  907                 } else {
  908                         if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) {
  909                                 NFSVOPLOCK(vp, LK_UPGRADE | LK_RETRY);
  910                                 if (VN_IS_DOOMED(vp) && ap->a_fflag !=
  911                                     FNONBLOCK)
  912                                         return (EBADF);
  913                         }
  914                         error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
  915                 }
  916                 NFSLOCKNODE(np);
  917             }
  918             /* 
  919              * Invalidate the attribute cache in all cases.
  920              * An open is going to fetch fresh attrs any way, other procs
  921              * on this node that have file open will be forced to do an 
  922              * otw attr fetch, but this is safe.
  923              * --> A user found that their RPC count dropped by 20% when
  924              *     this was commented out and I can't see any requirement
  925              *     for it, so I've disabled it when negative lookups are
  926              *     enabled. (What does this have to do with negative lookup
  927              *     caching? Well nothing, except it was reported by the
  928              *     same user that needed negative lookup caching and I wanted
  929              *     there to be a way to disable it to see if it
  930              *     is the cause of some caching/coherency issue that might
  931              *     crop up.)
  932              */
  933             if (VFSTONFS(vp->v_mount)->nm_negnametimeo == 0) {
  934                     np->n_attrstamp = 0;
  935                     KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
  936             }
  937             if (np->n_flag & NWRITEERR) {
  938                 np->n_flag &= ~NWRITEERR;
  939                 error = np->n_error;
  940             }
  941             NFSUNLOCKNODE(np);
  942         }
  943 
  944         if (NFS_ISV4(vp)) {
  945                 /*
  946                  * Get attributes so "change" is up to date.
  947                  */
  948                 if (error == 0 && nfscl_mustflush(vp) != 0 &&
  949                     vp->v_type == VREG &&
  950                     (VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NOCTO) == 0) {
  951                         ret = nfsrpc_getattr(vp, cred, ap->a_td, &nfsva);
  952                         if (!ret) {
  953                                 np->n_change = nfsva.na_filerev;
  954                                 (void) nfscl_loadattrcache(&vp, &nfsva, NULL,
  955                                     0, 0);
  956                         }
  957                 }
  958 
  959                 /*
  960                  * and do the close.
  961                  */
  962                 ret = nfsrpc_close(vp, 0, ap->a_td);
  963                 if (!error && ret)
  964                         error = ret;
  965                 if (error)
  966                         error = nfscl_maperr(ap->a_td, error, (uid_t)0,
  967                             (gid_t)0);
  968         }
  969         if (newnfs_directio_enable)
  970                 KASSERT((np->n_directio_asyncwr == 0),
  971                         ("nfs_close: dirty unflushed (%d) directio buffers\n",
  972                          np->n_directio_asyncwr));
  973         if (newnfs_directio_enable && (fmode & O_DIRECT) && (vp->v_type == VREG)) {
  974                 NFSLOCKNODE(np);
  975                 KASSERT((np->n_directio_opens > 0), 
   976                         ("nfs_close: unexpected value (0) of n_directio_opens\n"));
  977                 np->n_directio_opens--;
  978                 if (np->n_directio_opens == 0)
  979                         np->n_flag &= ~NNONCACHE;
  980                 NFSUNLOCKNODE(np);
  981         }
  982         if (localcred)
  983                 NFSFREECRED(cred);
  984         return (error);
  985 }
  986 
  987 /*
  988  * nfs getattr call from vfs.
  989  */
  990 static int
  991 nfs_getattr(struct vop_getattr_args *ap)
  992 {
  993         struct vnode *vp = ap->a_vp;
  994         struct thread *td = curthread;  /* XXX */
  995         struct nfsnode *np = VTONFS(vp);
  996         int error = 0;
  997         struct nfsvattr nfsva;
  998         struct vattr *vap = ap->a_vap;
  999         struct vattr vattr;
 1000 
 1001         /*
 1002          * Update local times for special files.
 1003          */
 1004         NFSLOCKNODE(np);
 1005         if (np->n_flag & (NACC | NUPD))
 1006                 np->n_flag |= NCHG;
 1007         NFSUNLOCKNODE(np);
 1008         /*
 1009          * First look in the cache.
 1010          */
 1011         if (ncl_getattrcache(vp, &vattr) == 0) {
 1012                 ncl_copy_vattr(vap, &vattr);
 1013 
 1014                 /*
 1015                  * Get the local modify time for the case of a write
 1016                  * delegation.
 1017                  */
 1018                 nfscl_deleggetmodtime(vp, &vap->va_mtime);
 1019                 return (0);
 1020         }
 1021 
 1022         if (NFS_ISV34(vp) && nfs_prime_access_cache &&
 1023             nfsaccess_cache_timeout > 0) {
 1024                 NFSINCRGLOBAL(nfsstatsv1.accesscache_misses);
 1025                 nfs34_access_otw(vp, NFSACCESS_ALL, td, ap->a_cred, NULL);
 1026                 if (ncl_getattrcache(vp, ap->a_vap) == 0) {
 1027                         nfscl_deleggetmodtime(vp, &ap->a_vap->va_mtime);
 1028                         return (0);
 1029                 }
 1030         }
 1031         error = nfsrpc_getattr(vp, ap->a_cred, td, &nfsva);
 1032         if (!error)
 1033                 error = nfscl_loadattrcache(&vp, &nfsva, vap, 0, 0);
 1034         if (!error) {
 1035                 /*
 1036                  * Get the local modify time for the case of a write
 1037                  * delegation.
 1038                  */
 1039                 nfscl_deleggetmodtime(vp, &vap->va_mtime);
 1040         } else if (NFS_ISV4(vp)) {
 1041                 error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
 1042         }
 1043         return (error);
 1044 }
 1045 
 1046 /*
 1047  * nfs setattr call.
 1048  */
 1049 static int
 1050 nfs_setattr(struct vop_setattr_args *ap)
 1051 {
 1052         struct vnode *vp = ap->a_vp;
 1053         struct nfsnode *np = VTONFS(vp);
 1054         struct thread *td = curthread;  /* XXX */
 1055         struct vattr *vap = ap->a_vap;
 1056         int error = 0;
 1057         u_quad_t tsize;
 1058         struct timespec ts;
 1059 
 1060 #ifndef nolint
 1061         tsize = (u_quad_t)0;
 1062 #endif
 1063 
 1064         /*
 1065          * Setting of flags and marking of atimes are not supported.
 1066          */
 1067         if (vap->va_flags != VNOVAL)
 1068                 return (EOPNOTSUPP);
 1069 
 1070         /*
 1071          * Disallow write attempts if the filesystem is mounted read-only.
 1072          */
 1073         if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
 1074             vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
 1075             vap->va_mtime.tv_sec != VNOVAL ||
 1076             vap->va_birthtime.tv_sec != VNOVAL ||
 1077             vap->va_mode != (mode_t)VNOVAL) &&
 1078             (vp->v_mount->mnt_flag & MNT_RDONLY))
 1079                 return (EROFS);
 1080         if (vap->va_size != VNOVAL) {
 1081                 switch (vp->v_type) {
 1082                 case VDIR:
 1083                         return (EISDIR);
 1084                 case VCHR:
 1085                 case VBLK:
 1086                 case VSOCK:
 1087                 case VFIFO:
 1088                         if (vap->va_mtime.tv_sec == VNOVAL &&
 1089                             vap->va_atime.tv_sec == VNOVAL &&
 1090                             vap->va_birthtime.tv_sec == VNOVAL &&
 1091                             vap->va_mode == (mode_t)VNOVAL &&
 1092                             vap->va_uid == (uid_t)VNOVAL &&
 1093                             vap->va_gid == (gid_t)VNOVAL)
 1094                                 return (0);             
 1095                         vap->va_size = VNOVAL;
 1096                         break;
 1097                 default:
 1098                         /*
 1099                          * Disallow write attempts if the filesystem is
 1100                          * mounted read-only.
 1101                          */
 1102                         if (vp->v_mount->mnt_flag & MNT_RDONLY)
 1103                                 return (EROFS);
 1104                         /*
  1105                          * We run vnode_pager_setsize() early (why?); we
  1106                          * must set np->n_size now to avoid vinvalbuf
 1107                          * V_SAVE races that might setsize a lower
 1108                          * value.
 1109                          */
 1110                         NFSLOCKNODE(np);
 1111                         tsize = np->n_size;
 1112                         NFSUNLOCKNODE(np);
 1113                         error = ncl_meta_setsize(vp, td, vap->va_size);
 1114                         NFSLOCKNODE(np);
 1115                         if (np->n_flag & NMODIFIED) {
 1116                             tsize = np->n_size;
 1117                             NFSUNLOCKNODE(np);
 1118                             error = ncl_vinvalbuf(vp, vap->va_size == 0 ?
 1119                                 0 : V_SAVE, td, 1);
 1120                             if (error != 0) {
 1121                                     vnode_pager_setsize(vp, tsize);
 1122                                     return (error);
 1123                             }
 1124                             /*
 1125                              * Call nfscl_delegmodtime() to set the modify time
 1126                              * locally, as required.
 1127                              */
 1128                             nfscl_delegmodtime(vp);
 1129                         } else
 1130                             NFSUNLOCKNODE(np);
 1131                         /*
 1132                          * np->n_size has already been set to vap->va_size
 1133                          * in ncl_meta_setsize(). We must set it again since
 1134                          * nfs_loadattrcache() could be called through
 1135                          * ncl_meta_setsize() and could modify np->n_size.
 1136                          */
 1137                         NFSLOCKNODE(np);
 1138                         np->n_vattr.na_size = np->n_size = vap->va_size;
 1139                         NFSUNLOCKNODE(np);
 1140                 }
 1141         } else {
 1142                 NFSLOCKNODE(np);
 1143                 if ((vap->va_mtime.tv_sec != VNOVAL || vap->va_atime.tv_sec != VNOVAL) && 
 1144                     (np->n_flag & NMODIFIED) && vp->v_type == VREG) {
 1145                         NFSUNLOCKNODE(np);
 1146                         error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
 1147                         if (error == EINTR || error == EIO)
 1148                                 return (error);
 1149                 } else
 1150                         NFSUNLOCKNODE(np);
 1151         }
 1152         error = nfs_setattrrpc(vp, vap, ap->a_cred, td);
 1153         if (vap->va_size != VNOVAL) {
 1154                 if (error == 0) {
 1155                         nanouptime(&ts);
 1156                         NFSLOCKNODE(np);
 1157                         np->n_localmodtime = ts;
 1158                         NFSUNLOCKNODE(np);
 1159                 } else {
 1160                         NFSLOCKNODE(np);
 1161                         np->n_size = np->n_vattr.na_size = tsize;
 1162                         vnode_pager_setsize(vp, tsize);
 1163                         NFSUNLOCKNODE(np);
 1164                 }
 1165         }
 1166         return (error);
 1167 }
 1168 
 1169 /*
 1170  * Do an nfs setattr rpc.
 1171  */
 1172 static int
 1173 nfs_setattrrpc(struct vnode *vp, struct vattr *vap, struct ucred *cred,
 1174     struct thread *td)
 1175 {
 1176         struct nfsnode *np = VTONFS(vp);
 1177         int error, ret, attrflag, i;
 1178         struct nfsvattr nfsva;
 1179 
 1180         if (NFS_ISV34(vp)) {
 1181                 NFSLOCKNODE(np);
 1182                 for (i = 0; i < NFS_ACCESSCACHESIZE; i++)
 1183                         np->n_accesscache[i].stamp = 0;
 1184                 np->n_flag |= NDELEGMOD;
 1185                 NFSUNLOCKNODE(np);
 1186                 KDTRACE_NFS_ACCESSCACHE_FLUSH_DONE(vp);
 1187         }
 1188         error = nfsrpc_setattr(vp, vap, NULL, cred, td, &nfsva, &attrflag);
 1189         if (attrflag) {
 1190                 ret = nfscl_loadattrcache(&vp, &nfsva, NULL, 0, 1);
 1191                 if (ret && !error)
 1192                         error = ret;
 1193         }
 1194         if (error && NFS_ISV4(vp))
 1195                 error = nfscl_maperr(td, error, vap->va_uid, vap->va_gid);
 1196         return (error);
 1197 }
 1198 
 1199 /*
 1200  * nfs lookup call, one step at a time...
 1201  * First look in cache
 1202  * If not found, unlock the directory nfsnode and do the rpc
 1203  */
 1204 static int
 1205 nfs_lookup(struct vop_lookup_args *ap)
 1206 {
 1207         struct componentname *cnp = ap->a_cnp;
 1208         struct vnode *dvp = ap->a_dvp;
 1209         struct vnode **vpp = ap->a_vpp;
 1210         struct mount *mp = dvp->v_mount;
 1211         int flags = cnp->cn_flags;
 1212         struct vnode *newvp;
 1213         struct nfsmount *nmp;
 1214         struct nfsnode *np, *newnp;
 1215         int error = 0, attrflag, dattrflag, ltype, ncticks;
 1216         struct thread *td = curthread;
 1217         struct nfsfh *nfhp;
 1218         struct nfsvattr dnfsva, nfsva;
 1219         struct vattr vattr;
 1220         struct timespec nctime, ts;
 1221         uint32_t openmode;
 1222 
 1223         *vpp = NULLVP;
 1224         if ((flags & ISLASTCN) && (mp->mnt_flag & MNT_RDONLY) &&
 1225             (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
 1226                 return (EROFS);
 1227         if (dvp->v_type != VDIR)
 1228                 return (ENOTDIR);
 1229         nmp = VFSTONFS(mp);
 1230         np = VTONFS(dvp);
 1231 
 1232         /* For NFSv4, wait until any remove is done. */
 1233         NFSLOCKNODE(np);
 1234         while (NFSHASNFSV4(nmp) && (np->n_flag & NREMOVEINPROG)) {
 1235                 np->n_flag |= NREMOVEWANT;
 1236                 (void) msleep((caddr_t)np, &np->n_mtx, PZERO, "nfslkup", 0);
 1237         }
 1238         NFSUNLOCKNODE(np);
 1239 
 1240         error = vn_dir_check_exec(dvp, cnp);
 1241         if (error != 0)
 1242                 return (error);
 1243         error = cache_lookup(dvp, vpp, cnp, &nctime, &ncticks);
 1244         if (error > 0 && error != ENOENT)
 1245                 return (error);
 1246         if (error == -1) {
 1247                 /*
 1248                  * Lookups of "." are special and always return the
 1249                  * current directory.  cache_lookup() already handles
 1250                  * associated locking bookkeeping, etc.
 1251                  */
 1252                 if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
 1253                         return (0);
 1254                 }
 1255 
 1256                 /*
 1257                  * We only accept a positive hit in the cache if the
 1258                  * change time of the file matches our cached copy.
 1259                  * Otherwise, we discard the cache entry and fall back
 1260                  * to doing a lookup RPC.  We also only trust cache
 1261                  * entries for less than nm_nametimeo seconds.
 1262                  *
 1263                  * To better handle stale file handles and attributes,
 1264                  * clear the attribute cache of this node if it is a
 1265                  * leaf component, part of an open() call, and not
 1266                  * locally modified before fetching the attributes.
 1267                  * This should allow stale file handles to be detected
 1268                  * here where we can fall back to a LOOKUP RPC to
 1269                  * recover rather than having nfs_open() detect the
 1270                  * stale file handle and fail open(2) with ESTALE.
 1271                  */
 1272                 newvp = *vpp;
 1273                 newnp = VTONFS(newvp);
 1274                 if (!(nmp->nm_flag & NFSMNT_NOCTO) &&
 1275                     (flags & (ISLASTCN | ISOPEN)) == (ISLASTCN | ISOPEN) &&
 1276                     !(newnp->n_flag & NMODIFIED)) {
 1277                         NFSLOCKNODE(newnp);
 1278                         newnp->n_attrstamp = 0;
 1279                         KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(newvp);
 1280                         NFSUNLOCKNODE(newnp);
 1281                 }
 1282                 if (nfscl_nodeleg(newvp, 0) == 0 ||
 1283                     ((u_int)(ticks - ncticks) < (nmp->nm_nametimeo * hz) &&
 1284                     VOP_GETATTR(newvp, &vattr, cnp->cn_cred) == 0 &&
 1285                     timespeccmp(&vattr.va_ctime, &nctime, ==))) {
 1286                         NFSINCRGLOBAL(nfsstatsv1.lookupcache_hits);
 1287                         return (0);
 1288                 }
 1289                 cache_purge(newvp);
 1290                 if (dvp != newvp)
 1291                         vput(newvp);
 1292                 else 
 1293                         vrele(newvp);
 1294                 *vpp = NULLVP;
 1295         } else if (error == ENOENT) {
 1296                 if (VN_IS_DOOMED(dvp))
 1297                         return (ENOENT);
 1298                 /*
 1299                  * We only accept a negative hit in the cache if the
 1300                  * modification time of the parent directory matches
 1301                  * the cached copy in the name cache entry.
 1302                  * Otherwise, we discard all of the negative cache
 1303                  * entries for this directory.  We also only trust
 1304                  * negative cache entries for up to nm_negnametimeo
 1305                  * seconds.
 1306                  */
 1307                 if ((u_int)(ticks - ncticks) < (nmp->nm_negnametimeo * hz) &&
 1308                     VOP_GETATTR(dvp, &vattr, cnp->cn_cred) == 0 &&
 1309                     timespeccmp(&vattr.va_mtime, &nctime, ==)) {
 1310                         NFSINCRGLOBAL(nfsstatsv1.lookupcache_hits);
 1311                         return (ENOENT);
 1312                 }
 1313                 cache_purge_negative(dvp);
 1314         }
 1315 
 1316         openmode = 0;
 1317         /*
 1318          * If this is an NFSv4.1/4.2 mount using the "oneopenown" mount
 1319          * option, it is possible to do the Open operation in the same
 1320          * compound as Lookup, so long as delegations are not being
 1321          * issued.  This saves doing a separate RPC for Open.
 1322          * For pnfs, do not do this, since the Open+LayoutGet will
 1323          * be needed as a separate RPC.
 1324          */
 1325         NFSLOCKMNT(nmp);
 1326         if (NFSHASNFSV4N(nmp) && NFSHASONEOPENOWN(nmp) && !NFSHASPNFS(nmp) &&
 1327             (nmp->nm_privflag & NFSMNTP_DELEGISSUED) == 0 &&
 1328             (!NFSMNT_RDONLY(mp) || (flags & OPENWRITE) == 0) &&
 1329             (flags & (ISLASTCN | ISOPEN)) == (ISLASTCN | ISOPEN)) {
 1330                 if ((flags & OPENREAD) != 0)
 1331                         openmode |= NFSV4OPEN_ACCESSREAD;
 1332                 if ((flags & OPENWRITE) != 0)
 1333                         openmode |= NFSV4OPEN_ACCESSWRITE;
 1334         }
 1335         NFSUNLOCKMNT(nmp);
 1336 
 1337         newvp = NULLVP;
 1338         NFSINCRGLOBAL(nfsstatsv1.lookupcache_misses);
 1339         nanouptime(&ts);
 1340         error = nfsrpc_lookup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
 1341             cnp->cn_cred, td, &dnfsva, &nfsva, &nfhp, &attrflag, &dattrflag,
 1342             openmode);
 1343         if (dattrflag)
 1344                 (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, 0, 1);
 1345         if (error) {
 1346                 if (newvp != NULLVP) {
 1347                         vput(newvp);
 1348                         *vpp = NULLVP;
 1349                 }
 1350 
 1351                 if (error != ENOENT) {
 1352                         if (NFS_ISV4(dvp))
 1353                                 error = nfscl_maperr(td, error, (uid_t)0,
 1354                                     (gid_t)0);
 1355                         return (error);
 1356                 }
 1357 
 1358                 /* The requested file was not found. */
 1359                 if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) &&
 1360                     (flags & ISLASTCN)) {
 1361                         /*
 1362                          * XXX: UFS does a full VOP_ACCESS(dvp,
 1363                          * VWRITE) here instead of just checking
 1364                          * MNT_RDONLY.
 1365                          */
 1366                         if (mp->mnt_flag & MNT_RDONLY)
 1367                                 return (EROFS);
 1368                         return (EJUSTRETURN);
 1369                 }
 1370 
 1371                 if ((cnp->cn_flags & MAKEENTRY) != 0 && dattrflag) {
 1372                         /*
 1373                          * Cache the modification time of the parent
 1374                          * directory from the post-op attributes in
 1375                          * the name cache entry.  The negative cache
 1376                          * entry will be ignored once the directory
 1377                          * has changed.  Don't bother adding the entry
 1378                          * if the directory has already changed.
 1379                          */
 1380                         NFSLOCKNODE(np);
 1381                         if (timespeccmp(&np->n_vattr.na_mtime,
 1382                             &dnfsva.na_mtime, ==)) {
 1383                                 NFSUNLOCKNODE(np);
 1384                                 cache_enter_time(dvp, NULL, cnp,
 1385                                     &dnfsva.na_mtime, NULL);
 1386                         } else
 1387                                 NFSUNLOCKNODE(np);
 1388                 }
 1389                 return (ENOENT);
 1390         }
 1391 
 1392         /*
 1393          * Handle RENAME case...
 1394          */
 1395         if (cnp->cn_nameiop == RENAME && (flags & ISLASTCN)) {
 1396                 if (NFS_CMPFH(np, nfhp->nfh_fh, nfhp->nfh_len)) {
 1397                         free(nfhp, M_NFSFH);
 1398                         return (EISDIR);
 1399                 }
 1400                 error = nfscl_nget(mp, dvp, nfhp, cnp, td, &np, LK_EXCLUSIVE);
 1401                 if (error)
 1402                         return (error);
 1403                 newvp = NFSTOV(np);
 1404                 /*
 1405                  * If n_localmodtime >= time before RPC, then
 1406                  * a file modification operation, such as
 1407                  * VOP_SETATTR() of size, has occurred while
 1408                  * the Lookup RPC and acquisition of the vnode
 1409                  * happened.  As such, the attributes might
 1410                  * be stale, with possibly an incorrect size.
 1411                  */
 1412                 NFSLOCKNODE(np);
 1413                 if (timespecisset(&np->n_localmodtime) &&
 1414                     timespeccmp(&np->n_localmodtime, &ts, >=)) {
 1415                         NFSCL_DEBUG(4, "nfs_lookup: rename localmod "
 1416                             "stale attributes\n");
 1417                         attrflag = 0;
 1418                 }
 1419                 NFSUNLOCKNODE(np);
 1420                 if (attrflag)
 1421                         (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, 0, 1);
 1422                 *vpp = newvp;
 1423                 return (0);
 1424         }
 1425 
 1426         if (flags & ISDOTDOT) {
 1427                 ltype = NFSVOPISLOCKED(dvp);
 1428                 error = vfs_busy(mp, MBF_NOWAIT);
 1429                 if (error != 0) {
 1430                         vfs_ref(mp);
 1431                         NFSVOPUNLOCK(dvp);
 1432                         error = vfs_busy(mp, 0);
 1433                         NFSVOPLOCK(dvp, ltype | LK_RETRY);
 1434                         vfs_rel(mp);
 1435                         if (error == 0 && VN_IS_DOOMED(dvp)) {
 1436                                 vfs_unbusy(mp);
 1437                                 error = ENOENT;
 1438                         }
 1439                         if (error != 0)
 1440                                 return (error);
 1441                 }
 1442                 NFSVOPUNLOCK(dvp);
 1443                 error = nfscl_nget(mp, dvp, nfhp, cnp, td, &np,
 1444                     cnp->cn_lkflags);
 1445                 if (error == 0)
 1446                         newvp = NFSTOV(np);
 1447                 vfs_unbusy(mp);
 1448                 if (newvp != dvp)
 1449                         NFSVOPLOCK(dvp, ltype | LK_RETRY);
 1450                 if (VN_IS_DOOMED(dvp)) {
 1451                         if (error == 0) {
 1452                                 if (newvp == dvp)
 1453                                         vrele(newvp);
 1454                                 else
 1455                                         vput(newvp);
 1456                         }
 1457                         error = ENOENT;
 1458                 }
 1459                 if (error != 0)
 1460                         return (error);
 1461                 if (attrflag)
 1462                         (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, 0, 1);
 1463         } else if (NFS_CMPFH(np, nfhp->nfh_fh, nfhp->nfh_len)) {
 1464                 free(nfhp, M_NFSFH);
 1465                 VREF(dvp);
 1466                 newvp = dvp;
 1467                 if (attrflag)
 1468                         (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, 0, 1);
 1469         } else {
 1470                 error = nfscl_nget(mp, dvp, nfhp, cnp, td, &np,
 1471                     cnp->cn_lkflags);
 1472                 if (error)
 1473                         return (error);
 1474                 newvp = NFSTOV(np);
 1475                 /*
 1476                  * If n_localmodtime >= time before RPC, then
 1477                  * a file modification operation, such as
 1478                  * VOP_SETATTR() of size, has occurred while
 1479                  * the Lookup RPC and acquisition of the vnode
 1480                  * happened.  As such, the attributes might
 1481                  * be stale, with possibly an incorrect size.
 1482                  */
 1483                 NFSLOCKNODE(np);
 1484                 if (timespecisset(&np->n_localmodtime) &&
 1485                     timespeccmp(&np->n_localmodtime, &ts, >=)) {
 1486                         NFSCL_DEBUG(4, "nfs_lookup: localmod "
 1487                             "stale attributes\n");
 1488                         attrflag = 0;
 1489                 }
 1490                 NFSUNLOCKNODE(np);
 1491                 if (attrflag)
 1492                         (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, 0, 1);
 1493                 else if ((flags & (ISLASTCN | ISOPEN)) == (ISLASTCN | ISOPEN) &&
 1494                     !(np->n_flag & NMODIFIED)) {                        
 1495                         /*
 1496                          * Flush the attribute cache when opening a
 1497                          * leaf node to ensure that fresh attributes
 1498                          * are fetched in nfs_open() since we did not
 1499                          * fetch attributes from the LOOKUP reply.
 1500                          */
 1501                         NFSLOCKNODE(np);
 1502                         np->n_attrstamp = 0;
 1503                         KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(newvp);
 1504                         NFSUNLOCKNODE(np);
 1505                 }
 1506         }
 1507         if ((cnp->cn_flags & MAKEENTRY) && dvp != newvp &&
 1508             (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN)) &&
 1509             attrflag != 0 && (newvp->v_type != VDIR || dattrflag != 0))
 1510                 cache_enter_time(dvp, newvp, cnp, &nfsva.na_ctime,
 1511                     newvp->v_type != VDIR ? NULL : &dnfsva.na_ctime);
 1512         *vpp = newvp;
 1513         return (0);
 1514 }
 1515 
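/*
 * The name-cache validation in nfs_lookup() above trusts an entry only
 * while (u_int)(ticks - ncticks) < nm_nametimeo * hz.  Casting the
 * difference of the wrapping tick counter to an unsigned type keeps the
 * age test correct across wraparound.  A standalone sketch; "ticks" and
 * "hz" here stand in for the kernel globals.
 */
#if 0	/* illustrative sketch only */
static int ticks;                       /* wrapping tick counter */
static const int hz = 1000;             /* ticks per second (assumed) */

static int
cache_entry_fresh(int entry_ticks, int timeo_seconds)
{
        return ((unsigned int)(ticks - entry_ticks) <
            (unsigned int)(timeo_seconds * hz));
}
/* e.g. with ticks = 5000: cache_entry_fresh(4500, 1) is true,
 * cache_entry_fresh(3000, 1) is false. */
#endif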
 1516 /*
 1517  * nfs read call.
 1518  * Just call ncl_bioread() to do the work.
 1519  */
 1520 static int
 1521 nfs_read(struct vop_read_args *ap)
 1522 {
 1523         struct vnode *vp = ap->a_vp;
 1524 
 1525         switch (vp->v_type) {
 1526         case VREG:
 1527                 return (ncl_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred));
 1528         case VDIR:
 1529                 return (EISDIR);
 1530         default:
 1531                 return (EOPNOTSUPP);
 1532         }
 1533 }
 1534 
 1535 /*
 1536  * nfs readlink call
 1537  */
 1538 static int
 1539 nfs_readlink(struct vop_readlink_args *ap)
 1540 {
 1541         struct vnode *vp = ap->a_vp;
 1542 
 1543         if (vp->v_type != VLNK)
 1544                 return (EINVAL);
 1545         return (ncl_bioread(vp, ap->a_uio, 0, ap->a_cred));
 1546 }
 1547 
 1548 /*
 1549  * Do a readlink rpc.
 1550  * Called by ncl_doio() from below the buffer cache.
 1551  */
 1552 int
 1553 ncl_readlinkrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred)
 1554 {
 1555         int error, ret, attrflag;
 1556         struct nfsvattr nfsva;
 1557 
 1558         error = nfsrpc_readlink(vp, uiop, cred, uiop->uio_td, &nfsva,
 1559             &attrflag);
 1560         if (attrflag) {
 1561                 ret = nfscl_loadattrcache(&vp, &nfsva, NULL, 0, 1);
 1562                 if (ret && !error)
 1563                         error = ret;
 1564         }
 1565         if (error && NFS_ISV4(vp))
 1566                 error = nfscl_maperr(uiop->uio_td, error, (uid_t)0, (gid_t)0);
 1567         return (error);
 1568 }
 1569 
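/*
 * ncl_readlinkrpc() above (and several routines below) merge a secondary
 * failure with "if (ret && !error) error = ret;": the attribute-cache load
 * error is reported only when the RPC itself succeeded, so the first error
 * wins.  Minimal restatement of the idiom:
 */
#if 0	/* illustrative sketch only */
static int
merge_errors(int error, int ret)
{
        if (ret != 0 && error == 0)
                error = ret;            /* keep the earlier error if any */
        return (error);
}
#endif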
 1570 /*
 1571  * nfs read rpc call
 1572  * Ditto above
 1573  */
 1574 int
 1575 ncl_readrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred)
 1576 {
 1577         int error, ret, attrflag;
 1578         struct nfsvattr nfsva;
 1579         struct nfsmount *nmp;
 1580 
 1581         nmp = VFSTONFS(vp->v_mount);
 1582         error = EIO;
 1583         attrflag = 0;
 1584         if (NFSHASPNFS(nmp))
 1585                 error = nfscl_doiods(vp, uiop, NULL, NULL,
 1586                     NFSV4OPEN_ACCESSREAD, 0, cred, uiop->uio_td);
 1587         NFSCL_DEBUG(4, "readrpc: aft doiods=%d\n", error);
 1588         if (error != 0)
 1589                 error = nfsrpc_read(vp, uiop, cred, uiop->uio_td, &nfsva,
 1590                     &attrflag);
 1591         if (attrflag) {
 1592                 ret = nfscl_loadattrcache(&vp, &nfsva, NULL, 0, 1);
 1593                 if (ret && !error)
 1594                         error = ret;
 1595         }
 1596         if (error && NFS_ISV4(vp))
 1597                 error = nfscl_maperr(uiop->uio_td, error, (uid_t)0, (gid_t)0);
 1598         return (error);
 1599 }
 1600 
 1601 /*
 1602  * nfs write call
 1603  */
 1604 int
 1605 ncl_writerpc(struct vnode *vp, struct uio *uiop, struct ucred *cred,
 1606     int *iomode, int *must_commit, int called_from_strategy, int ioflag)
 1607 {
 1608         struct nfsvattr nfsva;
 1609         int error, attrflag, ret;
 1610         struct nfsmount *nmp;
 1611 
 1612         nmp = VFSTONFS(vp->v_mount);
 1613         error = EIO;
 1614         attrflag = 0;
 1615         if (NFSHASPNFS(nmp))
 1616                 error = nfscl_doiods(vp, uiop, iomode, must_commit,
 1617                     NFSV4OPEN_ACCESSWRITE, 0, cred, uiop->uio_td);
 1618         NFSCL_DEBUG(4, "writerpc: aft doiods=%d\n", error);
 1619         if (error != 0)
 1620                 error = nfsrpc_write(vp, uiop, iomode, must_commit, cred,
 1621                     uiop->uio_td, &nfsva, &attrflag, called_from_strategy,
 1622                     ioflag);
 1623         if (attrflag) {
 1624                 if (VTONFS(vp)->n_flag & ND_NFSV4)
 1625                         ret = nfscl_loadattrcache(&vp, &nfsva, NULL, 1, 1);
 1626                 else
 1627                         ret = nfscl_loadattrcache(&vp, &nfsva, NULL, 0, 1);
 1628                 if (ret && !error)
 1629                         error = ret;
 1630         }
 1631         if (DOINGASYNC(vp))
 1632                 *iomode = NFSWRITE_FILESYNC;
 1633         if (error && NFS_ISV4(vp))
 1634                 error = nfscl_maperr(uiop->uio_td, error, (uid_t)0, (gid_t)0);
 1635         return (error);
 1636 }
 1637 
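/*
 * ncl_readrpc() and ncl_writerpc() above share one pattern: start with
 * error = EIO, try the pNFS data-server path when the mount has layouts,
 * and fall back to the ordinary RPC whenever that path fails or is not
 * available.  Sketch with placeholder callbacks (has_pnfs, pnfs_io and
 * plain_rpc_io are not real kernel functions):
 */
#if 0	/* illustrative sketch only */
#include <errno.h>

static int
do_io(int has_pnfs, int (*pnfs_io)(void), int (*plain_rpc_io)(void))
{
        int error = EIO;                /* "not done yet" */

        if (has_pnfs)
                error = pnfs_io();      /* try the data servers first */
        if (error != 0)
                error = plain_rpc_io(); /* fall back to the regular RPC */
        return (error);
}
#endif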
 1638 /*
 1639  * nfs mknod rpc
 1640  * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the
 1641  * mode set to specify the file type and the size field for rdev.
 1642  */
 1643 static int
 1644 nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
 1645     struct vattr *vap)
 1646 {
 1647         struct nfsvattr nfsva, dnfsva;
 1648         struct vnode *newvp = NULL;
 1649         struct nfsnode *np = NULL, *dnp;
 1650         struct nfsfh *nfhp;
 1651         struct vattr vattr;
 1652         int error = 0, attrflag, dattrflag;
 1653         u_int32_t rdev;
 1654 
 1655         if (vap->va_type == VCHR || vap->va_type == VBLK)
 1656                 rdev = vap->va_rdev;
 1657         else if (vap->va_type == VFIFO || vap->va_type == VSOCK)
 1658                 rdev = 0xffffffff;
 1659         else
 1660                 return (EOPNOTSUPP);
 1661         if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred)))
 1662                 return (error);
 1663         error = nfsrpc_mknod(dvp, cnp->cn_nameptr, cnp->cn_namelen, vap,
 1664             rdev, vap->va_type, cnp->cn_cred, curthread, &dnfsva,
 1665             &nfsva, &nfhp, &attrflag, &dattrflag);
 1666         if (!error) {
 1667                 if (!nfhp)
 1668                         (void) nfsrpc_lookup(dvp, cnp->cn_nameptr,
 1669                             cnp->cn_namelen, cnp->cn_cred, curthread,
 1670                             &dnfsva, &nfsva, &nfhp, &attrflag, &dattrflag, 0);
 1671                 if (nfhp)
 1672                         error = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp,
 1673                             curthread, &np, LK_EXCLUSIVE);
 1674         }
 1675         if (dattrflag)
 1676                 (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, 0, 1);
 1677         if (!error) {
 1678                 newvp = NFSTOV(np);
 1679                 if (attrflag != 0) {
 1680                         error = nfscl_loadattrcache(&newvp, &nfsva, NULL, 0, 1);
 1681                         if (error != 0)
 1682                                 vput(newvp);
 1683                 }
 1684         }
 1685         if (!error) {
 1686                 *vpp = newvp;
 1687         } else if (NFS_ISV4(dvp)) {
 1688                 error = nfscl_maperr(curthread, error, vap->va_uid,
 1689                     vap->va_gid);
 1690         }
 1691         dnp = VTONFS(dvp);
 1692         NFSLOCKNODE(dnp);
 1693         dnp->n_flag |= NMODIFIED;
 1694         if (!dattrflag) {
 1695                 dnp->n_attrstamp = 0;
 1696                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
 1697         }
 1698         NFSUNLOCKNODE(dnp);
 1699         return (error);
 1700 }
 1701 
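/*
 * Sketch of the rdev selection at the top of nfs_mknodrpc(): character and
 * block devices pass the real device number, FIFOs and sockets use the
 * all-ones sentinel, and every other type is rejected.  The enum and
 * function below are placeholders for this sketch, not kernel types.
 */
#if 0	/* illustrative sketch only */
#include <errno.h>
#include <stdint.h>

enum sketch_vtype { SK_VCHR, SK_VBLK, SK_VFIFO, SK_VSOCK, SK_VREG };

static int
pick_rdev(enum sketch_vtype type, uint32_t va_rdev, uint32_t *rdevp)
{
        switch (type) {
        case SK_VCHR:
        case SK_VBLK:
                *rdevp = va_rdev;       /* real device number */
                return (0);
        case SK_VFIFO:
        case SK_VSOCK:
                *rdevp = 0xffffffff;    /* sentinel: no device number */
                return (0);
        default:
                return (EOPNOTSUPP);
        }
}
#endif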
 1702 /*
 1703  * nfs mknod vop
 1704  * just call nfs_mknodrpc() to do the work.
 1705  */
 1706 /* ARGSUSED */
 1707 static int
 1708 nfs_mknod(struct vop_mknod_args *ap)
 1709 {
 1710         return (nfs_mknodrpc(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap));
 1711 }
 1712 
 1713 static struct mtx nfs_cverf_mtx;
 1714 MTX_SYSINIT(nfs_cverf_mtx, &nfs_cverf_mtx, "NFS create verifier mutex",
 1715     MTX_DEF);
 1716 
 1717 static nfsquad_t
 1718 nfs_get_cverf(void)
 1719 {
 1720         static nfsquad_t cverf;
 1721         nfsquad_t ret;
 1722         static int cverf_initialized = 0;
 1723 
 1724         mtx_lock(&nfs_cverf_mtx);
 1725         if (cverf_initialized == 0) {
 1726                 cverf.lval[0] = arc4random();
 1727                 cverf.lval[1] = arc4random();
 1728                 cverf_initialized = 1;
 1729         } else
 1730                 cverf.qval++;
 1731         ret = cverf;
 1732         mtx_unlock(&nfs_cverf_mtx);
 1733 
 1734         return (ret);
 1735 }
 1736 
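/*
 * Userland analogue of nfs_get_cverf() above: seed a 64-bit create
 * verifier with random bits once, then hand out strictly increasing values
 * under a lock so every exclusive-create request carries a unique
 * verifier.  pthread_mutex and random() stand in for the kernel mutex and
 * arc4random() in this sketch.
 */
#if 0	/* illustrative sketch only */
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

static pthread_mutex_t cverf_mtx = PTHREAD_MUTEX_INITIALIZER;

static uint64_t
get_cverf(void)
{
        static uint64_t cverf;
        static int initialized;
        uint64_t ret;

        pthread_mutex_lock(&cverf_mtx);
        if (!initialized) {
                cverf = ((uint64_t)random() << 32) | (uint64_t)random();
                initialized = 1;
        } else
                cverf++;
        ret = cverf;
        pthread_mutex_unlock(&cverf_mtx);
        return (ret);
}
#endif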
 1737 /*
 1738  * nfs file create call
 1739  */
 1740 static int
 1741 nfs_create(struct vop_create_args *ap)
 1742 {
 1743         struct vnode *dvp = ap->a_dvp;
 1744         struct vattr *vap = ap->a_vap;
 1745         struct componentname *cnp = ap->a_cnp;
 1746         struct nfsnode *np = NULL, *dnp;
 1747         struct vnode *newvp = NULL;
 1748         struct nfsmount *nmp;
 1749         struct nfsvattr dnfsva, nfsva;
 1750         struct nfsfh *nfhp;
 1751         nfsquad_t cverf;
 1752         int error = 0, attrflag, dattrflag, fmode = 0;
 1753         struct vattr vattr;
 1754 
 1755         /*
 1756          * Oops, not for me..
 1757          */
 1758         if (vap->va_type == VSOCK)
 1759                 return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap));
 1760 
 1761         if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred)))
 1762                 return (error);
 1763         if (vap->va_vaflags & VA_EXCLUSIVE)
 1764                 fmode |= O_EXCL;
 1765         dnp = VTONFS(dvp);
 1766         nmp = VFSTONFS(dvp->v_mount);
 1767 again:
 1768         /* For NFSv4, wait until any remove is done. */
 1769         NFSLOCKNODE(dnp);
 1770         while (NFSHASNFSV4(nmp) && (dnp->n_flag & NREMOVEINPROG)) {
 1771                 dnp->n_flag |= NREMOVEWANT;
 1772                 (void) msleep((caddr_t)dnp, &dnp->n_mtx, PZERO, "nfscrt", 0);
 1773         }
 1774         NFSUNLOCKNODE(dnp);
 1775 
 1776         cverf = nfs_get_cverf();
 1777         error = nfsrpc_create(dvp, cnp->cn_nameptr, cnp->cn_namelen,
 1778             vap, cverf, fmode, cnp->cn_cred, curthread, &dnfsva, &nfsva,
 1779             &nfhp, &attrflag, &dattrflag);
 1780         if (!error) {
 1781                 if (nfhp == NULL)
 1782                         (void) nfsrpc_lookup(dvp, cnp->cn_nameptr,
 1783                             cnp->cn_namelen, cnp->cn_cred, curthread,
 1784                             &dnfsva, &nfsva, &nfhp, &attrflag, &dattrflag, 0);
 1785                 if (nfhp != NULL)
 1786                         error = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp,
 1787                             curthread, &np, LK_EXCLUSIVE);
 1788         }
 1789         if (dattrflag)
 1790                 (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, 0, 1);
 1791         if (!error) {
 1792                 newvp = NFSTOV(np);
 1793                 if (attrflag == 0)
 1794                         error = nfsrpc_getattr(newvp, cnp->cn_cred, curthread,
 1795                             &nfsva);
 1796                 if (error == 0)
 1797                         error = nfscl_loadattrcache(&newvp, &nfsva, NULL, 0, 1);
 1798         }
 1799         if (error) {
 1800                 if (newvp != NULL) {
 1801                         vput(newvp);
 1802                         newvp = NULL;
 1803                 }
 1804                 if (NFS_ISV34(dvp) && (fmode & O_EXCL) &&
 1805                     error == NFSERR_NOTSUPP) {
 1806                         fmode &= ~O_EXCL;
 1807                         goto again;
 1808                 }
 1809         } else if (NFS_ISV34(dvp) && (fmode & O_EXCL)) {
 1810                 if (nfscl_checksattr(vap, &nfsva)) {
 1811                         error = nfsrpc_setattr(newvp, vap, NULL, cnp->cn_cred,
 1812                             curthread, &nfsva, &attrflag);
 1813                         if (error && (vap->va_uid != (uid_t)VNOVAL ||
 1814                             vap->va_gid != (gid_t)VNOVAL)) {
 1815                                 /* try again without setting uid/gid */
 1816                                 vap->va_uid = (uid_t)VNOVAL;
 1817                                 vap->va_gid = (gid_t)VNOVAL;
 1818                                 error = nfsrpc_setattr(newvp, vap, NULL, 
 1819                                     cnp->cn_cred, curthread, &nfsva, &attrflag);
 1820                         }
 1821                         if (attrflag)
 1822                                 (void) nfscl_loadattrcache(&newvp, &nfsva, NULL,
 1823                                     0, 1);
 1824                         if (error != 0)
 1825                                 vput(newvp);
 1826                 }
 1827         }
 1828         if (!error) {
 1829                 if ((cnp->cn_flags & MAKEENTRY) && attrflag) {
 1830                         if (dvp != newvp)
 1831                                 cache_enter_time(dvp, newvp, cnp,
 1832                                     &nfsva.na_ctime, NULL);
 1833                         else
 1834                                 printf("nfs_create: bogus NFS server returned "
 1835                                     "the directory as the new file object\n");
 1836                 }
 1837                 *ap->a_vpp = newvp;
 1838         } else if (NFS_ISV4(dvp)) {
 1839                 error = nfscl_maperr(curthread, error, vap->va_uid,
 1840                     vap->va_gid);
 1841         }
 1842         NFSLOCKNODE(dnp);
 1843         dnp->n_flag |= NMODIFIED;
 1844         if (!dattrflag) {
 1845                 dnp->n_attrstamp = 0;
 1846                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
 1847         }
 1848         NFSUNLOCKNODE(dnp);
 1849         return (error);
 1850 }
 1851 
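/*
 * Sketch of the retry logic in nfs_create() above: an exclusive
 * (verifier-based) create is attempted first and, when the server reports
 * it as unsupported, the request is retried as a plain create.
 * create_rpc() is a placeholder and NOTSUPP_SKETCH merely stands in for
 * NFSERR_NOTSUPP.
 */
#if 0	/* illustrative sketch only */
#define NOTSUPP_SKETCH  10004           /* stands in for NFSERR_NOTSUPP */

static int
create_with_fallback(int want_exclusive, int (*create_rpc)(int exclusive))
{
        int error, exclusive = want_exclusive;

again:
        error = create_rpc(exclusive);
        if (error == NOTSUPP_SKETCH && exclusive) {
                exclusive = 0;          /* retry as an unchecked create */
                goto again;
        }
        return (error);
}
#endif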
 1852 /*
 1853  * nfs file remove call
 1854  * To try and make nfs semantics closer to ufs semantics, a file that has
 1855  * other processes using the vnode is renamed instead of removed and then
 1856  * removed later on the last close.
 1857  * - If v_usecount > 1
 1858  *        If a rename is not already in the works
 1859  *           call nfs_sillyrename() to set it up
 1860  *   else
 1861  *        do the remove rpc
 1862  */
 1863 static int
 1864 nfs_remove(struct vop_remove_args *ap)
 1865 {
 1866         struct vnode *vp = ap->a_vp;
 1867         struct vnode *dvp = ap->a_dvp;
 1868         struct componentname *cnp = ap->a_cnp;
 1869         struct nfsnode *np = VTONFS(vp);
 1870         int error = 0;
 1871         struct vattr vattr;
 1872 
 1873         KASSERT(vrefcnt(vp) > 0, ("nfs_remove: bad v_usecount"));
 1874         if (vp->v_type == VDIR)
 1875                 error = EPERM;
 1876         else if (vrefcnt(vp) == 1 || (np->n_sillyrename &&
 1877             VOP_GETATTR(vp, &vattr, cnp->cn_cred) == 0 &&
 1878             vattr.va_nlink > 1)) {
 1879                 /*
 1880                  * Purge the name cache so that the chance of a lookup for
 1881                  * the name succeeding while the remove is in progress is
 1882                  * minimized. Without node locking it can still happen, such
 1883                  * that an I/O op returns ESTALE, but that can happen anyway
 1884                  * when another host removes the file.
 1885                  */
 1886                 cache_purge(vp);
 1887                 /*
 1888                  * throw away biocache buffers, mainly to avoid
 1889                  * unnecessary delayed writes later.
 1890                  */
 1891                 error = ncl_vinvalbuf(vp, 0, curthread, 1);
 1892                 if (error != EINTR && error != EIO)
 1893                         /* Do the rpc */
 1894                         error = nfs_removerpc(dvp, vp, cnp->cn_nameptr,
 1895                             cnp->cn_namelen, cnp->cn_cred, curthread);
 1896                 /*
 1897                  * Kludge City: If the first reply to the remove rpc is lost,
 1898                  *   the reply to the retransmitted request will be ENOENT,
 1899                  *   since the file was in fact removed.
 1900                  *   Therefore, we cheat and return success.
 1901                  */
 1902                 if (error == ENOENT)
 1903                         error = 0;
 1904         } else if (!np->n_sillyrename)
 1905                 error = nfs_sillyrename(dvp, vp, cnp);
 1906         NFSLOCKNODE(np);
 1907         np->n_attrstamp = 0;
 1908         NFSUNLOCKNODE(np);
 1909         KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
 1910         return (error);
 1911 }
 1912 
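/*
 * The decision in nfs_remove() above, restated as a sketch (it omits the
 * link-count check): a file still referenced elsewhere is "silly renamed"
 * on the server so its data stays reachable until the last close, while an
 * unreferenced file is removed directly, and an ENOENT reply is treated as
 * success because it usually means a retransmitted REMOVE already
 * succeeded.  The callbacks are placeholders.
 */
#if 0	/* illustrative sketch only */
#include <errno.h>

static int
remove_or_sillyrename(int refcount, int already_sillyrenamed,
    int (*do_remove_rpc)(void), int (*do_sillyrename)(void))
{
        int error;

        if (refcount == 1 || already_sillyrenamed) {
                error = do_remove_rpc();
                if (error == ENOENT)    /* reply to a retransmitted request */
                        error = 0;
        } else
                error = do_sillyrename();
        return (error);
}
#endif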
 1913 /*
 1914  * nfs file remove rpc called from nfs_inactive
 1915  */
 1916 int
 1917 ncl_removeit(struct sillyrename *sp, struct vnode *vp)
 1918 {
 1919         /*
 1920          * Make sure that the directory vnode is still valid.
 1921          * XXX we should lock sp->s_dvp here.
 1922          */
 1923         if (sp->s_dvp->v_type == VBAD)
 1924                 return (0);
 1925         return (nfs_removerpc(sp->s_dvp, vp, sp->s_name, sp->s_namlen,
 1926             sp->s_cred, NULL));
 1927 }
 1928 
 1929 /*
 1930  * Nfs remove rpc, called from nfs_remove() and ncl_removeit().
 1931  */
 1932 static int
 1933 nfs_removerpc(struct vnode *dvp, struct vnode *vp, char *name,
 1934     int namelen, struct ucred *cred, struct thread *td)
 1935 {
 1936         struct nfsvattr dnfsva;
 1937         struct nfsnode *dnp = VTONFS(dvp);
 1938         int error = 0, dattrflag;
 1939 
 1940         NFSLOCKNODE(dnp);
 1941         dnp->n_flag |= NREMOVEINPROG;
 1942         NFSUNLOCKNODE(dnp);
 1943         error = nfsrpc_remove(dvp, name, namelen, vp, cred, td, &dnfsva,
 1944             &dattrflag);
 1945         NFSLOCKNODE(dnp);
 1946         if ((dnp->n_flag & NREMOVEWANT)) {
 1947                 dnp->n_flag &= ~(NREMOVEWANT | NREMOVEINPROG);
 1948                 NFSUNLOCKNODE(dnp);
 1949                 wakeup((caddr_t)dnp);
 1950         } else {
 1951                 dnp->n_flag &= ~NREMOVEINPROG;
 1952                 NFSUNLOCKNODE(dnp);
 1953         }
 1954         if (dattrflag)
 1955                 (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, 0, 1);
 1956         NFSLOCKNODE(dnp);
 1957         dnp->n_flag |= NMODIFIED;
 1958         if (!dattrflag) {
 1959                 dnp->n_attrstamp = 0;
 1960                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
 1961         }
 1962         NFSUNLOCKNODE(dnp);
 1963         if (error && NFS_ISV4(dvp))
 1964                 error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
 1965         return (error);
 1966 }
 1967 
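/*
 * Userland analogue of the NREMOVEINPROG/NREMOVEWANT handshake used by
 * nfs_removerpc() above: NFSv4 lookups and creates wait while a remove is
 * in progress on the directory, and the remover wakes them when it is
 * done.  The kernel uses node flags with msleep()/wakeup(); a mutex and
 * condition variable play the same roles in this sketch.
 */
#if 0	/* illustrative sketch only */
#include <pthread.h>

static pthread_mutex_t dir_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t dir_cv = PTHREAD_COND_INITIALIZER;
static int remove_in_progress;

static void
wait_for_remove(void)
{
        pthread_mutex_lock(&dir_mtx);
        while (remove_in_progress)
                pthread_cond_wait(&dir_cv, &dir_mtx);
        pthread_mutex_unlock(&dir_mtx);
}

static void
do_remove(void (*remove_rpc)(void))
{
        pthread_mutex_lock(&dir_mtx);
        remove_in_progress = 1;
        pthread_mutex_unlock(&dir_mtx);

        remove_rpc();                   /* the actual REMOVE RPC */

        pthread_mutex_lock(&dir_mtx);
        remove_in_progress = 0;
        pthread_cond_broadcast(&dir_cv);
        pthread_mutex_unlock(&dir_mtx);
}
#endif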
 1968 /*
 1969  * nfs file rename call
 1970  */
 1971 static int
 1972 nfs_rename(struct vop_rename_args *ap)
 1973 {
 1974         struct vnode *fvp = ap->a_fvp;
 1975         struct vnode *tvp = ap->a_tvp;
 1976         struct vnode *fdvp = ap->a_fdvp;
 1977         struct vnode *tdvp = ap->a_tdvp;
 1978         struct componentname *tcnp = ap->a_tcnp;
 1979         struct componentname *fcnp = ap->a_fcnp;
 1980         struct nfsnode *fnp = VTONFS(ap->a_fvp);
 1981         struct nfsnode *tdnp = VTONFS(ap->a_tdvp);
 1982         struct nfsv4node *newv4 = NULL;
 1983         int error;
 1984 
 1985         /* Check for cross-device rename */
 1986         if ((fvp->v_mount != tdvp->v_mount) ||
 1987             (tvp && (fvp->v_mount != tvp->v_mount))) {
 1988                 error = EXDEV;
 1989                 goto out;
 1990         }
 1991 
 1992         if (fvp == tvp) {
 1993                 printf("nfs_rename: fvp == tvp (can't happen)\n");
 1994                 error = 0;
 1995                 goto out;
 1996         }
 1997         if ((error = NFSVOPLOCK(fvp, LK_EXCLUSIVE)) != 0)
 1998                 goto out;
 1999 
 2000         /*
 2001          * We have to flush B_DELWRI data prior to renaming
 2002          * the file.  If we don't, the delayed-write buffers
 2003          * can be flushed out later after the file has gone stale
 2004          * under NFSV3.  NFSV2 does not have this problem because
 2005          * ( as far as I can tell ) it flushes dirty buffers more
 2006          * often.
 2007          * 
 2008          * Skip the rename operation if the fsync fails, this can happen
 2009          * due to the server's volume being full, when we pushed out data
 2010          * that was written back to our cache earlier. Not checking for
 2011          * this condition can result in potential (silent) data loss.
 2012          */
 2013         error = VOP_FSYNC(fvp, MNT_WAIT, curthread);
 2014         NFSVOPUNLOCK(fvp);
 2015         if (!error && tvp)
 2016                 error = VOP_FSYNC(tvp, MNT_WAIT, curthread);
 2017         if (error)
 2018                 goto out;
 2019 
 2020         /*
 2021          * If the tvp exists and is in use, sillyrename it before doing the
 2022          * rename of the new file over it.
 2023          * XXX Can't sillyrename a directory.
 2024          */
 2025         if (tvp && vrefcnt(tvp) > 1 && !VTONFS(tvp)->n_sillyrename &&
 2026                 tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp)) {
 2027                 vput(tvp);
 2028                 tvp = NULL;
 2029         }
 2030 
 2031         error = nfs_renamerpc(fdvp, fvp, fcnp->cn_nameptr, fcnp->cn_namelen,
 2032             tdvp, tvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred,
 2033             curthread);
 2034 
 2035         if (error == 0 && NFS_ISV4(tdvp)) {
 2036                 /*
 2037                  * For NFSv4, check to see if it is the same name and
 2038                  * replace the name, if it is different.
 2039                  */
 2040                 newv4 = malloc(
 2041                     sizeof (struct nfsv4node) +
 2042                     tdnp->n_fhp->nfh_len + tcnp->cn_namelen - 1,
 2043                     M_NFSV4NODE, M_WAITOK);
 2044                 NFSLOCKNODE(tdnp);
 2045                 NFSLOCKNODE(fnp);
 2046                 if (fnp->n_v4 != NULL && fvp->v_type == VREG &&
 2047                     (fnp->n_v4->n4_namelen != tcnp->cn_namelen ||
 2048                       NFSBCMP(tcnp->cn_nameptr, NFS4NODENAME(fnp->n_v4),
 2049                       tcnp->cn_namelen) ||
 2050                       tdnp->n_fhp->nfh_len != fnp->n_v4->n4_fhlen ||
 2051                       NFSBCMP(tdnp->n_fhp->nfh_fh, fnp->n_v4->n4_data,
 2052                         tdnp->n_fhp->nfh_len))) {
 2053 #ifdef notdef
 2054 { char nnn[100]; int nnnl;
 2055 nnnl = (tcnp->cn_namelen < 100) ? tcnp->cn_namelen : 99;
 2056 bcopy(tcnp->cn_nameptr, nnn, nnnl);
 2057 nnn[nnnl] = '\0';
 2058 printf("ren replace=%s\n",nnn);
 2059 }
 2060 #endif
 2061                         free(fnp->n_v4, M_NFSV4NODE);
 2062                         fnp->n_v4 = newv4;
 2063                         newv4 = NULL;
 2064                         fnp->n_v4->n4_fhlen = tdnp->n_fhp->nfh_len;
 2065                         fnp->n_v4->n4_namelen = tcnp->cn_namelen;
 2066                         NFSBCOPY(tdnp->n_fhp->nfh_fh, fnp->n_v4->n4_data,
 2067                             tdnp->n_fhp->nfh_len);
 2068                         NFSBCOPY(tcnp->cn_nameptr,
 2069                             NFS4NODENAME(fnp->n_v4), tcnp->cn_namelen);
 2070                 }
 2071                 NFSUNLOCKNODE(tdnp);
 2072                 NFSUNLOCKNODE(fnp);
 2073                 if (newv4 != NULL)
 2074                         free(newv4, M_NFSV4NODE);
 2075         }
 2076 
 2077         if (fvp->v_type == VDIR) {
 2078                 if (tvp != NULL && tvp->v_type == VDIR)
 2079                         cache_purge(tdvp);
 2080                 cache_purge(fdvp);
 2081         }
 2082 
 2083 out:
 2084         if (tdvp == tvp)
 2085                 vrele(tdvp);
 2086         else
 2087                 vput(tdvp);
 2088         if (tvp)
 2089                 vput(tvp);
 2090         vrele(fdvp);
 2091         vrele(fvp);
 2092         /*
 2093          * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
 2094          */
 2095         if (error == ENOENT)
 2096                 error = 0;
 2097         return (error);
 2098 }
 2099 
 2100 /*
 2101  * nfs file rename rpc called from nfs_sillyrename() below
 2102  */
 2103 static int
 2104 nfs_renameit(struct vnode *sdvp, struct vnode *svp, struct componentname *scnp,
 2105     struct sillyrename *sp)
 2106 {
 2107 
 2108         return (nfs_renamerpc(sdvp, svp, scnp->cn_nameptr, scnp->cn_namelen,
 2109             sdvp, NULL, sp->s_name, sp->s_namlen, scnp->cn_cred,
 2110             curthread));
 2111 }
 2112 
 2113 /*
 2114  * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit().
 2115  */
 2116 static int
 2117 nfs_renamerpc(struct vnode *fdvp, struct vnode *fvp, char *fnameptr,
 2118     int fnamelen, struct vnode *tdvp, struct vnode *tvp, char *tnameptr,
 2119     int tnamelen, struct ucred *cred, struct thread *td)
 2120 {
 2121         struct nfsvattr fnfsva, tnfsva;
 2122         struct nfsnode *fdnp = VTONFS(fdvp);
 2123         struct nfsnode *tdnp = VTONFS(tdvp);
 2124         int error = 0, fattrflag, tattrflag;
 2125 
 2126         error = nfsrpc_rename(fdvp, fvp, fnameptr, fnamelen, tdvp, tvp,
 2127             tnameptr, tnamelen, cred, td, &fnfsva, &tnfsva, &fattrflag,
 2128             &tattrflag);
 2129         NFSLOCKNODE(fdnp);
 2130         fdnp->n_flag |= NMODIFIED;
 2131         if (fattrflag != 0) {
 2132                 NFSUNLOCKNODE(fdnp);
 2133                 (void) nfscl_loadattrcache(&fdvp, &fnfsva, NULL, 0, 1);
 2134         } else {
 2135                 fdnp->n_attrstamp = 0;
 2136                 NFSUNLOCKNODE(fdnp);
 2137                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(fdvp);
 2138         }
 2139         NFSLOCKNODE(tdnp);
 2140         tdnp->n_flag |= NMODIFIED;
 2141         if (tattrflag != 0) {
 2142                 NFSUNLOCKNODE(tdnp);
 2143                 (void) nfscl_loadattrcache(&tdvp, &tnfsva, NULL, 0, 1);
 2144         } else {
 2145                 tdnp->n_attrstamp = 0;
 2146                 NFSUNLOCKNODE(tdnp);
 2147                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(tdvp);
 2148         }
 2149         if (error && NFS_ISV4(fdvp))
 2150                 error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
 2151         return (error);
 2152 }
 2153 
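/*
 * nfs_renamerpc() above (and nfs_link(), nfs_symlink(), nfs_mkdir() and
 * nfs_rmdir() below) handle directory attributes with one pattern: mark
 * the directory modified, then either load the post-op attributes the
 * server returned or, when none came back, zero the attribute timestamp so
 * the next access fetches fresh ones.  Sketch with placeholder fields:
 */
#if 0	/* illustrative sketch only */
#include <time.h>

struct sketch_dir {
        int     modified;
        time_t  attr_stamp;             /* 0 forces a fresh GETATTR */
        long    cached_mtime;
};

static void
dir_post_op(struct sketch_dir *dp, int have_postop_attrs, long new_mtime)
{
        dp->modified = 1;
        if (have_postop_attrs)
                dp->cached_mtime = new_mtime;   /* trust the post-op attrs */
        else
                dp->attr_stamp = 0;             /* invalidate; refetch later */
}
#endif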
 2154 /*
 2155  * nfs hard link create call
 2156  */
 2157 static int
 2158 nfs_link(struct vop_link_args *ap)
 2159 {
 2160         struct vnode *vp = ap->a_vp;
 2161         struct vnode *tdvp = ap->a_tdvp;
 2162         struct componentname *cnp = ap->a_cnp;
 2163         struct nfsnode *np, *tdnp;
 2164         struct nfsvattr nfsva, dnfsva;
 2165         int error = 0, attrflag, dattrflag;
 2166 
 2167         /*
 2168          * Push all writes to the server, so that the attribute cache
 2169          * doesn't get "out of sync" with the server.
 2170          * XXX There should be a better way!
 2171          */
 2172         VOP_FSYNC(vp, MNT_WAIT, curthread);
 2173 
 2174         error = nfsrpc_link(tdvp, vp, cnp->cn_nameptr, cnp->cn_namelen,
 2175             cnp->cn_cred, curthread, &dnfsva, &nfsva, &attrflag, &dattrflag);
 2176         tdnp = VTONFS(tdvp);
 2177         NFSLOCKNODE(tdnp);
 2178         tdnp->n_flag |= NMODIFIED;
 2179         if (dattrflag != 0) {
 2180                 NFSUNLOCKNODE(tdnp);
 2181                 (void) nfscl_loadattrcache(&tdvp, &dnfsva, NULL, 0, 1);
 2182         } else {
 2183                 tdnp->n_attrstamp = 0;
 2184                 NFSUNLOCKNODE(tdnp);
 2185                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(tdvp);
 2186         }
 2187         if (attrflag)
 2188                 (void) nfscl_loadattrcache(&vp, &nfsva, NULL, 0, 1);
 2189         else {
 2190                 np = VTONFS(vp);
 2191                 NFSLOCKNODE(np);
 2192                 np->n_attrstamp = 0;
 2193                 NFSUNLOCKNODE(np);
 2194                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
 2195         }
 2196         /*
 2197          * If negative lookup caching is enabled, I might as well
 2198          * add an entry for this node. Not necessary for correctness,
 2199          * but if negative caching is enabled, then the system
 2200          * must care about lookup caching hit rate, so...
 2201          */
 2202         if (VFSTONFS(vp->v_mount)->nm_negnametimeo != 0 &&
 2203             (cnp->cn_flags & MAKEENTRY) && attrflag != 0 && error == 0) {
 2204                 if (tdvp != vp)
 2205                         cache_enter_time(tdvp, vp, cnp, &nfsva.na_ctime, NULL);
 2206                 else
 2207                         printf("nfs_link: bogus NFS server returned "
 2208                             "the directory as the new link\n");
 2209         }
 2210         if (error && NFS_ISV4(vp))
 2211                 error = nfscl_maperr(curthread, error, (uid_t)0,
 2212                     (gid_t)0);
 2213         return (error);
 2214 }
 2215 
 2216 /*
 2217  * nfs symbolic link create call
 2218  */
 2219 static int
 2220 nfs_symlink(struct vop_symlink_args *ap)
 2221 {
 2222         struct vnode *dvp = ap->a_dvp;
 2223         struct vattr *vap = ap->a_vap;
 2224         struct componentname *cnp = ap->a_cnp;
 2225         struct nfsvattr nfsva, dnfsva;
 2226         struct nfsfh *nfhp;
 2227         struct nfsnode *np = NULL, *dnp;
 2228         struct vnode *newvp = NULL;
 2229         int error = 0, attrflag, dattrflag, ret;
 2230 
 2231         vap->va_type = VLNK;
 2232         error = nfsrpc_symlink(dvp, cnp->cn_nameptr, cnp->cn_namelen,
 2233             ap->a_target, vap, cnp->cn_cred, curthread, &dnfsva,
 2234             &nfsva, &nfhp, &attrflag, &dattrflag);
 2235         if (nfhp) {
 2236                 ret = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp, curthread,
 2237                     &np, LK_EXCLUSIVE);
 2238                 if (!ret)
 2239                         newvp = NFSTOV(np);
 2240                 else if (!error)
 2241                         error = ret;
 2242         }
 2243         if (newvp != NULL) {
 2244                 if (attrflag)
 2245                         (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, 0, 1);
 2246         } else if (!error) {
 2247                 /*
 2248                  * If we do not have an error and we could not extract the
 2249                  * newvp from the response due to the request being NFSv2, we
 2250                  * have to do a lookup in order to obtain a newvp to return.
 2251                  */
 2252                 error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
 2253                     cnp->cn_cred, curthread, &np);
 2254                 if (!error)
 2255                         newvp = NFSTOV(np);
 2256         }
 2257         if (error) {
 2258                 if (newvp)
 2259                         vput(newvp);
 2260                 if (NFS_ISV4(dvp))
 2261                         error = nfscl_maperr(curthread, error,
 2262                             vap->va_uid, vap->va_gid);
 2263         } else {
 2264                 *ap->a_vpp = newvp;
 2265         }
 2266 
 2267         dnp = VTONFS(dvp);
 2268         NFSLOCKNODE(dnp);
 2269         dnp->n_flag |= NMODIFIED;
 2270         if (dattrflag != 0) {
 2271                 NFSUNLOCKNODE(dnp);
 2272                 (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, 0, 1);
 2273         } else {
 2274                 dnp->n_attrstamp = 0;
 2275                 NFSUNLOCKNODE(dnp);
 2276                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
 2277         }
 2278         /*
 2279          * If negative lookup caching is enabled, I might as well
 2280          * add an entry for this node. Not necessary for correctness,
 2281          * but if negative caching is enabled, then the system
 2282          * must care about lookup caching hit rate, so...
 2283          */
 2284         if (VFSTONFS(dvp->v_mount)->nm_negnametimeo != 0 &&
 2285             (cnp->cn_flags & MAKEENTRY) && attrflag != 0 && error == 0) {
 2286                 if (dvp != newvp)
 2287                         cache_enter_time(dvp, newvp, cnp, &nfsva.na_ctime,
 2288                             NULL);
 2289                 else
 2290                         printf("nfs_symlink: bogus NFS server returned "
 2291                             "the directory as the new file object\n");
 2292         }
 2293         return (error);
 2294 }
 2295 
 2296 /*
 2297  * nfs make dir call
 2298  */
 2299 static int
 2300 nfs_mkdir(struct vop_mkdir_args *ap)
 2301 {
 2302         struct vnode *dvp = ap->a_dvp;
 2303         struct vattr *vap = ap->a_vap;
 2304         struct componentname *cnp = ap->a_cnp;
 2305         struct nfsnode *np = NULL, *dnp;
 2306         struct vnode *newvp = NULL;
 2307         struct vattr vattr;
 2308         struct nfsfh *nfhp;
 2309         struct nfsvattr nfsva, dnfsva;
 2310         int error = 0, attrflag, dattrflag, ret;
 2311 
 2312         if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred)) != 0)
 2313                 return (error);
 2314         vap->va_type = VDIR;
 2315         error = nfsrpc_mkdir(dvp, cnp->cn_nameptr, cnp->cn_namelen,
 2316             vap, cnp->cn_cred, curthread, &dnfsva, &nfsva, &nfhp,
 2317             &attrflag, &dattrflag);
 2318         dnp = VTONFS(dvp);
 2319         NFSLOCKNODE(dnp);
 2320         dnp->n_flag |= NMODIFIED;
 2321         if (dattrflag != 0) {
 2322                 NFSUNLOCKNODE(dnp);
 2323                 (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, 0, 1);
 2324         } else {
 2325                 dnp->n_attrstamp = 0;
 2326                 NFSUNLOCKNODE(dnp);
 2327                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
 2328         }
 2329         if (nfhp) {
 2330                 ret = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp, curthread,
 2331                     &np, LK_EXCLUSIVE);
 2332                 if (!ret) {
 2333                         newvp = NFSTOV(np);
 2334                         if (attrflag)
 2335                            (void) nfscl_loadattrcache(&newvp, &nfsva, NULL,
 2336                                 0, 1);
 2337                 } else if (!error)
 2338                         error = ret;
 2339         }
 2340         if (!error && newvp == NULL) {
 2341                 error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
 2342                     cnp->cn_cred, curthread, &np);
 2343                 if (!error) {
 2344                         newvp = NFSTOV(np);
 2345                         if (newvp->v_type != VDIR)
 2346                                 error = EEXIST;
 2347                 }
 2348         }
 2349         if (error) {
 2350                 if (newvp)
 2351                         vput(newvp);
 2352                 if (NFS_ISV4(dvp))
 2353                         error = nfscl_maperr(curthread, error,
 2354                             vap->va_uid, vap->va_gid);
 2355         } else {
 2356                 /*
 2357                  * If negative lookup caching is enabled, I might as well
 2358                  * add an entry for this node. Not necessary for correctness,
 2359                  * but if negative caching is enabled, then the system
 2360                  * must care about lookup caching hit rate, so...
 2361                  */
 2362                 if (VFSTONFS(dvp->v_mount)->nm_negnametimeo != 0 &&
 2363                     (cnp->cn_flags & MAKEENTRY) &&
 2364                     attrflag != 0 && dattrflag != 0) {
 2365                         if (dvp != newvp)
 2366                                 cache_enter_time(dvp, newvp, cnp,
 2367                                     &nfsva.na_ctime, &dnfsva.na_ctime);
 2368                         else
 2369                                 printf("nfs_mkdir: bogus NFS server returned "
 2370                                     "the directory that the directory was "
 2371                                     "created in as the new file object\n");
 2372                 }
 2373                 *ap->a_vpp = newvp;
 2374         }
 2375         return (error);
 2376 }
 2377 
 2378 /*
 2379  * nfs remove directory call
 2380  */
 2381 static int
 2382 nfs_rmdir(struct vop_rmdir_args *ap)
 2383 {
 2384         struct vnode *vp = ap->a_vp;
 2385         struct vnode *dvp = ap->a_dvp;
 2386         struct componentname *cnp = ap->a_cnp;
 2387         struct nfsnode *dnp;
 2388         struct nfsvattr dnfsva;
 2389         int error, dattrflag;
 2390 
 2391         if (dvp == vp)
 2392                 return (EINVAL);
 2393         error = nfsrpc_rmdir(dvp, cnp->cn_nameptr, cnp->cn_namelen,
 2394             cnp->cn_cred, curthread, &dnfsva, &dattrflag);
 2395         dnp = VTONFS(dvp);
 2396         NFSLOCKNODE(dnp);
 2397         dnp->n_flag |= NMODIFIED;
 2398         if (dattrflag != 0) {
 2399                 NFSUNLOCKNODE(dnp);
 2400                 (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, 0, 1);
 2401         } else {
 2402                 dnp->n_attrstamp = 0;
 2403                 NFSUNLOCKNODE(dnp);
 2404                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
 2405         }
 2406 
 2407         cache_purge(dvp);
 2408         cache_purge(vp);
 2409         if (error && NFS_ISV4(dvp))
 2410                 error = nfscl_maperr(curthread, error, (uid_t)0,
 2411                     (gid_t)0);
 2412         /*
 2413          * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry.
 2414          */
 2415         if (error == ENOENT)
 2416                 error = 0;
 2417         return (error);
 2418 }
 2419 
 2420 /*
 2421  * nfs readdir call
 2422  */
 2423 static int
 2424 nfs_readdir(struct vop_readdir_args *ap)
 2425 {
 2426         struct vnode *vp = ap->a_vp;
 2427         struct nfsnode *np = VTONFS(vp);
 2428         struct uio *uio = ap->a_uio;
 2429         ssize_t tresid, left;
 2430         int error = 0;
 2431         struct vattr vattr;
 2432 
 2433         if (ap->a_eofflag != NULL)
 2434                 *ap->a_eofflag = 0;
 2435         if (vp->v_type != VDIR) 
 2436                 return(EPERM);
 2437 
 2438         /*
 2439          * First, check for hit on the EOF offset cache
 2440          */
 2441         NFSLOCKNODE(np);
 2442         if (np->n_direofoffset > 0 && uio->uio_offset >= np->n_direofoffset &&
 2443             (np->n_flag & NMODIFIED) == 0) {
 2444                 NFSUNLOCKNODE(np);
 2445                 if (VOP_GETATTR(vp, &vattr, ap->a_cred) == 0) {
 2446                         NFSLOCKNODE(np);
 2447                         if ((NFS_ISV4(vp) && np->n_change == vattr.va_filerev) ||
 2448                             !NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime)) {
 2449                                 NFSUNLOCKNODE(np);
 2450                                 NFSINCRGLOBAL(nfsstatsv1.direofcache_hits);
 2451                                 if (ap->a_eofflag != NULL)
 2452                                         *ap->a_eofflag = 1;
 2453                                 return (0);
 2454                         } else
 2455                                 NFSUNLOCKNODE(np);
 2456                 }
 2457         } else
 2458                 NFSUNLOCKNODE(np);
 2459 
 2460         /*
 2461          * NFS always guarantees that directory entries don't straddle
 2462          * DIRBLKSIZ boundaries.  As such, we need to limit the size
 2463          * to an exact multiple of DIRBLKSIZ, to avoid copying a partial
 2464          * directory entry.
 2465          */
 2466         left = uio->uio_resid % DIRBLKSIZ;
 2467         if (left == uio->uio_resid)
 2468                 return (EINVAL);
 2469         uio->uio_resid -= left;
 2470 
 2471         /*
 2472          * Call ncl_bioread() to do the real work.
 2473          */
 2474         tresid = uio->uio_resid;
 2475         error = ncl_bioread(vp, uio, 0, ap->a_cred);
 2476 
 2477         if (!error && uio->uio_resid == tresid) {
 2478                 NFSINCRGLOBAL(nfsstatsv1.direofcache_misses);
 2479                 if (ap->a_eofflag != NULL)
 2480                         *ap->a_eofflag = 1;
 2481         }
 2482 
 2483         /* Add the partial DIRBLKSIZ (left) back in. */
 2484         uio->uio_resid += left;
 2485         return (error);
 2486 }
 2487 
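/*
 * nfs_readdir() above trims the request to an exact multiple of DIRBLKSIZ
 * and rejects requests smaller than one block, since directory entries
 * never straddle a DIRBLKSIZ boundary in the client's buffers.  A
 * standalone restatement of that arithmetic (the 512-byte block size is
 * assumed for the sketch):
 */
#if 0	/* illustrative sketch only */
#define SKETCH_DIRBLKSIZ        512

/* Returns the usable residual, or 0 when less than one block is asked for. */
static long
round_readdir_resid(long resid)
{
        long left = resid % SKETCH_DIRBLKSIZ;

        if (left == resid)              /* under one full block */
                return (0);
        return (resid - left);          /* largest multiple that fits */
}
/* e.g. round_readdir_resid(4196) == 4096, round_readdir_resid(100) == 0. */
#endif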
 2488 /*
 2489  * Readdir rpc call.
 2490  * Called from below the buffer cache by ncl_doio().
 2491  */
 2492 int
 2493 ncl_readdirrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred,
 2494     struct thread *td)
 2495 {
 2496         struct nfsvattr nfsva;
 2497         nfsuint64 *cookiep, cookie;
 2498         struct nfsnode *dnp = VTONFS(vp);
 2499         struct nfsmount *nmp = VFSTONFS(vp->v_mount);
 2500         int error = 0, eof, attrflag;
 2501 
 2502         KASSERT(uiop->uio_iovcnt == 1 &&
 2503             (uiop->uio_offset & (DIRBLKSIZ - 1)) == 0 &&
 2504             (uiop->uio_resid & (DIRBLKSIZ - 1)) == 0,
 2505             ("nfs readdirrpc bad uio"));
 2506 
 2507         /*
 2508          * If there is no cookie, assume directory was stale.
 2509          */
 2510         ncl_dircookie_lock(dnp);
 2511         NFSUNLOCKNODE(dnp);
 2512         cookiep = ncl_getcookie(dnp, uiop->uio_offset, 0);
 2513         if (cookiep) {
 2514                 cookie = *cookiep;
 2515                 ncl_dircookie_unlock(dnp);
 2516         } else {
 2517                 ncl_dircookie_unlock(dnp);              
 2518                 return (NFSERR_BAD_COOKIE);
 2519         }
 2520 
 2521         if (NFSHASNFSV3(nmp) && !NFSHASGOTFSINFO(nmp))
 2522                 (void)ncl_fsinfo(nmp, vp, cred, td);
 2523 
 2524         error = nfsrpc_readdir(vp, uiop, &cookie, cred, td, &nfsva,
 2525             &attrflag, &eof);
 2526         if (attrflag)
 2527                 (void) nfscl_loadattrcache(&vp, &nfsva, NULL, 0, 1);
 2528 
 2529         if (!error) {
 2530                 /*
 2531                  * We are now either at the end of the directory or have filled
 2532                  * the block.
 2533                  */
 2534                 if (eof) {
 2535                         NFSLOCKNODE(dnp);
 2536                         dnp->n_direofoffset = uiop->uio_offset;
 2537                         NFSUNLOCKNODE(dnp);
 2538                 } else {
 2539                         if (uiop->uio_resid > 0)
 2540                                 printf("EEK! readdirrpc resid > 0\n");
 2541                         ncl_dircookie_lock(dnp);
 2542                         NFSUNLOCKNODE(dnp);
 2543                         cookiep = ncl_getcookie(dnp, uiop->uio_offset, 1);
 2544                         *cookiep = cookie;
 2545                         ncl_dircookie_unlock(dnp);
 2546                 }
 2547         } else if (NFS_ISV4(vp)) {
 2548                 error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
 2549         }
 2550         return (error);
 2551 }
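
The cookie handling above reduces to the following summary (a fragment built
only from the calls already shown, with the dircookie locking elided): a
cookie saved for the starting directory offset is required before the RPC,
and the cookie returned by the server is stored under the advanced offset
afterwards so the next DIRBLKSIZ block can be fetched.

        /* Before the RPC: find the saved cookie for this offset. */
        cookiep = ncl_getcookie(dnp, uiop->uio_offset, 0);  /* 0: do not add */
        if (cookiep == NULL)
                return (NFSERR_BAD_COOKIE);     /* directory presumed stale */
        cookie = *cookiep;

        /* nfsrpc_readdir() advances uiop->uio_offset and updates cookie. */

        /* After the RPC, when not at EOF: save the cookie for the new offset. */
        cookiep = ncl_getcookie(dnp, uiop->uio_offset, 1);  /* 1: add if absent */
        *cookiep = cookie;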
 2552 
 2553 /*
 2554  * NFS V3 readdir plus RPC. Used in place of ncl_readdirrpc().
 2555  */
 2556 int
 2557 ncl_readdirplusrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred,
 2558     struct thread *td)
 2559 {
 2560         struct nfsvattr nfsva;
 2561         nfsuint64 *cookiep, cookie;
 2562         struct nfsnode *dnp = VTONFS(vp);
 2563         struct nfsmount *nmp = VFSTONFS(vp->v_mount);
 2564         int error = 0, attrflag, eof;
 2565 
 2566         KASSERT(uiop->uio_iovcnt == 1 &&
 2567             (uiop->uio_offset & (DIRBLKSIZ - 1)) == 0 &&
 2568             (uiop->uio_resid & (DIRBLKSIZ - 1)) == 0,
 2569             ("nfs readdirplusrpc bad uio"));
 2570 
 2571         /*
 2572          * If there is no cookie, assume directory was stale.
 2573          */
 2574         ncl_dircookie_lock(dnp);
 2575         NFSUNLOCKNODE(dnp);
 2576         cookiep = ncl_getcookie(dnp, uiop->uio_offset, 0);
 2577         if (cookiep) {
 2578                 cookie = *cookiep;
 2579                 ncl_dircookie_unlock(dnp);
 2580         } else {
 2581                 ncl_dircookie_unlock(dnp);
 2582                 return (NFSERR_BAD_COOKIE);
 2583         }
 2584 
 2585         if (NFSHASNFSV3(nmp) && !NFSHASGOTFSINFO(nmp))
 2586                 (void)ncl_fsinfo(nmp, vp, cred, td);
 2587         error = nfsrpc_readdirplus(vp, uiop, &cookie, cred, td, &nfsva,
 2588             &attrflag, &eof);
 2589         if (attrflag)
 2590                 (void) nfscl_loadattrcache(&vp, &nfsva, NULL, 0, 1);
 2591 
 2592         if (!error) {
 2593                 /*
 2594                  * We are now either at the end of the directory or have
 2595                  * filled the block.
 2596                  */
 2597                 if (eof) {
 2598                         NFSLOCKNODE(dnp);
 2599                         dnp->n_direofoffset = uiop->uio_offset;
 2600                         NFSUNLOCKNODE(dnp);
 2601                 } else {
 2602                         if (uiop->uio_resid > 0)
 2603                                 printf("EEK! readdirplusrpc resid > 0\n");
 2604                         ncl_dircookie_lock(dnp);
 2605                         NFSUNLOCKNODE(dnp);
 2606                         cookiep = ncl_getcookie(dnp, uiop->uio_offset, 1);
 2607                         *cookiep = cookie;
 2608                         ncl_dircookie_unlock(dnp);
 2609                 }
 2610         } else if (NFS_ISV4(vp)) {
 2611                 error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
 2612         }
 2613         return (error);
 2614 }
 2615 
 2616 /*
 2617  * Silly rename.  To make the stateless NFS filesystem look a little more
 2618  * like "ufs", a remove of an active vnode is translated into a rename to a
 2619  * funny looking filename that is removed by nfs_inactive on the nfsnode.
 2620  * There is the potential for another process on a different client to
 2621  * create the same funny name between when nfs_lookitup() fails and
 2622  * nfs_rename() completes, but...
 2623  */
 2624 static int
 2625 nfs_sillyrename(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
 2626 {
 2627         struct sillyrename *sp;
 2628         struct nfsnode *np;
 2629         int error;
 2630         short pid;
 2631         unsigned int lticks;
 2632 
 2633         cache_purge(dvp);
 2634         np = VTONFS(vp);
 2635         KASSERT(vp->v_type != VDIR, ("nfs: sillyrename dir"));
 2636         sp = malloc(sizeof (struct sillyrename),
 2637             M_NEWNFSREQ, M_WAITOK);
 2638         sp->s_cred = crhold(cnp->cn_cred);
 2639         sp->s_dvp = dvp;
 2640         VREF(dvp);
 2641 
 2642         /* 
 2643          * Fudge together a funny name.
 2644          * The format of the funny name was changed to accommodate more
 2645          * sillynames per directory.
 2646          * The name is now .nfs.<ticks>.<pid>.4, where ticks is the
 2647          * CPU tick count since boot.
 2648          */
 2649         pid = curthread->td_proc->p_pid;
 2650         lticks = (unsigned int)ticks;
 2651         for ( ; ; ) {
 2652                 sp->s_namlen = sprintf(sp->s_name, 
 2653                                        ".nfs.%08x.%04x4.4", lticks, 
 2654                                        pid);
 2655                 if (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
 2656                                  curthread, NULL))
 2657                         break;
 2658                 lticks++;
 2659         }
 2660         error = nfs_renameit(dvp, vp, cnp, sp);
 2661         if (error)
 2662                 goto bad;
 2663         error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
 2664                 curthread, &np);
 2665         np->n_sillyrename = sp;
 2666         return (0);
 2667 bad:
 2668         vrele(sp->s_dvp);
 2669         crfree(sp->s_cred);
 2670         free(sp, M_NEWNFSREQ);
 2671         return (error);
 2672 }
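
A userland-style sketch of the silly-name construction above; the format
string is copied from the source, while make_sillyname() and the exists()
callback are hypothetical stand-ins for the nfs_lookitup() probe that checks
whether the candidate name already exists in the directory.

        #include <stdio.h>
        #include <stddef.h>

        static int
        make_sillyname(char *buf, size_t buflen, unsigned int lticks, int pid,
            int (*exists)(const char *name))
        {
                int namlen;

                for (;;) {
                        namlen = snprintf(buf, buflen, ".nfs.%08x.%04x4.4",
                            lticks, pid);
                        if (!exists(buf))  /* a failed lookup means the name is free */
                                break;
                        lticks++;          /* collision: try the next tick value */
                }
                return (namlen);
        }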
 2673 
 2674 /*
 2675  * Look up a file name and optionally either update the file handle or
 2676  * allocate an nfsnode, depending on the value of npp.
 2677  * npp == NULL  --> just do the lookup
 2678  * *npp == NULL --> allocate a new nfsnode and make sure attributes are
 2679  *                      handled too
 2680  * *npp != NULL --> update the file handle in the vnode
 2681  */
 2682 static int
 2683 nfs_lookitup(struct vnode *dvp, char *name, int len, struct ucred *cred,
 2684     struct thread *td, struct nfsnode **npp)
 2685 {
 2686         struct vnode *newvp = NULL, *vp;
 2687         struct nfsnode *np, *dnp = VTONFS(dvp);
 2688         struct nfsfh *nfhp, *onfhp;
 2689         struct nfsvattr nfsva, dnfsva;
 2690         struct componentname cn;
 2691         int error = 0, attrflag, dattrflag;
 2692         u_int hash;
 2693         struct timespec ts;
 2694 
 2695         nanouptime(&ts);
 2696         error = nfsrpc_lookup(dvp, name, len, cred, td, &dnfsva, &nfsva,
 2697             &nfhp, &attrflag, &dattrflag, 0);
 2698         if (dattrflag)
 2699                 (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, 0, 1);
 2700         if (npp && !error) {
 2701                 if (*npp != NULL) {
 2702                     np = *npp;
 2703                     vp = NFSTOV(np);
 2704                     /*
 2705                      * For NFSv4, check to see if it is the same name and,
 2706                      * if it is different, replace the name.
 2707                      */
 2708                     if (np->n_v4 != NULL && nfsva.na_type == VREG &&
 2709                         (np->n_v4->n4_namelen != len ||
 2710                          NFSBCMP(name, NFS4NODENAME(np->n_v4), len) ||
 2711                          dnp->n_fhp->nfh_len != np->n_v4->n4_fhlen ||
 2712                          NFSBCMP(dnp->n_fhp->nfh_fh, np->n_v4->n4_data,
 2713                          dnp->n_fhp->nfh_len))) {
 2714 #ifdef notdef
 2715 { char nnn[100]; int nnnl;
 2716 nnnl = (len < 100) ? len : 99;
 2717 bcopy(name, nnn, nnnl);
 2718 nnn[nnnl] = '\0';
 2719 printf("replace=%s\n",nnn);
 2720 }
 2721 #endif
 2722                             free(np->n_v4, M_NFSV4NODE);
 2723                             np->n_v4 = malloc(
 2724                                 sizeof (struct nfsv4node) +
 2725                                 dnp->n_fhp->nfh_len + len - 1,
 2726                                 M_NFSV4NODE, M_WAITOK);
 2727                             np->n_v4->n4_fhlen = dnp->n_fhp->nfh_len;
 2728                             np->n_v4->n4_namelen = len;
 2729                             NFSBCOPY(dnp->n_fhp->nfh_fh, np->n_v4->n4_data,
 2730                                 dnp->n_fhp->nfh_len);
 2731                             NFSBCOPY(name, NFS4NODENAME(np->n_v4), len);
 2732                     }
 2733                     hash = fnv_32_buf(nfhp->nfh_fh, nfhp->nfh_len,
 2734                         FNV1_32_INIT);
 2735                     onfhp = np->n_fhp;
 2736                     /*
 2737                      * Rehash node for new file handle.
 2738                      */
 2739                     vfs_hash_rehash(vp, hash);
 2740                     np->n_fhp = nfhp;
 2741                     if (onfhp != NULL)
 2742                         free(onfhp, M_NFSFH);
 2743                     newvp = NFSTOV(np);
 2744                 } else if (NFS_CMPFH(dnp, nfhp->nfh_fh, nfhp->nfh_len)) {
 2745                     free(nfhp, M_NFSFH);
 2746                     VREF(dvp);
 2747                     newvp = dvp;
 2748                 } else {
 2749                     cn.cn_nameptr = name;
 2750                     cn.cn_namelen = len;
 2751                     error = nfscl_nget(dvp->v_mount, dvp, nfhp, &cn, td,
 2752                         &np, LK_EXCLUSIVE);
 2753                     if (error)
 2754                         return (error);
 2755                     newvp = NFSTOV(np);
 2756                     /*
 2757                      * If n_localmodtime >= the time just before the
 2758                      * RPC, then a file modification operation, such
 2759                      * as a VOP_SETATTR() of size, occurred while the
 2760                      * Lookup RPC and acquisition of the vnode were
 2761                      * in progress.  As such, the attributes might be
 2762                      * stale, possibly with an incorrect size.
 2763                      */
 2764                     NFSLOCKNODE(np);
 2765                     if (timespecisset(&np->n_localmodtime) &&
 2766                         timespeccmp(&np->n_localmodtime, &ts, >=)) {
 2767                         NFSCL_DEBUG(4, "nfs_lookitup: localmod "
 2768                             "stale attributes\n");
 2769                         attrflag = 0;
 2770                     }
 2771                     NFSUNLOCKNODE(np);
 2772                 }
 2773                 if (!attrflag && *npp == NULL) {
 2774                         if (newvp == dvp)
 2775                                 vrele(newvp);
 2776                         else
 2777                                 vput(newvp);
 2778                         return (ENOENT);
 2779                 }
 2780                 if (attrflag)
 2781                         (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, 0, 1);
 2782         }
 2783         if (npp && *npp == NULL) {
 2784                 if (error) {
 2785                         if (newvp) {
 2786                                 if (newvp == dvp)
 2787                                         vrele(newvp);
 2788                                 else
 2789                                         vput(newvp);
 2790                         }
 2791                 } else
 2792                         *npp = np;
 2793         }
 2794         if (error && NFS_ISV4(dvp))
 2795                 error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
 2796         return (error);
 2797 }
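
The header comment above names three calling conventions for nfs_lookitup();
the fragment below spells them out from a caller's point of view, assuming
dvp, name, len, cred and td are already in scope.

        struct nfsnode *np;
        int error;

        /* 1. npp == NULL: only verify that the name can be looked up. */
        error = nfs_lookitup(dvp, name, len, cred, td, NULL);

        /* 2. *npp == NULL: allocate a new nfsnode with attributes loaded. */
        np = NULL;
        error = nfs_lookitup(dvp, name, len, cred, td, &np);

        /* 3. *npp != NULL: update the file handle of the existing nfsnode. */
        error = nfs_lookitup(dvp, name, len, cred, td, &np);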
 2798 
 2799 /*
 2800  * Nfs Version 3 and 4 commit rpc
 2801  */
 2802 int
 2803 ncl_commit(struct vnode *vp, u_quad_t offset, int cnt, struct ucred *cred,
 2804    struct thread *td)
 2805 {
 2806         struct nfsvattr nfsva;
 2807         struct nfsmount *nmp = VFSTONFS(vp->v_mount);
 2808         struct nfsnode *np;
 2809         struct uio uio;
 2810         int error, attrflag;
 2811 
 2812         np = VTONFS(vp);
 2813         error = EIO;
 2814         attrflag = 0;
 2815         if (NFSHASPNFS(nmp) && (np->n_flag & NDSCOMMIT) != 0) {
 2816                 uio.uio_offset = offset;
 2817                 uio.uio_resid = cnt;
 2818                 error = nfscl_doiods(vp, &uio, NULL, NULL,
 2819                     NFSV4OPEN_ACCESSWRITE, 1, cred, td);
 2820                 if (error != 0) {
 2821                         NFSLOCKNODE(np);
 2822                         np->n_flag &= ~NDSCOMMIT;
 2823                         NFSUNLOCKNODE(np);
 2824                 }
 2825         }
 2826         if (error != 0) {
 2827                 mtx_lock(&nmp->nm_mtx);
 2828                 if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0) {
 2829                         mtx_unlock(&nmp->nm_mtx);
 2830                         return (0);
 2831                 }
 2832                 mtx_unlock(&nmp->nm_mtx);
 2833                 error = nfsrpc_commit(vp, offset, cnt, cred, td, &nfsva,
 2834                     &attrflag);
 2835         }
 2836         if (attrflag != 0)
 2837                 (void) nfscl_loadattrcache(&vp, &nfsva, NULL, 0, 1);
 2838         if (error != 0 && NFS_ISV4(vp))
 2839                 error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
 2840         return (error);
 2841 }
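
The early return in ncl_commit() above appears to use the mount's write
verifier as a cheap filter; a condensed reading of that check, with the
interpretation added as a comment, is:

        mtx_lock(&nmp->nm_mtx);
        if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0) {
                /*
                 * The server has never returned a write verifier, so
                 * presumably no uncommitted (UNSTABLE) writes are
                 * outstanding and the commit can succeed trivially.
                 */
                mtx_unlock(&nmp->nm_mtx);
                return (0);
        }
        mtx_unlock(&nmp->nm_mtx);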
 2842 
 2843 /*
 2844  * Strategy routine.
 2845  * For async requests when nfsiod(s) are running, queue the request by
 2846  * calling ncl_asyncio(), otherwise just call ncl_doio() to do the
 2847  * request.
 2848  */
 2849 static int
 2850 nfs_strategy(struct vop_strategy_args *ap)
 2851 {
 2852         struct buf *bp;
 2853         struct vnode *vp;
 2854         struct ucred *cr;
 2855 
 2856         bp = ap->a_bp;
 2857         vp = ap->a_vp;
 2858         KASSERT(bp->b_vp == vp, ("missing b_getvp"));
 2859         KASSERT(!(bp->b_flags & B_DONE),
 2860             ("nfs_strategy: buffer %p unexpectedly marked B_DONE", bp));
 2861 
 2862         if (vp->v_type == VREG && bp->b_blkno == bp->b_lblkno)
 2863                 bp->b_blkno = bp->b_lblkno * (vp->v_bufobj.bo_bsize /
 2864                     DEV_BSIZE);
 2865         if (bp->b_iocmd == BIO_READ)
 2866                 cr = bp->b_rcred;
 2867         else
 2868                 cr = bp->b_wcred;
 2869 
 2870         /*
 2871          * If the op is asynchronous and an i/o daemon is waiting,
 2872          * queue the request, wake it up and wait for completion;
 2873          * otherwise just do it ourselves.
 2874          */
 2875         if ((bp->b_flags & B_ASYNC) == 0 ||
 2876             ncl_asyncio(VFSTONFS(vp->v_mount), bp, NOCRED, curthread))
 2877                 (void) ncl_doio(vp, bp, cr, curthread, 1);
 2878         return (0);
 2879 }
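
The block-number translation at the top of nfs_strategy() amounts to the
small kernel-style helper below (the helper name is invented): the logical
block number is scaled by the buffer object's block size expressed in
DEV_BSIZE (512-byte) units.

        #include <sys/types.h>
        #include <sys/param.h>

        static daddr_t
        nfs_lblkno_to_blkno(daddr_t lblkno, long bo_bsize)
        {
                /* bo_bsize is vp->v_bufobj.bo_bsize; DEV_BSIZE is 512. */
                return (lblkno * (bo_bsize / DEV_BSIZE));
        }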
 2880 
 2881 /*
 2882  * fsync vnode op. Just call ncl_flush() with commit == 1.
 2883  */
 2884 /* ARGSUSED */
 2885 static int
 2886 nfs_fsync(struct vop_fsync_args *ap)
 2887 {
 2888 
 2889         if (ap->a_vp->v_type != VREG) {
 2890                 /*
 2891                  * For NFS, metadata is changed synchronously on the server,
 2892                  * so there is nothing to flush. Also, ncl_flush() clears
 2893                  * the NMODIFIED flag and that shouldn't be done here for
 2894                  * directories.
 2895                  */
 2896                 return (0);
 2897         }
 2898         return (ncl_flush(ap->a_vp, ap->a_waitfor, ap->a_td, 1, 0));
 2899 }
 2900 
 2901 /*
 2902  * Flush all the blocks associated with a vnode.
 2903  *      Walk through the buffer pool and push any dirty pages
 2904  *      associated with the vnode.
 2905  * If the called_from_renewthread argument is TRUE, it has been called
 2906  * from the NFSv4 renew thread and, as such, cannot block indefinitely
 2907  * waiting for a buffer write to complete.
 2908  */
 2909 int
 2910 ncl_flush(struct vnode *vp, int waitfor, struct thread *td,
 2911     int commit, int called_from_renewthread)
 2912 {
 2913         struct nfsnode *np = VTONFS(vp);
 2914         struct buf *bp;
 2915         int i;
 2916         struct buf *nbp;
 2917         struct nfsmount *nmp = VFSTONFS(vp->v_mount);
 2918         int error = 0, slptimeo = 0, slpflag = 0, retv, bvecpos;
 2919         int passone = 1, trycnt = 0;
 2920         u_quad_t off, endoff, toff;
 2921         struct ucred* wcred = NULL;
 2922         struct buf **bvec = NULL;
 2923         struct bufobj *bo;
 2924 #ifndef NFS_COMMITBVECSIZ
 2925 #define NFS_COMMITBVECSIZ       20
 2926 #endif
 2927         struct buf *bvec_on_stack[NFS_COMMITBVECSIZ];
 2928         u_int bvecsize = 0, bveccount;
 2929         struct timespec ts;
 2930 
 2931         if (called_from_renewthread != 0)
 2932                 slptimeo = hz;
 2933         if (nmp->nm_flag & NFSMNT_INT)
 2934                 slpflag = PCATCH;
 2935         if (!commit)
 2936                 passone = 0;
 2937         bo = &vp->v_bufobj;
 2938         /*
 2939          * A b_flags == (B_DELWRI | B_NEEDCOMMIT) block has been written to the
 2940          * server, but has not been committed to stable storage on the server
 2941          * yet. On the first pass, the byte range is worked out and the commit
 2942          * rpc is done. On the second pass, ncl_writebp() is called to do the
 2943          * job.
 2944          */
 2945 again:
 2946         off = (u_quad_t)-1;
 2947         endoff = 0;
 2948         bvecpos = 0;
 2949         if (NFS_ISV34(vp) && commit) {
 2950                 if (bvec != NULL && bvec != bvec_on_stack)
 2951                         free(bvec, M_TEMP);
 2952                 /*
 2953                  * Count up how many buffers are waiting for a commit.
 2954                  */
 2955                 bveccount = 0;
 2956                 BO_LOCK(bo);
 2957                 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
 2958                         if (!BUF_ISLOCKED(bp) &&
 2959                             (bp->b_flags & (B_DELWRI | B_NEEDCOMMIT))
 2960                                 == (B_DELWRI | B_NEEDCOMMIT))
 2961                                 bveccount++;
 2962                 }
 2963                 /*
 2964                  * Allocate space to remember the list of bufs to commit.  It is
 2965                  * important to use M_NOWAIT here to avoid a race with nfs_write.
 2966                  * If we can't get memory (for whatever reason), we will end up
 2967                  * committing the buffers one-by-one in the loop below.
 2968                  */
 2969                 if (bveccount > NFS_COMMITBVECSIZ) {
 2970                         /*
 2971                          * Release the vnode interlock to avoid a lock
 2972                          * order reversal.
 2973                          */
 2974                         BO_UNLOCK(bo);
 2975                         bvec = (struct buf **)
 2976                                 malloc(bveccount * sizeof(struct buf *),
 2977                                        M_TEMP, M_NOWAIT);
 2978                         BO_LOCK(bo);
 2979                         if (bvec == NULL) {
 2980                                 bvec = bvec_on_stack;
 2981                                 bvecsize = NFS_COMMITBVECSIZ;
 2982                         } else
 2983                                 bvecsize = bveccount;
 2984                 } else {
 2985                         bvec = bvec_on_stack;
 2986                         bvecsize = NFS_COMMITBVECSIZ;
 2987                 }
 2988                 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
 2989                         if (bvecpos >= bvecsize)
 2990                                 break;
 2991                         if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) {
 2992                                 nbp = TAILQ_NEXT(bp, b_bobufs);
 2993                                 continue;
 2994                         }
 2995                         if ((bp->b_flags & (B_DELWRI | B_NEEDCOMMIT)) !=
 2996                             (B_DELWRI | B_NEEDCOMMIT)) {
 2997                                 BUF_UNLOCK(bp);
 2998                                 nbp = TAILQ_NEXT(bp, b_bobufs);
 2999                                 continue;
 3000                         }
 3001                         BO_UNLOCK(bo);
 3002                         bremfree(bp);
 3003                         /*
 3004                          * Work out if all buffers are using the same cred
 3005                          * so we can deal with them all with one commit.
 3006                          *
 3007                          * NOTE: we are not clearing B_DONE here, so we have
 3008                          * to do it later on in this routine if we intend to
 3009                          * initiate I/O on the bp.
 3010                          *
 3011                          * Note: to avoid loopback deadlocks, we do not
 3012                          * assign b_runningbufspace.
 3013                          */
 3014                         if (wcred == NULL)
 3015                                 wcred = bp->b_wcred;
 3016                         else if (wcred != bp->b_wcred)
 3017                                 wcred = NOCRED;
 3018                         vfs_busy_pages(bp, 1);
 3019 
 3020                         BO_LOCK(bo);
 3021                         /*
 3022                          * bp is protected by being locked, but nbp is not
 3023                          * and vfs_busy_pages() may sleep.  We have to
 3024                          * recalculate nbp.
 3025                          */
 3026                         nbp = TAILQ_NEXT(bp, b_bobufs);
 3027 
 3028                         /*
 3029                          * A list of these buffers is kept so that the
 3030                          * second loop knows which buffers have actually
 3031                          * been committed. This is necessary, since there
 3032                          * may be a race between the commit rpc and new
 3033                          * uncommitted writes on the file.
 3034                          */
 3035                         bvec[bvecpos++] = bp;
 3036                         toff = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
 3037                                 bp->b_dirtyoff;
 3038                         if (toff < off)
 3039                                 off = toff;
 3040                         toff += (u_quad_t)(bp->b_dirtyend - bp->b_dirtyoff);
 3041                         if (toff > endoff)
 3042                                 endoff = toff;
 3043                 }
 3044                 BO_UNLOCK(bo);
 3045         }
 3046         if (bvecpos > 0) {
 3047                 /*
 3048                  * Commit data on the server, as required.
 3049                  * If all bufs are using the same wcred, then use that with
 3050                  * one call for all of them, otherwise commit each one
 3051                  * separately.
 3052                  */
 3053                 if (wcred != NOCRED)
 3054                         retv = ncl_commit(vp, off, (int)(endoff - off),
 3055                                           wcred, td);
 3056                 else {
 3057                         retv = 0;
 3058                         for (i = 0; i < bvecpos; i++) {
 3059                                 off_t off, size;
 3060                                 bp = bvec[i];
 3061                                 off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
 3062                                         bp->b_dirtyoff;
 3063                                 size = (u_quad_t)(bp->b_dirtyend
 3064                                                   - bp->b_dirtyoff);
 3065                                 retv = ncl_commit(vp, off, (int)size,
 3066                                                   bp->b_wcred, td);
 3067                                 if (retv) break;
 3068                         }
 3069                 }
 3070 
 3071                 if (retv == NFSERR_STALEWRITEVERF)
 3072                         ncl_clearcommit(vp->v_mount);
 3073 
 3074                 /*
 3075                  * Now, either mark the blocks I/O done or mark the
 3076                  * blocks dirty, depending on whether the commit
 3077                  * succeeded.
 3078                  */
 3079                 for (i = 0; i < bvecpos; i++) {
 3080                         bp = bvec[i];
 3081                         bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
 3082                         if (!NFSCL_FORCEDISM(vp->v_mount) && retv) {
 3083                                 /*
 3084                                  * Error, leave B_DELWRI intact
 3085                                  */
 3086                                 vfs_unbusy_pages(bp);
 3087                                 brelse(bp);
 3088                         } else {
 3089                                 /*
 3090                                  * Success, remove B_DELWRI ( bundirty() ).
 3091                                  *
 3092                                  * b_dirtyoff/b_dirtyend seem to be NFS
 3093                                  * specific.  We should probably move that
 3094                                  * into bundirty(). XXX
 3095                                  */
 3096                                 bufobj_wref(bo);
 3097                                 bp->b_flags |= B_ASYNC;
 3098                                 bundirty(bp);
 3099                                 bp->b_flags &= ~B_DONE;
 3100                                 bp->b_ioflags &= ~BIO_ERROR;
 3101                                 bp->b_dirtyoff = bp->b_dirtyend = 0;
 3102                                 bufdone(bp);
 3103                         }
 3104                 }
 3105         }
 3106 
 3107         /*
 3108          * Start/do any write(s) that are required.
 3109          */
 3110 loop:
 3111         BO_LOCK(bo);
 3112         TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
 3113                 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) {
 3114                         if (waitfor != MNT_WAIT || passone)
 3115                                 continue;
 3116 
 3117                         error = BUF_TIMELOCK(bp,
 3118                             LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
 3119                             BO_LOCKPTR(bo), "nfsfsync", slpflag, slptimeo);
 3120                         if (error == 0) {
 3121                                 BUF_UNLOCK(bp);
 3122                                 goto loop;
 3123                         }
 3124                         if (error == ENOLCK) {
 3125                                 error = 0;
 3126                                 goto loop;
 3127                         }
 3128                         if (called_from_renewthread != 0) {
 3129                                 /*
 3130                                  * Return EIO so the flush will be retried
 3131                                  * later.
 3132                                  */
 3133                                 error = EIO;
 3134                                 goto done;
 3135                         }
 3136                         if (newnfs_sigintr(nmp, td)) {
 3137                                 error = EINTR;
 3138                                 goto done;
 3139                         }
 3140                         if (slpflag == PCATCH) {
 3141                                 slpflag = 0;
 3142                                 slptimeo = 2 * hz;
 3143                         }
 3144                         goto loop;
 3145                 }
 3146                 if ((bp->b_flags & B_DELWRI) == 0)
 3147                         panic("nfs_fsync: not dirty");
 3148                 if ((passone || !commit) && (bp->b_flags & B_NEEDCOMMIT)) {
 3149                         BUF_UNLOCK(bp);
 3150                         continue;
 3151                 }
 3152                 BO_UNLOCK(bo);
 3153                 bremfree(bp);
 3154                 bp->b_flags |= B_ASYNC;
 3155                 bwrite(bp);
 3156                 if (newnfs_sigintr(nmp, td)) {
 3157                         error = EINTR;
 3158                         goto done;
 3159                 }
 3160                 goto loop;
 3161         }
 3162         if (passone) {
 3163                 passone = 0;
 3164                 BO_UNLOCK(bo);
 3165                 goto again;
 3166         }
 3167         if (waitfor == MNT_WAIT) {
 3168                 while (bo->bo_numoutput) {
 3169                         error = bufobj_wwait(bo, slpflag, slptimeo);
 3170                         if (error) {
 3171                             BO_UNLOCK(bo);
 3172                             if (called_from_renewthread != 0) {
 3173                                 /*
 3174                                  * Return EIO so that the flush will be
 3175                                  * retried later.
 3176                                  */
 3177                                 error = EIO;
 3178                                 goto done;
 3179                             }
 3180                             error = newnfs_sigintr(nmp, td);
 3181                             if (error)
 3182                                 goto done;
 3183                             if (slpflag == PCATCH) {
 3184                                 slpflag = 0;
 3185                                 slptimeo = 2 * hz;
 3186                             }
 3187                             BO_LOCK(bo);
 3188                         }
 3189                 }
 3190                 if (bo->bo_dirty.bv_cnt != 0 && commit) {
 3191                         BO_UNLOCK(bo);
 3192                         goto loop;
 3193                 }
 3194                 /*
 3195                  * Wait for all the async IO requests to drain
 3196                  */
 3197                 BO_UNLOCK(bo);
 3198                 NFSLOCKNODE(np);
 3199                 while (np->n_directio_asyncwr > 0) {
 3200                         np->n_flag |= NFSYNCWAIT;
 3201                         error = newnfs_msleep(td, &np->n_directio_asyncwr,
 3202                             &np->n_mtx, slpflag | (PRIBIO + 1), 
 3203                             "nfsfsync", 0);
 3204                         if (error) {
 3205                                 if (newnfs_sigintr(nmp, td)) {
 3206                                         NFSUNLOCKNODE(np);
 3207                                         error = EINTR;  
 3208                                         goto done;
 3209                                 }
 3210                         }
 3211                 }
 3212                 NFSUNLOCKNODE(np);
 3213         } else
 3214                 BO_UNLOCK(bo);
 3215         if (NFSHASPNFS(nmp)) {
 3216                 nfscl_layoutcommit(vp, td);
 3217                 /*
 3218                  * Invalidate the attribute cache, since writes to a DS
 3219                  * won't update the size attribute.
 3220                  */
 3221                 NFSLOCKNODE(np);
 3222                 np->n_attrstamp = 0;
 3223         } else
 3224                 NFSLOCKNODE(np);
 3225         if (np->n_flag & NWRITEERR) {
 3226                 error = np->n_error;
 3227                 np->n_flag &= ~NWRITEERR;
 3228         }
 3229         if (commit && bo->bo_dirty.bv_cnt == 0 &&
 3230             bo->bo_numoutput == 0 && np->n_directio_asyncwr == 0)
 3231                 np->n_flag &= ~NMODIFIED;
 3232         NFSUNLOCKNODE(np);
 3233 done:
 3234         if (bvec != NULL && bvec != bvec_on_stack)
 3235                 free(bvec, M_TEMP);
 3236         if (error == 0 && commit != 0 && waitfor == MNT_WAIT &&
 3237             (bo->bo_dirty.bv_cnt != 0 || bo->bo_numoutput != 0 ||
 3238             np->n_directio_asyncwr != 0)) {
 3239                 if (trycnt++ < 5) {
 3240                         /* try, try again... */
 3241                         passone = 1;
 3242                         wcred = NULL;
 3243                         bvec = NULL;
 3244                         bvecsize = 0;
 3245                         goto again;
 3246                 }
 3247                 vn_printf(vp, "ncl_flush failed");
 3248                 error = called_from_renewthread != 0 ? EIO : EBUSY;
 3249         }
 3250         if (error == 0) {
 3251                 nanouptime(&ts);
 3252                 NFSLOCKNODE(np);
 3253                 np->n_localmodtime = ts;
 3254                 NFSUNLOCKNODE(np);
 3255         }
 3256         return (error);
 3257 }
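
The first pass of ncl_flush() computes the byte range for the commit RPC from
the dirty sub-range of each gathered buffer; the fragment below (the helper
name is invented) isolates that arithmetic from the loop above.

        static void
        commit_range_add(struct buf *bp, u_quad_t *offp, u_quad_t *endoffp)
        {
                u_quad_t toff;

                /* Start of this buffer's dirty region, as a file byte offset. */
                toff = (u_quad_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyoff;
                if (toff < *offp)
                        *offp = toff;
                /* End of the dirty region; grow the commit range to cover it. */
                toff += (u_quad_t)(bp->b_dirtyend - bp->b_dirtyoff);
                if (toff > *endoffp)
                        *endoffp = toff;
        }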
 3258 
 3259 /*
 3260  * NFS advisory byte-level locks.
 3261  */
 3262 static int
 3263 nfs_advlock(struct vop_advlock_args *ap)
 3264 {
 3265         struct vnode *vp = ap->a_vp;
 3266         struct ucred *cred;
 3267         struct nfsnode *np = VTONFS(ap->a_vp);
 3268         struct proc *p = (struct proc *)ap->a_id;
 3269         struct thread *td = curthread;  /* XXX */
 3270         struct vattr va;
 3271         int ret, error;
 3272         u_quad_t size;
 3273         struct nfsmount *nmp;
 3274 
 3275         error = NFSVOPLOCK(vp, LK_SHARED);
 3276         if (error != 0)
 3277                 return (EBADF);
 3278         nmp = VFSTONFS(vp->v_mount);
 3279         if (!NFS_ISV4(vp) || (nmp->nm_flag & NFSMNT_NOLOCKD) != 0) {
 3280                 if ((nmp->nm_flag & NFSMNT_NOLOCKD) != 0) {
 3281                         size = np->n_size;
 3282                         NFSVOPUNLOCK(vp);
 3283                         error = lf_advlock(ap, &(vp->v_lockf), size);
 3284                 } else {
 3285                         if (nfs_advlock_p != NULL)
 3286                                 error = nfs_advlock_p(ap);
 3287                         else {
 3288                                 NFSVOPUNLOCK(vp);
 3289                                 error = ENOLCK;
 3290                         }
 3291                 }
 3292                 if (error == 0 && ap->a_op == F_SETLK) {
 3293                         error = NFSVOPLOCK(vp, LK_SHARED);
 3294                         if (error == 0) {
 3295                                 /* Mark that a file lock has been acquired. */
 3296                                 NFSLOCKNODE(np);
 3297                                 np->n_flag |= NHASBEENLOCKED;
 3298                                 NFSUNLOCKNODE(np);
 3299                                 NFSVOPUNLOCK(vp);
 3300                         }
 3301                 }
 3302                 return (error);
 3303         } else if ((ap->a_flags & (F_POSIX | F_FLOCK)) != 0) {
 3304                 if (vp->v_type != VREG) {
 3305                         error = EINVAL;
 3306                         goto out;
 3307                 }
 3308                 if ((ap->a_flags & F_POSIX) != 0)
 3309                         cred = p->p_ucred;
 3310                 else
 3311                         cred = td->td_ucred;
 3312                 NFSVOPLOCK(vp, LK_UPGRADE | LK_RETRY);
 3313                 if (VN_IS_DOOMED(vp)) {
 3314                         error = EBADF;
 3315                         goto out;
 3316                 }
 3317 
 3318                 /*
 3319                  * If this is unlocking a write locked region, flush and
 3320                  * commit the dirty buffers before unlocking.  This is
 3321                  * required by RFC3530 Sec. 9.3.2.
 3322                  */
 3323                 if (ap->a_op == F_UNLCK &&
 3324                     nfscl_checkwritelocked(vp, ap->a_fl, cred, td, ap->a_id,
 3325                     ap->a_flags))
 3326                         (void) ncl_flush(vp, MNT_WAIT, td, 1, 0);
 3327 
 3328                 /*
 3329                  * Mark NFS node as might have acquired a lock.
 3330                  * This is separate from NHASBEENLOCKED, because it must
 3331                  * be done before the nfsrpc_advlock() call, which might
 3332                  * add a nfscllock structure to the client state.
 3333                  * It is used to check for the case where a nfscllock
 3334                  * state structure cannot exist for the file.
 3335                  * Only done for "oneopenown" NFSv4.1/4.2 mounts.
 3336                  */
 3337                 if (NFSHASNFSV4N(nmp) && NFSHASONEOPENOWN(nmp)) {
 3338                         NFSLOCKNODE(np);
 3339                         np->n_flag |= NMIGHTBELOCKED;
 3340                         NFSUNLOCKNODE(np);
 3341                 }
 3342 
 3343                 /*
 3344                  * Loop around doing the lock op, since a blocking lock
 3345                  * request must wait for the lock op to succeed.
 3346                  */
 3347                 do {
 3348                         ret = nfsrpc_advlock(vp, np->n_size, ap->a_op,
 3349                             ap->a_fl, 0, cred, td, ap->a_id, ap->a_flags);
 3350                         if (ret == NFSERR_DENIED && (ap->a_flags & F_WAIT) &&
 3351                             ap->a_op == F_SETLK) {
 3352                                 NFSVOPUNLOCK(vp);
 3353                                 error = nfs_catnap(PZERO | PCATCH, ret,
 3354                                     "ncladvl");
 3355                                 if (error)
 3356                                         return (EINTR);
 3357                                 NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY);
 3358                                 if (VN_IS_DOOMED(vp)) {
 3359                                         error = EBADF;
 3360                                         goto out;
 3361                                 }
 3362                         }
 3363                 } while (ret == NFSERR_DENIED && (ap->a_flags & F_WAIT) &&
 3364                      ap->a_op == F_SETLK);
 3365                 if (ret == NFSERR_DENIED) {
 3366                         error = EAGAIN;
 3367                         goto out;
 3368                 } else if (ret == EINVAL || ret == EBADF || ret == EINTR) {
 3369                         error = ret;
 3370                         goto out;
 3371                 } else if (ret != 0) {
 3372                         error = EACCES;
 3373                         goto out;
 3374                 }
 3375 
 3376                 /*
 3377                  * Now, if we just got a lock, invalidate data in the buffer
 3378                  * cache, as required, so that the coherency conforms with
 3379                  * RFC3530 Sec. 9.3.2.
 3380                  */
 3381                 if (ap->a_op == F_SETLK) {
 3382                         if ((np->n_flag & NMODIFIED) == 0) {
 3383                                 np->n_attrstamp = 0;
 3384                                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
 3385                                 ret = VOP_GETATTR(vp, &va, cred);
 3386                         }
 3387                         if ((np->n_flag & NMODIFIED) || ret ||
 3388                             np->n_change != va.va_filerev) {
 3389                                 (void) ncl_vinvalbuf(vp, V_SAVE, td, 1);
 3390                                 np->n_attrstamp = 0;
 3391                                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
 3392                                 ret = VOP_GETATTR(vp, &va, cred);
 3393                                 if (!ret) {
 3394                                         np->n_mtime = va.va_mtime;
 3395                                         np->n_change = va.va_filerev;
 3396                                 }
 3397                         }
 3398                         /* Mark that a file lock has been acquired. */
 3399                         NFSLOCKNODE(np);
 3400                         np->n_flag |= NHASBEENLOCKED;
 3401                         NFSUNLOCKNODE(np);
 3402                 }
 3403         } else
 3404                 error = EOPNOTSUPP;
 3405 out:
 3406         NFSVOPUNLOCK(vp);
 3407         return (error);
 3408 }
 3409 
 3410 /*
 3411  * NFS advisory byte-level locks.
 3412  */
 3413 static int
 3414 nfs_advlockasync(struct vop_advlockasync_args *ap)
 3415 {
 3416         struct vnode *vp = ap->a_vp;
 3417         u_quad_t size;
 3418         int error;
 3419 
 3420         error = NFSVOPLOCK(vp, LK_SHARED);
 3421         if (error)
 3422                 return (error);
 3423         if (NFS_ISV4(vp)) {
 3424                 NFSVOPUNLOCK(vp);
 3425                 return (EOPNOTSUPP);
 3426         }
 3427         if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NOLOCKD) != 0) {
 3428                 size = VTONFS(vp)->n_size;
 3429                 NFSVOPUNLOCK(vp);
 3430                 error = lf_advlockasync(ap, &(vp->v_lockf), size);
 3431         } else {
 3432                 NFSVOPUNLOCK(vp);
 3433                 error = EOPNOTSUPP;
 3434         }
 3435         return (error);
 3436 }
 3437 
 3438 /*
 3439  * Print out the contents of an nfsnode.
 3440  */
 3441 static int
 3442 nfs_print(struct vop_print_args *ap)
 3443 {
 3444         struct vnode *vp = ap->a_vp;
 3445         struct nfsnode *np = VTONFS(vp);
 3446 
 3447         printf("\tfileid %jd fsid 0x%jx", (uintmax_t)np->n_vattr.na_fileid,
 3448             (uintmax_t)np->n_vattr.na_fsid);
 3449         if (vp->v_type == VFIFO)
 3450                 fifo_printinfo(vp);
 3451         printf("\n");
 3452         return (0);
 3453 }
 3454 
 3455 /*
 3456  * This is the "real" nfs::bwrite(struct buf*).
 3457  * We set B_CACHE if this is a VMIO buffer.
 3458  */
 3459 int
 3460 ncl_writebp(struct buf *bp, int force __unused, struct thread *td)
 3461 {
 3462         int oldflags, rtval;
 3463 
 3464         if (bp->b_flags & B_INVAL) {
 3465                 brelse(bp);
 3466                 return (0);
 3467         }
 3468 
 3469         oldflags = bp->b_flags;
 3470         bp->b_flags |= B_CACHE;
 3471 
 3472         /*
 3473          * Undirty the bp.  We will redirty it later if the I/O fails.
 3474          */
 3475         bundirty(bp);
 3476         bp->b_flags &= ~B_DONE;
 3477         bp->b_ioflags &= ~BIO_ERROR;
 3478         bp->b_iocmd = BIO_WRITE;
 3479 
 3480         bufobj_wref(bp->b_bufobj);
 3481         curthread->td_ru.ru_oublock++;
 3482 
 3483         /*
 3484          * Note: to avoid loopback deadlocks, we do not
 3485          * assign b_runningbufspace.
 3486          */
 3487         vfs_busy_pages(bp, 1);
 3488 
 3489         BUF_KERNPROC(bp);
 3490         bp->b_iooffset = dbtob(bp->b_blkno);
 3491         bstrategy(bp);
 3492 
 3493         if ((oldflags & B_ASYNC) != 0)
 3494                 return (0);
 3495 
 3496         rtval = bufwait(bp);
 3497         if (oldflags & B_DELWRI)
 3498                 reassignbuf(bp);
 3499         brelse(bp);
 3500         return (rtval);
 3501 }
 3502 
 3503 /*
 3504  * nfs special file access vnode op.
 3505  * Essentially just get vattr and then imitate iaccess() since the device is
 3506  * local to the client.
 3507  */
 3508 static int
 3509 nfsspec_access(struct vop_access_args *ap)
 3510 {
 3511         struct vattr *vap;
 3512         struct ucred *cred = ap->a_cred;
 3513         struct vnode *vp = ap->a_vp;
 3514         accmode_t accmode = ap->a_accmode;
 3515         struct vattr vattr;
 3516         int error;
 3517 
 3518         /*
 3519          * Disallow write attempts on filesystems mounted read-only,
 3520          * unless the file is a socket, fifo, or a block or character
 3521          * device resident on the filesystem.
 3522          */
 3523         if ((accmode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
 3524                 switch (vp->v_type) {
 3525                 case VREG:
 3526                 case VDIR:
 3527                 case VLNK:
 3528                         return (EROFS);
 3529                 default:
 3530                         break;
 3531                 }
 3532         }
 3533         vap = &vattr;
 3534         error = VOP_GETATTR(vp, vap, cred);
 3535         if (error)
 3536                 goto out;
 3537         error = vaccess(vp->v_type, vap->va_mode, vap->va_uid, vap->va_gid,
 3538             accmode, cred);
 3539 out:
 3540         return error;
 3541 }
 3542 
 3543 /*
 3544  * Read wrapper for fifos.
 3545  */
 3546 static int
 3547 nfsfifo_read(struct vop_read_args *ap)
 3548 {
 3549         struct nfsnode *np = VTONFS(ap->a_vp);
 3550         int error;
 3551 
 3552         /*
 3553          * Set access flag.
 3554          */
 3555         NFSLOCKNODE(np);
 3556         np->n_flag |= NACC;
 3557         vfs_timestamp(&np->n_atim);
 3558         NFSUNLOCKNODE(np);
 3559         error = fifo_specops.vop_read(ap);
 3560         return error;   
 3561 }
 3562 
 3563 /*
 3564  * Write wrapper for fifos.
 3565  */
 3566 static int
 3567 nfsfifo_write(struct vop_write_args *ap)
 3568 {
 3569         struct nfsnode *np = VTONFS(ap->a_vp);
 3570 
 3571         /*
 3572          * Set update flag.
 3573          */
 3574         NFSLOCKNODE(np);
 3575         np->n_flag |= NUPD;
 3576         vfs_timestamp(&np->n_mtim);
 3577         NFSUNLOCKNODE(np);
 3578         return(fifo_specops.vop_write(ap));
 3579 }
 3580 
 3581 /*
 3582  * Close wrapper for fifos.
 3583  *
 3584  * Update the times on the nfsnode then do fifo close.
 3585  */
 3586 static int
 3587 nfsfifo_close(struct vop_close_args *ap)
 3588 {
 3589         struct vnode *vp = ap->a_vp;
 3590         struct nfsnode *np = VTONFS(vp);
 3591         struct vattr vattr;
 3592         struct timespec ts;
 3593 
 3594         NFSLOCKNODE(np);
 3595         if (np->n_flag & (NACC | NUPD)) {
 3596                 vfs_timestamp(&ts);
 3597                 if (np->n_flag & NACC)
 3598                         np->n_atim = ts;
 3599                 if (np->n_flag & NUPD)
 3600                         np->n_mtim = ts;
 3601                 np->n_flag |= NCHG;
 3602                 if (vrefcnt(vp) == 1 &&
 3603                     (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
 3604                         VATTR_NULL(&vattr);
 3605                         if (np->n_flag & NACC)
 3606                                 vattr.va_atime = np->n_atim;
 3607                         if (np->n_flag & NUPD)
 3608                                 vattr.va_mtime = np->n_mtim;
 3609                         NFSUNLOCKNODE(np);
 3610                         (void)VOP_SETATTR(vp, &vattr, ap->a_cred);
 3611                         goto out;
 3612                 }
 3613         }
 3614         NFSUNLOCKNODE(np);
 3615 out:
 3616         return (fifo_specops.vop_close(ap));
 3617 }
 3618 
 3619 /*
 3620  * Just call ncl_writebp() with the force argument set to 1.
 3621  *
 3622  * NOTE: B_DONE may or may not be set in a_bp on call.
 3623  */
 3624 static int
 3625 nfs_bwrite(struct buf *bp)
 3626 {
 3627 
 3628         return (ncl_writebp(bp, 1, curthread));
 3629 }
 3630 
 3631 struct buf_ops buf_ops_newnfs = {
 3632         .bop_name       =       "buf_ops_nfs",
 3633         .bop_write      =       nfs_bwrite,
 3634         .bop_strategy   =       bufstrategy,
 3635         .bop_sync       =       bufsync,
 3636         .bop_bdflush    =       bufbdflush,
 3637 };
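
The buf_ops table above routes bwrite() on NFS buffers through nfs_bwrite()
and hence ncl_writebp(); it only takes effect once installed in a vnode's
buffer object, which is assumed to happen in the nfsnode setup code roughly
as sketched below.

        /* Assumed attachment point, performed when the client vnode is created. */
        vp->v_bufobj.bo_ops = &buf_ops_newnfs;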
 3638 
 3639 static int
 3640 nfs_getacl(struct vop_getacl_args *ap)
 3641 {
 3642         int error;
 3643 
 3644         if (ap->a_type != ACL_TYPE_NFS4)
 3645                 return (EOPNOTSUPP);
 3646         error = nfsrpc_getacl(ap->a_vp, ap->a_cred, ap->a_td, ap->a_aclp);
 3647         if (error > NFSERR_STALE) {
 3648                 (void) nfscl_maperr(ap->a_td, error, (uid_t)0, (gid_t)0);
 3649                 error = EPERM;
 3650         }
 3651         return (error);
 3652 }
 3653 
 3654 static int
 3655 nfs_setacl(struct vop_setacl_args *ap)
 3656 {
 3657         int error;
 3658 
 3659         if (ap->a_type != ACL_TYPE_NFS4)
 3660                 return (EOPNOTSUPP);
 3661         error = nfsrpc_setacl(ap->a_vp, ap->a_cred, ap->a_td, ap->a_aclp);
 3662         if (error > NFSERR_STALE) {
 3663                 (void) nfscl_maperr(ap->a_td, error, (uid_t)0, (gid_t)0);
 3664                 error = EPERM;
 3665         }
 3666         return (error);
 3667 }
 3668 
 3669 /*
 3670  * VOP_ADVISE for NFS.
 3671  * Just return 0 for any errors, since it is just a hint.
 3672  */
 3673 static int
 3674 nfs_advise(struct vop_advise_args *ap)
 3675 {
 3676         struct thread *td = curthread;
 3677         struct nfsmount *nmp;
 3678         uint64_t len;
 3679         int error;
 3680 
 3681         /*
 3682          * First do vop_stdadvise() to handle the buffer cache.
 3683          */
 3684         error = vop_stdadvise(ap);
 3685         if (error != 0)
 3686                 return (error);
 3687         if (ap->a_start < 0 || ap->a_end < 0)
 3688                 return (0);
 3689         if (ap->a_end == OFF_MAX)
 3690                 len = 0;
 3691         else if (ap->a_end < ap->a_start)
 3692                 return (0);
 3693         else
 3694                 len = ap->a_end - ap->a_start + 1;
 3695         nmp = VFSTONFS(ap->a_vp->v_mount);
 3696         mtx_lock(&nmp->nm_mtx);
 3697         if (!NFSHASNFSV4(nmp) || nmp->nm_minorvers < NFSV42_MINORVERSION ||
 3698             (NFSHASPNFS(nmp) && (nmp->nm_privflag & NFSMNTP_IOADVISETHRUMDS) ==
 3699             0) || (nmp->nm_privflag & NFSMNTP_NOADVISE) != 0) {
 3700                 mtx_unlock(&nmp->nm_mtx);
 3701                 return (0);
 3702         }
 3703         mtx_unlock(&nmp->nm_mtx);
 3704         error = nfsrpc_advise(ap->a_vp, ap->a_start, len, ap->a_advice,
 3705             td->td_ucred, td);
 3706         if (error == NFSERR_NOTSUPP) {
 3707                 mtx_lock(&nmp->nm_mtx);
 3708                 nmp->nm_privflag |= NFSMNTP_NOADVISE;
 3709                 mtx_unlock(&nmp->nm_mtx);
 3710         }
 3711         return (0);
 3712 }
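
The range handling at the top of nfs_advise() condenses to the kernel-style
helper below (advise_len() is an invented name): negative or inverted ranges
are silently ignored, an end offset of OFF_MAX means "to end of file" and is
sent as length 0, and any other range is inclusive of both endpoints.

        #include <sys/types.h>
        #include <sys/limits.h>

        static int
        advise_len(off_t start, off_t end, uint64_t *lenp)
        {
                if (start < 0 || end < 0 || (end != OFF_MAX && end < start))
                        return (0);     /* nothing to send; not an error */
                *lenp = (end == OFF_MAX) ? 0 : (uint64_t)(end - start) + 1;
                return (1);
        }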
 3713 
 3714 /*
 3715  * nfs allocate call
 3716  */
 3717 static int
 3718 nfs_allocate(struct vop_allocate_args *ap)
 3719 {
 3720         struct vnode *vp = ap->a_vp;
 3721         struct thread *td = curthread;
 3722         struct nfsvattr nfsva;
 3723         struct nfsmount *nmp;
 3724         struct nfsnode *np;
 3725         off_t alen;
 3726         int attrflag, error, ret;
 3727         struct timespec ts;
 3728         struct uio io;
 3729 
 3730         attrflag = 0;
 3731         nmp = VFSTONFS(vp->v_mount);
 3732         np = VTONFS(vp);
 3733         mtx_lock(&nmp->nm_mtx);
 3734         if (NFSHASNFSV4(nmp) && nmp->nm_minorvers >= NFSV42_MINORVERSION &&
 3735             (nmp->nm_privflag & NFSMNTP_NOALLOCATE) == 0) {
 3736                 mtx_unlock(&nmp->nm_mtx);
 3737                 alen = *ap->a_len;
 3738                 if ((uint64_t)alen > nfs_maxalloclen)
 3739                         alen = nfs_maxalloclen;
 3740 
 3741                 /* Check the file size limit. */
 3742                 io.uio_offset = *ap->a_offset;
 3743                 io.uio_resid = alen;
 3744                 error = vn_rlimit_fsize(vp, &io, td);
 3745 
 3746                 /*
 3747                  * Flush first to ensure that the allocate adds to the
 3748                  * file's allocation on the server.
 3749                  */
 3750                 if (error == 0)
 3751                         error = ncl_flush(vp, MNT_WAIT, td, 1, 0);
 3752                 if (error == 0)
 3753                         error = nfsrpc_allocate(vp, *ap->a_offset, alen,
 3754                             &nfsva, &attrflag, ap->a_cred, td);
 3755                 if (error == 0) {
 3756                         *ap->a_offset += alen;
 3757                         *ap->a_len -= alen;
 3758                         nanouptime(&ts);
 3759                         NFSLOCKNODE(np);
 3760                         np->n_localmodtime = ts;
 3761                         NFSUNLOCKNODE(np);
 3762                 } else if (error == NFSERR_NOTSUPP) {
 3763                         mtx_lock(&nmp->nm_mtx);
 3764                         nmp->nm_privflag |= NFSMNTP_NOALLOCATE;
 3765                         mtx_unlock(&nmp->nm_mtx);
 3766                         error = EINVAL;
 3767                 }
 3768         } else {
 3769                 mtx_unlock(&nmp->nm_mtx);
 3770                 error = EINVAL;
 3771         }
 3772         if (attrflag != 0) {
 3773                 ret = nfscl_loadattrcache(&vp, &nfsva, NULL, 0, 1);
 3774                 if (error == 0 && ret != 0)
 3775                         error = ret;
 3776         }
 3777         if (error != 0)
 3778                 error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
 3779         return (error);
 3780 }
 3781 
 3782 /*
 3783  * nfs deallocate call
 3784  */
 3785 static int
 3786 nfs_deallocate(struct vop_deallocate_args *ap)
 3787 {
 3788         struct vnode *vp = ap->a_vp;
 3789         struct thread *td = curthread;
 3790         struct nfsvattr nfsva;
 3791         struct nfsmount *nmp;
 3792         struct nfsnode *np;
 3793         off_t tlen, mlen;
 3794         int attrflag, error, ret;
 3795         bool clipped;
 3796         struct timespec ts;
 3797 
 3798         error = 0;
 3799         attrflag = 0;
 3800         nmp = VFSTONFS(vp->v_mount);
 3801         np = VTONFS(vp);
 3802         mtx_lock(&nmp->nm_mtx);
 3803         if (NFSHASNFSV4(nmp) && nmp->nm_minorvers >= NFSV42_MINORVERSION &&
 3804             (nmp->nm_privflag & NFSMNTP_NODEALLOCATE) == 0) {
 3805                 mtx_unlock(&nmp->nm_mtx);
 3806                 tlen = omin(OFF_MAX - *ap->a_offset, *ap->a_len);
 3807                 NFSCL_DEBUG(4, "dealloc: off=%jd len=%jd maxfilesize=%ju\n",
 3808                     (intmax_t)*ap->a_offset, (intmax_t)tlen,
 3809                     (uintmax_t)nmp->nm_maxfilesize);
 3810                 if ((uint64_t)*ap->a_offset >= nmp->nm_maxfilesize) {
 3811                         /* Avoid EFBIG error return from the NFSv4.2 server. */
 3812                         *ap->a_len = 0;
 3813                         return (0);
 3814                 }
 3815                 clipped = false;
 3816                 if ((uint64_t)*ap->a_offset + tlen > nmp->nm_maxfilesize)
 3817                         tlen = nmp->nm_maxfilesize - *ap->a_offset;
 3818                 if ((uint64_t)*ap->a_offset < np->n_size) {
 3819                         /* Limit the len to nfs_maxalloclen before EOF. */
 3820                         mlen = omin((off_t)np->n_size - *ap->a_offset, tlen);
 3821                         if ((uint64_t)mlen > nfs_maxalloclen) {
 3822                                 NFSCL_DEBUG(4, "dealloc: tlen maxalloclen\n");
 3823                                 tlen = nfs_maxalloclen;
 3824                                 clipped = true;
 3825                         }
 3826                 }
 3827                 if (error == 0)
 3828                         error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
 3829                 if (error == 0) {
 3830                         vnode_pager_purge_range(vp, *ap->a_offset,
 3831                             *ap->a_offset + tlen);
 3832                         error = nfsrpc_deallocate(vp, *ap->a_offset, tlen,
 3833                             &nfsva, &attrflag, ap->a_cred, td);
 3834                         NFSCL_DEBUG(4, "dealloc: rpc=%d\n", error);
 3835                 }
 3836                 if (error == 0) {
 3837                         NFSCL_DEBUG(4, "dealloc: attrflag=%d na_size=%ju\n",
 3838                             attrflag, (uintmax_t)nfsva.na_size);
 3839                         nanouptime(&ts);
 3840                         NFSLOCKNODE(np);
 3841                         np->n_localmodtime = ts;
 3842                         NFSUNLOCKNODE(np);
 3843                         if (attrflag != 0) {
 3844                                 if ((uint64_t)*ap->a_offset < nfsva.na_size)
 3845                                         *ap->a_offset += omin((off_t)
 3846                                             nfsva.na_size - *ap->a_offset,
 3847                                             tlen);
 3848                         }
 3849                         if (clipped && tlen < *ap->a_len)
 3850                                 *ap->a_len -= tlen;
 3851                         else
 3852                                 *ap->a_len = 0;
 3853                 } else if (error == NFSERR_NOTSUPP) {
 3854                         mtx_lock(&nmp->nm_mtx);
 3855                         nmp->nm_privflag |= NFSMNTP_NODEALLOCATE;
 3856                         mtx_unlock(&nmp->nm_mtx);
 3857                 }
 3858         } else {
 3859                 mtx_unlock(&nmp->nm_mtx);
 3860                 error = EIO;
 3861         }
 3862         /*
 3863          * If the NFS server cannot perform the Deallocate operation, just call
 3864          * vop_stddeallocate() to perform it.
 3865          */
 3866         if (error != 0 && error != NFSERR_FBIG && error != NFSERR_INVAL) {
 3867                 error = vop_stddeallocate(ap);
 3868                 NFSCL_DEBUG(4, "dealloc: stddeallocate=%d\n", error);
 3869         }
 3870         if (attrflag != 0) {
 3871                 ret = nfscl_loadattrcache(&vp, &nfsva, NULL, 0, 1);
 3872                 if (error == 0 && ret != 0)
 3873                         error = ret;
 3874         }
 3875         if (error != 0)
 3876                 error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
 3877         return (error);
 3878 }
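
From userland, this VOP is normally reached through the fspacectl(2) system call with the SPACECTL_DEALLOC command (only present on FreeBSD releases new enough to have VOP_DEALLOCATE at all). A minimal sketch; the helper name is chosen only for illustration:

#include <sys/types.h>
#include <fcntl.h>
#include <err.h>

/* Deallocate (hole-punch) "len" bytes at "off" in an open file. */
static void
punch_hole(int fd, off_t off, off_t len)
{
        struct spacectl_range rqsr = { .r_offset = off, .r_len = len };
        struct spacectl_range rmsr;

        /*
         * SPACECTL_DEALLOC zeroes the range and releases its backing
         * store; rmsr reports whatever part was left unprocessed.
         */
        if (fspacectl(fd, SPACECTL_DEALLOC, &rqsr, 0, &rmsr) == -1)
                err(1, "fspacectl");
}

On an NFSv4.2 mount the request becomes the Deallocate RPC issued above; otherwise the code falls back to vop_stddeallocate(), which simply writes zeroes over the range.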
 3879 
 3880 /*
 3881  * nfs copy_file_range call
 3882  */
 3883 static int
 3884 nfs_copy_file_range(struct vop_copy_file_range_args *ap)
 3885 {
 3886         struct vnode *invp = ap->a_invp;
 3887         struct vnode *outvp = ap->a_outvp;
 3888         struct mount *mp;
 3889         struct nfsvattr innfsva, outnfsva;
 3890         struct vattr *vap;
 3891         struct uio io;
 3892         struct nfsmount *nmp;
 3893         size_t len, len2;
 3894         ssize_t r;
 3895         int error, inattrflag, outattrflag, ret, ret2;
 3896         off_t inoff, outoff;
 3897         bool consecutive, must_commit, tryoutcred;
 3898 
 3899         /* NFSv4.2 Copy is not permitted for infile == outfile. */
 3900         if (invp == outvp) {
 3901 generic_copy:
 3902                 return (vn_generic_copy_file_range(invp, ap->a_inoffp,
 3903                     outvp, ap->a_outoffp, ap->a_lenp, ap->a_flags,
 3904                     ap->a_incred, ap->a_outcred, ap->a_fsizetd));
 3905         }
 3906 
 3907         /* Lock both vnodes, avoiding risk of deadlock. */
 3908         do {
 3909                 mp = NULL;
 3910                 error = vn_start_write(outvp, &mp, V_WAIT);
 3911                 if (error == 0) {
 3912                         error = vn_lock(outvp, LK_EXCLUSIVE);
 3913                         if (error == 0) {
 3914                                 error = vn_lock(invp, LK_SHARED | LK_NOWAIT);
 3915                                 if (error == 0)
 3916                                         break;
 3917                                 VOP_UNLOCK(outvp);
 3918                                 if (mp != NULL)
 3919                                         vn_finished_write(mp);
 3920                                 mp = NULL;
 3921                                 error = vn_lock(invp, LK_SHARED);
 3922                                 if (error == 0)
 3923                                         VOP_UNLOCK(invp);
 3924                         }
 3925                 }
 3926                 if (mp != NULL)
 3927                         vn_finished_write(mp);
 3928         } while (error == 0);
 3929         if (error != 0)
 3930                 return (error);
 3931 
 3932         /*
 3933          * More reasons to avoid nfs copy: not NFSv4.2, or explicitly
 3934          * disabled.
 3935          */
 3936         nmp = VFSTONFS(invp->v_mount);
 3937         mtx_lock(&nmp->nm_mtx);
 3938         if (!NFSHASNFSV4(nmp) || nmp->nm_minorvers < NFSV42_MINORVERSION ||
 3939             (nmp->nm_privflag & NFSMNTP_NOCOPY) != 0) {
 3940                 mtx_unlock(&nmp->nm_mtx);
 3941                 VOP_UNLOCK(invp);
 3942                 VOP_UNLOCK(outvp);
 3943                 if (mp != NULL)
 3944                         vn_finished_write(mp);
 3945                 goto generic_copy;
 3946         }
 3947         mtx_unlock(&nmp->nm_mtx);
 3948 
 3949         /*
 3950          * Do the vn_rlimit_fsize() check.  Should this be above the VOP layer?
 3951          */
 3952         io.uio_offset = *ap->a_outoffp;
 3953         io.uio_resid = *ap->a_lenp;
 3954         error = vn_rlimit_fsizex(outvp, &io, 0, &r, ap->a_fsizetd);
 3955         *ap->a_lenp = io.uio_resid;
 3956         /*
 3957          * No need to call vn_rlimit_fsizex_res before return, since the uio is
 3958          * local.
 3959          */
 3960 
 3961         /*
 3962          * Flush the input file so that the data is up to date before
 3963          * the copy.  Flush writes for the output file so that they
 3964          * do not overwrite the data copied to the output file by the Copy.
 3965          * Set the commit argument for both flushes so that the data is on
 3966          * stable storage before the Copy RPC.  This is done in case the
 3967          * server reboots during the Copy and needs to be redone.
 3968          */
 3969         if (error == 0)
 3970                 error = ncl_flush(invp, MNT_WAIT, curthread, 1, 0);
 3971         if (error == 0)
 3972                 error = ncl_flush(outvp, MNT_WAIT, curthread, 1, 0);
 3973 
 3974         /* Do the actual NFSv4.2 RPC. */
 3975         ret = ret2 = 0;
 3976         len = *ap->a_lenp;
 3977         mtx_lock(&nmp->nm_mtx);
 3978         if ((nmp->nm_privflag & NFSMNTP_NOCONSECUTIVE) == 0)
 3979                 consecutive = true;
 3980         else
 3981                 consecutive = false;
 3982         mtx_unlock(&nmp->nm_mtx);
 3983         inoff = *ap->a_inoffp;
 3984         outoff = *ap->a_outoffp;
 3985         tryoutcred = true;
 3986         must_commit = false;
 3987         if (error == 0) {
 3988                 vap = &VTONFS(invp)->n_vattr.na_vattr;
 3989                 error = VOP_GETATTR(invp, vap, ap->a_incred);
 3990                 if (error == 0) {
 3991                         /*
 3992                          * Clip "len" at va_size so that RFC compliant servers
 3993                          * will not reply NFSERR_INVAL.
 3994                          * Setting "len == 0" for the RPC would be preferred,
 3995                          * but some Linux servers do not support that.
 3996                          */
 3997                         if (inoff >= vap->va_size)
 3998                                 *ap->a_lenp = len = 0;
 3999                         else if (inoff + len > vap->va_size)
 4000                                 *ap->a_lenp = len = vap->va_size - inoff;
 4001                 } else
 4002                         error = 0;
 4003         }
 4004 
 4005         /*
 4006          * len will be set to 0 upon a successful Copy RPC.
 4007          * As such, this only loops when the Copy RPC needs to be retried.
 4008          */
 4009         while (len > 0 && error == 0) {
 4010                 inattrflag = outattrflag = 0;
 4011                 len2 = len;
 4012                 if (tryoutcred)
 4013                         error = nfsrpc_copy_file_range(invp, ap->a_inoffp,
 4014                             outvp, ap->a_outoffp, &len2, ap->a_flags,
 4015                             &inattrflag, &innfsva, &outattrflag, &outnfsva,
 4016                             ap->a_outcred, consecutive, &must_commit);
 4017                 else
 4018                         error = nfsrpc_copy_file_range(invp, ap->a_inoffp,
 4019                             outvp, ap->a_outoffp, &len2, ap->a_flags,
 4020                             &inattrflag, &innfsva, &outattrflag, &outnfsva,
 4021                             ap->a_incred, consecutive, &must_commit);
 4022                 if (inattrflag != 0)
 4023                         ret = nfscl_loadattrcache(&invp, &innfsva, NULL, 0, 1);
 4024                 if (outattrflag != 0)
 4025                         ret2 = nfscl_loadattrcache(&outvp, &outnfsva, NULL,
 4026                             1, 1);
 4027                 if (error == 0) {
 4028                         if (consecutive == false) {
 4029                                 if (len2 == len) {
 4030                                         mtx_lock(&nmp->nm_mtx);
 4031                                         nmp->nm_privflag |=
 4032                                             NFSMNTP_NOCONSECUTIVE;
 4033                                         mtx_unlock(&nmp->nm_mtx);
 4034                                 } else
 4035                                         error = NFSERR_OFFLOADNOREQS;
 4036                         }
 4037                         *ap->a_lenp = len2;
 4038                         len = 0;
 4039                         if (len2 > 0 && must_commit && error == 0)
 4040                                 error = ncl_commit(outvp, outoff, *ap->a_lenp,
 4041                                     ap->a_outcred, curthread);
 4042                         if (error == 0 && ret != 0)
 4043                                 error = ret;
 4044                         if (error == 0 && ret2 != 0)
 4045                                 error = ret2;
 4046                 } else if (error == NFSERR_OFFLOADNOREQS && consecutive) {
 4047                         /*
 4048                          * Try consecutive == false, which is ok only if all
 4049                          * bytes are copied.
 4050                          * If only some bytes were copied when consecutive
 4051                          * is false, there is no way to know which bytes
 4052                          * still need to be written.
 4053                          */
 4054                         consecutive = false;
 4055                         error = 0;
 4056                 } else if (error == NFSERR_ACCES && tryoutcred) {
 4057                         /* Try again with incred. */
 4058                         tryoutcred = false;
 4059                         error = 0;
 4060                 }
 4061                 if (error == NFSERR_STALEWRITEVERF) {
 4062                         /*
 4063                          * Server rebooted, so do it all again.
 4064                          */
 4065                         *ap->a_inoffp = inoff;
 4066                         *ap->a_outoffp = outoff;
 4067                         len = *ap->a_lenp;
 4068                         must_commit = false;
 4069                         error = 0;
 4070                 }
 4071         }
 4072         VOP_UNLOCK(invp);
 4073         VOP_UNLOCK(outvp);
 4074         if (mp != NULL)
 4075                 vn_finished_write(mp);
 4076         if (error == NFSERR_NOTSUPP || error == NFSERR_OFFLOADNOREQS ||
 4077             error == NFSERR_ACCES) {
 4078                 /*
 4079                  * Unlike the NFSv4.2 Copy, vn_generic_copy_file_range() can
 4080                  * use a_incred for the read and a_outcred for the write, so
 4081                  * try this for NFSERR_ACCES failures for the Copy.
 4082                  * For NFSERR_NOTSUPP and NFSERR_OFFLOADNOREQS, the Copy can
 4083                  * never succeed, so disable it.
 4084                  */
 4085                 if (error != NFSERR_ACCES) {
 4086                         /* Can never do Copy on this mount. */
 4087                         mtx_lock(&nmp->nm_mtx);
 4088                         nmp->nm_privflag |= NFSMNTP_NOCOPY;
 4089                         mtx_unlock(&nmp->nm_mtx);
 4090                 }
 4091                 *ap->a_inoffp = inoff;
 4092                 *ap->a_outoffp = outoff;
 4093                 error = vn_generic_copy_file_range(ap->a_invp, ap->a_inoffp,
 4094                     ap->a_outvp, ap->a_outoffp, ap->a_lenp, ap->a_flags,
 4095                     ap->a_incred, ap->a_outcred, ap->a_fsizetd);
 4096         } else if (error != 0)
 4097                 *ap->a_lenp = 0;
 4098 
 4099         if (error != 0)
 4100                 error = nfscl_maperr(curthread, error, (uid_t)0, (gid_t)0);
 4101         return (error);
 4102 }
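
The corresponding userland interface is copy_file_range(2), which the kernel routes to this VOP for NFS vnodes and which falls back to vn_generic_copy_file_range() in the cases handled above. A minimal sketch (the helper name is illustrative):

#include <sys/types.h>
#include <err.h>
#include <unistd.h>

/* Copy "len" bytes between two open files at the given offsets. */
static void
copy_range(int infd, off_t inoff, int outfd, off_t outoff, size_t len)
{
        ssize_t n;

        while (len > 0) {
                n = copy_file_range(infd, &inoff, outfd, &outoff, len, 0);
                if (n == -1)
                        err(1, "copy_file_range");
                if (n == 0)             /* reached EOF on the input file */
                        break;
                len -= (size_t)n;
        }
}

The kernel updates both offsets, so the loop only has to account for short copies and input EOF.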
 4103 
 4104 /*
 4105  * nfs ioctl call
 4106  */
 4107 static int
 4108 nfs_ioctl(struct vop_ioctl_args *ap)
 4109 {
 4110         struct vnode *vp = ap->a_vp;
 4111         struct nfsvattr nfsva;
 4112         struct nfsmount *nmp;
 4113         int attrflag, content, error, ret;
 4114         bool eof = false;                       /* shut up compiler. */
 4115 
 4116         /* Do the actual NFSv4.2 RPC. */
 4117         switch (ap->a_command) {
 4118         case FIOSEEKDATA:
 4119                 content = NFSV4CONTENT_DATA;
 4120                 break;
 4121         case FIOSEEKHOLE:
 4122                 content = NFSV4CONTENT_HOLE;
 4123                 break;
 4124         default:
 4125                 return (ENOTTY);
 4126         }
 4127 
 4128         error = vn_lock(vp, LK_SHARED);
 4129         if (error != 0)
 4130                 return (EBADF);
 4131 
 4132         if (vp->v_type != VREG) {
 4133                 VOP_UNLOCK(vp);
 4134                 return (ENOTTY);
 4135         }
 4136         nmp = VFSTONFS(vp->v_mount);
 4137         if (!NFSHASNFSV4(nmp) || nmp->nm_minorvers < NFSV42_MINORVERSION) {
 4138                 VOP_UNLOCK(vp);
 4139                 error = vop_stdioctl(ap);
 4140                 return (error);
 4141         }
 4142 
 4143         attrflag = 0;
 4144         if (*((off_t *)ap->a_data) >= VTONFS(vp)->n_size)
 4145                 error = ENXIO;
 4146         else {
 4147                 /*
 4148                  * Flush all writes, so that the server is up to date.
 4149                  * Although a Commit is not required, the commit argument
 4150                  * is set so that, for a pNFS File/Flexible File Layout
 4151                  * server, the LayoutCommit will be done to ensure the file
 4152                  * size is up to date on the Metadata Server.
 4153                  */
 4154                 error = ncl_flush(vp, MNT_WAIT, ap->a_td, 1, 0);
 4155                 if (error == 0)
 4156                         error = nfsrpc_seek(vp, (off_t *)ap->a_data, &eof,
 4157                             content, ap->a_cred, &nfsva, &attrflag);
 4158                 /* If at eof for FIOSEEKDATA, return ENXIO. */
 4159                 if (eof && error == 0 && content == NFSV4CONTENT_DATA)
 4160                         error = ENXIO;
 4161         }
 4162         if (attrflag != 0) {
 4163                 ret = nfscl_loadattrcache(&vp, &nfsva, NULL, 0, 1);
 4164                 if (error == 0 && ret != 0)
 4165                         error = ret;
 4166         }
 4167         NFSVOPUNLOCK(vp);
 4168 
 4169         if (error != 0)
 4170                 error = ENXIO;
 4171         return (error);
 4172 }
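
The FIOSEEKDATA and FIOSEEKHOLE ioctls handled here are what back the SEEK_DATA and SEEK_HOLE whence values of lseek(2). A minimal sketch that walks the data regions of a sparse file (helper name illustrative):

#include <sys/types.h>
#include <err.h>
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Print the data regions of an open, possibly sparse, file. */
static void
walk_data_regions(int fd)
{
        off_t data = 0, hole;

        for (;;) {
                data = lseek(fd, data, SEEK_DATA);
                if (data == -1) {
                        if (errno == ENXIO)     /* no more data before EOF */
                                break;
                        err(1, "lseek(SEEK_DATA)");
                }
                hole = lseek(fd, data, SEEK_HOLE);
                if (hole == -1)
                        err(1, "lseek(SEEK_HOLE)");
                printf("data %jd .. %jd\n", (intmax_t)data, (intmax_t)hole);
                data = hole;
        }
}

Non-NFSv4.2 mounts are handed to vop_stdioctl() instead, and any failure of the Seek RPC is mapped to ENXIO above.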
 4173 
 4174 /*
 4175  * nfs getextattr call
 4176  */
 4177 static int
 4178 nfs_getextattr(struct vop_getextattr_args *ap)
 4179 {
 4180         struct vnode *vp = ap->a_vp;
 4181         struct nfsmount *nmp;
 4182         struct ucred *cred;
 4183         struct thread *td = ap->a_td;
 4184         struct nfsvattr nfsva;
 4185         ssize_t len;
 4186         int attrflag, error, ret;
 4187 
 4188         nmp = VFSTONFS(vp->v_mount);
 4189         mtx_lock(&nmp->nm_mtx);
 4190         if (!NFSHASNFSV4(nmp) || nmp->nm_minorvers < NFSV42_MINORVERSION ||
 4191             (nmp->nm_privflag & NFSMNTP_NOXATTR) != 0 ||
 4192             ap->a_attrnamespace != EXTATTR_NAMESPACE_USER) {
 4193                 mtx_unlock(&nmp->nm_mtx);
 4194                 return (EOPNOTSUPP);
 4195         }
 4196         mtx_unlock(&nmp->nm_mtx);
 4197 
 4198         cred = ap->a_cred;
 4199         if (cred == NULL)
 4200                 cred = td->td_ucred;
 4201         /* Do the actual NFSv4.2 Optional Extended Attribute (RFC-8276) RPC. */
 4202         attrflag = 0;
 4203         error = nfsrpc_getextattr(vp, ap->a_name, ap->a_uio, &len, &nfsva,
 4204             &attrflag, cred, td);
 4205         if (attrflag != 0) {
 4206                 ret = nfscl_loadattrcache(&vp, &nfsva, NULL, 0, 1);
 4207                 if (error == 0 && ret != 0)
 4208                         error = ret;
 4209         }
 4210         if (error == 0 && ap->a_size != NULL)
 4211                 *ap->a_size = len;
 4212 
 4213         switch (error) {
 4214         case NFSERR_NOTSUPP:
 4215         case NFSERR_OPILLEGAL:
 4216                 mtx_lock(&nmp->nm_mtx);
 4217                 nmp->nm_privflag |= NFSMNTP_NOXATTR;
 4218                 mtx_unlock(&nmp->nm_mtx);
 4219                 error = EOPNOTSUPP;
 4220                 break;
 4221         case NFSERR_NOXATTR:
 4222         case NFSERR_XATTR2BIG:
 4223                 error = ENOATTR;
 4224                 break;
 4225         default:
 4226                 error = nfscl_maperr(td, error, 0, 0);
 4227                 break;
 4228         }
 4229         return (error);
 4230 }
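
From userland this maps to extattr_get_file(2) and extattr_get_fd(2) with the EXTATTR_NAMESPACE_USER namespace, the only namespace the NFSv4.2 extended attribute protocol (RFC 8276) carries. A minimal sketch; the attribute name "comment" is purely illustrative:

#include <sys/types.h>
#include <sys/extattr.h>
#include <err.h>
#include <stdio.h>

/* Print the value of the user-namespace attribute "comment". */
static void
show_comment(const char *path)
{
        char buf[1024];
        ssize_t n;

        n = extattr_get_file(path, EXTATTR_NAMESPACE_USER, "comment",
            buf, sizeof(buf));
        if (n == -1)
                err(1, "extattr_get_file");     /* ENOATTR if missing */
        printf("%.*s\n", (int)n, buf);
}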
 4231 
 4232 /*
 4233  * nfs setextattr call
 4234  */
 4235 static int
 4236 nfs_setextattr(struct vop_setextattr_args *ap)
 4237 {
 4238         struct vnode *vp = ap->a_vp;
 4239         struct nfsmount *nmp;
 4240         struct ucred *cred;
 4241         struct thread *td = ap->a_td;
 4242         struct nfsvattr nfsva;
 4243         int attrflag, error, ret;
 4244 
 4245         nmp = VFSTONFS(vp->v_mount);
 4246         mtx_lock(&nmp->nm_mtx);
 4247         if (!NFSHASNFSV4(nmp) || nmp->nm_minorvers < NFSV42_MINORVERSION ||
 4248             (nmp->nm_privflag & NFSMNTP_NOXATTR) != 0 ||
 4249             ap->a_attrnamespace != EXTATTR_NAMESPACE_USER) {
 4250                 mtx_unlock(&nmp->nm_mtx);
 4251                 return (EOPNOTSUPP);
 4252         }
 4253         mtx_unlock(&nmp->nm_mtx);
 4254 
 4255         if (ap->a_uio->uio_resid < 0)
 4256                 return (EINVAL);
 4257         cred = ap->a_cred;
 4258         if (cred == NULL)
 4259                 cred = td->td_ucred;
 4260         /* Do the actual NFSv4.2 Optional Extended Attribute (RFC-8276) RPC. */
 4261         attrflag = 0;
 4262         error = nfsrpc_setextattr(vp, ap->a_name, ap->a_uio, &nfsva,
 4263             &attrflag, cred, td);
 4264         if (attrflag != 0) {
 4265                 ret = nfscl_loadattrcache(&vp, &nfsva, NULL, 0, 1);
 4266                 if (error == 0 && ret != 0)
 4267                         error = ret;
 4268         }
 4269 
 4270         switch (error) {
 4271         case NFSERR_NOTSUPP:
 4272         case NFSERR_OPILLEGAL:
 4273                 mtx_lock(&nmp->nm_mtx);
 4274                 nmp->nm_privflag |= NFSMNTP_NOXATTR;
 4275                 mtx_unlock(&nmp->nm_mtx);
 4276                 error = EOPNOTSUPP;
 4277                 break;
 4278         case NFSERR_NOXATTR:
 4279         case NFSERR_XATTR2BIG:
 4280                 error = ENOATTR;
 4281                 break;
 4282         default:
 4283                 error = nfscl_maperr(td, error, 0, 0);
 4284                 break;
 4285         }
 4286         return (error);
 4287 }
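
The matching userland call is extattr_set_file(2) (or extattr_set_fd(2)). A minimal sketch, again using an illustrative "comment" attribute:

#include <sys/types.h>
#include <sys/extattr.h>
#include <err.h>
#include <string.h>

/* Create or replace the user-namespace attribute "comment". */
static void
set_comment(const char *path, const char *value)
{
        if (extattr_set_file(path, EXTATTR_NAMESPACE_USER, "comment",
            value, strlen(value)) == -1)
                err(1, "extattr_set_file");
}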
 4288 
 4289 /*
 4290  * nfs listextattr call
 4291  */
 4292 static int
 4293 nfs_listextattr(struct vop_listextattr_args *ap)
 4294 {
 4295         struct vnode *vp = ap->a_vp;
 4296         struct nfsmount *nmp;
 4297         struct ucred *cred;
 4298         struct thread *td = ap->a_td;
 4299         struct nfsvattr nfsva;
 4300         size_t len, len2;
 4301         uint64_t cookie;
 4302         int attrflag, error, ret;
 4303         bool eof;
 4304 
 4305         nmp = VFSTONFS(vp->v_mount);
 4306         mtx_lock(&nmp->nm_mtx);
 4307         if (!NFSHASNFSV4(nmp) || nmp->nm_minorvers < NFSV42_MINORVERSION ||
 4308             (nmp->nm_privflag & NFSMNTP_NOXATTR) != 0 ||
 4309             ap->a_attrnamespace != EXTATTR_NAMESPACE_USER) {
 4310                 mtx_unlock(&nmp->nm_mtx);
 4311                 return (EOPNOTSUPP);
 4312         }
 4313         mtx_unlock(&nmp->nm_mtx);
 4314 
 4315         cred = ap->a_cred;
 4316         if (cred == NULL)
 4317                 cred = td->td_ucred;
 4318 
 4319         /* Loop around doing List Extended Attribute RPCs. */
 4320         eof = false;
 4321         cookie = 0;
 4322         len2 = 0;
 4323         error = 0;
 4324         while (!eof && error == 0) {
 4325                 len = nmp->nm_rsize;
 4326                 attrflag = 0;
 4327                 error = nfsrpc_listextattr(vp, &cookie, ap->a_uio, &len, &eof,
 4328                     &nfsva, &attrflag, cred, td);
 4329                 if (attrflag != 0) {
 4330                         ret = nfscl_loadattrcache(&vp, &nfsva, NULL, 0, 1);
 4331                         if (error == 0 && ret != 0)
 4332                                 error = ret;
 4333                 }
 4334                 if (error == 0) {
 4335                         len2 += len;
 4336                         if (len2 > SSIZE_MAX)
 4337                                 error = ENOATTR;
 4338                 }
 4339         }
 4340         if (error == 0 && ap->a_size != NULL)
 4341                 *ap->a_size = len2;
 4342 
 4343         switch (error) {
 4344         case NFSERR_NOTSUPP:
 4345         case NFSERR_OPILLEGAL:
 4346                 mtx_lock(&nmp->nm_mtx);
 4347                 nmp->nm_privflag |= NFSMNTP_NOXATTR;
 4348                 mtx_unlock(&nmp->nm_mtx);
 4349                 error = EOPNOTSUPP;
 4350                 break;
 4351         case NFSERR_NOXATTR:
 4352         case NFSERR_XATTR2BIG:
 4353                 error = ENOATTR;
 4354                 break;
 4355         default:
 4356                 error = nfscl_maperr(td, error, 0, 0);
 4357                 break;
 4358         }
 4359         return (error);
 4360 }
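
Userland reaches this through extattr_list_file(2) and extattr_list_fd(2). The returned buffer is a sequence of one-byte length fields, each followed by an attribute name with no NUL terminator, which the sketch below decodes (helper name illustrative; a larger buffer or a sizing call with a NULL buffer may be needed for files with many attributes):

#include <sys/types.h>
#include <sys/extattr.h>
#include <err.h>
#include <stdio.h>

/* Print the names of all user-namespace attributes on a file. */
static void
list_user_attrs(const char *path)
{
        char buf[4096];
        ssize_t i, n;
        unsigned char namelen;

        n = extattr_list_file(path, EXTATTR_NAMESPACE_USER, buf, sizeof(buf));
        if (n == -1)
                err(1, "extattr_list_file");
        for (i = 0; i < n; i += namelen) {
                namelen = (unsigned char)buf[i++];
                printf("%.*s\n", (int)namelen, buf + i);
        }
}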
 4361 
 4362 /*
  4363  * nfs deleteextattr call
 4364  */
 4365 static int
 4366 nfs_deleteextattr(struct vop_deleteextattr_args *ap)
 4367 {
 4368         struct vnode *vp = ap->a_vp;
 4369         struct nfsmount *nmp;
 4370         struct nfsvattr nfsva;
 4371         int attrflag, error, ret;
 4372 
 4373         nmp = VFSTONFS(vp->v_mount);
 4374         mtx_lock(&nmp->nm_mtx);
 4375         if (!NFSHASNFSV4(nmp) || nmp->nm_minorvers < NFSV42_MINORVERSION ||
 4376             (nmp->nm_privflag & NFSMNTP_NOXATTR) != 0 ||
 4377             ap->a_attrnamespace != EXTATTR_NAMESPACE_USER) {
 4378                 mtx_unlock(&nmp->nm_mtx);
 4379                 return (EOPNOTSUPP);
 4380         }
 4381         mtx_unlock(&nmp->nm_mtx);
 4382 
 4383         /* Do the actual NFSv4.2 Optional Extended Attribute (RFC-8276) RPC. */
 4384         attrflag = 0;
 4385         error = nfsrpc_rmextattr(vp, ap->a_name, &nfsva, &attrflag, ap->a_cred,
 4386             ap->a_td);
 4387         if (attrflag != 0) {
 4388                 ret = nfscl_loadattrcache(&vp, &nfsva, NULL, 0, 1);
 4389                 if (error == 0 && ret != 0)
 4390                         error = ret;
 4391         }
 4392 
 4393         switch (error) {
 4394         case NFSERR_NOTSUPP:
 4395         case NFSERR_OPILLEGAL:
 4396                 mtx_lock(&nmp->nm_mtx);
 4397                 nmp->nm_privflag |= NFSMNTP_NOXATTR;
 4398                 mtx_unlock(&nmp->nm_mtx);
 4399                 error = EOPNOTSUPP;
 4400                 break;
 4401         case NFSERR_NOXATTR:
 4402         case NFSERR_XATTR2BIG:
 4403                 error = ENOATTR;
 4404                 break;
 4405         default:
 4406                 error = nfscl_maperr(ap->a_td, error, 0, 0);
 4407                 break;
 4408         }
 4409         return (error);
 4410 }
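
The userland entry point is extattr_delete_file(2) (or extattr_delete_fd(2)). A minimal sketch with an illustrative attribute name:

#include <sys/types.h>
#include <sys/extattr.h>
#include <err.h>

/* Remove the user-namespace attribute "comment"; ENOATTR if absent. */
static void
delete_comment(const char *path)
{
        if (extattr_delete_file(path, EXTATTR_NAMESPACE_USER,
            "comment") == -1)
                err(1, "extattr_delete_file");
}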
 4411 
 4412 /*
 4413  * Return POSIX pathconf information applicable to nfs filesystems.
 4414  */
 4415 static int
 4416 nfs_pathconf(struct vop_pathconf_args *ap)
 4417 {
 4418         struct nfsv3_pathconf pc;
 4419         struct nfsvattr nfsva;
 4420         struct vnode *vp = ap->a_vp;
 4421         struct nfsmount *nmp;
 4422         struct thread *td = curthread;
 4423         off_t off;
 4424         bool eof;
 4425         int attrflag, error;
 4426 
 4427         if ((NFS_ISV34(vp) && (ap->a_name == _PC_LINK_MAX ||
 4428             ap->a_name == _PC_NAME_MAX || ap->a_name == _PC_CHOWN_RESTRICTED ||
 4429             ap->a_name == _PC_NO_TRUNC)) ||
 4430             (NFS_ISV4(vp) && ap->a_name == _PC_ACL_NFS4)) {
 4431                 /*
 4432                  * Since only the above 4 a_names are returned by the NFSv3
 4433                  * Pathconf RPC, there is no point in doing it for others.
 4434                  * For NFSv4, the Pathconf RPC (actually a Getattr Op.) can
  4435          * be used for _PC_ACL_NFS4 as well.
 4436                  */
 4437                 error = nfsrpc_pathconf(vp, &pc, td->td_ucred, td, &nfsva,
 4438                     &attrflag);
 4439                 if (attrflag != 0)
 4440                         (void) nfscl_loadattrcache(&vp, &nfsva, NULL, 0, 1);
 4441                 if (error != 0)
 4442                         return (error);
 4443         } else {
 4444                 /*
 4445                  * For NFSv2 (or NFSv3 when not one of the above 4 a_names),
 4446                  * just fake them.
 4447                  */
 4448                 pc.pc_linkmax = NFS_LINK_MAX;
 4449                 pc.pc_namemax = NFS_MAXNAMLEN;
 4450                 pc.pc_notrunc = 1;
 4451                 pc.pc_chownrestricted = 1;
 4452                 pc.pc_caseinsensitive = 0;
 4453                 pc.pc_casepreserving = 1;
 4454                 error = 0;
 4455         }
 4456         switch (ap->a_name) {
 4457         case _PC_LINK_MAX:
 4458 #ifdef _LP64
 4459                 *ap->a_retval = pc.pc_linkmax;
 4460 #else
 4461                 *ap->a_retval = MIN(LONG_MAX, pc.pc_linkmax);
 4462 #endif
 4463                 break;
 4464         case _PC_NAME_MAX:
 4465                 *ap->a_retval = pc.pc_namemax;
 4466                 break;
 4467         case _PC_PIPE_BUF:
 4468                 if (ap->a_vp->v_type == VDIR || ap->a_vp->v_type == VFIFO)
 4469                         *ap->a_retval = PIPE_BUF;
 4470                 else
 4471                         error = EINVAL;
 4472                 break;
 4473         case _PC_CHOWN_RESTRICTED:
 4474                 *ap->a_retval = pc.pc_chownrestricted;
 4475                 break;
 4476         case _PC_NO_TRUNC:
 4477                 *ap->a_retval = pc.pc_notrunc;
 4478                 break;
 4479         case _PC_ACL_NFS4:
 4480                 if (NFS_ISV4(vp) && nfsrv_useacl != 0 && attrflag != 0 &&
 4481                     NFSISSET_ATTRBIT(&nfsva.na_suppattr, NFSATTRBIT_ACL))
 4482                         *ap->a_retval = 1;
 4483                 else
 4484                         *ap->a_retval = 0;
 4485                 break;
 4486         case _PC_ACL_PATH_MAX:
 4487                 if (NFS_ISV4(vp))
 4488                         *ap->a_retval = ACL_MAX_ENTRIES;
 4489                 else
 4490                         *ap->a_retval = 3;
 4491                 break;
 4492         case _PC_PRIO_IO:
 4493                 *ap->a_retval = 0;
 4494                 break;
 4495         case _PC_SYNC_IO:
 4496                 *ap->a_retval = 0;
 4497                 break;
 4498         case _PC_ALLOC_SIZE_MIN:
 4499                 *ap->a_retval = vp->v_mount->mnt_stat.f_bsize;
 4500                 break;
 4501         case _PC_FILESIZEBITS:
 4502                 if (NFS_ISV34(vp))
 4503                         *ap->a_retval = 64;
 4504                 else
 4505                         *ap->a_retval = 32;
 4506                 break;
 4507         case _PC_REC_INCR_XFER_SIZE:
 4508                 *ap->a_retval = vp->v_mount->mnt_stat.f_iosize;
 4509                 break;
 4510         case _PC_REC_MAX_XFER_SIZE:
 4511                 *ap->a_retval = -1; /* means ``unlimited'' */
 4512                 break;
 4513         case _PC_REC_MIN_XFER_SIZE:
 4514                 *ap->a_retval = vp->v_mount->mnt_stat.f_iosize;
 4515                 break;
 4516         case _PC_REC_XFER_ALIGN:
 4517                 *ap->a_retval = PAGE_SIZE;
 4518                 break;
 4519         case _PC_SYMLINK_MAX:
 4520                 *ap->a_retval = NFS_MAXPATHLEN;
 4521                 break;
 4522         case _PC_MIN_HOLE_SIZE:
 4523                 /* Only some NFSv4.2 servers support Seek for Holes. */
 4524                 *ap->a_retval = 0;
 4525                 nmp = VFSTONFS(vp->v_mount);
 4526                 if (NFS_ISV4(vp) && nmp->nm_minorvers == NFSV42_MINORVERSION) {
 4527                         /*
 4528                          * NFSv4.2 doesn't have an attribute for hole size,
 4529                          * so all we can do is see if the Seek operation is
 4530                          * supported and then use f_iosize as a "best guess".
 4531                          */
 4532                         mtx_lock(&nmp->nm_mtx);
 4533                         if ((nmp->nm_privflag & NFSMNTP_SEEKTESTED) == 0) {
 4534                                 mtx_unlock(&nmp->nm_mtx);
 4535                                 off = 0;
 4536                                 attrflag = 0;
 4537                                 error = nfsrpc_seek(vp, &off, &eof,
 4538                                     NFSV4CONTENT_HOLE, td->td_ucred, &nfsva,
 4539                                     &attrflag);
 4540                                 if (attrflag != 0)
 4541                                         nfscl_loadattrcache(&vp, &nfsva,
 4542                                             NULL, 0, 1);
 4543                                 mtx_lock(&nmp->nm_mtx);
 4544                                 if (error == NFSERR_NOTSUPP)
 4545                                         nmp->nm_privflag |= NFSMNTP_SEEKTESTED;
 4546                                 else
 4547                                         nmp->nm_privflag |= NFSMNTP_SEEKTESTED |
 4548                                             NFSMNTP_SEEK;
 4549                                 error = 0;
 4550                         }
 4551                         if ((nmp->nm_privflag & NFSMNTP_SEEK) != 0)
 4552                                 *ap->a_retval = vp->v_mount->mnt_stat.f_iosize;
 4553                         mtx_unlock(&nmp->nm_mtx);
 4554                 }
 4555                 break;
 4556 
 4557         default:
 4558                 error = vop_stdpathconf(ap);
 4559                 break;
 4560         }
 4561         return (error);
 4562 }
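
The _PC_MIN_HOLE_SIZE probe above is visible to applications through pathconf(2) and fpathconf(2): the client reports f_iosize when the server supports the NFSv4.2 Seek operation and 0 otherwise. A minimal sketch (helper name illustrative):

#include <err.h>
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

/* Report whether a file's filesystem advertises hole information. */
static void
report_hole_support(const char *path)
{
        long minhole;

        errno = 0;
        minhole = pathconf(path, _PC_MIN_HOLE_SIZE);
        if (minhole == -1 && errno != 0)
                err(1, "pathconf");
        if (minhole > 0)
                printf("%s: minimum reported hole size %ld bytes\n",
                    path, minhole);
        else
                printf("%s: holes are not reported\n", path);
}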
