The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/nfsclient/nfs_vnops.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * Copyright (c) 1989, 1993
    3  *      The Regents of the University of California.  All rights reserved.
    4  *
    5  * This code is derived from software contributed to Berkeley by
    6  * Rick Macklem at The University of Guelph.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  * 4. Neither the name of the University nor the names of its contributors
   17  *    may be used to endorse or promote products derived from this software
   18  *    without specific prior written permission.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   30  * SUCH DAMAGE.
   31  *
   32  *      @(#)nfs_vnops.c 8.16 (Berkeley) 5/27/95
   33  */
   34 
   35 #include <sys/cdefs.h>
   36 __FBSDID("$FreeBSD: releng/8.3/sys/nfsclient/nfs_vnops.c 231948 2012-02-21 00:32:24Z kib $");
   37 
   38 /*
   39  * vnode op calls for Sun NFS version 2 and 3
   40  */
   41 
   42 #include "opt_inet.h"
   43 #include "opt_kdtrace.h"
   44 
   45 #include <sys/param.h>
   46 #include <sys/kernel.h>
   47 #include <sys/systm.h>
   48 #include <sys/resourcevar.h>
   49 #include <sys/proc.h>
   50 #include <sys/mount.h>
   51 #include <sys/bio.h>
   52 #include <sys/buf.h>
   53 #include <sys/jail.h>
   54 #include <sys/malloc.h>
   55 #include <sys/mbuf.h>
   56 #include <sys/namei.h>
   57 #include <sys/socket.h>
   58 #include <sys/vnode.h>
   59 #include <sys/dirent.h>
   60 #include <sys/fcntl.h>
   61 #include <sys/lockf.h>
   62 #include <sys/stat.h>
   63 #include <sys/sysctl.h>
   64 #include <sys/signalvar.h>
   65 
   66 #include <vm/vm.h>
   67 #include <vm/vm_object.h>
   68 #include <vm/vm_extern.h>
   69 #include <vm/vm_object.h>
   70 
   71 #include <fs/fifofs/fifo.h>
   72 
   73 #include <nfs/nfsproto.h>
   74 #include <nfsclient/nfs.h>
   75 #include <nfsclient/nfsnode.h>
   76 #include <nfsclient/nfsmount.h>
   77 #include <nfsclient/nfs_kdtrace.h>
   78 #include <nfs/nfs_lock.h>
   79 #include <nfs/xdr_subs.h>
   80 #include <nfsclient/nfsm_subs.h>
   81 
   82 #include <net/if.h>
   83 #include <netinet/in.h>
   84 #include <netinet/in_var.h>
   85 
   86 #include <machine/stdarg.h>
   87 
   88 #ifdef KDTRACE_HOOKS
   89 #include <sys/dtrace_bsd.h>
   90 
   91 dtrace_nfsclient_accesscache_flush_probe_func_t
   92     dtrace_nfsclient_accesscache_flush_done_probe;
   93 uint32_t nfsclient_accesscache_flush_done_id;
   94 
   95 dtrace_nfsclient_accesscache_get_probe_func_t
   96     dtrace_nfsclient_accesscache_get_hit_probe,
   97     dtrace_nfsclient_accesscache_get_miss_probe;
   98 uint32_t nfsclient_accesscache_get_hit_id;
   99 uint32_t nfsclient_accesscache_get_miss_id;
  100 
  101 dtrace_nfsclient_accesscache_load_probe_func_t
  102     dtrace_nfsclient_accesscache_load_done_probe;
  103 uint32_t nfsclient_accesscache_load_done_id;
  104 #endif /* !KDTRACE_HOOKS */
  105 
  106 /* Defs */
  107 #define TRUE    1
  108 #define FALSE   0
  109 
  110 /*
  111  * Ifdef for FreeBSD-current merged buffer cache. It is unfortunate that these
  112  * calls are not in getblk() and brelse() so that they would not be necessary
  113  * here.
  114  */
  115 #ifndef B_VMIO
  116 #define vfs_busy_pages(bp, f)
  117 #endif
  118 
  119 static vop_read_t       nfsfifo_read;
  120 static vop_write_t      nfsfifo_write;
  121 static vop_close_t      nfsfifo_close;
  122 static int      nfs_flush(struct vnode *, int, int);
  123 static int      nfs_setattrrpc(struct vnode *, struct vattr *, struct ucred *);
  124 static vop_lookup_t     nfs_lookup;
  125 static vop_create_t     nfs_create;
  126 static vop_mknod_t      nfs_mknod;
  127 static vop_open_t       nfs_open;
  128 static vop_close_t      nfs_close;
  129 static vop_access_t     nfs_access;
  130 static vop_getattr_t    nfs_getattr;
  131 static vop_setattr_t    nfs_setattr;
  132 static vop_read_t       nfs_read;
  133 static vop_fsync_t      nfs_fsync;
  134 static vop_remove_t     nfs_remove;
  135 static vop_link_t       nfs_link;
  136 static vop_rename_t     nfs_rename;
  137 static vop_mkdir_t      nfs_mkdir;
  138 static vop_rmdir_t      nfs_rmdir;
  139 static vop_symlink_t    nfs_symlink;
  140 static vop_readdir_t    nfs_readdir;
  141 static vop_strategy_t   nfs_strategy;
  142 static  int     nfs_lookitup(struct vnode *, const char *, int,
  143                     struct ucred *, struct thread *, struct nfsnode **);
  144 static  int     nfs_sillyrename(struct vnode *, struct vnode *,
  145                     struct componentname *);
  146 static vop_access_t     nfsspec_access;
  147 static vop_readlink_t   nfs_readlink;
  148 static vop_print_t      nfs_print;
  149 static vop_advlock_t    nfs_advlock;
  150 static vop_advlockasync_t nfs_advlockasync;
  151 
  152 /*
  153  * Global vfs data structures for nfs
  154  */
  155 struct vop_vector nfs_vnodeops = {
  156         .vop_default =          &default_vnodeops,
  157         .vop_access =           nfs_access,
  158         .vop_advlock =          nfs_advlock,
  159         .vop_advlockasync =     nfs_advlockasync,
  160         .vop_close =            nfs_close,
  161         .vop_create =           nfs_create,
  162         .vop_fsync =            nfs_fsync,
  163         .vop_getattr =          nfs_getattr,
  164         .vop_getpages =         nfs_getpages,
  165         .vop_putpages =         nfs_putpages,
  166         .vop_inactive =         nfs_inactive,
  167         .vop_link =             nfs_link,
  168         .vop_lookup =           nfs_lookup,
  169         .vop_mkdir =            nfs_mkdir,
  170         .vop_mknod =            nfs_mknod,
  171         .vop_open =             nfs_open,
  172         .vop_print =            nfs_print,
  173         .vop_read =             nfs_read,
  174         .vop_readdir =          nfs_readdir,
  175         .vop_readlink =         nfs_readlink,
  176         .vop_reclaim =          nfs_reclaim,
  177         .vop_remove =           nfs_remove,
  178         .vop_rename =           nfs_rename,
  179         .vop_rmdir =            nfs_rmdir,
  180         .vop_setattr =          nfs_setattr,
  181         .vop_strategy =         nfs_strategy,
  182         .vop_symlink =          nfs_symlink,
  183         .vop_write =            nfs_write,
  184 };
  185 
  186 struct vop_vector nfs_fifoops = {
  187         .vop_default =          &fifo_specops,
  188         .vop_access =           nfsspec_access,
  189         .vop_close =            nfsfifo_close,
  190         .vop_fsync =            nfs_fsync,
  191         .vop_getattr =          nfs_getattr,
  192         .vop_inactive =         nfs_inactive,
  193         .vop_print =            nfs_print,
  194         .vop_read =             nfsfifo_read,
  195         .vop_reclaim =          nfs_reclaim,
  196         .vop_setattr =          nfs_setattr,
  197         .vop_write =            nfsfifo_write,
  198 };
  199 
  200 static int      nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp,
  201                              struct componentname *cnp, struct vattr *vap);
  202 static int      nfs_removerpc(struct vnode *dvp, const char *name, int namelen,
  203                               struct ucred *cred, struct thread *td);
  204 static int      nfs_renamerpc(struct vnode *fdvp, const char *fnameptr,
  205                               int fnamelen, struct vnode *tdvp,
  206                               const char *tnameptr, int tnamelen,
  207                               struct ucred *cred, struct thread *td);
  208 static int      nfs_renameit(struct vnode *sdvp, struct componentname *scnp,
  209                              struct sillyrename *sp);
  210 
  211 /*
  212  * Global variables
  213  */
  214 struct mtx      nfs_iod_mtx;
  215 enum nfsiod_state nfs_iodwant[NFS_MAXASYNCDAEMON];
  216 struct nfsmount *nfs_iodmount[NFS_MAXASYNCDAEMON];
  217 int              nfs_numasync = 0;
  218 #define DIRHDSIZ        (sizeof (struct dirent) - (MAXNAMLEN + 1))
  219 
  220 SYSCTL_DECL(_vfs_nfs);
  221 
  222 static int      nfsaccess_cache_timeout = NFS_MAXATTRTIMO;
  223 SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_timeout, CTLFLAG_RW,
  224            &nfsaccess_cache_timeout, 0, "NFS ACCESS cache timeout");
  225 
  226 static int      nfs_prime_access_cache = 0;
  227 SYSCTL_INT(_vfs_nfs, OID_AUTO, prime_access_cache, CTLFLAG_RW,
  228            &nfs_prime_access_cache, 0,
  229            "Prime NFS ACCESS cache when fetching attributes");
  230 
  231 static int      nfsv3_commit_on_close = 0;
  232 SYSCTL_INT(_vfs_nfs, OID_AUTO, nfsv3_commit_on_close, CTLFLAG_RW,
  233            &nfsv3_commit_on_close, 0, "write+commit on close, else only write");
  234 
  235 static int      nfs_clean_pages_on_close = 1;
  236 SYSCTL_INT(_vfs_nfs, OID_AUTO, clean_pages_on_close, CTLFLAG_RW,
  237            &nfs_clean_pages_on_close, 0, "NFS clean dirty pages on close");
  238 
  239 int nfs_directio_enable = 0;
  240 SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs_directio_enable, CTLFLAG_RW,
  241            &nfs_directio_enable, 0, "Enable NFS directio");
  242 
  243 /*
  244  * This sysctl allows other processes to mmap a file that has been opened
  245  * O_DIRECT by a process.  In general, having processes mmap the file while
  246  * Direct IO is in progress can lead to Data Inconsistencies.  But, we allow
  247  * this by default to prevent DoS attacks - to prevent a malicious user from
  248  * opening up files O_DIRECT preventing other users from mmap'ing these
  249  * files.  "Protected" environments where stricter consistency guarantees are
  250  * required can disable this knob.  The process that opened the file O_DIRECT
  251  * cannot mmap() the file, because mmap'ed IO on an O_DIRECT open() is not
  252  * meaningful.
  253  */
  254 int nfs_directio_allow_mmap = 1;
  255 SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs_directio_allow_mmap, CTLFLAG_RW,
  256            &nfs_directio_allow_mmap, 0, "Enable mmaped IO on file with O_DIRECT opens");
  257 
  258 #if 0
  259 SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_hits, CTLFLAG_RD,
  260            &nfsstats.accesscache_hits, 0, "NFS ACCESS cache hit count");
  261 
  262 SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_misses, CTLFLAG_RD,
  263            &nfsstats.accesscache_misses, 0, "NFS ACCESS cache miss count");
  264 #endif
  265 
  266 #define NFSV3ACCESS_ALL (NFSV3ACCESS_READ | NFSV3ACCESS_MODIFY          \
  267                          | NFSV3ACCESS_EXTEND | NFSV3ACCESS_EXECUTE     \
  268                          | NFSV3ACCESS_DELETE | NFSV3ACCESS_LOOKUP)
  269 
  270 /*
  271  * SMP Locking Note :
  272  * The list of locks after the description of the lock is the ordering
  273  * of other locks acquired with the lock held.
  274  * np->n_mtx : Protects the fields in the nfsnode.
  275        VM Object Lock
  276        VI_MTX (acquired indirectly)
  277  * nmp->nm_mtx : Protects the fields in the nfsmount.
  278        rep->r_mtx
  279  * nfs_iod_mtx : Global lock, protects shared nfsiod state.
  280  * nfs_reqq_mtx : Global lock, protects the nfs_reqq list.
  281        nmp->nm_mtx
  282        rep->r_mtx
  283  * rep->r_mtx : Protects the fields in an nfsreq.
  284  */
  285 
  286 static int
  287 nfs3_access_otw(struct vnode *vp, int wmode, struct thread *td,
  288     struct ucred *cred, uint32_t *retmode)
  289 {
  290         const int v3 = 1;
  291         u_int32_t *tl;
  292         int error = 0, attrflag, i, lrupos;
  293 
  294         struct mbuf *mreq, *mrep, *md, *mb;
  295         caddr_t bpos, dpos;
  296         u_int32_t rmode;
  297         struct nfsnode *np = VTONFS(vp);
  298 
  299         nfsstats.rpccnt[NFSPROC_ACCESS]++;
  300         mreq = nfsm_reqhead(vp, NFSPROC_ACCESS, NFSX_FH(v3) + NFSX_UNSIGNED);
  301         mb = mreq;
  302         bpos = mtod(mb, caddr_t);
  303         nfsm_fhtom(vp, v3);
  304         tl = nfsm_build(u_int32_t *, NFSX_UNSIGNED);
  305         *tl = txdr_unsigned(wmode);
  306         nfsm_request(vp, NFSPROC_ACCESS, td, cred);
  307         nfsm_postop_attr(vp, attrflag);
  308         if (!error) {
  309                 lrupos = 0;
  310                 tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
  311                 rmode = fxdr_unsigned(u_int32_t, *tl);
  312                 mtx_lock(&np->n_mtx);
  313                 for (i = 0; i < NFS_ACCESSCACHESIZE; i++) {
  314                         if (np->n_accesscache[i].uid == cred->cr_uid) {
  315                                 np->n_accesscache[i].mode = rmode;
  316                                 np->n_accesscache[i].stamp = time_second;
  317                                 break;
  318                         }
  319                         if (i > 0 && np->n_accesscache[i].stamp <
  320                             np->n_accesscache[lrupos].stamp)
  321                                 lrupos = i;
  322                 }
  323                 if (i == NFS_ACCESSCACHESIZE) {
  324                         np->n_accesscache[lrupos].uid = cred->cr_uid;
  325                         np->n_accesscache[lrupos].mode = rmode;
  326                         np->n_accesscache[lrupos].stamp = time_second;
  327                 }
  328                 mtx_unlock(&np->n_mtx);
  329                 if (retmode != NULL)
  330                         *retmode = rmode;
  331                 KDTRACE_NFS_ACCESSCACHE_LOAD_DONE(vp, cred->cr_uid, rmode, 0);
  332         }
  333         m_freem(mrep);
  334 nfsmout:
  335 #ifdef KDTRACE_HOOKS
  336         if (error) {
  337                 KDTRACE_NFS_ACCESSCACHE_LOAD_DONE(vp, cred->cr_uid, 0,
  338                     error);
  339         }
  340 #endif
  341         return (error);
  342 }
  343 
  344 /*
  345  * nfs access vnode op.
  346  * For nfs version 2, just return ok. File accesses may fail later.
  347  * For nfs version 3, use the access rpc to check accessibility. If file modes
  348  * are changed on the server, accesses might still fail later.
  349  */
  350 static int
  351 nfs_access(struct vop_access_args *ap)
  352 {
  353         struct vnode *vp = ap->a_vp;
  354         int error = 0, i, gotahit;
  355         u_int32_t mode, rmode, wmode;
  356         int v3 = NFS_ISV3(vp);
  357         struct nfsnode *np = VTONFS(vp);
  358 
  359         /*
  360          * Disallow write attempts on filesystems mounted read-only;
  361          * unless the file is a socket, fifo, or a block or character
  362          * device resident on the filesystem.
  363          */
  364         if ((ap->a_accmode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
  365                 switch (vp->v_type) {
  366                 case VREG:
  367                 case VDIR:
  368                 case VLNK:
  369                         return (EROFS);
  370                 default:
  371                         break;
  372                 }
  373         }
  374         /*
  375          * For nfs v3, check to see if we have done this recently, and if
  376          * so return our cached result instead of making an ACCESS call.
  377          * If not, do an access rpc, otherwise you are stuck emulating
  378          * ufs_access() locally using the vattr. This may not be correct,
  379          * since the server may apply other access criteria such as
  380          * client uid-->server uid mapping that we do not know about.
  381          */
  382         if (v3) {
  383                 if (ap->a_accmode & VREAD)
  384                         mode = NFSV3ACCESS_READ;
  385                 else
  386                         mode = 0;
  387                 if (vp->v_type != VDIR) {
  388                         if (ap->a_accmode & VWRITE)
  389                                 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND);
  390                         if (ap->a_accmode & VEXEC)
  391                                 mode |= NFSV3ACCESS_EXECUTE;
  392                 } else {
  393                         if (ap->a_accmode & VWRITE)
  394                                 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND |
  395                                          NFSV3ACCESS_DELETE);
  396                         if (ap->a_accmode & VEXEC)
  397                                 mode |= NFSV3ACCESS_LOOKUP;
  398                 }
  399                 /* XXX safety belt, only make blanket request if caching */
  400                 if (nfsaccess_cache_timeout > 0) {
  401                         wmode = NFSV3ACCESS_READ | NFSV3ACCESS_MODIFY |
  402                                 NFSV3ACCESS_EXTEND | NFSV3ACCESS_EXECUTE |
  403                                 NFSV3ACCESS_DELETE | NFSV3ACCESS_LOOKUP;
  404                 } else {
  405                         wmode = mode;
  406                 }
  407 
  408                 /*
  409                  * Does our cached result allow us to give a definite yes to
  410                  * this request?
  411                  */
  412                 gotahit = 0;
  413                 mtx_lock(&np->n_mtx);
  414                 for (i = 0; i < NFS_ACCESSCACHESIZE; i++) {
  415                         if (ap->a_cred->cr_uid == np->n_accesscache[i].uid) {
  416                                 if (time_second < (np->n_accesscache[i].stamp +
  417                                     nfsaccess_cache_timeout) &&
  418                                     (np->n_accesscache[i].mode & mode) == mode) {
  419                                         nfsstats.accesscache_hits++;
  420                                         gotahit = 1;
  421                                 }
  422                                 break;
  423                         }
  424                 }
  425                 mtx_unlock(&np->n_mtx);
  426 #ifdef KDTRACE_HOOKS
  427                 if (gotahit)
  428                         KDTRACE_NFS_ACCESSCACHE_GET_HIT(vp,
  429                             ap->a_cred->cr_uid, mode);
  430                 else
  431                         KDTRACE_NFS_ACCESSCACHE_GET_MISS(vp,
  432                             ap->a_cred->cr_uid, mode);
  433 #endif
  434                 if (gotahit == 0) {
  435                         /*
  436                          * Either a no, or a don't know.  Go to the wire.
  437                          */
  438                         nfsstats.accesscache_misses++;
  439                         error = nfs3_access_otw(vp, wmode, ap->a_td, ap->a_cred,
  440                             &rmode);
  441                         if (!error) {
  442                                 if ((rmode & mode) != mode)
  443                                         error = EACCES;
  444                         }
  445                 }
  446                 return (error);
  447         } else {
  448                 if ((error = nfsspec_access(ap)) != 0) {
  449                         return (error);
  450                 }
  451                 /*
  452                  * Attempt to prevent a mapped root from accessing a file
  453                  * which it shouldn't.  We try to read a byte from the file
  454                  * if the user is root and the file is not zero length.
  455                  * After calling nfsspec_access, we should have the correct
  456                  * file size cached.
  457                  */
  458                 mtx_lock(&np->n_mtx);
  459                 if (ap->a_cred->cr_uid == 0 && (ap->a_accmode & VREAD)
  460                     && VTONFS(vp)->n_size > 0) {
  461                         struct iovec aiov;
  462                         struct uio auio;
  463                         char buf[1];
  464 
  465                         mtx_unlock(&np->n_mtx);
  466                         aiov.iov_base = buf;
  467                         aiov.iov_len = 1;
  468                         auio.uio_iov = &aiov;
  469                         auio.uio_iovcnt = 1;
  470                         auio.uio_offset = 0;
  471                         auio.uio_resid = 1;
  472                         auio.uio_segflg = UIO_SYSSPACE;
  473                         auio.uio_rw = UIO_READ;
  474                         auio.uio_td = ap->a_td;
  475 
  476                         if (vp->v_type == VREG)
  477                                 error = nfs_readrpc(vp, &auio, ap->a_cred);
  478                         else if (vp->v_type == VDIR) {
  479                                 char* bp;
  480                                 bp = malloc(NFS_DIRBLKSIZ, M_TEMP, M_WAITOK);
  481                                 aiov.iov_base = bp;
  482                                 aiov.iov_len = auio.uio_resid = NFS_DIRBLKSIZ;
  483                                 error = nfs_readdirrpc(vp, &auio, ap->a_cred);
  484                                 free(bp, M_TEMP);
  485                         } else if (vp->v_type == VLNK)
  486                                 error = nfs_readlinkrpc(vp, &auio, ap->a_cred);
  487                         else
  488                                 error = EACCES;
  489                 } else
  490                         mtx_unlock(&np->n_mtx);
  491                 return (error);
  492         }
  493 }
  494 
  495 int nfs_otw_getattr_avoid = 0;
  496 
  497 /*
  498  * nfs open vnode op
  499  * Check to see if the type is ok
  500  * and that deletion is not in progress.
  501  * For paged in text files, you will need to flush the page cache
  502  * if consistency is lost.
  503  */
  504 /* ARGSUSED */
  505 static int
  506 nfs_open(struct vop_open_args *ap)
  507 {
  508         struct vnode *vp = ap->a_vp;
  509         struct nfsnode *np = VTONFS(vp);
  510         struct vattr vattr;
  511         int error;
  512         int fmode = ap->a_mode;
  513 
  514         if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK)
  515                 return (EOPNOTSUPP);
  516 
  517         /*
  518          * Get a valid lease. If cached data is stale, flush it.
  519          */
  520         mtx_lock(&np->n_mtx);
  521         if (np->n_flag & NMODIFIED) {
  522                 mtx_unlock(&np->n_mtx);
  523                 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
  524                 if (error == EINTR || error == EIO)
  525                         return (error);
  526                 mtx_lock(&np->n_mtx);
  527                 np->n_attrstamp = 0;
  528                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
  529                 if (vp->v_type == VDIR)
  530                         np->n_direofoffset = 0;
  531                 mtx_unlock(&np->n_mtx);
  532                 error = VOP_GETATTR(vp, &vattr, ap->a_cred);
  533                 if (error)
  534                         return (error);
  535                 mtx_lock(&np->n_mtx);
  536                 np->n_mtime = vattr.va_mtime;
  537         } else {
  538                 mtx_unlock(&np->n_mtx);
  539                 error = VOP_GETATTR(vp, &vattr, ap->a_cred);
  540                 if (error)
  541                         return (error);
  542                 mtx_lock(&np->n_mtx);
  543                 if (NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime)) {
  544                         if (vp->v_type == VDIR)
  545                                 np->n_direofoffset = 0;
  546                         mtx_unlock(&np->n_mtx);
  547                         error = nfs_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
  548                         if (error == EINTR || error == EIO) {
  549                                 return (error);
  550                         }
  551                         mtx_lock(&np->n_mtx);
  552                         np->n_mtime = vattr.va_mtime;
  553                 }
  554         }
  555         /*
  556          * If the object has >= 1 O_DIRECT active opens, we disable caching.
  557          */
  558         if (nfs_directio_enable && (fmode & O_DIRECT) && (vp->v_type == VREG)) {
  559                 if (np->n_directio_opens == 0) {
  560                         mtx_unlock(&np->n_mtx);
  561                         error = nfs_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
  562                         if (error)
  563                                 return (error);
  564                         mtx_lock(&np->n_mtx);
  565                         np->n_flag |= NNONCACHE;
  566                 }
  567                 np->n_directio_opens++;
  568         }
  569         mtx_unlock(&np->n_mtx);
  570         vnode_create_vobject(vp, vattr.va_size, ap->a_td);
  571         return (0);
  572 }
  573 
  574 /*
  575  * nfs close vnode op
  576  * What an NFS client should do upon close after writing is a debatable issue.
  577  * Most NFS clients push delayed writes to the server upon close, basically for
  578  * two reasons:
  579  * 1 - So that any write errors may be reported back to the client process
  580  *     doing the close system call. By far the two most likely errors are
  581  *     NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure.
  582  * 2 - To put a worst case upper bound on cache inconsistency between
  583  *     multiple clients for the file.
  584  * There is also a consistency problem for Version 2 of the protocol w.r.t.
  585  * not being able to tell if other clients are writing a file concurrently,
  586  * since there is no way of knowing if the changed modify time in the reply
  587  * is only due to the write for this client.
  588  * (NFS Version 3 provides weak cache consistency data in the reply that
  589  *  should be sufficient to detect and handle this case.)
  590  *
  591  * The current code does the following:
  592  * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers
  593  * for NFS Version 3 - flush dirty buffers to the server but don't invalidate
  594  *                     or commit them (this satisfies 1 and 2 except for the
  595  *                     case where the server crashes after this close but
  596  *                     before the commit RPC, which is felt to be "good
  597  *                     enough". Changing the last argument to nfs_flush() to
  598  *                     a 1 would force a commit operation, if it is felt a
  599  *                     commit is necessary now.
  600  */
  601 /* ARGSUSED */
  602 static int
  603 nfs_close(struct vop_close_args *ap)
  604 {
  605         struct vnode *vp = ap->a_vp;
  606         struct nfsnode *np = VTONFS(vp);
  607         int error = 0;
  608         int fmode = ap->a_fflag;
  609 
  610         if (vp->v_type == VREG) {
  611             /*
  612              * Examine and clean dirty pages, regardless of NMODIFIED.
  613              * This closes a major hole in close-to-open consistency.
  614              * We want to push out all dirty pages (and buffers) on
  615              * close, regardless of whether they were dirtied by
  616              * mmap'ed writes or via write().
  617              */
  618             if (nfs_clean_pages_on_close && vp->v_object) {
  619                 VM_OBJECT_LOCK(vp->v_object);
  620                 vm_object_page_clean(vp->v_object, 0, 0, 0);
  621                 VM_OBJECT_UNLOCK(vp->v_object);
  622             }
  623             mtx_lock(&np->n_mtx);
  624             if (np->n_flag & NMODIFIED) {
  625                 mtx_unlock(&np->n_mtx);
  626                 if (NFS_ISV3(vp)) {
  627                     /*
  628                      * Under NFSv3 we have dirty buffers to dispose of.  We
  629                      * must flush them to the NFS server.  We have the option
  630                      * of waiting all the way through the commit rpc or just
  631                      * waiting for the initial write.  The default is to only
  632                      * wait through the initial write so the data is in the
  633                      * server's cache, which is roughly similar to the state
  634                      * a standard disk subsystem leaves the file in on close().
  635                      *
  636                      * We cannot clear the NMODIFIED bit in np->n_flag due to
  637                      * potential races with other processes, and certainly
  638                      * cannot clear it if we don't commit.
  639                      */
  640                     int cm = nfsv3_commit_on_close ? 1 : 0;
  641                     error = nfs_flush(vp, MNT_WAIT, cm);
  642                     /* np->n_flag &= ~NMODIFIED; */
  643                 } else
  644                     error = nfs_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
  645                 mtx_lock(&np->n_mtx);
  646             }
  647             if (np->n_flag & NWRITEERR) {
  648                 np->n_flag &= ~NWRITEERR;
  649                 error = np->n_error;
  650             }
  651             mtx_unlock(&np->n_mtx);
  652         }
  653         if (nfs_directio_enable)
  654                 KASSERT((np->n_directio_asyncwr == 0),
  655                         ("nfs_close: dirty unflushed (%d) directio buffers\n",
  656                          np->n_directio_asyncwr));
  657         if (nfs_directio_enable && (fmode & O_DIRECT) && (vp->v_type == VREG)) {
  658                 mtx_lock(&np->n_mtx);
  659                 KASSERT((np->n_directio_opens > 0), 
  660                         ("nfs_close: unexpectedly value (0) of n_directio_opens\n"));
  661                 np->n_directio_opens--;
  662                 if (np->n_directio_opens == 0)
  663                         np->n_flag &= ~NNONCACHE;
  664                 mtx_unlock(&np->n_mtx);
  665         }
  666         return (error);
  667 }
  668 
  669 /*
  670  * nfs getattr call from vfs.
  671  */
  672 static int
  673 nfs_getattr(struct vop_getattr_args *ap)
  674 {
  675         struct vnode *vp = ap->a_vp;
  676         struct nfsnode *np = VTONFS(vp);
  677         struct thread *td = curthread;
  678         struct vattr *vap = ap->a_vap;
  679         struct vattr vattr;
  680         caddr_t bpos, dpos;
  681         int error = 0;
  682         struct mbuf *mreq, *mrep, *md, *mb;
  683         int v3 = NFS_ISV3(vp);
  684 
  685         /*
  686          * Update local times for special files.
  687          */
  688         mtx_lock(&np->n_mtx);
  689         if (np->n_flag & (NACC | NUPD))
  690                 np->n_flag |= NCHG;
  691         mtx_unlock(&np->n_mtx);
  692         /*
  693          * First look in the cache.
  694          */
  695         if (nfs_getattrcache(vp, &vattr) == 0)
  696                 goto nfsmout;
  697         if (v3 && nfs_prime_access_cache && nfsaccess_cache_timeout > 0) {
  698                 nfsstats.accesscache_misses++;
  699                 nfs3_access_otw(vp, NFSV3ACCESS_ALL, td, ap->a_cred, NULL);
  700                 if (nfs_getattrcache(vp, &vattr) == 0)
  701                         goto nfsmout;
  702         }
  703         nfsstats.rpccnt[NFSPROC_GETATTR]++;
  704         mreq = nfsm_reqhead(vp, NFSPROC_GETATTR, NFSX_FH(v3));
  705         mb = mreq;
  706         bpos = mtod(mb, caddr_t);
  707         nfsm_fhtom(vp, v3);
  708         nfsm_request(vp, NFSPROC_GETATTR, td, ap->a_cred);
  709         if (!error) {
  710                 nfsm_loadattr(vp, &vattr);
  711         }
  712         m_freem(mrep);
  713 nfsmout:
  714         vap->va_type = vattr.va_type;
  715         vap->va_mode = vattr.va_mode;
  716         vap->va_nlink = vattr.va_nlink;
  717         vap->va_uid = vattr.va_uid;
  718         vap->va_gid = vattr.va_gid;
  719         vap->va_fsid = vattr.va_fsid;
  720         vap->va_fileid = vattr.va_fileid;
  721         vap->va_size = vattr.va_size;
  722         vap->va_blocksize = vattr.va_blocksize;
  723         vap->va_atime = vattr.va_atime;
  724         vap->va_mtime = vattr.va_mtime;
  725         vap->va_ctime = vattr.va_ctime;
  726         vap->va_gen = vattr.va_gen;
  727         vap->va_flags = vattr.va_flags;
  728         vap->va_rdev = vattr.va_rdev;
  729         vap->va_bytes = vattr.va_bytes;
  730         vap->va_filerev = vattr.va_filerev;
  731 
  732         return (error);
  733 }
  734 
  735 /*
  736  * nfs setattr call.
  737  */
  738 static int
  739 nfs_setattr(struct vop_setattr_args *ap)
  740 {
  741         struct vnode *vp = ap->a_vp;
  742         struct nfsnode *np = VTONFS(vp);
  743         struct vattr *vap = ap->a_vap;
  744         struct thread *td = curthread;
  745         int error = 0;
  746         u_quad_t tsize;
  747 
  748 #ifndef nolint
  749         tsize = (u_quad_t)0;
  750 #endif
  751 
  752         /*
  753          * Setting of flags is not supported.
  754          */
  755         if (vap->va_flags != VNOVAL)
  756                 return (EOPNOTSUPP);
  757 
  758         /*
  759          * Disallow write attempts if the filesystem is mounted read-only.
  760          */
  761         if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
  762             vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
  763             vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
  764             (vp->v_mount->mnt_flag & MNT_RDONLY)) {
  765                 error = EROFS;
  766                 goto out;
  767         }
  768         if (vap->va_size != VNOVAL) {
  769                 switch (vp->v_type) {
  770                 case VDIR:
  771                         return (EISDIR);
  772                 case VCHR:
  773                 case VBLK:
  774                 case VSOCK:
  775                 case VFIFO:
  776                         if (vap->va_mtime.tv_sec == VNOVAL &&
  777                             vap->va_atime.tv_sec == VNOVAL &&
  778                             vap->va_mode == (mode_t)VNOVAL &&
  779                             vap->va_uid == (uid_t)VNOVAL &&
  780                             vap->va_gid == (gid_t)VNOVAL)
  781                                 return (0);             
  782                         vap->va_size = VNOVAL;
  783                         break;
  784                 default:
  785                         /*
  786                          * Disallow write attempts if the filesystem is
  787                          * mounted read-only.
  788                          */
  789                         if (vp->v_mount->mnt_flag & MNT_RDONLY)
  790                                 return (EROFS);
  791                         /*
  792                          *  We run vnode_pager_setsize() early (why?),
  793                          * we must set np->n_size now to avoid vinvalbuf
  794                          * V_SAVE races that might setsize a lower
  795                          * value.
  796                          */
  797                         mtx_lock(&np->n_mtx);
  798                         tsize = np->n_size;
  799                         mtx_unlock(&np->n_mtx);
  800                         error = nfs_meta_setsize(vp, ap->a_cred, td,
  801                             vap->va_size);
  802                         mtx_lock(&np->n_mtx);
  803                         if (np->n_flag & NMODIFIED) {
  804                             tsize = np->n_size;
  805                             mtx_unlock(&np->n_mtx);
  806                             if (vap->va_size == 0)
  807                                 error = nfs_vinvalbuf(vp, 0, td, 1);
  808                             else
  809                                 error = nfs_vinvalbuf(vp, V_SAVE, td, 1);
  810                             if (error) {
  811                                 vnode_pager_setsize(vp, tsize);
  812                                 goto out;
  813                             }
  814                         } else
  815                             mtx_unlock(&np->n_mtx);
  816                         /*
  817                          * np->n_size has already been set to vap->va_size
  818                          * in nfs_meta_setsize(). We must set it again since
  819                          * nfs_loadattrcache() could be called through
  820                          * nfs_meta_setsize() and could modify np->n_size.
  821                          */
  822                         mtx_lock(&np->n_mtx);
  823                         np->n_vattr.va_size = np->n_size = vap->va_size;
  824                         mtx_unlock(&np->n_mtx);
  825                 };
  826         } else {
  827                 mtx_lock(&np->n_mtx);
  828                 if ((vap->va_mtime.tv_sec != VNOVAL || vap->va_atime.tv_sec != VNOVAL) && 
  829                     (np->n_flag & NMODIFIED) && vp->v_type == VREG) {
  830                         mtx_unlock(&np->n_mtx);
  831                         if ((error = nfs_vinvalbuf(vp, V_SAVE, td, 1)) != 0 &&
  832                             (error == EINTR || error == EIO))
  833                                 return error;
  834                 } else
  835                         mtx_unlock(&np->n_mtx);
  836         }
  837         error = nfs_setattrrpc(vp, vap, ap->a_cred);
  838         if (error && vap->va_size != VNOVAL) {
  839                 mtx_lock(&np->n_mtx);
  840                 np->n_size = np->n_vattr.va_size = tsize;
  841                 vnode_pager_setsize(vp, tsize);
  842                 mtx_unlock(&np->n_mtx);
  843         }
  844 out:
  845         return (error);
  846 }
  847 
  848 /*
  849  * Do an nfs setattr rpc.
  850  */
  851 static int
  852 nfs_setattrrpc(struct vnode *vp, struct vattr *vap, struct ucred *cred)
  853 {
  854         struct nfsv2_sattr *sp;
  855         struct nfsnode *np = VTONFS(vp);
  856         caddr_t bpos, dpos;
  857         u_int32_t *tl;
  858         int error = 0, i, wccflag = NFSV3_WCCRATTR;
  859         struct mbuf *mreq, *mrep, *md, *mb;
  860         int v3 = NFS_ISV3(vp);
  861 
  862         nfsstats.rpccnt[NFSPROC_SETATTR]++;
  863         mreq = nfsm_reqhead(vp, NFSPROC_SETATTR, NFSX_FH(v3) + NFSX_SATTR(v3));
  864         mb = mreq;
  865         bpos = mtod(mb, caddr_t);
  866         nfsm_fhtom(vp, v3);
  867         if (v3) {
  868                 nfsm_v3attrbuild(vap, TRUE);
  869                 tl = nfsm_build(u_int32_t *, NFSX_UNSIGNED);
  870                 *tl = nfs_false;
  871         } else {
  872                 sp = nfsm_build(struct nfsv2_sattr *, NFSX_V2SATTR);
  873                 if (vap->va_mode == (mode_t)VNOVAL)
  874                         sp->sa_mode = nfs_xdrneg1;
  875                 else
  876                         sp->sa_mode = vtonfsv2_mode(vp->v_type, vap->va_mode);
  877                 if (vap->va_uid == (uid_t)VNOVAL)
  878                         sp->sa_uid = nfs_xdrneg1;
  879                 else
  880                         sp->sa_uid = txdr_unsigned(vap->va_uid);
  881                 if (vap->va_gid == (gid_t)VNOVAL)
  882                         sp->sa_gid = nfs_xdrneg1;
  883                 else
  884                         sp->sa_gid = txdr_unsigned(vap->va_gid);
  885                 sp->sa_size = txdr_unsigned(vap->va_size);
  886                 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
  887                 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
  888         }
  889         nfsm_request(vp, NFSPROC_SETATTR, curthread, cred);
  890         if (v3) {
  891                 mtx_lock(&np->n_mtx);
  892                 for (i = 0; i < NFS_ACCESSCACHESIZE; i++)
  893                         np->n_accesscache[i].stamp = 0;
  894                 mtx_unlock(&np->n_mtx);
  895                 KDTRACE_NFS_ACCESSCACHE_FLUSH_DONE(vp);
  896                 nfsm_wcc_data(vp, wccflag);
  897         } else
  898                 nfsm_loadattr(vp, NULL);
  899         m_freem(mrep);
  900 nfsmout:
  901         return (error);
  902 }
  903 
  904 /*
  905  * nfs lookup call, one step at a time...
  906  * First look in cache
  907  * If not found, unlock the directory nfsnode and do the rpc
  908  */
  909 static int
  910 nfs_lookup(struct vop_lookup_args *ap)
  911 {
  912         struct componentname *cnp = ap->a_cnp;
  913         struct vnode *dvp = ap->a_dvp;
  914         struct vnode **vpp = ap->a_vpp;
  915         struct mount *mp = dvp->v_mount;
  916         struct vattr vattr;
  917         struct timespec dmtime;
  918         int flags = cnp->cn_flags;
  919         struct vnode *newvp;
  920         struct nfsmount *nmp;
  921         caddr_t bpos, dpos;
  922         struct mbuf *mreq, *mrep, *md, *mb;
  923         long len;
  924         nfsfh_t *fhp;
  925         struct nfsnode *np, *newnp;
  926         int error = 0, attrflag, fhsize, ltype;
  927         int v3 = NFS_ISV3(dvp);
  928         struct thread *td = cnp->cn_thread;
  929 
  930         *vpp = NULLVP;
  931         if ((flags & ISLASTCN) && (mp->mnt_flag & MNT_RDONLY) &&
  932             (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
  933                 return (EROFS);
  934         if (dvp->v_type != VDIR)
  935                 return (ENOTDIR);
  936         nmp = VFSTONFS(mp);
  937         np = VTONFS(dvp);
  938         if ((error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, td)) != 0) {
  939                 *vpp = NULLVP;
  940                 return (error);
  941         }
  942         error = cache_lookup(dvp, vpp, cnp);
  943         if (error > 0 && error != ENOENT)
  944                 return (error);
  945         if (error == -1) {
  946                 /*
  947                  * We only accept a positive hit in the cache if the
  948                  * change time of the file matches our cached copy.
  949                  * Otherwise, we discard the cache entry and fallback
  950                  * to doing a lookup RPC.
  951                  *
  952                  * To better handle stale file handles and attributes,
  953                  * clear the attribute cache of this node if it is a
  954                  * leaf component, part of an open() call, and not
  955                  * locally modified before fetching the attributes.
  956                  * This should allow stale file handles to be detected
  957                  * here where we can fall back to a LOOKUP RPC to
  958                  * recover rather than having nfs_open() detect the
  959                  * stale file handle and failing open(2) with ESTALE.
  960                  */
  961                 newvp = *vpp;
  962                 newnp = VTONFS(newvp);
  963                 if (!(nmp->nm_flag & NFSMNT_NOCTO) &&
  964                     (flags & (ISLASTCN | ISOPEN)) == (ISLASTCN | ISOPEN) &&
  965                     !(newnp->n_flag & NMODIFIED)) {
  966                         mtx_lock(&newnp->n_mtx);
  967                         newnp->n_attrstamp = 0;
  968                         KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(newvp);
  969                         mtx_unlock(&newnp->n_mtx);
  970                 }
  971                 if (VOP_GETATTR(newvp, &vattr, cnp->cn_cred) == 0 &&
  972                     timespeccmp(&vattr.va_ctime, &newnp->n_ctime, ==)) {
  973                         nfsstats.lookupcache_hits++;
  974                         if (cnp->cn_nameiop != LOOKUP &&
  975                             (flags & ISLASTCN))
  976                                 cnp->cn_flags |= SAVENAME;
  977                         return (0);
  978                 }
  979                 cache_purge(newvp);
  980                 if (dvp != newvp)
  981                         vput(newvp);
  982                 else 
  983                         vrele(newvp);
  984                 *vpp = NULLVP;
  985         } else if (error == ENOENT) {
  986                 if (dvp->v_iflag & VI_DOOMED)
  987                         return (ENOENT);
  988                 /*
  989                  * We only accept a negative hit in the cache if the
  990                  * modification time of the parent directory matches
  991                  * our cached copy.  Otherwise, we discard all of the
  992                  * negative cache entries for this directory. We also
  993                  * only trust -ve cache entries for less than
  994                  * nm_negative_namecache_timeout seconds.
  995                  */
  996                 if ((u_int)(ticks - np->n_dmtime_ticks) <
  997                     (nmp->nm_negnametimeo * hz) &&
  998                     VOP_GETATTR(dvp, &vattr, cnp->cn_cred) == 0 &&
  999                     timespeccmp(&vattr.va_mtime, &np->n_dmtime, ==)) {
 1000                         nfsstats.lookupcache_hits++;
 1001                         return (ENOENT);
 1002                 }
 1003                 cache_purge_negative(dvp);
 1004                 mtx_lock(&np->n_mtx);
 1005                 timespecclear(&np->n_dmtime);
 1006                 mtx_unlock(&np->n_mtx);
 1007         }
 1008 
 1009         /*
 1010          * Cache the modification time of the parent directory in case
 1011          * the lookup fails and results in adding the first negative
 1012          * name cache entry for the directory.  Since this is reading
 1013          * a single time_t, don't bother with locking.  The
 1014          * modification time may be a bit stale, but it must be read
 1015          * before performing the lookup RPC to prevent a race where
 1016          * another lookup updates the timestamp on the directory after
 1017          * the lookup RPC has been performed on the server but before
 1018          * n_dmtime is set at the end of this function.
 1019          */
 1020         dmtime = np->n_vattr.va_mtime;
 1021         error = 0;
 1022         newvp = NULLVP;
 1023         nfsstats.lookupcache_misses++;
 1024         nfsstats.rpccnt[NFSPROC_LOOKUP]++;
 1025         len = cnp->cn_namelen;
 1026         mreq = nfsm_reqhead(dvp, NFSPROC_LOOKUP,
 1027                 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
 1028         mb = mreq;
 1029         bpos = mtod(mb, caddr_t);
 1030         nfsm_fhtom(dvp, v3);
 1031         nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
 1032         nfsm_request(dvp, NFSPROC_LOOKUP, cnp->cn_thread, cnp->cn_cred);
 1033         if (error) {
 1034                 if (v3) {
 1035                         nfsm_postop_attr(dvp, attrflag);
 1036                         m_freem(mrep);
 1037                 }
 1038                 goto nfsmout;
 1039         }
 1040         nfsm_getfh(fhp, fhsize, v3);
 1041 
 1042         /*
 1043          * Handle RENAME case...
 1044          */
 1045         if (cnp->cn_nameiop == RENAME && (flags & ISLASTCN)) {
 1046                 if (NFS_CMPFH(np, fhp, fhsize)) {
 1047                         m_freem(mrep);
 1048                         return (EISDIR);
 1049                 }
 1050                 error = nfs_nget(mp, fhp, fhsize, &np, LK_EXCLUSIVE);
 1051                 if (error) {
 1052                         m_freem(mrep);
 1053                         return (error);
 1054                 }
 1055                 newvp = NFSTOV(np);
 1056                 if (v3) {
 1057                         nfsm_postop_attr(newvp, attrflag);
 1058                         nfsm_postop_attr(dvp, attrflag);
 1059                 } else
 1060                         nfsm_loadattr(newvp, NULL);
 1061                 *vpp = newvp;
 1062                 m_freem(mrep);
 1063                 cnp->cn_flags |= SAVENAME;
 1064                 return (0);
 1065         }
 1066 
 1067         if (flags & ISDOTDOT) {
 1068                 ltype = VOP_ISLOCKED(dvp);
 1069                 error = vfs_busy(mp, MBF_NOWAIT);
 1070                 if (error != 0) {
 1071                         vfs_ref(mp);
 1072                         VOP_UNLOCK(dvp, 0);
 1073                         error = vfs_busy(mp, 0);
 1074                         vn_lock(dvp, ltype | LK_RETRY);
 1075                         vfs_rel(mp);
 1076                         if (error == 0 && (dvp->v_iflag & VI_DOOMED)) {
 1077                                 vfs_unbusy(mp);
 1078                                 error = ENOENT;
 1079                         }
 1080                         if (error != 0) {
 1081                                 m_freem(mrep);
 1082                                 return (error);
 1083                         }
 1084                 }
 1085                 VOP_UNLOCK(dvp, 0);
 1086                 error = nfs_nget(mp, fhp, fhsize, &np, cnp->cn_lkflags);
 1087                 if (error == 0)
 1088                         newvp = NFSTOV(np);
 1089                 vfs_unbusy(mp);
 1090                 if (newvp != dvp)
 1091                         vn_lock(dvp, ltype | LK_RETRY);
 1092                 if (dvp->v_iflag & VI_DOOMED) {
 1093                         if (error == 0) {
 1094                                 if (newvp == dvp)
 1095                                         vrele(newvp);
 1096                                 else
 1097                                         vput(newvp);
 1098                         }
 1099                         error = ENOENT;
 1100                 }
 1101                 if (error) {
 1102                         m_freem(mrep);
 1103                         return (error);
 1104                 }
 1105         } else if (NFS_CMPFH(np, fhp, fhsize)) {
 1106                 VREF(dvp);
 1107                 newvp = dvp;
 1108         } else {
 1109                 error = nfs_nget(mp, fhp, fhsize, &np, cnp->cn_lkflags);
 1110                 if (error) {
 1111                         m_freem(mrep);
 1112                         return (error);
 1113                 }
 1114                 newvp = NFSTOV(np);
 1115 
 1116                 /*
 1117                  * Flush the attribute cache when opening a leaf node
 1118                  * to ensure that fresh attributes are fetched in
 1119                  * nfs_open() if we are unable to fetch attributes
 1120                  * from the LOOKUP reply.
 1121                  */
 1122                 if ((flags & (ISLASTCN | ISOPEN)) == (ISLASTCN | ISOPEN) &&
 1123                     !(np->n_flag & NMODIFIED)) {
 1124                         mtx_lock(&np->n_mtx);
 1125                         np->n_attrstamp = 0;
 1126                         KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(newvp);
 1127                         mtx_unlock(&np->n_mtx);
 1128                 }
 1129         }
 1130         if (v3) {
 1131                 nfsm_postop_attr(newvp, attrflag);
 1132                 nfsm_postop_attr(dvp, attrflag);
 1133         } else
 1134                 nfsm_loadattr(newvp, NULL);
 1135         if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
 1136                 cnp->cn_flags |= SAVENAME;
 1137         if ((cnp->cn_flags & MAKEENTRY) &&
 1138             (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN))) {
 1139                 np->n_ctime = np->n_vattr.va_ctime;
 1140                 cache_enter(dvp, newvp, cnp);
 1141         }
 1142         *vpp = newvp;
 1143         m_freem(mrep);
 1144 nfsmout:
 1145         if (error) {
 1146                 if (newvp != NULLVP) {
 1147                         vput(newvp);
 1148                         *vpp = NULLVP;
 1149                 }
 1150 
 1151                 if (error != ENOENT)
 1152                         goto done;
 1153 
 1154                 /* The requested file was not found. */
 1155                 if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) &&
 1156                     (flags & ISLASTCN)) {
 1157                         /*
 1158                          * XXX: UFS does a full VOP_ACCESS(dvp,
 1159                          * VWRITE) here instead of just checking
 1160                          * MNT_RDONLY.
 1161                          */
 1162                         if (mp->mnt_flag & MNT_RDONLY)
 1163                                 return (EROFS);
 1164                         cnp->cn_flags |= SAVENAME;
 1165                         return (EJUSTRETURN);
 1166                 }
 1167 
 1168                 if ((cnp->cn_flags & MAKEENTRY) && cnp->cn_nameiop != CREATE) {
 1169                         /*
 1170                          * Maintain n_dmtime as the modification time
 1171                          * of the parent directory when the oldest -ve
 1172                          * name cache entry for this directory was
 1173                          * added.  If a -ve cache entry has already
 1174                          * been added with a newer modification time
 1175                          * by a concurrent lookup, then don't bother
 1176                          * adding a cache entry.  The modification
 1177                          * time of the directory might have changed
 1178                          * due to the file this lookup failed to find
 1179                          * being created.  In that case a subsequent
 1180                          * lookup would incorrectly use the entry
 1181                          * added here instead of doing an extra
 1182                          * lookup.
 1183                          */
 1184                         mtx_lock(&np->n_mtx);
 1185                         if (timespeccmp(&np->n_dmtime, &dmtime, <=)) {
 1186                                 if (!timespecisset(&np->n_dmtime)) {
 1187                                         np->n_dmtime = dmtime;
 1188                                         np->n_dmtime_ticks = ticks;
 1189                                 }
 1190                                 mtx_unlock(&np->n_mtx);
 1191                                 cache_enter(dvp, NULL, cnp);
 1192                         } else
 1193                                 mtx_unlock(&np->n_mtx);
 1194                 }
 1195                 return (ENOENT);
 1196         }
 1197 done:
 1198         return (error);
 1199 }
 1200 
 1201 /*
 1202  * nfs read call.
 1203  * Just call nfs_bioread() to do the work.
 1204  */
 1205 static int
 1206 nfs_read(struct vop_read_args *ap)
 1207 {
 1208         struct vnode *vp = ap->a_vp;
 1209 
 1210         switch (vp->v_type) {
 1211         case VREG:
 1212                 return (nfs_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred));
 1213         case VDIR:
 1214                 return (EISDIR);
 1215         default:
 1216                 return (EOPNOTSUPP);
 1217         }
 1218 }
 1219 
 1220 /*
 1221  * nfs readlink call
 1222  */
 1223 static int
 1224 nfs_readlink(struct vop_readlink_args *ap)
 1225 {
 1226         struct vnode *vp = ap->a_vp;
 1227 
 1228         if (vp->v_type != VLNK)
 1229                 return (EINVAL);
 1230         return (nfs_bioread(vp, ap->a_uio, 0, ap->a_cred));
 1231 }
 1232 
 1233 /*
 1234  * Do a readlink rpc.
 1235  * Called by nfs_doio() from below the buffer cache.
 1236  */
 1237 int
 1238 nfs_readlinkrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred)
 1239 {
 1240         caddr_t bpos, dpos;
 1241         int error = 0, len, attrflag;
 1242         struct mbuf *mreq, *mrep, *md, *mb;
 1243         int v3 = NFS_ISV3(vp);
 1244 
 1245         nfsstats.rpccnt[NFSPROC_READLINK]++;
 1246         mreq = nfsm_reqhead(vp, NFSPROC_READLINK, NFSX_FH(v3));
 1247         mb = mreq;
 1248         bpos = mtod(mb, caddr_t);
 1249         nfsm_fhtom(vp, v3);
 1250         nfsm_request(vp, NFSPROC_READLINK, uiop->uio_td, cred);
 1251         if (v3)
 1252                 nfsm_postop_attr(vp, attrflag);
 1253         if (!error) {
 1254                 nfsm_strsiz(len, NFS_MAXPATHLEN);
 1255                 if (len == NFS_MAXPATHLEN) {
 1256                         struct nfsnode *np = VTONFS(vp);
 1257                         mtx_lock(&np->n_mtx);
 1258                         if (np->n_size && np->n_size < NFS_MAXPATHLEN)
 1259                                 len = np->n_size;
 1260                         mtx_unlock(&np->n_mtx);
 1261                 }
 1262                 nfsm_mtouio(uiop, len);
 1263         }
 1264         m_freem(mrep);
 1265 nfsmout:
 1266         return (error);
 1267 }
 1268 
 1269 /*
 1270  * nfs read rpc call
 1271  * Ditto above
 1272  */
 1273 int
 1274 nfs_readrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred)
 1275 {
 1276         u_int32_t *tl;
 1277         caddr_t bpos, dpos;
 1278         struct mbuf *mreq, *mrep, *md, *mb;
 1279         struct nfsmount *nmp;
 1280         off_t end;
 1281         int error = 0, len, retlen, tsiz, eof, attrflag;
 1282         int v3 = NFS_ISV3(vp);
 1283         int rsize;
 1284 
 1285 #ifndef nolint
 1286         eof = 0;
 1287 #endif
 1288         nmp = VFSTONFS(vp->v_mount);
 1289         tsiz = uiop->uio_resid;
 1290         mtx_lock(&nmp->nm_mtx);
 1291         end = uiop->uio_offset + tsiz;
 1292         if (end > nmp->nm_maxfilesize || end < uiop->uio_offset) {
 1293                 mtx_unlock(&nmp->nm_mtx);
 1294                 return (EFBIG);
 1295         }
 1296         rsize = nmp->nm_rsize;
 1297         mtx_unlock(&nmp->nm_mtx);
 1298         while (tsiz > 0) {
 1299                 nfsstats.rpccnt[NFSPROC_READ]++;
 1300                 len = (tsiz > rsize) ? rsize : tsiz;
 1301                 mreq = nfsm_reqhead(vp, NFSPROC_READ, NFSX_FH(v3) + NFSX_UNSIGNED * 3);
 1302                 mb = mreq;
 1303                 bpos = mtod(mb, caddr_t);
 1304                 nfsm_fhtom(vp, v3);
 1305                 tl = nfsm_build(u_int32_t *, NFSX_UNSIGNED * 3);
 1306                 if (v3) {
 1307                         txdr_hyper(uiop->uio_offset, tl);
 1308                         *(tl + 2) = txdr_unsigned(len);
 1309                 } else {
 1310                         *tl++ = txdr_unsigned(uiop->uio_offset);
 1311                         *tl++ = txdr_unsigned(len);
 1312                         *tl = 0;
 1313                 }
 1314                 nfsm_request(vp, NFSPROC_READ, uiop->uio_td, cred);
 1315                 if (v3) {
 1316                         nfsm_postop_attr(vp, attrflag);
 1317                         if (error) {
 1318                                 m_freem(mrep);
 1319                                 goto nfsmout;
 1320                         }
 1321                         tl = nfsm_dissect(u_int32_t *, 2 * NFSX_UNSIGNED);
 1322                         eof = fxdr_unsigned(int, *(tl + 1));
 1323                 } else {
 1324                         nfsm_loadattr(vp, NULL);
 1325                 }
 1326                 nfsm_strsiz(retlen, rsize);
 1327                 nfsm_mtouio(uiop, retlen);
 1328                 m_freem(mrep);
 1329                 tsiz -= retlen;
 1330                 if (v3) {
 1331                         if (eof || retlen == 0) {
 1332                                 tsiz = 0;
 1333                         }
 1334                 } else if (retlen < len) {
 1335                         tsiz = 0;
 1336                 }
 1337         }
 1338 nfsmout:
 1339         return (error);
 1340 }
 1341 
 1342 /*
 1343  * nfs write call
 1344  */
 1345 int
 1346 nfs_writerpc(struct vnode *vp, struct uio *uiop, struct ucred *cred,
 1347              int *iomode, int *must_commit)
 1348 {
 1349         u_int32_t *tl;
 1350         int32_t backup;
 1351         caddr_t bpos, dpos;
 1352         struct mbuf *mreq, *mrep, *md, *mb;
 1353         struct nfsmount *nmp = VFSTONFS(vp->v_mount);
 1354         off_t end;
 1355         int error = 0, len, tsiz, wccflag = NFSV3_WCCRATTR, rlen, commit;
 1356         int v3 = NFS_ISV3(vp), committed = NFSV3WRITE_FILESYNC;
 1357         int wsize;
 1358         
 1359         KASSERT(uiop->uio_iovcnt == 1, ("nfs: writerpc iovcnt > 1"));
 1360         *must_commit = 0;
 1361         tsiz = uiop->uio_resid;
 1362         mtx_lock(&nmp->nm_mtx);
 1363         end = uiop->uio_offset + tsiz;
 1364         if (end > nmp->nm_maxfilesize || end < uiop->uio_offset) {
 1365                 mtx_unlock(&nmp->nm_mtx);               
 1366                 return (EFBIG);
 1367         }
 1368         wsize = nmp->nm_wsize;
 1369         mtx_unlock(&nmp->nm_mtx);
 1370         while (tsiz > 0) {
 1371                 nfsstats.rpccnt[NFSPROC_WRITE]++;
 1372                 len = (tsiz > wsize) ? wsize : tsiz;
 1373                 mreq = nfsm_reqhead(vp, NFSPROC_WRITE,
 1374                         NFSX_FH(v3) + 5 * NFSX_UNSIGNED + nfsm_rndup(len));
 1375                 mb = mreq;
 1376                 bpos = mtod(mb, caddr_t);
 1377                 nfsm_fhtom(vp, v3);
 1378                 if (v3) {
 1379                         tl = nfsm_build(u_int32_t *, 5 * NFSX_UNSIGNED);
 1380                         txdr_hyper(uiop->uio_offset, tl);
 1381                         tl += 2;
 1382                         *tl++ = txdr_unsigned(len);
 1383                         *tl++ = txdr_unsigned(*iomode);
 1384                         *tl = txdr_unsigned(len);
 1385                 } else {
 1386                         u_int32_t x;
 1387 
 1388                         tl = nfsm_build(u_int32_t *, 4 * NFSX_UNSIGNED);
 1389                         /* Set both "begin" and "current" to non-garbage. */
 1390                         x = txdr_unsigned((u_int32_t)uiop->uio_offset);
 1391                         *tl++ = x;      /* "begin offset" */
 1392                         *tl++ = x;      /* "current offset" */
 1393                         x = txdr_unsigned(len);
 1394                         *tl++ = x;      /* total to this offset */
 1395                         *tl = x;        /* size of this write */
 1396                 }
 1397                 nfsm_uiotom(uiop, len);
 1398                 nfsm_request(vp, NFSPROC_WRITE, uiop->uio_td, cred);
 1399                 if (v3) {
 1400                         wccflag = NFSV3_WCCCHK;
 1401                         nfsm_wcc_data(vp, wccflag);
 1402                         if (!error) {
 1403                                 tl = nfsm_dissect(u_int32_t *, 2 * NFSX_UNSIGNED
 1404                                         + NFSX_V3WRITEVERF);
 1405                                 rlen = fxdr_unsigned(int, *tl++);
 1406                                 if (rlen == 0) {
 1407                                         error = NFSERR_IO;
 1408                                         m_freem(mrep);
 1409                                         break;
 1410                                 } else if (rlen < len) {
 1411                                         backup = len - rlen;
 1412                                         uiop->uio_iov->iov_base =
 1413                                             (char *)uiop->uio_iov->iov_base -
 1414                                             backup;
 1415                                         uiop->uio_iov->iov_len += backup;
 1416                                         uiop->uio_offset -= backup;
 1417                                         uiop->uio_resid += backup;
 1418                                         len = rlen;
 1419                                 }
 1420                                 commit = fxdr_unsigned(int, *tl++);
 1421 
 1422                                 /*
 1423                                  * Return the lowest committment level
 1424                                  * obtained by any of the RPCs.
 1425                                  */
 1426                                 if (committed == NFSV3WRITE_FILESYNC)
 1427                                         committed = commit;
 1428                                 else if (committed == NFSV3WRITE_DATASYNC &&
 1429                                         commit == NFSV3WRITE_UNSTABLE)
 1430                                         committed = commit;
 1431                                 mtx_lock(&nmp->nm_mtx);
 1432                                 if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0){
 1433                                     bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
 1434                                         NFSX_V3WRITEVERF);
 1435                                     nmp->nm_state |= NFSSTA_HASWRITEVERF;
 1436                                 } else if (bcmp((caddr_t)tl,
 1437                                     (caddr_t)nmp->nm_verf, NFSX_V3WRITEVERF)) {
 1438                                     *must_commit = 1;
 1439                                     bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
 1440                                         NFSX_V3WRITEVERF);
 1441                                 }
 1442                                 mtx_unlock(&nmp->nm_mtx);
 1443                         }
 1444                 } else {
 1445                         nfsm_loadattr(vp, NULL);
 1446                 }
 1447                 if (wccflag) {
 1448                         mtx_lock(&(VTONFS(vp))->n_mtx);
 1449                         VTONFS(vp)->n_mtime = VTONFS(vp)->n_vattr.va_mtime;
 1450                         mtx_unlock(&(VTONFS(vp))->n_mtx);
 1451                 }
 1452                 m_freem(mrep);
 1453                 if (error)
 1454                         break;
 1455                 tsiz -= len;
 1456         }
 1457 nfsmout:
 1458         if (DOINGASYNC(vp))
 1459                 committed = NFSV3WRITE_FILESYNC;
 1460         *iomode = committed;
 1461         if (error)
 1462                 uiop->uio_resid = tsiz;
 1463         return (error);
 1464 }
 1465 
 1466 /*
 1467  * nfs mknod rpc
 1468  * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the
 1469  * mode set to specify the file type and the size field for rdev.
 1470  */
 1471 static int
 1472 nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
 1473     struct vattr *vap)
 1474 {
 1475         struct nfsv2_sattr *sp;
 1476         u_int32_t *tl;
 1477         struct vnode *newvp = NULL;
 1478         struct nfsnode *np = NULL;
 1479         struct vattr vattr;
 1480         caddr_t bpos, dpos;
 1481         int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0;
 1482         struct mbuf *mreq, *mrep, *md, *mb;
 1483         u_int32_t rdev;
 1484         int v3 = NFS_ISV3(dvp);
 1485 
 1486         if (vap->va_type == VCHR || vap->va_type == VBLK)
 1487                 rdev = txdr_unsigned(vap->va_rdev);
 1488         else if (vap->va_type == VFIFO || vap->va_type == VSOCK)
 1489                 rdev = nfs_xdrneg1;
 1490         else {
 1491                 return (EOPNOTSUPP);
 1492         }
 1493         if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred)) != 0)
 1494                 return (error);
 1495         nfsstats.rpccnt[NFSPROC_MKNOD]++;
 1496         mreq = nfsm_reqhead(dvp, NFSPROC_MKNOD, NFSX_FH(v3) + 4 * NFSX_UNSIGNED +
 1497                 + nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
 1498         mb = mreq;
 1499         bpos = mtod(mb, caddr_t);
 1500         nfsm_fhtom(dvp, v3);
 1501         nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
 1502         if (v3) {
 1503                 tl = nfsm_build(u_int32_t *, NFSX_UNSIGNED);
 1504                 *tl++ = vtonfsv3_type(vap->va_type);
 1505                 nfsm_v3attrbuild(vap, FALSE);
 1506                 if (vap->va_type == VCHR || vap->va_type == VBLK) {
 1507                         tl = nfsm_build(u_int32_t *, 2 * NFSX_UNSIGNED);
 1508                         *tl++ = txdr_unsigned(major(vap->va_rdev));
 1509                         *tl = txdr_unsigned(minor(vap->va_rdev));
 1510                 }
 1511         } else {
 1512                 sp = nfsm_build(struct nfsv2_sattr *, NFSX_V2SATTR);
 1513                 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
 1514                 sp->sa_uid = nfs_xdrneg1;
 1515                 sp->sa_gid = nfs_xdrneg1;
 1516                 sp->sa_size = rdev;
 1517                 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
 1518                 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
 1519         }
 1520         nfsm_request(dvp, NFSPROC_MKNOD, cnp->cn_thread, cnp->cn_cred);
 1521         if (!error) {
 1522                 nfsm_mtofh(dvp, newvp, v3, gotvp);
 1523                 if (!gotvp) {
 1524                         if (newvp) {
 1525                                 vput(newvp);
 1526                                 newvp = NULL;
 1527                         }
 1528                         error = nfs_lookitup(dvp, cnp->cn_nameptr,
 1529                             cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread, &np);
 1530                         if (!error)
 1531                                 newvp = NFSTOV(np);
 1532                 }
 1533         }
 1534         if (v3)
 1535                 nfsm_wcc_data(dvp, wccflag);
 1536         m_freem(mrep);
 1537 nfsmout:
 1538         if (error) {
 1539                 if (newvp)
 1540                         vput(newvp);
 1541         } else {
 1542                 if (cnp->cn_flags & MAKEENTRY)
 1543                         cache_enter(dvp, newvp, cnp);
 1544                 *vpp = newvp;
 1545         }
 1546         mtx_lock(&(VTONFS(dvp))->n_mtx);
 1547         VTONFS(dvp)->n_flag |= NMODIFIED;
 1548         if (!wccflag) {
 1549                 VTONFS(dvp)->n_attrstamp = 0;
 1550                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
 1551         }
 1552         mtx_unlock(&(VTONFS(dvp))->n_mtx);
 1553         return (error);
 1554 }
 1555 
 1556 /*
 1557  * nfs mknod vop
 1558  * just call nfs_mknodrpc() to do the work.
 1559  */
 1560 /* ARGSUSED */
 1561 static int
 1562 nfs_mknod(struct vop_mknod_args *ap)
 1563 {
 1564         return (nfs_mknodrpc(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap));
 1565 }
 1566 
 1567 static u_long create_verf;
 1568 /*
 1569  * nfs file create call
 1570  */
 1571 static int
 1572 nfs_create(struct vop_create_args *ap)
 1573 {
 1574         struct vnode *dvp = ap->a_dvp;
 1575         struct vattr *vap = ap->a_vap;
 1576         struct componentname *cnp = ap->a_cnp;
 1577         struct nfsv2_sattr *sp;
 1578         u_int32_t *tl;
 1579         struct nfsnode *np = NULL;
 1580         struct vnode *newvp = NULL;
 1581         caddr_t bpos, dpos;
 1582         int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0, fmode = 0;
 1583         struct mbuf *mreq, *mrep, *md, *mb;
 1584         struct vattr vattr;
 1585         int v3 = NFS_ISV3(dvp);
 1586 
 1587         /*
 1588          * Oops, not for me..
 1589          */
 1590         if (vap->va_type == VSOCK) {
 1591                 error = nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap);
 1592                 return (error);
 1593         }
 1594 
 1595         if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred)) != 0) {
 1596                 return (error);
 1597         }
 1598         if (vap->va_vaflags & VA_EXCLUSIVE)
 1599                 fmode |= O_EXCL;
 1600 again:
 1601         nfsstats.rpccnt[NFSPROC_CREATE]++;
 1602         mreq = nfsm_reqhead(dvp, NFSPROC_CREATE, NFSX_FH(v3) + 2 * NFSX_UNSIGNED +
 1603                 nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
 1604         mb = mreq;
 1605         bpos = mtod(mb, caddr_t);
 1606         nfsm_fhtom(dvp, v3);
 1607         nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
 1608         if (v3) {
 1609                 tl = nfsm_build(u_int32_t *, NFSX_UNSIGNED);
 1610                 if (fmode & O_EXCL) {
 1611                         *tl = txdr_unsigned(NFSV3CREATE_EXCLUSIVE);
 1612                         tl = nfsm_build(u_int32_t *, NFSX_V3CREATEVERF);
 1613 #ifdef INET
 1614                         CURVNET_SET(CRED_TO_VNET(cnp->cn_cred));
 1615                         IN_IFADDR_RLOCK();
 1616                         if (!TAILQ_EMPTY(&V_in_ifaddrhead))
 1617                                 *tl++ = IA_SIN(TAILQ_FIRST(&V_in_ifaddrhead))->sin_addr.s_addr;
 1618                         else
 1619 #endif
 1620                                 *tl++ = create_verf;
 1621 #ifdef INET
 1622                         IN_IFADDR_RUNLOCK();
 1623                         CURVNET_RESTORE();
 1624 #endif
 1625                         *tl = ++create_verf;
 1626                 } else {
 1627                         *tl = txdr_unsigned(NFSV3CREATE_UNCHECKED);
 1628                         nfsm_v3attrbuild(vap, FALSE);
 1629                 }
 1630         } else {
 1631                 sp = nfsm_build(struct nfsv2_sattr *, NFSX_V2SATTR);
 1632                 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
 1633                 sp->sa_uid = nfs_xdrneg1;
 1634                 sp->sa_gid = nfs_xdrneg1;
 1635                 sp->sa_size = 0;
 1636                 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
 1637                 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
 1638         }
 1639         nfsm_request(dvp, NFSPROC_CREATE, cnp->cn_thread, cnp->cn_cred);
 1640         if (!error) {
 1641                 nfsm_mtofh(dvp, newvp, v3, gotvp);
 1642                 if (!gotvp) {
 1643                         if (newvp) {
 1644                                 vput(newvp);
 1645                                 newvp = NULL;
 1646                         }
 1647                         error = nfs_lookitup(dvp, cnp->cn_nameptr,
 1648                             cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread, &np);
 1649                         if (!error)
 1650                                 newvp = NFSTOV(np);
 1651                 }
 1652         }
 1653         if (v3)
 1654                 nfsm_wcc_data(dvp, wccflag);
 1655         m_freem(mrep);
 1656 nfsmout:
 1657         if (error) {
 1658                 if (v3 && (fmode & O_EXCL) && error == NFSERR_NOTSUPP) {
 1659                         fmode &= ~O_EXCL;
 1660                         goto again;
 1661                 }
 1662                 if (newvp)
 1663                         vput(newvp);
 1664         } else if (v3 && (fmode & O_EXCL)) {
 1665                 /*
 1666                  * We are normally called with only a partially initialized
 1667                  * VAP.  Since the NFSv3 spec says that server may use the
 1668                  * file attributes to store the verifier, the spec requires
 1669                  * us to do a SETATTR RPC. FreeBSD servers store the verifier
 1670                  * in atime, but we can't really assume that all servers will
 1671                  * so we ensure that our SETATTR sets both atime and mtime.
 1672                  */
 1673                 if (vap->va_mtime.tv_sec == VNOVAL)
 1674                         vfs_timestamp(&vap->va_mtime);
 1675                 if (vap->va_atime.tv_sec == VNOVAL)
 1676                         vap->va_atime = vap->va_mtime;
 1677                 error = nfs_setattrrpc(newvp, vap, cnp->cn_cred);
 1678                 if (error)
 1679                         vput(newvp);
 1680         }
 1681         if (!error) {
 1682                 if (cnp->cn_flags & MAKEENTRY)
 1683                         cache_enter(dvp, newvp, cnp);
 1684                 *ap->a_vpp = newvp;
 1685         }
 1686         mtx_lock(&(VTONFS(dvp))->n_mtx);
 1687         VTONFS(dvp)->n_flag |= NMODIFIED;
 1688         if (!wccflag) {
 1689                 VTONFS(dvp)->n_attrstamp = 0;
 1690                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
 1691         }
 1692         mtx_unlock(&(VTONFS(dvp))->n_mtx);
 1693         return (error);
 1694 }
 1695 
 1696 /*
 1697  * nfs file remove call
 1698  * To try and make nfs semantics closer to ufs semantics, a file that has
 1699  * other processes using the vnode is renamed instead of removed and then
 1700  * removed later on the last close.
 1701  * - If v_usecount > 1
 1702  *        If a rename is not already in the works
 1703  *           call nfs_sillyrename() to set it up
 1704  *     else
 1705  *        do the remove rpc
 1706  */
 1707 static int
 1708 nfs_remove(struct vop_remove_args *ap)
 1709 {
 1710         struct vnode *vp = ap->a_vp;
 1711         struct vnode *dvp = ap->a_dvp;
 1712         struct componentname *cnp = ap->a_cnp;
 1713         struct nfsnode *np = VTONFS(vp);
 1714         int error = 0;
 1715         struct vattr vattr;
 1716 
 1717         KASSERT((cnp->cn_flags & HASBUF) != 0, ("nfs_remove: no name"));
 1718         KASSERT(vrefcnt(vp) > 0, ("nfs_remove: bad v_usecount"));
 1719         if (vp->v_type == VDIR)
 1720                 error = EPERM;
 1721         else if (vrefcnt(vp) == 1 || (np->n_sillyrename &&
 1722             !VOP_GETATTR(vp, &vattr, cnp->cn_cred) && vattr.va_nlink > 1)) {
 1723                 /*
 1724                  * Purge the name cache so that the chance of a lookup for
 1725                  * the name succeeding while the remove is in progress is
 1726                  * minimized. Without node locking it can still happen, such
 1727                  * that an I/O op returns ESTALE, but since you get this if
 1728                  * another host removes the file..
 1729                  */
 1730                 cache_purge(vp);
 1731                 /*
 1732                  * throw away biocache buffers, mainly to avoid
 1733                  * unnecessary delayed writes later.
 1734                  */
 1735                 error = nfs_vinvalbuf(vp, 0, cnp->cn_thread, 1);
 1736                 /* Do the rpc */
 1737                 if (error != EINTR && error != EIO)
 1738                         error = nfs_removerpc(dvp, cnp->cn_nameptr,
 1739                                 cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread);
 1740                 /*
 1741                  * Kludge City: If the first reply to the remove rpc is lost..
 1742                  *   the reply to the retransmitted request will be ENOENT
 1743                  *   since the file was in fact removed
 1744                  *   Therefore, we cheat and return success.
 1745                  */
 1746                 if (error == ENOENT)
 1747                         error = 0;
 1748         } else if (!np->n_sillyrename)
 1749                 error = nfs_sillyrename(dvp, vp, cnp);
 1750         mtx_lock(&np->n_mtx);
 1751         np->n_attrstamp = 0;
 1752         mtx_unlock(&np->n_mtx);
 1753         KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
 1754         return (error);
 1755 }
 1756 
 1757 /*
 1758  * nfs file remove rpc called from nfs_inactive
 1759  */
 1760 int
 1761 nfs_removeit(struct sillyrename *sp)
 1762 {
 1763         /*
 1764          * Make sure that the directory vnode is still valid.
 1765          * XXX we should lock sp->s_dvp here.
 1766          */
 1767         if (sp->s_dvp->v_type == VBAD)
 1768                 return (0);
 1769         return (nfs_removerpc(sp->s_dvp, sp->s_name, sp->s_namlen, sp->s_cred,
 1770                 NULL));
 1771 }
 1772 
 1773 /*
 1774  * Nfs remove rpc, called from nfs_remove() and nfs_removeit().
 1775  */
 1776 static int
 1777 nfs_removerpc(struct vnode *dvp, const char *name, int namelen,
 1778     struct ucred *cred, struct thread *td)
 1779 {
 1780         caddr_t bpos, dpos;
 1781         int error = 0, wccflag = NFSV3_WCCRATTR;
 1782         struct mbuf *mreq, *mrep, *md, *mb;
 1783         int v3 = NFS_ISV3(dvp);
 1784 
 1785         nfsstats.rpccnt[NFSPROC_REMOVE]++;
 1786         mreq = nfsm_reqhead(dvp, NFSPROC_REMOVE,
 1787                 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(namelen));
 1788         mb = mreq;
 1789         bpos = mtod(mb, caddr_t);
 1790         nfsm_fhtom(dvp, v3);
 1791         nfsm_strtom(name, namelen, NFS_MAXNAMLEN);
 1792         nfsm_request(dvp, NFSPROC_REMOVE, td, cred);
 1793         if (v3)
 1794                 nfsm_wcc_data(dvp, wccflag);
 1795         m_freem(mrep);
 1796 nfsmout:
 1797         mtx_lock(&(VTONFS(dvp))->n_mtx);
 1798         VTONFS(dvp)->n_flag |= NMODIFIED;
 1799         if (!wccflag) {
 1800                 VTONFS(dvp)->n_attrstamp = 0;
 1801                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
 1802         }
 1803         mtx_unlock(&(VTONFS(dvp))->n_mtx);
 1804         return (error);
 1805 }
 1806 
 1807 /*
 1808  * nfs file rename call
 1809  */
 1810 static int
 1811 nfs_rename(struct vop_rename_args *ap)
 1812 {
 1813         struct vnode *fvp = ap->a_fvp;
 1814         struct vnode *tvp = ap->a_tvp;
 1815         struct vnode *fdvp = ap->a_fdvp;
 1816         struct vnode *tdvp = ap->a_tdvp;
 1817         struct componentname *tcnp = ap->a_tcnp;
 1818         struct componentname *fcnp = ap->a_fcnp;
 1819         int error;
 1820 
 1821         KASSERT((tcnp->cn_flags & HASBUF) != 0 &&
 1822             (fcnp->cn_flags & HASBUF) != 0, ("nfs_rename: no name"));
 1823         /* Check for cross-device rename */
 1824         if ((fvp->v_mount != tdvp->v_mount) ||
 1825             (tvp && (fvp->v_mount != tvp->v_mount))) {
 1826                 error = EXDEV;
 1827                 goto out;
 1828         }
 1829 
 1830         if (fvp == tvp) {
 1831                 nfs_printf("nfs_rename: fvp == tvp (can't happen)\n");
 1832                 error = 0;
 1833                 goto out;
 1834         }
 1835         if ((error = vn_lock(fvp, LK_EXCLUSIVE)) != 0)
 1836                 goto out;
 1837 
 1838         /*
 1839          * We have to flush B_DELWRI data prior to renaming
 1840          * the file.  If we don't, the delayed-write buffers
 1841          * can be flushed out later after the file has gone stale
 1842          * under NFSV3.  NFSV2 does not have this problem because
 1843          * ( as far as I can tell ) it flushes dirty buffers more
 1844          * often.
 1845          * 
 1846          * Skip the rename operation if the fsync fails, this can happen
 1847          * due to the server's volume being full, when we pushed out data
 1848          * that was written back to our cache earlier. Not checking for
 1849          * this condition can result in potential (silent) data loss.
 1850          */
 1851         error = VOP_FSYNC(fvp, MNT_WAIT, fcnp->cn_thread);
 1852         VOP_UNLOCK(fvp, 0);
 1853         if (!error && tvp)
 1854                 error = VOP_FSYNC(tvp, MNT_WAIT, tcnp->cn_thread);
 1855         if (error)
 1856                 goto out;
 1857 
 1858         /*
 1859          * If the tvp exists and is in use, sillyrename it before doing the
 1860          * rename of the new file over it.
 1861          * XXX Can't sillyrename a directory.
 1862          */
 1863         if (tvp && vrefcnt(tvp) > 1 && !VTONFS(tvp)->n_sillyrename &&
 1864                 tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp)) {
 1865                 vput(tvp);
 1866                 tvp = NULL;
 1867         }
 1868 
 1869         error = nfs_renamerpc(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen,
 1870                 tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred,
 1871                 tcnp->cn_thread);
 1872 
 1873         if (fvp->v_type == VDIR) {
 1874                 if (tvp != NULL && tvp->v_type == VDIR)
 1875                         cache_purge(tdvp);
 1876                 cache_purge(fdvp);
 1877         }
 1878 
 1879 out:
 1880         if (tdvp == tvp)
 1881                 vrele(tdvp);
 1882         else
 1883                 vput(tdvp);
 1884         if (tvp)
 1885                 vput(tvp);
 1886         vrele(fdvp);
 1887         vrele(fvp);
 1888         /*
 1889          * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
 1890          */
 1891         if (error == ENOENT)
 1892                 error = 0;
 1893         return (error);
 1894 }
 1895 
 1896 /*
 1897  * nfs file rename rpc called from nfs_remove() above
 1898  */
 1899 static int
 1900 nfs_renameit(struct vnode *sdvp, struct componentname *scnp,
 1901     struct sillyrename *sp)
 1902 {
 1903 
 1904         return (nfs_renamerpc(sdvp, scnp->cn_nameptr, scnp->cn_namelen, sdvp,
 1905             sp->s_name, sp->s_namlen, scnp->cn_cred, scnp->cn_thread));
 1906 }
 1907 
 1908 /*
 1909  * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit().
 1910  */
 1911 static int
 1912 nfs_renamerpc(struct vnode *fdvp, const char *fnameptr, int fnamelen,
 1913     struct vnode *tdvp, const char *tnameptr, int tnamelen, struct ucred *cred,
 1914     struct thread *td)
 1915 {
 1916         caddr_t bpos, dpos;
 1917         int error = 0, fwccflag = NFSV3_WCCRATTR, twccflag = NFSV3_WCCRATTR;
 1918         struct mbuf *mreq, *mrep, *md, *mb;
 1919         int v3 = NFS_ISV3(fdvp);
 1920 
 1921         nfsstats.rpccnt[NFSPROC_RENAME]++;
 1922         mreq = nfsm_reqhead(fdvp, NFSPROC_RENAME,
 1923                 (NFSX_FH(v3) + NFSX_UNSIGNED)*2 + nfsm_rndup(fnamelen) +
 1924                 nfsm_rndup(tnamelen));
 1925         mb = mreq;
 1926         bpos = mtod(mb, caddr_t);
 1927         nfsm_fhtom(fdvp, v3);
 1928         nfsm_strtom(fnameptr, fnamelen, NFS_MAXNAMLEN);
 1929         nfsm_fhtom(tdvp, v3);
 1930         nfsm_strtom(tnameptr, tnamelen, NFS_MAXNAMLEN);
 1931         nfsm_request(fdvp, NFSPROC_RENAME, td, cred);
 1932         if (v3) {
 1933                 nfsm_wcc_data(fdvp, fwccflag);
 1934                 nfsm_wcc_data(tdvp, twccflag);
 1935         }
 1936         m_freem(mrep);
 1937 nfsmout:
 1938         mtx_lock(&(VTONFS(fdvp))->n_mtx);
 1939         VTONFS(fdvp)->n_flag |= NMODIFIED;
 1940         mtx_unlock(&(VTONFS(fdvp))->n_mtx);
 1941         mtx_lock(&(VTONFS(tdvp))->n_mtx);
 1942         VTONFS(tdvp)->n_flag |= NMODIFIED;
 1943         mtx_unlock(&(VTONFS(tdvp))->n_mtx);
 1944         if (!fwccflag) {
 1945                 VTONFS(fdvp)->n_attrstamp = 0;
 1946                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(fdvp);
 1947         }
 1948         if (!twccflag) {
 1949                 VTONFS(tdvp)->n_attrstamp = 0;
 1950                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(tdvp);
 1951         }
 1952         return (error);
 1953 }
 1954 
 1955 /*
 1956  * nfs hard link create call
 1957  */
 1958 static int
 1959 nfs_link(struct vop_link_args *ap)
 1960 {
 1961         struct vnode *vp = ap->a_vp;
 1962         struct vnode *tdvp = ap->a_tdvp;
 1963         struct componentname *cnp = ap->a_cnp;
 1964         caddr_t bpos, dpos;
 1965         int error = 0, wccflag = NFSV3_WCCRATTR, attrflag = 0;
 1966         struct mbuf *mreq, *mrep, *md, *mb;
 1967         int v3;
 1968 
 1969         if (vp->v_mount != tdvp->v_mount) {
 1970                 return (EXDEV);
 1971         }
 1972 
 1973         /*
 1974          * Push all writes to the server, so that the attribute cache
 1975          * doesn't get "out of sync" with the server.
 1976          * XXX There should be a better way!
 1977          */
 1978         VOP_FSYNC(vp, MNT_WAIT, cnp->cn_thread);
 1979 
 1980         v3 = NFS_ISV3(vp);
 1981         nfsstats.rpccnt[NFSPROC_LINK]++;
 1982         mreq = nfsm_reqhead(vp, NFSPROC_LINK,
 1983                 NFSX_FH(v3)*2 + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
 1984         mb = mreq;
 1985         bpos = mtod(mb, caddr_t);
 1986         nfsm_fhtom(vp, v3);
 1987         nfsm_fhtom(tdvp, v3);
 1988         nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
 1989         nfsm_request(vp, NFSPROC_LINK, cnp->cn_thread, cnp->cn_cred);
 1990         if (v3) {
 1991                 nfsm_postop_attr(vp, attrflag);
 1992                 nfsm_wcc_data(tdvp, wccflag);
 1993         }
 1994         m_freem(mrep);
 1995 nfsmout:
 1996         mtx_lock(&(VTONFS(tdvp))->n_mtx);
 1997         VTONFS(tdvp)->n_flag |= NMODIFIED;
 1998         mtx_unlock(&(VTONFS(tdvp))->n_mtx);
 1999         if (!attrflag) {
 2000                 VTONFS(vp)->n_attrstamp = 0;
 2001                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
 2002         }
 2003         if (!wccflag) {
 2004                 VTONFS(tdvp)->n_attrstamp = 0;
 2005                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(tdvp);
 2006         }
 2007         return (error);
 2008 }
 2009 
 2010 /*
 2011  * nfs symbolic link create call
 2012  */
 2013 static int
 2014 nfs_symlink(struct vop_symlink_args *ap)
 2015 {
 2016         struct vnode *dvp = ap->a_dvp;
 2017         struct vattr *vap = ap->a_vap;
 2018         struct componentname *cnp = ap->a_cnp;
 2019         struct nfsv2_sattr *sp;
 2020         caddr_t bpos, dpos;
 2021         int slen, error = 0, wccflag = NFSV3_WCCRATTR, gotvp;
 2022         struct mbuf *mreq, *mrep, *md, *mb;
 2023         struct vnode *newvp = NULL;
 2024         int v3 = NFS_ISV3(dvp);
 2025 
 2026         nfsstats.rpccnt[NFSPROC_SYMLINK]++;
 2027         slen = strlen(ap->a_target);
 2028         mreq = nfsm_reqhead(dvp, NFSPROC_SYMLINK, NFSX_FH(v3) + 2*NFSX_UNSIGNED +
 2029             nfsm_rndup(cnp->cn_namelen) + nfsm_rndup(slen) + NFSX_SATTR(v3));
 2030         mb = mreq;
 2031         bpos = mtod(mb, caddr_t);
 2032         nfsm_fhtom(dvp, v3);
 2033         nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
 2034         if (v3) {
 2035                 nfsm_v3attrbuild(vap, FALSE);
 2036         }
 2037         nfsm_strtom(ap->a_target, slen, NFS_MAXPATHLEN);
 2038         if (!v3) {
 2039                 sp = nfsm_build(struct nfsv2_sattr *, NFSX_V2SATTR);
 2040                 sp->sa_mode = vtonfsv2_mode(VLNK, vap->va_mode);
 2041                 sp->sa_uid = nfs_xdrneg1;
 2042                 sp->sa_gid = nfs_xdrneg1;
 2043                 sp->sa_size = nfs_xdrneg1;
 2044                 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
 2045                 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
 2046         }
 2047 
 2048         /*
 2049          * Issue the NFS request and get the rpc response.
 2050          *
 2051          * Only NFSv3 responses returning an error of 0 actually return
 2052          * a file handle that can be converted into newvp without having
 2053          * to do an extra lookup rpc.
 2054          */
 2055         nfsm_request(dvp, NFSPROC_SYMLINK, cnp->cn_thread, cnp->cn_cred);
 2056         if (v3) {
 2057                 if (error == 0)
 2058                         nfsm_mtofh(dvp, newvp, v3, gotvp);
 2059                 nfsm_wcc_data(dvp, wccflag);
 2060         }
 2061 
 2062         /*
 2063          * out code jumps -> here, mrep is also freed.
 2064          */
 2065 
 2066         m_freem(mrep);
 2067 nfsmout:
 2068 
 2069         /*
 2070          * If we do not have an error and we could not extract the newvp from
 2071          * the response due to the request being NFSv2, we have to do a
 2072          * lookup in order to obtain a newvp to return.
 2073          */
 2074         if (error == 0 && newvp == NULL) {
 2075                 struct nfsnode *np = NULL;
 2076 
 2077                 error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
 2078                     cnp->cn_cred, cnp->cn_thread, &np);
 2079                 if (!error)
 2080                         newvp = NFSTOV(np);
 2081         }
 2082         if (error) {
 2083                 if (newvp)
 2084                         vput(newvp);
 2085         } else {
 2086                 *ap->a_vpp = newvp;
 2087         }
 2088         mtx_lock(&(VTONFS(dvp))->n_mtx);
 2089         VTONFS(dvp)->n_flag |= NMODIFIED;
 2090         mtx_unlock(&(VTONFS(dvp))->n_mtx);
 2091         if (!wccflag) {
 2092                 VTONFS(dvp)->n_attrstamp = 0;
 2093                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
 2094         }
 2095         return (error);
 2096 }
 2097 
 2098 /*
 2099  * nfs make dir call
 2100  */
 2101 static int
 2102 nfs_mkdir(struct vop_mkdir_args *ap)
 2103 {
 2104         struct vnode *dvp = ap->a_dvp;
 2105         struct vattr *vap = ap->a_vap;
 2106         struct componentname *cnp = ap->a_cnp;
 2107         struct nfsv2_sattr *sp;
 2108         int len;
 2109         struct nfsnode *np = NULL;
 2110         struct vnode *newvp = NULL;
 2111         caddr_t bpos, dpos;
 2112         int error = 0, wccflag = NFSV3_WCCRATTR;
 2113         int gotvp = 0;
 2114         struct mbuf *mreq, *mrep, *md, *mb;
 2115         struct vattr vattr;
 2116         int v3 = NFS_ISV3(dvp);
 2117 
 2118         if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred)) != 0)
 2119                 return (error);
 2120         len = cnp->cn_namelen;
 2121         nfsstats.rpccnt[NFSPROC_MKDIR]++;
 2122         mreq = nfsm_reqhead(dvp, NFSPROC_MKDIR,
 2123           NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len) + NFSX_SATTR(v3));
 2124         mb = mreq;
 2125         bpos = mtod(mb, caddr_t);
 2126         nfsm_fhtom(dvp, v3);
 2127         nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
 2128         if (v3) {
 2129                 nfsm_v3attrbuild(vap, FALSE);
 2130         } else {
 2131                 sp = nfsm_build(struct nfsv2_sattr *, NFSX_V2SATTR);
 2132                 sp->sa_mode = vtonfsv2_mode(VDIR, vap->va_mode);
 2133                 sp->sa_uid = nfs_xdrneg1;
 2134                 sp->sa_gid = nfs_xdrneg1;
 2135                 sp->sa_size = nfs_xdrneg1;
 2136                 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
 2137                 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
 2138         }
 2139         nfsm_request(dvp, NFSPROC_MKDIR, cnp->cn_thread, cnp->cn_cred);
 2140         if (!error)
 2141                 nfsm_mtofh(dvp, newvp, v3, gotvp);
 2142         if (v3)
 2143                 nfsm_wcc_data(dvp, wccflag);
 2144         m_freem(mrep);
 2145 nfsmout:
 2146         mtx_lock(&(VTONFS(dvp))->n_mtx);
 2147         VTONFS(dvp)->n_flag |= NMODIFIED;
 2148         mtx_unlock(&(VTONFS(dvp))->n_mtx);
 2149         if (!wccflag) {
 2150                 VTONFS(dvp)->n_attrstamp = 0;
 2151                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
 2152         }
 2153         if (error == 0 && newvp == NULL) {
 2154                 error = nfs_lookitup(dvp, cnp->cn_nameptr, len, cnp->cn_cred,
 2155                         cnp->cn_thread, &np);
 2156                 if (!error) {
 2157                         newvp = NFSTOV(np);
 2158                         if (newvp->v_type != VDIR)
 2159                                 error = EEXIST;
 2160                 }
 2161         }
 2162         if (error) {
 2163                 if (newvp)
 2164                         vput(newvp);
 2165         } else
 2166                 *ap->a_vpp = newvp;
 2167         return (error);
 2168 }
 2169 
 2170 /*
 2171  * nfs remove directory call
 2172  */
 2173 static int
 2174 nfs_rmdir(struct vop_rmdir_args *ap)
 2175 {
 2176         struct vnode *vp = ap->a_vp;
 2177         struct vnode *dvp = ap->a_dvp;
 2178         struct componentname *cnp = ap->a_cnp;
 2179         caddr_t bpos, dpos;
 2180         int error = 0, wccflag = NFSV3_WCCRATTR;
 2181         struct mbuf *mreq, *mrep, *md, *mb;
 2182         int v3 = NFS_ISV3(dvp);
 2183 
 2184         if (dvp == vp)
 2185                 return (EINVAL);
 2186         nfsstats.rpccnt[NFSPROC_RMDIR]++;
 2187         mreq = nfsm_reqhead(dvp, NFSPROC_RMDIR,
 2188                 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
 2189         mb = mreq;
 2190         bpos = mtod(mb, caddr_t);
 2191         nfsm_fhtom(dvp, v3);
 2192         nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
 2193         nfsm_request(dvp, NFSPROC_RMDIR, cnp->cn_thread, cnp->cn_cred);
 2194         if (v3)
 2195                 nfsm_wcc_data(dvp, wccflag);
 2196         m_freem(mrep);
 2197 nfsmout:
 2198         mtx_lock(&(VTONFS(dvp))->n_mtx);
 2199         VTONFS(dvp)->n_flag |= NMODIFIED;
 2200         mtx_unlock(&(VTONFS(dvp))->n_mtx);
 2201         if (!wccflag) {
 2202                 VTONFS(dvp)->n_attrstamp = 0;
 2203                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
 2204         }
 2205         cache_purge(dvp);
 2206         cache_purge(vp);
 2207         /*
 2208          * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry.
 2209          */
 2210         if (error == ENOENT)
 2211                 error = 0;
 2212         return (error);
 2213 }
 2214 
 2215 /*
 2216  * nfs readdir call
 2217  */
 2218 static int
 2219 nfs_readdir(struct vop_readdir_args *ap)
 2220 {
 2221         struct vnode *vp = ap->a_vp;
 2222         struct nfsnode *np = VTONFS(vp);
 2223         struct uio *uio = ap->a_uio;
 2224         int tresid, error = 0;
 2225         struct vattr vattr;
 2226         
 2227         if (vp->v_type != VDIR) 
 2228                 return(EPERM);
 2229 
 2230         /*
 2231          * First, check for hit on the EOF offset cache
 2232          */
 2233         if (np->n_direofoffset > 0 && uio->uio_offset >= np->n_direofoffset &&
 2234             (np->n_flag & NMODIFIED) == 0) {
 2235                 if (VOP_GETATTR(vp, &vattr, ap->a_cred) == 0) {
 2236                         mtx_lock(&np->n_mtx);
 2237                         if (!NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime)) {
 2238                                 mtx_unlock(&np->n_mtx);
 2239                                 nfsstats.direofcache_hits++;
 2240                                 goto out;
 2241                         } else
 2242                                 mtx_unlock(&np->n_mtx);
 2243                 }
 2244         }
 2245 
 2246         /*
 2247          * Call nfs_bioread() to do the real work.
 2248          */
 2249         tresid = uio->uio_resid;
 2250         error = nfs_bioread(vp, uio, 0, ap->a_cred);
 2251 
 2252         if (!error && uio->uio_resid == tresid) {
 2253                 nfsstats.direofcache_misses++;
 2254         }
 2255 out:
 2256         return (error);
 2257 }
 2258 
 2259 /*
 2260  * Readdir rpc call.
 2261  * Called from below the buffer cache by nfs_doio().
 2262  */
 2263 int
 2264 nfs_readdirrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred)
 2265 {
 2266         int len, left;
 2267         struct dirent *dp = NULL;
 2268         u_int32_t *tl;
 2269         caddr_t cp;
 2270         nfsuint64 *cookiep;
 2271         caddr_t bpos, dpos;
 2272         struct mbuf *mreq, *mrep, *md, *mb;
 2273         nfsuint64 cookie;
 2274         struct nfsmount *nmp = VFSTONFS(vp->v_mount);
 2275         struct nfsnode *dnp = VTONFS(vp);
 2276         u_quad_t fileno;
 2277         int error = 0, tlen, more_dirs = 1, blksiz = 0, bigenough = 1;
 2278         int attrflag;
 2279         int v3 = NFS_ISV3(vp);
 2280 
 2281         KASSERT(uiop->uio_iovcnt == 1 &&
 2282             (uiop->uio_offset & (DIRBLKSIZ - 1)) == 0 &&
 2283             (uiop->uio_resid & (DIRBLKSIZ - 1)) == 0,
 2284             ("nfs readdirrpc bad uio"));
 2285 
 2286         /*
 2287          * If there is no cookie, assume directory was stale.
 2288          */
 2289         nfs_dircookie_lock(dnp);
 2290         cookiep = nfs_getcookie(dnp, uiop->uio_offset, 0);
 2291         if (cookiep) {
 2292                 cookie = *cookiep;
 2293                 nfs_dircookie_unlock(dnp);
 2294         } else {
 2295                 nfs_dircookie_unlock(dnp);              
 2296                 return (NFSERR_BAD_COOKIE);
 2297         }
 2298 
 2299         /*
 2300          * Loop around doing readdir rpc's of size nm_readdirsize
 2301          * truncated to a multiple of DIRBLKSIZ.
 2302          * The stopping criteria is EOF or buffer full.
 2303          */
 2304         while (more_dirs && bigenough) {
 2305                 nfsstats.rpccnt[NFSPROC_READDIR]++;
 2306                 mreq = nfsm_reqhead(vp, NFSPROC_READDIR, NFSX_FH(v3) +
 2307                         NFSX_READDIR(v3));
 2308                 mb = mreq;
 2309                 bpos = mtod(mb, caddr_t);
 2310                 nfsm_fhtom(vp, v3);
 2311                 if (v3) {
 2312                         tl = nfsm_build(u_int32_t *, 5 * NFSX_UNSIGNED);
 2313                         *tl++ = cookie.nfsuquad[0];
 2314                         *tl++ = cookie.nfsuquad[1];
 2315                         mtx_lock(&dnp->n_mtx);
 2316                         *tl++ = dnp->n_cookieverf.nfsuquad[0];
 2317                         *tl++ = dnp->n_cookieverf.nfsuquad[1];
 2318                         mtx_unlock(&dnp->n_mtx);
 2319                 } else {
 2320                         tl = nfsm_build(u_int32_t *, 2 * NFSX_UNSIGNED);
 2321                         *tl++ = cookie.nfsuquad[0];
 2322                 }
 2323                 *tl = txdr_unsigned(nmp->nm_readdirsize);
 2324                 nfsm_request(vp, NFSPROC_READDIR, uiop->uio_td, cred);
 2325                 if (v3) {
 2326                         nfsm_postop_attr(vp, attrflag);
 2327                         if (!error) {
 2328                                 tl = nfsm_dissect(u_int32_t *,
 2329                                     2 * NFSX_UNSIGNED);
 2330                                 mtx_lock(&dnp->n_mtx);
 2331                                 dnp->n_cookieverf.nfsuquad[0] = *tl++;
 2332                                 dnp->n_cookieverf.nfsuquad[1] = *tl;
 2333                                 mtx_unlock(&dnp->n_mtx);
 2334                         } else {
 2335                                 m_freem(mrep);
 2336                                 goto nfsmout;
 2337                         }
 2338                 }
 2339                 tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
 2340                 more_dirs = fxdr_unsigned(int, *tl);
 2341 
 2342                 /* loop thru the dir entries, doctoring them to 4bsd form */
 2343                 while (more_dirs && bigenough) {
 2344                         if (v3) {
 2345                                 tl = nfsm_dissect(u_int32_t *,
 2346                                     3 * NFSX_UNSIGNED);
 2347                                 fileno = fxdr_hyper(tl);
 2348                                 len = fxdr_unsigned(int, *(tl + 2));
 2349                         } else {
 2350                                 tl = nfsm_dissect(u_int32_t *,
 2351                                     2 * NFSX_UNSIGNED);
 2352                                 fileno = fxdr_unsigned(u_quad_t, *tl++);
 2353                                 len = fxdr_unsigned(int, *tl);
 2354                         }
 2355                         if (len <= 0 || len > NFS_MAXNAMLEN) {
 2356                                 error = EBADRPC;
 2357                                 m_freem(mrep);
 2358                                 goto nfsmout;
 2359                         }
 2360                         tlen = nfsm_rndup(len);
 2361                         if (tlen == len)
 2362                                 tlen += 4;      /* To ensure null termination */
 2363                         left = DIRBLKSIZ - blksiz;
 2364                         if ((tlen + DIRHDSIZ) > left) {
 2365                                 dp->d_reclen += left;
 2366                                 uiop->uio_iov->iov_base =
 2367                                     (char *)uiop->uio_iov->iov_base + left;
 2368                                 uiop->uio_iov->iov_len -= left;
 2369                                 uiop->uio_offset += left;
 2370                                 uiop->uio_resid -= left;
 2371                                 blksiz = 0;
 2372                         }
 2373                         if ((tlen + DIRHDSIZ) > uiop->uio_resid)
 2374                                 bigenough = 0;
 2375                         if (bigenough) {
 2376                                 dp = (struct dirent *)uiop->uio_iov->iov_base;
 2377                                 dp->d_fileno = (int)fileno;
 2378                                 dp->d_namlen = len;
 2379                                 dp->d_reclen = tlen + DIRHDSIZ;
 2380                                 dp->d_type = DT_UNKNOWN;
 2381                                 blksiz += dp->d_reclen;
 2382                                 if (blksiz == DIRBLKSIZ)
 2383                                         blksiz = 0;
 2384                                 uiop->uio_offset += DIRHDSIZ;
 2385                                 uiop->uio_resid -= DIRHDSIZ;
 2386                                 uiop->uio_iov->iov_base =
 2387                                     (char *)uiop->uio_iov->iov_base + DIRHDSIZ;
 2388                                 uiop->uio_iov->iov_len -= DIRHDSIZ;
 2389                                 nfsm_mtouio(uiop, len);
 2390                                 cp = uiop->uio_iov->iov_base;
 2391                                 tlen -= len;
 2392                                 *cp = '\0';     /* null terminate */
 2393                                 uiop->uio_iov->iov_base =
 2394                                     (char *)uiop->uio_iov->iov_base + tlen;
 2395                                 uiop->uio_iov->iov_len -= tlen;
 2396                                 uiop->uio_offset += tlen;
 2397                                 uiop->uio_resid -= tlen;
 2398                         } else
 2399                                 nfsm_adv(nfsm_rndup(len));
 2400                         if (v3) {
 2401                                 tl = nfsm_dissect(u_int32_t *,
 2402                                     3 * NFSX_UNSIGNED);
 2403                         } else {
 2404                                 tl = nfsm_dissect(u_int32_t *,
 2405                                     2 * NFSX_UNSIGNED);
 2406                         }
 2407                         if (bigenough) {
 2408                                 cookie.nfsuquad[0] = *tl++;
 2409                                 if (v3)
 2410                                         cookie.nfsuquad[1] = *tl++;
 2411                         } else if (v3)
 2412                                 tl += 2;
 2413                         else
 2414                                 tl++;
 2415                         more_dirs = fxdr_unsigned(int, *tl);
 2416                 }
 2417                 /*
 2418                  * If at end of rpc data, get the eof boolean
 2419                  */
 2420                 if (!more_dirs) {
 2421                         tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
 2422                         more_dirs = (fxdr_unsigned(int, *tl) == 0);
 2423                 }
 2424                 m_freem(mrep);
 2425         }
 2426         /*
 2427          * Fill last record, iff any, out to a multiple of DIRBLKSIZ
 2428          * by increasing d_reclen for the last record.
 2429          */
 2430         if (blksiz > 0) {
 2431                 left = DIRBLKSIZ - blksiz;
 2432                 dp->d_reclen += left;
 2433                 uiop->uio_iov->iov_base =
 2434                     (char *)uiop->uio_iov->iov_base + left;
 2435                 uiop->uio_iov->iov_len -= left;
 2436                 uiop->uio_offset += left;
 2437                 uiop->uio_resid -= left;
 2438         }
 2439 
 2440         /*
 2441          * We are now either at the end of the directory or have filled the
 2442          * block.
 2443          */
 2444         if (bigenough)
 2445                 dnp->n_direofoffset = uiop->uio_offset;
 2446         else {
 2447                 if (uiop->uio_resid > 0)
 2448                         nfs_printf("EEK! readdirrpc resid > 0\n");
 2449                 nfs_dircookie_lock(dnp);
 2450                 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 1);
 2451                 *cookiep = cookie;
 2452                 nfs_dircookie_unlock(dnp);
 2453         }
 2454 nfsmout:
 2455         return (error);
 2456 }
 2457 
 2458 /*
 2459  * NFS V3 readdir plus RPC. Used in place of nfs_readdirrpc().
 2460  */
 2461 int
 2462 nfs_readdirplusrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred)
 2463 {
 2464         int len, left;
 2465         struct dirent *dp;
 2466         u_int32_t *tl;
 2467         caddr_t cp;
 2468         struct vnode *newvp;
 2469         nfsuint64 *cookiep;
 2470         caddr_t bpos, dpos, dpossav1, dpossav2;
 2471         struct mbuf *mreq, *mrep, *md, *mb, *mdsav1, *mdsav2;
 2472         struct nameidata nami, *ndp = &nami;
 2473         struct componentname *cnp = &ndp->ni_cnd;
 2474         nfsuint64 cookie;
 2475         struct nfsmount *nmp = VFSTONFS(vp->v_mount);
 2476         struct nfsnode *dnp = VTONFS(vp), *np;
 2477         nfsfh_t *fhp;
 2478         u_quad_t fileno;
 2479         int error = 0, tlen, more_dirs = 1, blksiz = 0, doit, bigenough = 1, i;
 2480         int attrflag, fhsize;
 2481 
 2482 #ifndef nolint
 2483         dp = NULL;
 2484 #endif
 2485         KASSERT(uiop->uio_iovcnt == 1 &&
 2486             (uiop->uio_offset & (DIRBLKSIZ - 1)) == 0 &&
 2487             (uiop->uio_resid & (DIRBLKSIZ - 1)) == 0,
 2488             ("nfs readdirplusrpc bad uio"));
 2489         ndp->ni_dvp = vp;
 2490         newvp = NULLVP;
 2491 
 2492         /*
 2493          * If there is no cookie, assume directory was stale.
 2494          */
 2495         nfs_dircookie_lock(dnp);
 2496         cookiep = nfs_getcookie(dnp, uiop->uio_offset, 0);
 2497         if (cookiep) {
 2498                 cookie = *cookiep;
 2499                 nfs_dircookie_unlock(dnp);
 2500         } else {
 2501                 nfs_dircookie_unlock(dnp);
 2502                 return (NFSERR_BAD_COOKIE);
 2503         }
 2504         /*
 2505          * Loop around doing readdir rpc's of size nm_readdirsize
 2506          * truncated to a multiple of DIRBLKSIZ.
 2507          * The stopping criteria is EOF or buffer full.
 2508          */
 2509         while (more_dirs && bigenough) {
 2510                 nfsstats.rpccnt[NFSPROC_READDIRPLUS]++;
 2511                 mreq = nfsm_reqhead(vp, NFSPROC_READDIRPLUS,
 2512                         NFSX_FH(1) + 6 * NFSX_UNSIGNED);
 2513                 mb = mreq;
 2514                 bpos = mtod(mb, caddr_t);
 2515                 nfsm_fhtom(vp, 1);
 2516                 tl = nfsm_build(u_int32_t *, 6 * NFSX_UNSIGNED);
 2517                 *tl++ = cookie.nfsuquad[0];
 2518                 *tl++ = cookie.nfsuquad[1];
 2519                 mtx_lock(&dnp->n_mtx);
 2520                 *tl++ = dnp->n_cookieverf.nfsuquad[0];
 2521                 *tl++ = dnp->n_cookieverf.nfsuquad[1];
 2522                 mtx_unlock(&dnp->n_mtx);
 2523                 *tl++ = txdr_unsigned(nmp->nm_readdirsize);
 2524                 *tl = txdr_unsigned(nmp->nm_rsize);
 2525                 nfsm_request(vp, NFSPROC_READDIRPLUS, uiop->uio_td, cred);
 2526                 nfsm_postop_attr(vp, attrflag);
 2527                 if (error) {
 2528                         m_freem(mrep);
 2529                         goto nfsmout;
 2530                 }
 2531                 tl = nfsm_dissect(u_int32_t *, 3 * NFSX_UNSIGNED);
 2532                 mtx_lock(&dnp->n_mtx);
 2533                 dnp->n_cookieverf.nfsuquad[0] = *tl++;
 2534                 dnp->n_cookieverf.nfsuquad[1] = *tl++;
 2535                 mtx_unlock(&dnp->n_mtx);
 2536                 more_dirs = fxdr_unsigned(int, *tl);
 2537 
 2538                 /* loop thru the dir entries, doctoring them to 4bsd form */
 2539                 while (more_dirs && bigenough) {
 2540                         tl = nfsm_dissect(u_int32_t *, 3 * NFSX_UNSIGNED);
 2541                         fileno = fxdr_hyper(tl);
 2542                         len = fxdr_unsigned(int, *(tl + 2));
 2543                         if (len <= 0 || len > NFS_MAXNAMLEN) {
 2544                                 error = EBADRPC;
 2545                                 m_freem(mrep);
 2546                                 goto nfsmout;
 2547                         }
 2548                         tlen = nfsm_rndup(len);
 2549                         if (tlen == len)
 2550                                 tlen += 4;      /* To ensure null termination*/
 2551                         left = DIRBLKSIZ - blksiz;
 2552                         if ((tlen + DIRHDSIZ) > left) {
 2553                                 dp->d_reclen += left;
 2554                                 uiop->uio_iov->iov_base =
 2555                                     (char *)uiop->uio_iov->iov_base + left;
 2556                                 uiop->uio_iov->iov_len -= left;
 2557                                 uiop->uio_offset += left;
 2558                                 uiop->uio_resid -= left;
 2559                                 blksiz = 0;
 2560                         }
 2561                         if ((tlen + DIRHDSIZ) > uiop->uio_resid)
 2562                                 bigenough = 0;
 2563                         if (bigenough) {
 2564                                 dp = (struct dirent *)uiop->uio_iov->iov_base;
 2565                                 dp->d_fileno = (int)fileno;
 2566                                 dp->d_namlen = len;
 2567                                 dp->d_reclen = tlen + DIRHDSIZ;
 2568                                 dp->d_type = DT_UNKNOWN;
 2569                                 blksiz += dp->d_reclen;
 2570                                 if (blksiz == DIRBLKSIZ)
 2571                                         blksiz = 0;
 2572                                 uiop->uio_offset += DIRHDSIZ;
 2573                                 uiop->uio_resid -= DIRHDSIZ;
 2574                                 uiop->uio_iov->iov_base =
 2575                                     (char *)uiop->uio_iov->iov_base + DIRHDSIZ;
 2576                                 uiop->uio_iov->iov_len -= DIRHDSIZ;
 2577                                 cnp->cn_nameptr = uiop->uio_iov->iov_base;
 2578                                 cnp->cn_namelen = len;
 2579                                 nfsm_mtouio(uiop, len);
 2580                                 cp = uiop->uio_iov->iov_base;
 2581                                 tlen -= len;
 2582                                 *cp = '\0';
 2583                                 uiop->uio_iov->iov_base =
 2584                                     (char *)uiop->uio_iov->iov_base + tlen;
 2585                                 uiop->uio_iov->iov_len -= tlen;
 2586                                 uiop->uio_offset += tlen;
 2587                                 uiop->uio_resid -= tlen;
 2588                         } else
 2589                                 nfsm_adv(nfsm_rndup(len));
 2590                         tl = nfsm_dissect(u_int32_t *, 3 * NFSX_UNSIGNED);
 2591                         if (bigenough) {
 2592                                 cookie.nfsuquad[0] = *tl++;
 2593                                 cookie.nfsuquad[1] = *tl++;
 2594                         } else
 2595                                 tl += 2;
 2596 
 2597                         /*
 2598                          * Since the attributes are before the file handle
 2599                          * (sigh), we must skip over the attributes and then
 2600                          * come back and get them.
 2601                          */
 2602                         attrflag = fxdr_unsigned(int, *tl);
 2603                         if (attrflag) {
 2604                             dpossav1 = dpos;
 2605                             mdsav1 = md;
 2606                             nfsm_adv(NFSX_V3FATTR);
 2607                             tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
 2608                             doit = fxdr_unsigned(int, *tl);
 2609                             /*
 2610                              * Skip loading the attrs for "..". There's a 
 2611                              * race between loading the attrs here and 
 2612                              * lookups that look for the directory currently
 2613                              * being read (in the parent). We try to acquire
 2614                              * the exclusive lock on ".." here, owning the 
 2615                              * lock on the directory being read. Lookup will
 2616                              * hold the lock on ".." and try to acquire the 
 2617                              * lock on the directory being read.
 2618                              * 
 2619                              * There are other ways of fixing this, one would
 2620                              * be to do a trylock on the ".." vnode and skip
 2621                              * loading the attrs on ".." if it happens to be 
 2622                              * locked by another process. But skipping the
 2623                              * attrload on ".." seems the easiest option.
 2624                              */
 2625                             if (strcmp(dp->d_name, "..") == 0) {
 2626                                     doit = 0;
 2627                                     /*
 2628                                      * We've already skipped over the attrs, 
 2629                                      * skip over the filehandle. And store d_type
 2630                                      * as VDIR.
 2631                                      */
 2632                                     tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
 2633                                     i = fxdr_unsigned(int, *tl);
 2634                                     nfsm_adv(nfsm_rndup(i));
 2635                                     dp->d_type = IFTODT(VTTOIF(VDIR));
 2636                             }       
 2637                             if (doit) {
 2638                                 nfsm_getfh(fhp, fhsize, 1);
 2639                                 if (NFS_CMPFH(dnp, fhp, fhsize)) {
 2640                                     VREF(vp);
 2641                                     newvp = vp;
 2642                                     np = dnp;
 2643                                 } else {
 2644                                     error = nfs_nget(vp->v_mount, fhp,
 2645                                         fhsize, &np, LK_EXCLUSIVE);
 2646                                     if (error)
 2647                                         doit = 0;
 2648                                     else
 2649                                         newvp = NFSTOV(np);
 2650                                 }
 2651                             }
 2652                             if (doit && bigenough) {
 2653                                 dpossav2 = dpos;
 2654                                 dpos = dpossav1;
 2655                                 mdsav2 = md;
 2656                                 md = mdsav1;
 2657                                 nfsm_loadattr(newvp, NULL);
 2658                                 dpos = dpossav2;
 2659                                 md = mdsav2;
 2660                                 dp->d_type =
 2661                                     IFTODT(VTTOIF(np->n_vattr.va_type));
 2662                                 ndp->ni_vp = newvp;
 2663                                 /*
 2664                                  * Update n_ctime so subsequent lookup
 2665                                  * doesn't purge entry.
 2666                                  */
 2667                                 np->n_ctime = np->n_vattr.va_ctime;
 2668                                 cache_enter(ndp->ni_dvp, ndp->ni_vp, cnp);
 2669                             }
 2670                         } else {
 2671                             /* Just skip over the file handle */
 2672                             tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
 2673                             i = fxdr_unsigned(int, *tl);
 2674                             if (i) {
 2675                                     tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
 2676                                     fhsize = fxdr_unsigned(int, *tl);
 2677                                     nfsm_adv(nfsm_rndup(fhsize));
 2678                             }
 2679                         }
 2680                         if (newvp != NULLVP) {
 2681                             if (newvp == vp)
 2682                                 vrele(newvp);
 2683                             else
 2684                                 vput(newvp);
 2685                             newvp = NULLVP;
 2686                         }
 2687                         tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
 2688                         more_dirs = fxdr_unsigned(int, *tl);
 2689                 }
 2690                 /*
 2691                  * If at end of rpc data, get the eof boolean
 2692                  */
 2693                 if (!more_dirs) {
 2694                         tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
 2695                         more_dirs = (fxdr_unsigned(int, *tl) == 0);
 2696                 }
 2697                 m_freem(mrep);
 2698         }
 2699         /*
 2700          * Fill last record, iff any, out to a multiple of DIRBLKSIZ
 2701          * by increasing d_reclen for the last record.
 2702          */
 2703         if (blksiz > 0) {
 2704                 left = DIRBLKSIZ - blksiz;
 2705                 dp->d_reclen += left;
 2706                 uiop->uio_iov->iov_base =
 2707                     (char *)uiop->uio_iov->iov_base + left;
 2708                 uiop->uio_iov->iov_len -= left;
 2709                 uiop->uio_offset += left;
 2710                 uiop->uio_resid -= left;
 2711         }
 2712 
 2713         /*
 2714          * We are now either at the end of the directory or have filled the
 2715          * block.
 2716          */
 2717         if (bigenough)
 2718                 dnp->n_direofoffset = uiop->uio_offset;
 2719         else {
 2720                 if (uiop->uio_resid > 0)
 2721                         nfs_printf("EEK! readdirplusrpc resid > 0\n");
 2722                 nfs_dircookie_lock(dnp);
 2723                 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 1);
 2724                 *cookiep = cookie;
 2725                 nfs_dircookie_unlock(dnp);
 2726         }
 2727 nfsmout:
 2728         if (newvp != NULLVP) {
 2729                 if (newvp == vp)
 2730                         vrele(newvp);
 2731                 else
 2732                         vput(newvp);
 2733                 newvp = NULLVP;
 2734         }
 2735         return (error);
 2736 }
 2737 
 2738 /*
 2739  * Silly rename. To make the NFS filesystem that is stateless look a little
 2740  * more like the "ufs" a remove of an active vnode is translated to a rename
 2741  * to a funny looking filename that is removed by nfs_inactive on the
 2742  * nfsnode. There is the potential for another process on a different client
 2743  * to create the same funny name between the nfs_lookitup() fails and the
 2744  * nfs_rename() completes, but...
 2745  */
 2746 static int
 2747 nfs_sillyrename(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
 2748 {
 2749         struct sillyrename *sp;
 2750         struct nfsnode *np;
 2751         int error;
 2752         short pid;
 2753         unsigned int lticks;
 2754 
 2755         cache_purge(dvp);
 2756         np = VTONFS(vp);
 2757         KASSERT(vp->v_type != VDIR, ("nfs: sillyrename dir"));
 2758         sp = malloc(sizeof (struct sillyrename),
 2759                 M_NFSREQ, M_WAITOK);
 2760         sp->s_cred = crhold(cnp->cn_cred);
 2761         sp->s_dvp = dvp;
 2762         sp->s_removeit = nfs_removeit;
 2763         VREF(dvp);
 2764 
 2765         /* 
 2766          * Fudge together a funny name.
 2767          * Changing the format of the funny name to accomodate more 
 2768          * sillynames per directory.
 2769          * The name is now changed to .nfs.<ticks>.<pid>.4, where ticks is 
 2770          * CPU ticks since boot.
 2771          */
 2772         pid = cnp->cn_thread->td_proc->p_pid;
 2773         lticks = (unsigned int)ticks;
 2774         for ( ; ; ) {
 2775                 sp->s_namlen = sprintf(sp->s_name, 
 2776                                        ".nfs.%08x.%04x4.4", lticks, 
 2777                                        pid);
 2778                 if (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
 2779                                  cnp->cn_thread, NULL))
 2780                         break;
 2781                 lticks++;
 2782         }
 2783         error = nfs_renameit(dvp, cnp, sp);
 2784         if (error)
 2785                 goto bad;
 2786         error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
 2787                 cnp->cn_thread, &np);
 2788         np->n_sillyrename = sp;
 2789         return (0);
 2790 bad:
 2791         vrele(sp->s_dvp);
 2792         crfree(sp->s_cred);
 2793         free((caddr_t)sp, M_NFSREQ);
 2794         return (error);
 2795 }
 2796 
 2797 /*
 2798  * Look up a file name and optionally either update the file handle or
 2799  * allocate an nfsnode, depending on the value of npp.
 2800  * npp == NULL  --> just do the lookup
 2801  * *npp == NULL --> allocate a new nfsnode and make sure attributes are
 2802  *                      handled too
 2803  * *npp != NULL --> update the file handle in the vnode
 2804  */
 2805 static int
 2806 nfs_lookitup(struct vnode *dvp, const char *name, int len, struct ucred *cred,
 2807     struct thread *td, struct nfsnode **npp)
 2808 {
 2809         struct vnode *newvp = NULL;
 2810         struct nfsnode *np, *dnp = VTONFS(dvp);
 2811         caddr_t bpos, dpos;
 2812         int error = 0, fhlen, attrflag;
 2813         struct mbuf *mreq, *mrep, *md, *mb;
 2814         nfsfh_t *nfhp;
 2815         int v3 = NFS_ISV3(dvp);
 2816 
 2817         nfsstats.rpccnt[NFSPROC_LOOKUP]++;
 2818         mreq = nfsm_reqhead(dvp, NFSPROC_LOOKUP,
 2819                 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
 2820         mb = mreq;
 2821         bpos = mtod(mb, caddr_t);
 2822         nfsm_fhtom(dvp, v3);
 2823         nfsm_strtom(name, len, NFS_MAXNAMLEN);
 2824         nfsm_request(dvp, NFSPROC_LOOKUP, td, cred);
 2825         if (npp && !error) {
 2826                 nfsm_getfh(nfhp, fhlen, v3);
 2827                 if (*npp) {
 2828                     np = *npp;
 2829                     if (np->n_fhsize > NFS_SMALLFH && fhlen <= NFS_SMALLFH) {
 2830                         free((caddr_t)np->n_fhp, M_NFSBIGFH);
 2831                         np->n_fhp = &np->n_fh;
 2832                     } else if (np->n_fhsize <= NFS_SMALLFH && fhlen>NFS_SMALLFH)
 2833                         np->n_fhp =(nfsfh_t *)malloc(fhlen, M_NFSBIGFH, M_WAITOK);
 2834                     bcopy((caddr_t)nfhp, (caddr_t)np->n_fhp, fhlen);
 2835                     np->n_fhsize = fhlen;
 2836                     newvp = NFSTOV(np);
 2837                 } else if (NFS_CMPFH(dnp, nfhp, fhlen)) {
 2838                     VREF(dvp);
 2839                     newvp = dvp;
 2840                 } else {
 2841                     error = nfs_nget(dvp->v_mount, nfhp, fhlen, &np, LK_EXCLUSIVE);
 2842                     if (error) {
 2843                         m_freem(mrep);
 2844                         return (error);
 2845                     }
 2846                     newvp = NFSTOV(np);
 2847                 }
 2848                 if (v3) {
 2849                         nfsm_postop_attr(newvp, attrflag);
 2850                         if (!attrflag && *npp == NULL) {
 2851                                 m_freem(mrep);
 2852                                 if (newvp == dvp)
 2853                                         vrele(newvp);
 2854                                 else
 2855                                         vput(newvp);
 2856                                 return (ENOENT);
 2857                         }
 2858                 } else
 2859                         nfsm_loadattr(newvp, NULL);
 2860         }
 2861         m_freem(mrep);
 2862 nfsmout:
 2863         if (npp && *npp == NULL) {
 2864                 if (error) {
 2865                         if (newvp) {
 2866                                 if (newvp == dvp)
 2867                                         vrele(newvp);
 2868                                 else
 2869                                         vput(newvp);
 2870                         }
 2871                 } else
 2872                         *npp = np;
 2873         }
 2874         return (error);
 2875 }
 2876 
 2877 /*
 2878  * Nfs Version 3 commit rpc
 2879  */
 2880 int
 2881 nfs_commit(struct vnode *vp, u_quad_t offset, int cnt, struct ucred *cred,
 2882            struct thread *td)
 2883 {
 2884         u_int32_t *tl;
 2885         struct nfsmount *nmp = VFSTONFS(vp->v_mount);
 2886         caddr_t bpos, dpos;
 2887         int error = 0, wccflag = NFSV3_WCCRATTR;
 2888         struct mbuf *mreq, *mrep, *md, *mb;
 2889 
 2890         mtx_lock(&nmp->nm_mtx);
 2891         if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0) {
 2892                 mtx_unlock(&nmp->nm_mtx);
 2893                 return (0);
 2894         }
 2895         mtx_unlock(&nmp->nm_mtx);
 2896         nfsstats.rpccnt[NFSPROC_COMMIT]++;
 2897         mreq = nfsm_reqhead(vp, NFSPROC_COMMIT, NFSX_FH(1));
 2898         mb = mreq;
 2899         bpos = mtod(mb, caddr_t);
 2900         nfsm_fhtom(vp, 1);
 2901         tl = nfsm_build(u_int32_t *, 3 * NFSX_UNSIGNED);
 2902         txdr_hyper(offset, tl);
 2903         tl += 2;
 2904         *tl = txdr_unsigned(cnt);
 2905         nfsm_request(vp, NFSPROC_COMMIT, td, cred);
 2906         nfsm_wcc_data(vp, wccflag);
 2907         if (!error) {
 2908                 tl = nfsm_dissect(u_int32_t *, NFSX_V3WRITEVERF);
 2909                 if (bcmp((caddr_t)nmp->nm_verf, (caddr_t)tl,
 2910                         NFSX_V3WRITEVERF)) {
 2911                         bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
 2912                                 NFSX_V3WRITEVERF);
 2913                         error = NFSERR_STALEWRITEVERF;
 2914                 }
 2915         }
 2916         m_freem(mrep);
 2917 nfsmout:
 2918         return (error);
 2919 }
 2920 
 2921 /*
 2922  * Strategy routine.
 2923  * For async requests when nfsiod(s) are running, queue the request by
 2924  * calling nfs_asyncio(), otherwise just all nfs_doio() to do the
 2925  * request.
 2926  */
 2927 static int
 2928 nfs_strategy(struct vop_strategy_args *ap)
 2929 {
 2930         struct buf *bp = ap->a_bp;
 2931         struct ucred *cr;
 2932 
 2933         KASSERT(!(bp->b_flags & B_DONE),
 2934             ("nfs_strategy: buffer %p unexpectedly marked B_DONE", bp));
 2935         BUF_ASSERT_HELD(bp);
 2936 
 2937         if (bp->b_iocmd == BIO_READ)
 2938                 cr = bp->b_rcred;
 2939         else
 2940                 cr = bp->b_wcred;
 2941 
 2942         /*
 2943          * If the op is asynchronous and an i/o daemon is waiting
 2944          * queue the request, wake it up and wait for completion
 2945          * otherwise just do it ourselves.
 2946          */
 2947         if ((bp->b_flags & B_ASYNC) == 0 ||
 2948             nfs_asyncio(VFSTONFS(ap->a_vp->v_mount), bp, NOCRED, curthread))
 2949                 (void)nfs_doio(ap->a_vp, bp, cr, curthread);
 2950         return (0);
 2951 }
 2952 
 2953 /*
 2954  * fsync vnode op. Just call nfs_flush() with commit == 1.
 2955  */
 2956 /* ARGSUSED */
 2957 static int
 2958 nfs_fsync(struct vop_fsync_args *ap)
 2959 {
 2960 
 2961         return (nfs_flush(ap->a_vp, ap->a_waitfor, 1));
 2962 }
 2963 
 2964 /*
 2965  * Flush all the blocks associated with a vnode.
 2966  *      Walk through the buffer pool and push any dirty pages
 2967  *      associated with the vnode.
 2968  */
 2969 static int
 2970 nfs_flush(struct vnode *vp, int waitfor, int commit)
 2971 {
 2972         struct nfsnode *np = VTONFS(vp);
 2973         struct buf *bp;
 2974         int i;
 2975         struct buf *nbp;
 2976         struct nfsmount *nmp = VFSTONFS(vp->v_mount);
 2977         int error = 0, slptimeo = 0, slpflag = 0, retv, bvecpos;
 2978         int passone = 1;
 2979         u_quad_t off, endoff, toff;
 2980         struct ucred* wcred = NULL;
 2981         struct buf **bvec = NULL;
 2982         struct bufobj *bo;
 2983         struct thread *td = curthread;
 2984 #ifndef NFS_COMMITBVECSIZ
 2985 #define NFS_COMMITBVECSIZ       20
 2986 #endif
 2987         struct buf *bvec_on_stack[NFS_COMMITBVECSIZ];
 2988         int bvecsize = 0, bveccount;
 2989 
 2990         if (nmp->nm_flag & NFSMNT_INT)
 2991                 slpflag = NFS_PCATCH;
 2992         if (!commit)
 2993                 passone = 0;
 2994         bo = &vp->v_bufobj;
 2995         /*
 2996          * A b_flags == (B_DELWRI | B_NEEDCOMMIT) block has been written to the
 2997          * server, but has not been committed to stable storage on the server
 2998          * yet. On the first pass, the byte range is worked out and the commit
 2999          * rpc is done. On the second pass, nfs_writebp() is called to do the
 3000          * job.
 3001          */
 3002 again:
 3003         off = (u_quad_t)-1;
 3004         endoff = 0;
 3005         bvecpos = 0;
 3006         if (NFS_ISV3(vp) && commit) {
 3007                 if (bvec != NULL && bvec != bvec_on_stack)
 3008                         free(bvec, M_TEMP);
 3009                 /*
 3010                  * Count up how many buffers waiting for a commit.
 3011                  */
 3012                 bveccount = 0;
 3013                 BO_LOCK(bo);
 3014                 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
 3015                         if (!BUF_ISLOCKED(bp) &&
 3016                             (bp->b_flags & (B_DELWRI | B_NEEDCOMMIT))
 3017                                 == (B_DELWRI | B_NEEDCOMMIT))
 3018                                 bveccount++;
 3019                 }
 3020                 /*
 3021                  * Allocate space to remember the list of bufs to commit.  It is
 3022                  * important to use M_NOWAIT here to avoid a race with nfs_write.
 3023                  * If we can't get memory (for whatever reason), we will end up
 3024                  * committing the buffers one-by-one in the loop below.
 3025                  */
 3026                 if (bveccount > NFS_COMMITBVECSIZ) {
 3027                         /*
 3028                          * Release the vnode interlock to avoid a lock
 3029                          * order reversal.
 3030                          */
 3031                         BO_UNLOCK(bo);
 3032                         bvec = (struct buf **)
 3033                                 malloc(bveccount * sizeof(struct buf *),
 3034                                        M_TEMP, M_NOWAIT);
 3035                         BO_LOCK(bo);
 3036                         if (bvec == NULL) {
 3037                                 bvec = bvec_on_stack;
 3038                                 bvecsize = NFS_COMMITBVECSIZ;
 3039                         } else
 3040                                 bvecsize = bveccount;
 3041                 } else {
 3042                         bvec = bvec_on_stack;
 3043                         bvecsize = NFS_COMMITBVECSIZ;
 3044                 }
 3045                 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
 3046                         if (bvecpos >= bvecsize)
 3047                                 break;
 3048                         if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) {
 3049                                 nbp = TAILQ_NEXT(bp, b_bobufs);
 3050                                 continue;
 3051                         }
 3052                         if ((bp->b_flags & (B_DELWRI | B_NEEDCOMMIT)) !=
 3053                             (B_DELWRI | B_NEEDCOMMIT)) {
 3054                                 BUF_UNLOCK(bp);
 3055                                 nbp = TAILQ_NEXT(bp, b_bobufs);
 3056                                 continue;
 3057                         }
 3058                         BO_UNLOCK(bo);
 3059                         bremfree(bp);
 3060                         /*
 3061                          * Work out if all buffers are using the same cred
 3062                          * so we can deal with them all with one commit.
 3063                          *
 3064                          * NOTE: we are not clearing B_DONE here, so we have
 3065                          * to do it later on in this routine if we intend to
 3066                          * initiate I/O on the bp.
 3067                          *
 3068                          * Note: to avoid loopback deadlocks, we do not
 3069                          * assign b_runningbufspace.
 3070                          */
 3071                         if (wcred == NULL)
 3072                                 wcred = bp->b_wcred;
 3073                         else if (wcred != bp->b_wcred)
 3074                                 wcred = NOCRED;
 3075                         vfs_busy_pages(bp, 1);
 3076 
 3077                         BO_LOCK(bo);
 3078                         /*
 3079                          * bp is protected by being locked, but nbp is not
 3080                          * and vfs_busy_pages() may sleep.  We have to
 3081                          * recalculate nbp.
 3082                          */
 3083                         nbp = TAILQ_NEXT(bp, b_bobufs);
 3084 
 3085                         /*
 3086                          * A list of these buffers is kept so that the
 3087                          * second loop knows which buffers have actually
 3088                          * been committed. This is necessary, since there
 3089                          * may be a race between the commit rpc and new
 3090                          * uncommitted writes on the file.
 3091                          */
 3092                         bvec[bvecpos++] = bp;
 3093                         toff = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
 3094                                 bp->b_dirtyoff;
 3095                         if (toff < off)
 3096                                 off = toff;
 3097                         toff += (u_quad_t)(bp->b_dirtyend - bp->b_dirtyoff);
 3098                         if (toff > endoff)
 3099                                 endoff = toff;
 3100                 }
 3101                 BO_UNLOCK(bo);
 3102         }
 3103         if (bvecpos > 0) {
 3104                 /*
 3105                  * Commit data on the server, as required.
 3106                  * If all bufs are using the same wcred, then use that with
 3107                  * one call for all of them, otherwise commit each one
 3108                  * separately.
 3109                  */
 3110                 if (wcred != NOCRED)
 3111                         retv = nfs_commit(vp, off, (int)(endoff - off),
 3112                                           wcred, td);
 3113                 else {
 3114                         retv = 0;
 3115                         for (i = 0; i < bvecpos; i++) {
 3116                                 off_t off, size;
 3117                                 bp = bvec[i];
 3118                                 off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
 3119                                         bp->b_dirtyoff;
 3120                                 size = (u_quad_t)(bp->b_dirtyend
 3121                                                   - bp->b_dirtyoff);
 3122                                 retv = nfs_commit(vp, off, (int)size,
 3123                                                   bp->b_wcred, td);
 3124                                 if (retv) break;
 3125                         }
 3126                 }
 3127 
 3128                 if (retv == NFSERR_STALEWRITEVERF)
 3129                         nfs_clearcommit(vp->v_mount);
 3130 
 3131                 /*
 3132                  * Now, either mark the blocks I/O done or mark the
 3133                  * blocks dirty, depending on whether the commit
 3134                  * succeeded.
 3135                  */
 3136                 for (i = 0; i < bvecpos; i++) {
 3137                         bp = bvec[i];
 3138                         bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
 3139                         if (retv) {
 3140                                 /*
 3141                                  * Error, leave B_DELWRI intact
 3142                                  */
 3143                                 vfs_unbusy_pages(bp);
 3144                                 brelse(bp);
 3145                         } else {
 3146                                 /*
 3147                                  * Success, remove B_DELWRI ( bundirty() ).
 3148                                  *
 3149                                  * b_dirtyoff/b_dirtyend seem to be NFS
 3150                                  * specific.  We should probably move that
 3151                                  * into bundirty(). XXX
 3152                                  */
 3153                                 bufobj_wref(bo);
 3154                                 bp->b_flags |= B_ASYNC;
 3155                                 bundirty(bp);
 3156                                 bp->b_flags &= ~B_DONE;
 3157                                 bp->b_ioflags &= ~BIO_ERROR;
 3158                                 bp->b_dirtyoff = bp->b_dirtyend = 0;
 3159                                 bufdone(bp);
 3160                         }
 3161                 }
 3162         }
 3163 
 3164         /*
 3165          * Start/do any write(s) that are required.
 3166          */
 3167 loop:
 3168         BO_LOCK(bo);
 3169         TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
 3170                 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) {
 3171                         if (waitfor != MNT_WAIT || passone)
 3172                                 continue;
 3173 
 3174                         error = BUF_TIMELOCK(bp,
 3175                             LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
 3176                             BO_MTX(bo), "nfsfsync", slpflag, slptimeo);
 3177                         if (error == 0) {
 3178                                 BUF_UNLOCK(bp);
 3179                                 goto loop;
 3180                         }
 3181                         if (error == ENOLCK) {
 3182                                 error = 0;
 3183                                 goto loop;
 3184                         }
 3185                         if (nfs_sigintr(nmp, td)) {
 3186                                 error = EINTR;
 3187                                 goto done;
 3188                         }
 3189                         if (slpflag & PCATCH) {
 3190                                 slpflag = 0;
 3191                                 slptimeo = 2 * hz;
 3192                         }
 3193                         goto loop;
 3194                 }
 3195                 if ((bp->b_flags & B_DELWRI) == 0)
 3196                         panic("nfs_fsync: not dirty");
 3197                 if ((passone || !commit) && (bp->b_flags & B_NEEDCOMMIT)) {
 3198                         BUF_UNLOCK(bp);
 3199                         continue;
 3200                 }
 3201                 BO_UNLOCK(bo);
 3202                 bremfree(bp);
 3203                 if (passone || !commit)
 3204                     bp->b_flags |= B_ASYNC;
 3205                 else
 3206                     bp->b_flags |= B_ASYNC;
 3207                 bwrite(bp);
 3208                 if (nfs_sigintr(nmp, td)) {
 3209                         error = EINTR;
 3210                         goto done;
 3211                 }
 3212                 goto loop;
 3213         }
 3214         if (passone) {
 3215                 passone = 0;
 3216                 BO_UNLOCK(bo);
 3217                 goto again;
 3218         }
 3219         if (waitfor == MNT_WAIT) {
 3220                 while (bo->bo_numoutput) {
 3221                         error = bufobj_wwait(bo, slpflag, slptimeo);
 3222                         if (error) {
 3223                             BO_UNLOCK(bo);
 3224                             error = nfs_sigintr(nmp, td);
 3225                             if (error)
 3226                                 goto done;
 3227                             if (slpflag & PCATCH) {
 3228                                 slpflag = 0;
 3229                                 slptimeo = 2 * hz;
 3230                             }
 3231                             BO_LOCK(bo);
 3232                         }
 3233                 }
 3234                 if (bo->bo_dirty.bv_cnt != 0 && commit) {
 3235                         BO_UNLOCK(bo);
 3236                         goto loop;
 3237                 }
 3238                 /*
 3239                  * Wait for all the async IO requests to drain
 3240                  */
 3241                 BO_UNLOCK(bo);
 3242                 mtx_lock(&np->n_mtx);
 3243                 while (np->n_directio_asyncwr > 0) {
 3244                         np->n_flag |= NFSYNCWAIT;
 3245                         error = nfs_msleep(td, (caddr_t)&np->n_directio_asyncwr,
 3246                                            &np->n_mtx, slpflag | (PRIBIO + 1), 
 3247                                            "nfsfsync", 0);
 3248                         if (error) {
 3249                                 if (nfs_sigintr(nmp, td)) {
 3250                                         mtx_unlock(&np->n_mtx);
 3251                                         error = EINTR;  
 3252                                         goto done;
 3253                                 }
 3254                         }
 3255                 }
 3256                 mtx_unlock(&np->n_mtx);
 3257         } else
 3258                 BO_UNLOCK(bo);
 3259         mtx_lock(&np->n_mtx);
 3260         if (np->n_flag & NWRITEERR) {
 3261                 error = np->n_error;
 3262                 np->n_flag &= ~NWRITEERR;
 3263         }
 3264         if (commit && bo->bo_dirty.bv_cnt == 0 &&
 3265             bo->bo_numoutput == 0 && np->n_directio_asyncwr == 0)
 3266                 np->n_flag &= ~NMODIFIED;
 3267         mtx_unlock(&np->n_mtx);
 3268 done:
 3269         if (bvec != NULL && bvec != bvec_on_stack)
 3270                 free(bvec, M_TEMP);
 3271         return (error);
 3272 }
 3273 
 3274 /*
 3275  * NFS advisory byte-level locks.
 3276  */
 3277 static int
 3278 nfs_advlock(struct vop_advlock_args *ap)
 3279 {
 3280         struct vnode *vp = ap->a_vp;
 3281         u_quad_t size;
 3282         int error;
 3283 
 3284         error = vn_lock(vp, LK_SHARED);
 3285         if (error)
 3286                 return (error);
 3287         if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NOLOCKD) != 0) {
 3288                 size = VTONFS(vp)->n_size;
 3289                 VOP_UNLOCK(vp, 0);
 3290                 error = lf_advlock(ap, &(vp->v_lockf), size);
 3291         } else {
 3292                 if (nfs_advlock_p)
 3293                         error = nfs_advlock_p(ap);
 3294                 else
 3295                         error = ENOLCK;
 3296         }
 3297 
 3298         return (error);
 3299 }
 3300 
 3301 /*
 3302  * NFS advisory byte-level locks.
 3303  */
 3304 static int
 3305 nfs_advlockasync(struct vop_advlockasync_args *ap)
 3306 {
 3307         struct vnode *vp = ap->a_vp;
 3308         u_quad_t size;
 3309         int error;
 3310         
 3311         error = vn_lock(vp, LK_SHARED);
 3312         if (error)
 3313                 return (error);
 3314         if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NOLOCKD) != 0) {
 3315                 size = VTONFS(vp)->n_size;
 3316                 VOP_UNLOCK(vp, 0);
 3317                 error = lf_advlockasync(ap, &(vp->v_lockf), size);
 3318         } else {
 3319                 VOP_UNLOCK(vp, 0);
 3320                 error = EOPNOTSUPP;
 3321         }
 3322         return (error);
 3323 }
 3324 
 3325 /*
 3326  * Print out the contents of an nfsnode.
 3327  */
 3328 static int
 3329 nfs_print(struct vop_print_args *ap)
 3330 {
 3331         struct vnode *vp = ap->a_vp;
 3332         struct nfsnode *np = VTONFS(vp);
 3333 
 3334         nfs_printf("\tfileid %ld fsid 0x%x",
 3335            np->n_vattr.va_fileid, np->n_vattr.va_fsid);
 3336         if (vp->v_type == VFIFO)
 3337                 fifo_printinfo(vp);
 3338         printf("\n");
 3339         return (0);
 3340 }
 3341 
 3342 /*
 3343  * This is the "real" nfs::bwrite(struct buf*).
 3344  * We set B_CACHE if this is a VMIO buffer.
 3345  */
 3346 int
 3347 nfs_writebp(struct buf *bp, int force __unused, struct thread *td)
 3348 {
 3349         int s;
 3350         int oldflags = bp->b_flags;
 3351 #if 0
 3352         int retv = 1;
 3353         off_t off;
 3354 #endif
 3355 
 3356         BUF_ASSERT_HELD(bp);
 3357 
 3358         if (bp->b_flags & B_INVAL) {
 3359                 brelse(bp);
 3360                 return(0);
 3361         }
 3362 
 3363         bp->b_flags |= B_CACHE;
 3364 
 3365         /*
 3366          * Undirty the bp.  We will redirty it later if the I/O fails.
 3367          */
 3368 
 3369         s = splbio();
 3370         bundirty(bp);
 3371         bp->b_flags &= ~B_DONE;
 3372         bp->b_ioflags &= ~BIO_ERROR;
 3373         bp->b_iocmd = BIO_WRITE;
 3374 
 3375         bufobj_wref(bp->b_bufobj);
 3376         curthread->td_ru.ru_oublock++;
 3377         splx(s);
 3378 
 3379         /*
 3380          * Note: to avoid loopback deadlocks, we do not
 3381          * assign b_runningbufspace.
 3382          */
 3383         vfs_busy_pages(bp, 1);
 3384 
 3385         BUF_KERNPROC(bp);
 3386         bp->b_iooffset = dbtob(bp->b_blkno);
 3387         bstrategy(bp);
 3388 
 3389         if( (oldflags & B_ASYNC) == 0) {
 3390                 int rtval = bufwait(bp);
 3391 
 3392                 if (oldflags & B_DELWRI) {
 3393                         s = splbio();
 3394                         reassignbuf(bp);
 3395                         splx(s);
 3396                 }
 3397                 brelse(bp);
 3398                 return (rtval);
 3399         }
 3400 
 3401         return (0);
 3402 }
 3403 
 3404 /*
 3405  * nfs special file access vnode op.
 3406  * Essentially just get vattr and then imitate iaccess() since the device is
 3407  * local to the client.
 3408  */
 3409 static int
 3410 nfsspec_access(struct vop_access_args *ap)
 3411 {
 3412         struct vattr *vap;
 3413         struct ucred *cred = ap->a_cred;
 3414         struct vnode *vp = ap->a_vp;
 3415         accmode_t accmode = ap->a_accmode;
 3416         struct vattr vattr;
 3417         int error;
 3418 
 3419         /*
 3420          * Disallow write attempts on filesystems mounted read-only;
 3421          * unless the file is a socket, fifo, or a block or character
 3422          * device resident on the filesystem.
 3423          */
 3424         if ((accmode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
 3425                 switch (vp->v_type) {
 3426                 case VREG:
 3427                 case VDIR:
 3428                 case VLNK:
 3429                         return (EROFS);
 3430                 default:
 3431                         break;
 3432                 }
 3433         }
 3434         vap = &vattr;
 3435         error = VOP_GETATTR(vp, vap, cred);
 3436         if (error)
 3437                 goto out;
 3438         error  = vaccess(vp->v_type, vap->va_mode, vap->va_uid, vap->va_gid,
 3439                          accmode, cred, NULL);
 3440 out:
 3441         return error;
 3442 }
 3443 
 3444 /*
 3445  * Read wrapper for fifos.
 3446  */
 3447 static int
 3448 nfsfifo_read(struct vop_read_args *ap)
 3449 {
 3450         struct nfsnode *np = VTONFS(ap->a_vp);
 3451         int error;
 3452 
 3453         /*
 3454          * Set access flag.
 3455          */
 3456         mtx_lock(&np->n_mtx);
 3457         np->n_flag |= NACC;
 3458         getnanotime(&np->n_atim);
 3459         mtx_unlock(&np->n_mtx);
 3460         error = fifo_specops.vop_read(ap);
 3461         return error;   
 3462 }
 3463 
 3464 /*
 3465  * Write wrapper for fifos.
 3466  */
 3467 static int
 3468 nfsfifo_write(struct vop_write_args *ap)
 3469 {
 3470         struct nfsnode *np = VTONFS(ap->a_vp);
 3471 
 3472         /*
 3473          * Set update flag.
 3474          */
 3475         mtx_lock(&np->n_mtx);
 3476         np->n_flag |= NUPD;
 3477         getnanotime(&np->n_mtim);
 3478         mtx_unlock(&np->n_mtx);
 3479         return(fifo_specops.vop_write(ap));
 3480 }
 3481 
 3482 /*
 3483  * Close wrapper for fifos.
 3484  *
 3485  * Update the times on the nfsnode then do fifo close.
 3486  */
 3487 static int
 3488 nfsfifo_close(struct vop_close_args *ap)
 3489 {
 3490         struct vnode *vp = ap->a_vp;
 3491         struct nfsnode *np = VTONFS(vp);
 3492         struct vattr vattr;
 3493         struct timespec ts;
 3494 
 3495         mtx_lock(&np->n_mtx);
 3496         if (np->n_flag & (NACC | NUPD)) {
 3497                 getnanotime(&ts);
 3498                 if (np->n_flag & NACC)
 3499                         np->n_atim = ts;
 3500                 if (np->n_flag & NUPD)
 3501                         np->n_mtim = ts;
 3502                 np->n_flag |= NCHG;
 3503                 if (vrefcnt(vp) == 1 &&
 3504                     (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
 3505                         VATTR_NULL(&vattr);
 3506                         if (np->n_flag & NACC)
 3507                                 vattr.va_atime = np->n_atim;
 3508                         if (np->n_flag & NUPD)
 3509                                 vattr.va_mtime = np->n_mtim;
 3510                         mtx_unlock(&np->n_mtx);
 3511                         (void)VOP_SETATTR(vp, &vattr, ap->a_cred);
 3512                         goto out;
 3513                 }
 3514         }
 3515         mtx_unlock(&np->n_mtx);
 3516 out:
 3517         return (fifo_specops.vop_close(ap));
 3518 }
 3519 
 3520 /*
 3521  * Just call nfs_writebp() with the force argument set to 1.
 3522  *
 3523  * NOTE: B_DONE may or may not be set in a_bp on call.
 3524  */
 3525 static int
 3526 nfs_bwrite(struct buf *bp)
 3527 {
 3528 
 3529         return (nfs_writebp(bp, 1, curthread));
 3530 }
 3531 
 3532 struct buf_ops buf_ops_nfs = {
 3533         .bop_name       =       "buf_ops_nfs",
 3534         .bop_write      =       nfs_bwrite,
 3535         .bop_strategy   =       bufstrategy,
 3536         .bop_sync       =       bufsync,
 3537         .bop_bdflush    =       bufbdflush,
 3538 };

Cache object: ec3dddef9b5f7bbd6157fe9c25fd507d


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.