FreeBSD/Linux Kernel Cross Reference
sys/nfsclient/nfs_vnops.c


    1 /*-
    2  * Copyright (c) 1989, 1993
    3  *      The Regents of the University of California.  All rights reserved.
    4  *
    5  * This code is derived from software contributed to Berkeley by
    6  * Rick Macklem at The University of Guelph.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  * 4. Neither the name of the University nor the names of its contributors
   17  *    may be used to endorse or promote products derived from this software
   18  *    without specific prior written permission.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   30  * SUCH DAMAGE.
   31  *
   32  *      @(#)nfs_vnops.c 8.16 (Berkeley) 5/27/95
   33  */
   34 
   35 #include <sys/cdefs.h>
   36 __FBSDID("$FreeBSD: releng/8.0/sys/nfsclient/nfs_vnops.c 198424 2009-10-23 19:52:29Z jhb $");
   37 
   38 /*
   39  * vnode op calls for Sun NFS version 2 and 3
   40  */
   41 
   42 #include "opt_inet.h"
   43 #include "opt_kdtrace.h"
   44 
   45 #include <sys/param.h>
   46 #include <sys/kernel.h>
   47 #include <sys/systm.h>
   48 #include <sys/resourcevar.h>
   49 #include <sys/proc.h>
   50 #include <sys/mount.h>
   51 #include <sys/bio.h>
   52 #include <sys/buf.h>
   53 #include <sys/jail.h>
   54 #include <sys/malloc.h>
   55 #include <sys/mbuf.h>
   56 #include <sys/namei.h>
   57 #include <sys/socket.h>
   58 #include <sys/vnode.h>
   59 #include <sys/dirent.h>
   60 #include <sys/fcntl.h>
   61 #include <sys/lockf.h>
   62 #include <sys/stat.h>
   63 #include <sys/sysctl.h>
   64 #include <sys/signalvar.h>
   65 
   66 #include <vm/vm.h>
   67 #include <vm/vm_object.h>
   68 #include <vm/vm_extern.h>
    69
   70 
   71 #include <fs/fifofs/fifo.h>
   72 
   73 #include <nfs/nfsproto.h>
   74 #include <nfsclient/nfs.h>
   75 #include <nfsclient/nfsnode.h>
   76 #include <nfsclient/nfsmount.h>
   77 #include <nfsclient/nfs_kdtrace.h>
   78 #include <nfsclient/nfs_lock.h>
   79 #include <nfs/xdr_subs.h>
   80 #include <nfsclient/nfsm_subs.h>
   81 
   82 #include <net/if.h>
   83 #include <netinet/in.h>
   84 #include <netinet/in_var.h>
   85 
   86 #include <machine/stdarg.h>
   87 
   88 #ifdef KDTRACE_HOOKS
   89 #include <sys/dtrace_bsd.h>
   90 
   91 dtrace_nfsclient_accesscache_flush_probe_func_t
   92     dtrace_nfsclient_accesscache_flush_done_probe;
   93 uint32_t nfsclient_accesscache_flush_done_id;
   94 
   95 dtrace_nfsclient_accesscache_get_probe_func_t
   96     dtrace_nfsclient_accesscache_get_hit_probe,
   97     dtrace_nfsclient_accesscache_get_miss_probe;
   98 uint32_t nfsclient_accesscache_get_hit_id;
   99 uint32_t nfsclient_accesscache_get_miss_id;
  100 
  101 dtrace_nfsclient_accesscache_load_probe_func_t
  102     dtrace_nfsclient_accesscache_load_done_probe;
  103 uint32_t nfsclient_accesscache_load_done_id;
   104 #endif /* KDTRACE_HOOKS */
  105 
  106 /* Defs */
  107 #define TRUE    1
  108 #define FALSE   0
  109 
  110 /*
  111  * Ifdef for FreeBSD-current merged buffer cache. It is unfortunate that these
  112  * calls are not in getblk() and brelse() so that they would not be necessary
  113  * here.
  114  */
  115 #ifndef B_VMIO
  116 #define vfs_busy_pages(bp, f)
  117 #endif
  118 
  119 static vop_read_t       nfsfifo_read;
  120 static vop_write_t      nfsfifo_write;
  121 static vop_close_t      nfsfifo_close;
  122 static int      nfs_flush(struct vnode *, int, int);
  123 static int      nfs_setattrrpc(struct vnode *, struct vattr *, struct ucred *);
  124 static vop_lookup_t     nfs_lookup;
  125 static vop_create_t     nfs_create;
  126 static vop_mknod_t      nfs_mknod;
  127 static vop_open_t       nfs_open;
  128 static vop_close_t      nfs_close;
  129 static vop_access_t     nfs_access;
  130 static vop_getattr_t    nfs_getattr;
  131 static vop_setattr_t    nfs_setattr;
  132 static vop_read_t       nfs_read;
  133 static vop_fsync_t      nfs_fsync;
  134 static vop_remove_t     nfs_remove;
  135 static vop_link_t       nfs_link;
  136 static vop_rename_t     nfs_rename;
  137 static vop_mkdir_t      nfs_mkdir;
  138 static vop_rmdir_t      nfs_rmdir;
  139 static vop_symlink_t    nfs_symlink;
  140 static vop_readdir_t    nfs_readdir;
  141 static vop_strategy_t   nfs_strategy;
  142 static  int     nfs_lookitup(struct vnode *, const char *, int,
  143                     struct ucred *, struct thread *, struct nfsnode **);
  144 static  int     nfs_sillyrename(struct vnode *, struct vnode *,
  145                     struct componentname *);
  146 static vop_access_t     nfsspec_access;
  147 static vop_readlink_t   nfs_readlink;
  148 static vop_print_t      nfs_print;
  149 static vop_advlock_t    nfs_advlock;
  150 static vop_advlockasync_t nfs_advlockasync;
  151 
  152 /*
  153  * Global vfs data structures for nfs
  154  */
  155 struct vop_vector nfs_vnodeops = {
  156         .vop_default =          &default_vnodeops,
  157         .vop_access =           nfs_access,
  158         .vop_advlock =          nfs_advlock,
  159         .vop_advlockasync =     nfs_advlockasync,
  160         .vop_close =            nfs_close,
  161         .vop_create =           nfs_create,
  162         .vop_fsync =            nfs_fsync,
  163         .vop_getattr =          nfs_getattr,
  164         .vop_getpages =         nfs_getpages,
  165         .vop_putpages =         nfs_putpages,
  166         .vop_inactive =         nfs_inactive,
  167         .vop_link =             nfs_link,
  168         .vop_lookup =           nfs_lookup,
  169         .vop_mkdir =            nfs_mkdir,
  170         .vop_mknod =            nfs_mknod,
  171         .vop_open =             nfs_open,
  172         .vop_print =            nfs_print,
  173         .vop_read =             nfs_read,
  174         .vop_readdir =          nfs_readdir,
  175         .vop_readlink =         nfs_readlink,
  176         .vop_reclaim =          nfs_reclaim,
  177         .vop_remove =           nfs_remove,
  178         .vop_rename =           nfs_rename,
  179         .vop_rmdir =            nfs_rmdir,
  180         .vop_setattr =          nfs_setattr,
  181         .vop_strategy =         nfs_strategy,
  182         .vop_symlink =          nfs_symlink,
  183         .vop_write =            nfs_write,
  184 };
  185 
  186 struct vop_vector nfs_fifoops = {
  187         .vop_default =          &fifo_specops,
  188         .vop_access =           nfsspec_access,
  189         .vop_close =            nfsfifo_close,
  190         .vop_fsync =            nfs_fsync,
  191         .vop_getattr =          nfs_getattr,
  192         .vop_inactive =         nfs_inactive,
  193         .vop_print =            nfs_print,
  194         .vop_read =             nfsfifo_read,
  195         .vop_reclaim =          nfs_reclaim,
  196         .vop_setattr =          nfs_setattr,
  197         .vop_write =            nfsfifo_write,
  198 };
  199 
  200 static int      nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp,
  201                              struct componentname *cnp, struct vattr *vap);
  202 static int      nfs_removerpc(struct vnode *dvp, const char *name, int namelen,
  203                               struct ucred *cred, struct thread *td);
  204 static int      nfs_renamerpc(struct vnode *fdvp, const char *fnameptr,
  205                               int fnamelen, struct vnode *tdvp,
  206                               const char *tnameptr, int tnamelen,
  207                               struct ucred *cred, struct thread *td);
  208 static int      nfs_renameit(struct vnode *sdvp, struct componentname *scnp,
  209                              struct sillyrename *sp);
  210 
  211 /*
  212  * Global variables
  213  */
  214 struct mtx      nfs_iod_mtx;
  215 struct proc     *nfs_iodwant[NFS_MAXASYNCDAEMON];
  216 struct nfsmount *nfs_iodmount[NFS_MAXASYNCDAEMON];
  217 int              nfs_numasync = 0;
  218 vop_advlock_t   *nfs_advlock_p = nfs_dolock;
  219 vop_reclaim_t   *nfs_reclaim_p = NULL;
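       /* Fixed-size header of a struct dirent, i.e. its size without the d_name[] buffer. */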
  220 #define DIRHDSIZ        (sizeof (struct dirent) - (MAXNAMLEN + 1))
  221 
  222 SYSCTL_DECL(_vfs_nfs);
  223 
  224 static int      nfsaccess_cache_timeout = NFS_MAXATTRTIMO;
  225 SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_timeout, CTLFLAG_RW,
  226            &nfsaccess_cache_timeout, 0, "NFS ACCESS cache timeout");
  227 
  228 static int      nfs_prime_access_cache = 0;
  229 SYSCTL_INT(_vfs_nfs, OID_AUTO, prime_access_cache, CTLFLAG_RW,
  230            &nfs_prime_access_cache, 0,
  231            "Prime NFS ACCESS cache when fetching attributes");
  232 
  233 static int      nfsv3_commit_on_close = 0;
  234 SYSCTL_INT(_vfs_nfs, OID_AUTO, nfsv3_commit_on_close, CTLFLAG_RW,
  235            &nfsv3_commit_on_close, 0, "write+commit on close, else only write");
  236 
  237 static int      nfs_clean_pages_on_close = 1;
  238 SYSCTL_INT(_vfs_nfs, OID_AUTO, clean_pages_on_close, CTLFLAG_RW,
  239            &nfs_clean_pages_on_close, 0, "NFS clean dirty pages on close");
  240 
  241 int nfs_directio_enable = 0;
  242 SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs_directio_enable, CTLFLAG_RW,
  243            &nfs_directio_enable, 0, "Enable NFS directio");
  244 
   245 /*
   246  * This sysctl allows other processes to mmap a file that has been opened
   247  * O_DIRECT by a process.  In general, having processes mmap the file while
   248  * Direct IO is in progress can lead to data inconsistencies.  We allow it
   249  * by default anyway, to prevent a denial of service: otherwise a malicious
   250  * user could open files O_DIRECT and thereby prevent other users from
   251  * mmap'ing those files.  "Protected" environments where stricter consistency
   252  * guarantees are required can disable this knob.  The process that opened
   253  * the file O_DIRECT cannot mmap() the file, because mmap'ed IO on an
   254  * O_DIRECT open() is not meaningful.
   255  */
  256 int nfs_directio_allow_mmap = 1;
  257 SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs_directio_allow_mmap, CTLFLAG_RW,
  258            &nfs_directio_allow_mmap, 0, "Enable mmaped IO on file with O_DIRECT opens");
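       /*
        * For example, an administrator wanting the stricter behaviour described
        * above could turn the knob off at run time (illustrative command only):
        *
        *      sysctl vfs.nfs.nfs_directio_allow_mmap=0
        */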
  259 
  260 #if 0
  261 SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_hits, CTLFLAG_RD,
  262            &nfsstats.accesscache_hits, 0, "NFS ACCESS cache hit count");
  263 
  264 SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_misses, CTLFLAG_RD,
  265            &nfsstats.accesscache_misses, 0, "NFS ACCESS cache miss count");
  266 #endif
  267 
  268 #define NFSV3ACCESS_ALL (NFSV3ACCESS_READ | NFSV3ACCESS_MODIFY          \
  269                          | NFSV3ACCESS_EXTEND | NFSV3ACCESS_EXECUTE     \
  270                          | NFSV3ACCESS_DELETE | NFSV3ACCESS_LOOKUP)
  271 
  272 /*
  273  * SMP Locking Note :
  274  * The list of locks after the description of the lock is the ordering
  275  * of other locks acquired with the lock held.
  276  * np->n_mtx : Protects the fields in the nfsnode.
  277        VM Object Lock
  278        VI_MTX (acquired indirectly)
  279  * nmp->nm_mtx : Protects the fields in the nfsmount.
  280        rep->r_mtx
  281  * nfs_iod_mtx : Global lock, protects shared nfsiod state.
  282  * nfs_reqq_mtx : Global lock, protects the nfs_reqq list.
  283        nmp->nm_mtx
  284        rep->r_mtx
  285  * rep->r_mtx : Protects the fields in an nfsreq.
  286  */
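       /*
        * For example, per the ordering above it is legitimate to acquire the
        * VM object lock while already holding np->n_mtx, or nmp->nm_mtx while
        * already holding nfs_reqq_mtx, but never the reverse of either.
        */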
  287 
  288 static int
  289 nfs3_access_otw(struct vnode *vp, int wmode, struct thread *td,
  290     struct ucred *cred, uint32_t *retmode)
  291 {
  292         const int v3 = 1;
  293         u_int32_t *tl;
  294         int error = 0, attrflag, i, lrupos;
  295 
  296         struct mbuf *mreq, *mrep, *md, *mb;
  297         caddr_t bpos, dpos;
  298         u_int32_t rmode;
  299         struct nfsnode *np = VTONFS(vp);
  300 
  301         nfsstats.rpccnt[NFSPROC_ACCESS]++;
  302         mreq = nfsm_reqhead(vp, NFSPROC_ACCESS, NFSX_FH(v3) + NFSX_UNSIGNED);
  303         mb = mreq;
  304         bpos = mtod(mb, caddr_t);
  305         nfsm_fhtom(vp, v3);
  306         tl = nfsm_build(u_int32_t *, NFSX_UNSIGNED);
  307         *tl = txdr_unsigned(wmode);
  308         nfsm_request(vp, NFSPROC_ACCESS, td, cred);
  309         nfsm_postop_attr(vp, attrflag);
  310         if (!error) {
  311                 lrupos = 0;
  312                 tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
  313                 rmode = fxdr_unsigned(u_int32_t, *tl);
  314                 mtx_lock(&np->n_mtx);
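                       /*
                        * Record the returned access bits in the per-node cache:
                        * reuse the entry for this uid if one exists, otherwise
                        * remember the least recently stamped slot so it can be
                        * replaced when the scan falls through below.
                        */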
  315                 for (i = 0; i < NFS_ACCESSCACHESIZE; i++) {
  316                         if (np->n_accesscache[i].uid == cred->cr_uid) {
  317                                 np->n_accesscache[i].mode = rmode;
  318                                 np->n_accesscache[i].stamp = time_second;
  319                                 break;
  320                         }
  321                         if (i > 0 && np->n_accesscache[i].stamp <
  322                             np->n_accesscache[lrupos].stamp)
  323                                 lrupos = i;
  324                 }
  325                 if (i == NFS_ACCESSCACHESIZE) {
  326                         np->n_accesscache[lrupos].uid = cred->cr_uid;
  327                         np->n_accesscache[lrupos].mode = rmode;
  328                         np->n_accesscache[lrupos].stamp = time_second;
  329                 }
  330                 mtx_unlock(&np->n_mtx);
  331                 if (retmode != NULL)
  332                         *retmode = rmode;
  333                 KDTRACE_NFS_ACCESSCACHE_LOAD_DONE(vp, cred->cr_uid, rmode, 0);
  334         }
  335         m_freem(mrep);
  336 nfsmout:
  337 #ifdef KDTRACE_HOOKS
  338         if (error) {
  339                 KDTRACE_NFS_ACCESSCACHE_LOAD_DONE(vp, cred->cr_uid, 0,
  340                     error);
  341         }
  342 #endif
  343         return (error);
  344 }
  345 
  346 /*
  347  * nfs access vnode op.
  348  * For nfs version 2, just return ok. File accesses may fail later.
  349  * For nfs version 3, use the access rpc to check accessibility. If file modes
  350  * are changed on the server, accesses might still fail later.
  351  */
  352 static int
  353 nfs_access(struct vop_access_args *ap)
  354 {
  355         struct vnode *vp = ap->a_vp;
  356         int error = 0, i, gotahit;
  357         u_int32_t mode, rmode, wmode;
  358         int v3 = NFS_ISV3(vp);
  359         struct nfsnode *np = VTONFS(vp);
  360 
  361         /*
  362          * Disallow write attempts on filesystems mounted read-only;
  363          * unless the file is a socket, fifo, or a block or character
  364          * device resident on the filesystem.
  365          */
  366         if ((ap->a_accmode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
  367                 switch (vp->v_type) {
  368                 case VREG:
  369                 case VDIR:
  370                 case VLNK:
  371                         return (EROFS);
  372                 default:
  373                         break;
  374                 }
  375         }
  376         /*
  377          * For nfs v3, check to see if we have done this recently, and if
  378          * so return our cached result instead of making an ACCESS call.
   379          * If not, do an access rpc.  For nfs v2 you are stuck emulating
   380          * ufs_access() locally using the vattr. This may not be correct,
  381          * since the server may apply other access criteria such as
  382          * client uid-->server uid mapping that we do not know about.
  383          */
  384         if (v3) {
  385                 if (ap->a_accmode & VREAD)
  386                         mode = NFSV3ACCESS_READ;
  387                 else
  388                         mode = 0;
  389                 if (vp->v_type != VDIR) {
  390                         if (ap->a_accmode & VWRITE)
  391                                 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND);
  392                         if (ap->a_accmode & VEXEC)
  393                                 mode |= NFSV3ACCESS_EXECUTE;
  394                 } else {
  395                         if (ap->a_accmode & VWRITE)
  396                                 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND |
  397                                          NFSV3ACCESS_DELETE);
  398                         if (ap->a_accmode & VEXEC)
  399                                 mode |= NFSV3ACCESS_LOOKUP;
  400                 }
  401                 /* XXX safety belt, only make blanket request if caching */
  402                 if (nfsaccess_cache_timeout > 0) {
  403                         wmode = NFSV3ACCESS_READ | NFSV3ACCESS_MODIFY |
  404                                 NFSV3ACCESS_EXTEND | NFSV3ACCESS_EXECUTE |
  405                                 NFSV3ACCESS_DELETE | NFSV3ACCESS_LOOKUP;
  406                 } else {
  407                         wmode = mode;
  408                 }
  409 
  410                 /*
  411                  * Does our cached result allow us to give a definite yes to
  412                  * this request?
  413                  */
  414                 gotahit = 0;
  415                 mtx_lock(&np->n_mtx);
  416                 for (i = 0; i < NFS_ACCESSCACHESIZE; i++) {
  417                         if (ap->a_cred->cr_uid == np->n_accesscache[i].uid) {
  418                                 if (time_second < (np->n_accesscache[i].stamp +
  419                                     nfsaccess_cache_timeout) &&
  420                                     (np->n_accesscache[i].mode & mode) == mode) {
  421                                         nfsstats.accesscache_hits++;
  422                                         gotahit = 1;
  423                                 }
  424                                 break;
  425                         }
  426                 }
  427                 mtx_unlock(&np->n_mtx);
  428 #ifdef KDTRACE_HOOKS
  429                 if (gotahit)
  430                         KDTRACE_NFS_ACCESSCACHE_GET_HIT(vp,
  431                             ap->a_cred->cr_uid, mode);
  432                 else
  433                         KDTRACE_NFS_ACCESSCACHE_GET_MISS(vp,
  434                             ap->a_cred->cr_uid, mode);
  435 #endif
  436                 if (gotahit == 0) {
  437                         /*
  438                          * Either a no, or a don't know.  Go to the wire.
  439                          */
  440                         nfsstats.accesscache_misses++;
  441                         error = nfs3_access_otw(vp, wmode, ap->a_td, ap->a_cred,
  442                             &rmode);
  443                         if (!error) {
  444                                 if ((rmode & mode) != mode)
  445                                         error = EACCES;
  446                         }
  447                 }
  448                 return (error);
  449         } else {
  450                 if ((error = nfsspec_access(ap)) != 0) {
  451                         return (error);
  452                 }
  453                 /*
  454                  * Attempt to prevent a mapped root from accessing a file
  455                  * which it shouldn't.  We try to read a byte from the file
  456                  * if the user is root and the file is not zero length.
  457                  * After calling nfsspec_access, we should have the correct
  458                  * file size cached.
  459                  */
  460                 mtx_lock(&np->n_mtx);
  461                 if (ap->a_cred->cr_uid == 0 && (ap->a_accmode & VREAD)
  462                     && VTONFS(vp)->n_size > 0) {
  463                         struct iovec aiov;
  464                         struct uio auio;
  465                         char buf[1];
  466 
  467                         mtx_unlock(&np->n_mtx);
  468                         aiov.iov_base = buf;
  469                         aiov.iov_len = 1;
  470                         auio.uio_iov = &aiov;
  471                         auio.uio_iovcnt = 1;
  472                         auio.uio_offset = 0;
  473                         auio.uio_resid = 1;
  474                         auio.uio_segflg = UIO_SYSSPACE;
  475                         auio.uio_rw = UIO_READ;
  476                         auio.uio_td = ap->a_td;
  477 
  478                         if (vp->v_type == VREG)
  479                                 error = nfs_readrpc(vp, &auio, ap->a_cred);
  480                         else if (vp->v_type == VDIR) {
  481                                 char* bp;
  482                                 bp = malloc(NFS_DIRBLKSIZ, M_TEMP, M_WAITOK);
  483                                 aiov.iov_base = bp;
  484                                 aiov.iov_len = auio.uio_resid = NFS_DIRBLKSIZ;
  485                                 error = nfs_readdirrpc(vp, &auio, ap->a_cred);
  486                                 free(bp, M_TEMP);
  487                         } else if (vp->v_type == VLNK)
  488                                 error = nfs_readlinkrpc(vp, &auio, ap->a_cred);
  489                         else
  490                                 error = EACCES;
  491                 } else
  492                         mtx_unlock(&np->n_mtx);
  493                 return (error);
  494         }
  495 }
  496 
  497 int nfs_otw_getattr_avoid = 0;
  498 
  499 /*
  500  * nfs open vnode op
  501  * Check to see if the type is ok
  502  * and that deletion is not in progress.
  503  * For paged in text files, you will need to flush the page cache
  504  * if consistency is lost.
  505  */
  506 /* ARGSUSED */
  507 static int
  508 nfs_open(struct vop_open_args *ap)
  509 {
  510         struct vnode *vp = ap->a_vp;
  511         struct nfsnode *np = VTONFS(vp);
  512         struct vattr vattr;
  513         int error;
  514         int fmode = ap->a_mode;
  515 
  516         if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK)
  517                 return (EOPNOTSUPP);
  518 
  519         /*
  520          * Get a valid lease. If cached data is stale, flush it.
  521          */
  522         mtx_lock(&np->n_mtx);
  523         if (np->n_flag & NMODIFIED) {
  524                 mtx_unlock(&np->n_mtx);                 
  525                 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
  526                 if (error == EINTR || error == EIO)
  527                         return (error);
  528                 np->n_attrstamp = 0;
  529                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
  530                 if (vp->v_type == VDIR)
  531                         np->n_direofoffset = 0;
  532                 error = VOP_GETATTR(vp, &vattr, ap->a_cred);
  533                 if (error)
  534                         return (error);
  535                 mtx_lock(&np->n_mtx);
  536                 np->n_mtime = vattr.va_mtime;
  537                 mtx_unlock(&np->n_mtx);
  538         } else {
  539                 struct thread *td = curthread;
  540 
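                       /*
                        * If the node's n_ac_ts_* stamps do not match the current
                        * thread's system call count, thread id and process id,
                        * clear n_attrstamp so the VOP_GETATTR() below fetches
                        * fresh attributes rather than trusting the cache.
                        */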
  541                 if (np->n_ac_ts_syscalls != td->td_syscalls ||
  542                     np->n_ac_ts_tid != td->td_tid || 
  543                     td->td_proc == NULL ||
  544                     np->n_ac_ts_pid != td->td_proc->p_pid) {
  545                         np->n_attrstamp = 0;
  546                         KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
  547                 }
  548                 mtx_unlock(&np->n_mtx);                                         
  549                 error = VOP_GETATTR(vp, &vattr, ap->a_cred);
  550                 if (error)
  551                         return (error);
  552                 mtx_lock(&np->n_mtx);
  553                 if (NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime)) {
  554                         if (vp->v_type == VDIR)
  555                                 np->n_direofoffset = 0;
  556                         mtx_unlock(&np->n_mtx);
  557                         error = nfs_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
  558                         if (error == EINTR || error == EIO) {
  559                                 return (error);
  560                         }
  561                         mtx_lock(&np->n_mtx);
  562                         np->n_mtime = vattr.va_mtime;
  563                 }
  564                 mtx_unlock(&np->n_mtx);
  565         }
  566         /*
  567          * If the object has >= 1 O_DIRECT active opens, we disable caching.
  568          */
  569         if (nfs_directio_enable && (fmode & O_DIRECT) && (vp->v_type == VREG)) {
  570                 if (np->n_directio_opens == 0) {
  571                         error = nfs_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
  572                         if (error)
  573                                 return (error);
  574                         mtx_lock(&np->n_mtx);
  575                         np->n_flag |= NNONCACHE;
  576                         mtx_unlock(&np->n_mtx);
  577                 }
  578                 np->n_directio_opens++;
  579         }
  580         vnode_create_vobject(vp, vattr.va_size, ap->a_td);
  581         return (0);
  582 }
  583 
  584 /*
  585  * nfs close vnode op
  586  * What an NFS client should do upon close after writing is a debatable issue.
  587  * Most NFS clients push delayed writes to the server upon close, basically for
  588  * two reasons:
  589  * 1 - So that any write errors may be reported back to the client process
  590  *     doing the close system call. By far the two most likely errors are
  591  *     NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure.
  592  * 2 - To put a worst case upper bound on cache inconsistency between
  593  *     multiple clients for the file.
  594  * There is also a consistency problem for Version 2 of the protocol w.r.t.
  595  * not being able to tell if other clients are writing a file concurrently,
  596  * since there is no way of knowing if the changed modify time in the reply
  597  * is only due to the write for this client.
  598  * (NFS Version 3 provides weak cache consistency data in the reply that
  599  *  should be sufficient to detect and handle this case.)
  600  *
  601  * The current code does the following:
  602  * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers
  603  * for NFS Version 3 - flush dirty buffers to the server but don't invalidate
  604  *                     or commit them (this satisfies 1 and 2 except for the
  605  *                     case where the server crashes after this close but
  606  *                     before the commit RPC, which is felt to be "good
   607  *                     enough").  Changing the last argument to nfs_flush()
   608  *                     to a 1 would force a commit operation, if it is felt
   609  *                     a commit is necessary now.
  610  */
  611 /* ARGSUSED */
  612 static int
  613 nfs_close(struct vop_close_args *ap)
  614 {
  615         struct vnode *vp = ap->a_vp;
  616         struct nfsnode *np = VTONFS(vp);
  617         int error = 0;
  618         int fmode = ap->a_fflag;
  619 
  620         if (vp->v_type == VREG) {
  621             /*
  622              * Examine and clean dirty pages, regardless of NMODIFIED.
  623              * This closes a major hole in close-to-open consistency.
  624              * We want to push out all dirty pages (and buffers) on
  625              * close, regardless of whether they were dirtied by
  626              * mmap'ed writes or via write().
  627              */
  628             if (nfs_clean_pages_on_close && vp->v_object) {
  629                 VM_OBJECT_LOCK(vp->v_object);
  630                 vm_object_page_clean(vp->v_object, 0, 0, 0);
  631                 VM_OBJECT_UNLOCK(vp->v_object);
  632             }
  633             mtx_lock(&np->n_mtx);
  634             if (np->n_flag & NMODIFIED) {
  635                 mtx_unlock(&np->n_mtx);
  636                 if (NFS_ISV3(vp)) {
  637                     /*
  638                      * Under NFSv3 we have dirty buffers to dispose of.  We
  639                      * must flush them to the NFS server.  We have the option
  640                      * of waiting all the way through the commit rpc or just
  641                      * waiting for the initial write.  The default is to only
  642                      * wait through the initial write so the data is in the
  643                      * server's cache, which is roughly similar to the state
  644                      * a standard disk subsystem leaves the file in on close().
  645                      *
  646                      * We cannot clear the NMODIFIED bit in np->n_flag due to
  647                      * potential races with other processes, and certainly
  648                      * cannot clear it if we don't commit.
  649                      */
  650                     int cm = nfsv3_commit_on_close ? 1 : 0;
  651                     error = nfs_flush(vp, MNT_WAIT, cm);
  652                     /* np->n_flag &= ~NMODIFIED; */
  653                 } else
  654                     error = nfs_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
  655                 mtx_lock(&np->n_mtx);
  656             }
  657             if (np->n_flag & NWRITEERR) {
  658                 np->n_flag &= ~NWRITEERR;
  659                 error = np->n_error;
  660             }
  661             mtx_unlock(&np->n_mtx);
  662         }
  663         if (nfs_directio_enable)
  664                 KASSERT((np->n_directio_asyncwr == 0),
  665                         ("nfs_close: dirty unflushed (%d) directio buffers\n",
  666                          np->n_directio_asyncwr));
  667         if (nfs_directio_enable && (fmode & O_DIRECT) && (vp->v_type == VREG)) {
  668                 mtx_lock(&np->n_mtx);
  669                 KASSERT((np->n_directio_opens > 0), 
   670                         ("nfs_close: unexpected value (0) of n_directio_opens\n"));
  671                 np->n_directio_opens--;
  672                 if (np->n_directio_opens == 0)
  673                         np->n_flag &= ~NNONCACHE;
  674                 mtx_unlock(&np->n_mtx);
  675         }
  676         return (error);
  677 }
  678 
  679 /*
  680  * nfs getattr call from vfs.
  681  */
  682 static int
  683 nfs_getattr(struct vop_getattr_args *ap)
  684 {
  685         struct vnode *vp = ap->a_vp;
  686         struct nfsnode *np = VTONFS(vp);
  687         struct thread *td = curthread;
  688         struct vattr *vap = ap->a_vap;
  689         struct vattr vattr;
  690         caddr_t bpos, dpos;
  691         int error = 0;
  692         struct mbuf *mreq, *mrep, *md, *mb;
  693         int v3 = NFS_ISV3(vp);
  694 
  695         /*
  696          * Update local times for special files.
  697          */
  698         mtx_lock(&np->n_mtx);
  699         if (np->n_flag & (NACC | NUPD))
  700                 np->n_flag |= NCHG;
  701         mtx_unlock(&np->n_mtx);
  702         /*
  703          * First look in the cache.
  704          */
  705         if (nfs_getattrcache(vp, &vattr) == 0)
  706                 goto nfsmout;
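               /*
                * Optionally prime the ACCESS cache: the ACCESS reply carries
                * post-op attributes, so a second look in the attribute cache
                * may succeed without issuing a separate GETATTR RPC.
                */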
  707         if (v3 && nfs_prime_access_cache && nfsaccess_cache_timeout > 0) {
  708                 nfsstats.accesscache_misses++;
  709                 nfs3_access_otw(vp, NFSV3ACCESS_ALL, td, ap->a_cred, NULL);
  710                 if (nfs_getattrcache(vp, &vattr) == 0)
  711                         goto nfsmout;
  712         }
  713         nfsstats.rpccnt[NFSPROC_GETATTR]++;
  714         mreq = nfsm_reqhead(vp, NFSPROC_GETATTR, NFSX_FH(v3));
  715         mb = mreq;
  716         bpos = mtod(mb, caddr_t);
  717         nfsm_fhtom(vp, v3);
  718         nfsm_request(vp, NFSPROC_GETATTR, td, ap->a_cred);
  719         if (!error) {
  720                 nfsm_loadattr(vp, &vattr);
  721         }
  722         m_freem(mrep);
  723 nfsmout:
  724         vap->va_type = vattr.va_type;
  725         vap->va_mode = vattr.va_mode;
  726         vap->va_nlink = vattr.va_nlink;
  727         vap->va_uid = vattr.va_uid;
  728         vap->va_gid = vattr.va_gid;
  729         vap->va_fsid = vattr.va_fsid;
  730         vap->va_fileid = vattr.va_fileid;
  731         vap->va_size = vattr.va_size;
  732         vap->va_blocksize = vattr.va_blocksize;
  733         vap->va_atime = vattr.va_atime;
  734         vap->va_mtime = vattr.va_mtime;
  735         vap->va_ctime = vattr.va_ctime;
  736         vap->va_gen = vattr.va_gen;
  737         vap->va_flags = vattr.va_flags;
  738         vap->va_rdev = vattr.va_rdev;
  739         vap->va_bytes = vattr.va_bytes;
  740         vap->va_filerev = vattr.va_filerev;
  741 
  742         return (error);
  743 }
  744 
  745 /*
  746  * nfs setattr call.
  747  */
  748 static int
  749 nfs_setattr(struct vop_setattr_args *ap)
  750 {
  751         struct vnode *vp = ap->a_vp;
  752         struct nfsnode *np = VTONFS(vp);
  753         struct vattr *vap = ap->a_vap;
  754         struct thread *td = curthread;
  755         int error = 0;
  756         u_quad_t tsize;
  757 
  758 #ifndef nolint
  759         tsize = (u_quad_t)0;
  760 #endif
  761 
  762         /*
  763          * Setting of flags is not supported.
  764          */
  765         if (vap->va_flags != VNOVAL)
  766                 return (EOPNOTSUPP);
  767 
  768         /*
  769          * Disallow write attempts if the filesystem is mounted read-only.
  770          */
  771         if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
  772             vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
  773             vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
  774             (vp->v_mount->mnt_flag & MNT_RDONLY)) {
  775                 error = EROFS;
  776                 goto out;
  777         }
  778         if (vap->va_size != VNOVAL) {
  779                 switch (vp->v_type) {
  780                 case VDIR:
  781                         return (EISDIR);
  782                 case VCHR:
  783                 case VBLK:
  784                 case VSOCK:
  785                 case VFIFO:
  786                         if (vap->va_mtime.tv_sec == VNOVAL &&
  787                             vap->va_atime.tv_sec == VNOVAL &&
  788                             vap->va_mode == (mode_t)VNOVAL &&
  789                             vap->va_uid == (uid_t)VNOVAL &&
  790                             vap->va_gid == (gid_t)VNOVAL)
  791                                 return (0);             
  792                         vap->va_size = VNOVAL;
  793                         break;
  794                 default:
  795                         /*
  796                          * Disallow write attempts if the filesystem is
  797                          * mounted read-only.
  798                          */
  799                         if (vp->v_mount->mnt_flag & MNT_RDONLY)
  800                                 return (EROFS);
  801                         /*
   802                          * We run vnode_pager_setsize() early (why?), so
   803                          * we must set np->n_size now to avoid vinvalbuf
  804                          * V_SAVE races that might setsize a lower
  805                          * value.
  806                          */
  807                         mtx_lock(&np->n_mtx);
  808                         tsize = np->n_size;
  809                         mtx_unlock(&np->n_mtx);
  810                         error = nfs_meta_setsize(vp, ap->a_cred, td,
  811                             vap->va_size);
  812                         mtx_lock(&np->n_mtx);
  813                         if (np->n_flag & NMODIFIED) {
  814                             tsize = np->n_size;
  815                             mtx_unlock(&np->n_mtx);
  816                             if (vap->va_size == 0)
  817                                 error = nfs_vinvalbuf(vp, 0, td, 1);
  818                             else
  819                                 error = nfs_vinvalbuf(vp, V_SAVE, td, 1);
  820                             if (error) {
  821                                 vnode_pager_setsize(vp, tsize);
  822                                 goto out;
  823                             }
  824                         } else
  825                             mtx_unlock(&np->n_mtx);
  826                         /*
  827                          * np->n_size has already been set to vap->va_size
  828                          * in nfs_meta_setsize(). We must set it again since
  829                          * nfs_loadattrcache() could be called through
  830                          * nfs_meta_setsize() and could modify np->n_size.
  831                          */
  832                         mtx_lock(&np->n_mtx);
  833                         np->n_vattr.va_size = np->n_size = vap->va_size;
  834                         mtx_unlock(&np->n_mtx);
   835                 }
  836         } else {
  837                 mtx_lock(&np->n_mtx);
  838                 if ((vap->va_mtime.tv_sec != VNOVAL || vap->va_atime.tv_sec != VNOVAL) && 
  839                     (np->n_flag & NMODIFIED) && vp->v_type == VREG) {
  840                         mtx_unlock(&np->n_mtx);
  841                         if ((error = nfs_vinvalbuf(vp, V_SAVE, td, 1)) != 0 &&
  842                             (error == EINTR || error == EIO))
  843                                 return error;
  844                 } else
  845                         mtx_unlock(&np->n_mtx);
  846         }
  847         error = nfs_setattrrpc(vp, vap, ap->a_cred);
  848         if (error && vap->va_size != VNOVAL) {
  849                 mtx_lock(&np->n_mtx);
  850                 np->n_size = np->n_vattr.va_size = tsize;
  851                 vnode_pager_setsize(vp, tsize);
  852                 mtx_unlock(&np->n_mtx);
  853         }
  854 out:
  855         return (error);
  856 }
  857 
  858 /*
  859  * Do an nfs setattr rpc.
  860  */
  861 static int
  862 nfs_setattrrpc(struct vnode *vp, struct vattr *vap, struct ucred *cred)
  863 {
  864         struct nfsv2_sattr *sp;
  865         struct nfsnode *np = VTONFS(vp);
  866         caddr_t bpos, dpos;
  867         u_int32_t *tl;
  868         int error = 0, i, wccflag = NFSV3_WCCRATTR;
  869         struct mbuf *mreq, *mrep, *md, *mb;
  870         int v3 = NFS_ISV3(vp);
  871 
  872         nfsstats.rpccnt[NFSPROC_SETATTR]++;
  873         mreq = nfsm_reqhead(vp, NFSPROC_SETATTR, NFSX_FH(v3) + NFSX_SATTR(v3));
  874         mb = mreq;
  875         bpos = mtod(mb, caddr_t);
  876         nfsm_fhtom(vp, v3);
  877         if (v3) {
  878                 nfsm_v3attrbuild(vap, TRUE);
  879                 tl = nfsm_build(u_int32_t *, NFSX_UNSIGNED);
  880                 *tl = nfs_false;
  881         } else {
  882                 sp = nfsm_build(struct nfsv2_sattr *, NFSX_V2SATTR);
  883                 if (vap->va_mode == (mode_t)VNOVAL)
  884                         sp->sa_mode = nfs_xdrneg1;
  885                 else
  886                         sp->sa_mode = vtonfsv2_mode(vp->v_type, vap->va_mode);
  887                 if (vap->va_uid == (uid_t)VNOVAL)
  888                         sp->sa_uid = nfs_xdrneg1;
  889                 else
  890                         sp->sa_uid = txdr_unsigned(vap->va_uid);
  891                 if (vap->va_gid == (gid_t)VNOVAL)
  892                         sp->sa_gid = nfs_xdrneg1;
  893                 else
  894                         sp->sa_gid = txdr_unsigned(vap->va_gid);
  895                 sp->sa_size = txdr_unsigned(vap->va_size);
  896                 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
  897                 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
  898         }
  899         nfsm_request(vp, NFSPROC_SETATTR, curthread, cred);
  900         if (v3) {
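                       /*
                        * The SETATTR may have changed the file mode, so any
                        * cached ACCESS results for this node are stale; clear
                        * their stamps before parsing the wcc data.
                        */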
  901                 mtx_lock(&np->n_mtx);
  902                 for (i = 0; i < NFS_ACCESSCACHESIZE; i++)
  903                         np->n_accesscache[i].stamp = 0;
  904                 mtx_unlock(&np->n_mtx);
  905                 KDTRACE_NFS_ACCESSCACHE_FLUSH_DONE(vp);
  906                 nfsm_wcc_data(vp, wccflag);
  907         } else
  908                 nfsm_loadattr(vp, NULL);
  909         m_freem(mrep);
  910 nfsmout:
  911         return (error);
  912 }
  913 
  914 /*
  915  * nfs lookup call, one step at a time...
  916  * First look in cache
  917  * If not found, unlock the directory nfsnode and do the rpc
  918  */
  919 static int
  920 nfs_lookup(struct vop_lookup_args *ap)
  921 {
  922         struct componentname *cnp = ap->a_cnp;
  923         struct vnode *dvp = ap->a_dvp;
  924         struct vnode **vpp = ap->a_vpp;
  925         struct mount *mp = dvp->v_mount;
  926         struct vattr vattr;
  927         time_t dmtime;
  928         int flags = cnp->cn_flags;
  929         struct vnode *newvp;
  930         struct nfsmount *nmp;
  931         caddr_t bpos, dpos;
  932         struct mbuf *mreq, *mrep, *md, *mb;
  933         long len;
  934         nfsfh_t *fhp;
  935         struct nfsnode *np;
  936         int error = 0, attrflag, fhsize, ltype;
  937         int v3 = NFS_ISV3(dvp);
  938         struct thread *td = cnp->cn_thread;
  939 
  940         *vpp = NULLVP;
  941         if ((flags & ISLASTCN) && (mp->mnt_flag & MNT_RDONLY) &&
  942             (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
  943                 return (EROFS);
  944         if (dvp->v_type != VDIR)
  945                 return (ENOTDIR);
  946         nmp = VFSTONFS(mp);
  947         np = VTONFS(dvp);
  948         if ((error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, td)) != 0) {
  949                 *vpp = NULLVP;
  950                 return (error);
  951         }
  952         error = cache_lookup(dvp, vpp, cnp);
  953         if (error > 0 && error != ENOENT)
  954                 return (error);
  955         if (error == -1) {
  956                 /*
  957                  * We only accept a positive hit in the cache if the
  958                  * change time of the file matches our cached copy.
  959                  * Otherwise, we discard the cache entry and fallback
  960                  * to doing a lookup RPC.
  961                  */
  962                 newvp = *vpp;
  963                 if (!VOP_GETATTR(newvp, &vattr, cnp->cn_cred)
  964                     && vattr.va_ctime.tv_sec == VTONFS(newvp)->n_ctime) {
  965                         nfsstats.lookupcache_hits++;
  966                         if (cnp->cn_nameiop != LOOKUP &&
  967                             (flags & ISLASTCN))
  968                                 cnp->cn_flags |= SAVENAME;
  969                         return (0);
  970                 }
  971                 cache_purge(newvp);
  972                 if (dvp != newvp)
  973                         vput(newvp);
  974                 else 
  975                         vrele(newvp);
  976                 *vpp = NULLVP;
  977         } else if (error == ENOENT) {
  978                 if (dvp->v_iflag & VI_DOOMED)
  979                         return (ENOENT);
  980                 /*
  981                  * We only accept a negative hit in the cache if the
  982                  * modification time of the parent directory matches
  983                  * our cached copy.  Otherwise, we discard all of the
  984                  * negative cache entries for this directory.
  985                  */
  986                 if (VOP_GETATTR(dvp, &vattr, cnp->cn_cred) == 0 &&
  987                     vattr.va_mtime.tv_sec == np->n_dmtime) {
  988                         nfsstats.lookupcache_hits++;
  989                         return (ENOENT);
  990                 }
  991                 cache_purge_negative(dvp);
  992                 mtx_lock(&np->n_mtx);
  993                 np->n_dmtime = 0;
  994                 mtx_unlock(&np->n_mtx);
  995         }
  996 
  997         /*
  998          * Cache the modification time of the parent directory in case
  999          * the lookup fails and results in adding the first negative
 1000          * name cache entry for the directory.  Since this is reading
 1001          * a single time_t, don't bother with locking.  The
 1002          * modification time may be a bit stale, but it must be read
 1003          * before performing the lookup RPC to prevent a race where
 1004          * another lookup updates the timestamp on the directory after
 1005          * the lookup RPC has been performed on the server but before
 1006          * n_dmtime is set at the end of this function.
 1007          */
 1008         dmtime = np->n_vattr.va_mtime.tv_sec;
 1009         error = 0;
 1010         newvp = NULLVP;
 1011         nfsstats.lookupcache_misses++;
 1012         nfsstats.rpccnt[NFSPROC_LOOKUP]++;
 1013         len = cnp->cn_namelen;
 1014         mreq = nfsm_reqhead(dvp, NFSPROC_LOOKUP,
 1015                 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
 1016         mb = mreq;
 1017         bpos = mtod(mb, caddr_t);
 1018         nfsm_fhtom(dvp, v3);
 1019         nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
 1020         nfsm_request(dvp, NFSPROC_LOOKUP, cnp->cn_thread, cnp->cn_cred);
 1021         if (error) {
 1022                 if (v3) {
 1023                         nfsm_postop_attr(dvp, attrflag);
 1024                         m_freem(mrep);
 1025                 }
 1026                 goto nfsmout;
 1027         }
 1028         nfsm_getfh(fhp, fhsize, v3);
 1029 
 1030         /*
 1031          * Handle RENAME case...
 1032          */
 1033         if (cnp->cn_nameiop == RENAME && (flags & ISLASTCN)) {
 1034                 if (NFS_CMPFH(np, fhp, fhsize)) {
 1035                         m_freem(mrep);
 1036                         return (EISDIR);
 1037                 }
 1038                 error = nfs_nget(mp, fhp, fhsize, &np, LK_EXCLUSIVE);
 1039                 if (error) {
 1040                         m_freem(mrep);
 1041                         return (error);
 1042                 }
 1043                 newvp = NFSTOV(np);
 1044                 if (v3) {
 1045                         nfsm_postop_attr(newvp, attrflag);
 1046                         nfsm_postop_attr(dvp, attrflag);
 1047                 } else
 1048                         nfsm_loadattr(newvp, NULL);
 1049                 *vpp = newvp;
 1050                 m_freem(mrep);
 1051                 cnp->cn_flags |= SAVENAME;
 1052                 return (0);
 1053         }
 1054 
 1055         if (flags & ISDOTDOT) {
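                       /*
                        * Lookup of "..": the parent must be locked before the
                        * child, so dvp may have to be unlocked while the new
                        * vnode is fetched.  vfs_busy() keeps the mount from
                        * going away in the meantime, and dvp is re-checked for
                        * VI_DOOMED once it has been relocked.
                        */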
 1056                 ltype = VOP_ISLOCKED(dvp);
 1057                 error = vfs_busy(mp, MBF_NOWAIT);
 1058                 if (error != 0) {
 1059                         vfs_ref(mp);
 1060                         VOP_UNLOCK(dvp, 0);
 1061                         error = vfs_busy(mp, 0);
 1062                         vn_lock(dvp, ltype | LK_RETRY);
 1063                         vfs_rel(mp);
 1064                         if (error == 0 && (dvp->v_iflag & VI_DOOMED)) {
 1065                                 vfs_unbusy(mp);
 1066                                 error = ENOENT;
 1067                         }
 1068                         if (error != 0) {
 1069                                 m_freem(mrep);
 1070                                 return (error);
 1071                         }
 1072                 }
 1073                 VOP_UNLOCK(dvp, 0);
 1074                 error = nfs_nget(mp, fhp, fhsize, &np, cnp->cn_lkflags);
 1075                 if (error == 0)
 1076                         newvp = NFSTOV(np);
 1077                 vfs_unbusy(mp);
 1078                 if (newvp != dvp)
 1079                         vn_lock(dvp, ltype | LK_RETRY);
 1080                 if (dvp->v_iflag & VI_DOOMED) {
 1081                         if (error == 0) {
 1082                                 if (newvp == dvp)
 1083                                         vrele(newvp);
 1084                                 else
 1085                                         vput(newvp);
 1086                         }
 1087                         error = ENOENT;
 1088                 }
 1089                 if (error) {
 1090                         m_freem(mrep);
 1091                         return (error);
 1092                 }
 1093         } else if (NFS_CMPFH(np, fhp, fhsize)) {
 1094                 VREF(dvp);
 1095                 newvp = dvp;
 1096         } else {
 1097                 error = nfs_nget(mp, fhp, fhsize, &np, cnp->cn_lkflags);
 1098                 if (error) {
 1099                         m_freem(mrep);
 1100                         return (error);
 1101                 }
 1102                 newvp = NFSTOV(np);
 1103         }
 1104         if (v3) {
 1105                 nfsm_postop_attr(newvp, attrflag);
 1106                 nfsm_postop_attr(dvp, attrflag);
 1107         } else
 1108                 nfsm_loadattr(newvp, NULL);
 1109         if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
 1110                 cnp->cn_flags |= SAVENAME;
 1111         if ((cnp->cn_flags & MAKEENTRY) &&
 1112             (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN))) {
 1113                 np->n_ctime = np->n_vattr.va_ctime.tv_sec;
 1114                 cache_enter(dvp, newvp, cnp);
 1115         }
 1116         *vpp = newvp;
 1117         m_freem(mrep);
 1118 nfsmout:
 1119         if (error) {
 1120                 if (newvp != NULLVP) {
 1121                         vput(newvp);
 1122                         *vpp = NULLVP;
 1123                 }
 1124 
 1125                 if (error != ENOENT)
 1126                         goto done;
 1127 
 1128                 /* The requested file was not found. */
 1129                 if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) &&
 1130                     (flags & ISLASTCN)) {
 1131                         /*
 1132                          * XXX: UFS does a full VOP_ACCESS(dvp,
 1133                          * VWRITE) here instead of just checking
 1134                          * MNT_RDONLY.
 1135                          */
 1136                         if (mp->mnt_flag & MNT_RDONLY)
 1137                                 return (EROFS);
 1138                         cnp->cn_flags |= SAVENAME;
 1139                         return (EJUSTRETURN);
 1140                 }
 1141 
 1142                 if ((cnp->cn_flags & MAKEENTRY) && cnp->cn_nameiop != CREATE) {
 1143                         /*
 1144                          * Maintain n_dmtime as the modification time
 1145                          * of the parent directory when the oldest -ve
 1146                          * name cache entry for this directory was
 1147                          * added.  If a -ve cache entry has already
 1148                          * been added with a newer modification time
 1149                          * by a concurrent lookup, then don't bother
 1150                          * adding a cache entry.  The modification
 1151                          * time of the directory might have changed
 1152                          * due to the file this lookup failed to find
 1153                          * being created.  In that case a subsequent
 1154                          * lookup would incorrectly use the entry
 1155                          * added here instead of doing an extra
 1156                          * lookup.
 1157                          */
 1158                         mtx_lock(&np->n_mtx);
 1159                         if (np->n_dmtime <= dmtime) {
 1160                                 if (np->n_dmtime == 0)
 1161                                         np->n_dmtime = dmtime;
 1162                                 mtx_unlock(&np->n_mtx);
 1163                                 cache_enter(dvp, NULL, cnp);
 1164                         } else
 1165                                 mtx_unlock(&np->n_mtx);
 1166                 }
 1167                 return (ENOENT);
 1168         }
 1169 done:
 1170         return (error);
 1171 }
 1172 
 1173 /*
 1174  * nfs read call.
 1175  * Just call nfs_bioread() to do the work.
 1176  */
 1177 static int
 1178 nfs_read(struct vop_read_args *ap)
 1179 {
 1180         struct vnode *vp = ap->a_vp;
 1181 
 1182         switch (vp->v_type) {
 1183         case VREG:
 1184                 return (nfs_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred));
 1185         case VDIR:
 1186                 return (EISDIR);
 1187         default:
 1188                 return (EOPNOTSUPP);
 1189         }
 1190 }
 1191 
 1192 /*
 1193  * nfs readlink call
 1194  */
 1195 static int
 1196 nfs_readlink(struct vop_readlink_args *ap)
 1197 {
 1198         struct vnode *vp = ap->a_vp;
 1199 
 1200         if (vp->v_type != VLNK)
 1201                 return (EINVAL);
 1202         return (nfs_bioread(vp, ap->a_uio, 0, ap->a_cred));
 1203 }
 1204 
 1205 /*
 1206  * Do a readlink rpc.
 1207  * Called by nfs_doio() from below the buffer cache.
 1208  */
 1209 int
 1210 nfs_readlinkrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred)
 1211 {
 1212         caddr_t bpos, dpos;
 1213         int error = 0, len, attrflag;
 1214         struct mbuf *mreq, *mrep, *md, *mb;
 1215         int v3 = NFS_ISV3(vp);
 1216 
 1217         nfsstats.rpccnt[NFSPROC_READLINK]++;
 1218         mreq = nfsm_reqhead(vp, NFSPROC_READLINK, NFSX_FH(v3));
 1219         mb = mreq;
 1220         bpos = mtod(mb, caddr_t);
 1221         nfsm_fhtom(vp, v3);
 1222         nfsm_request(vp, NFSPROC_READLINK, uiop->uio_td, cred);
 1223         if (v3)
 1224                 nfsm_postop_attr(vp, attrflag);
 1225         if (!error) {
 1226                 nfsm_strsiz(len, NFS_MAXPATHLEN);
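                       /*
                        * If the server claims the maximum path length, fall back
                        * to the cached file size (when nonzero and smaller) as
                        * the length of the link target.
                        */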
 1227                 if (len == NFS_MAXPATHLEN) {
 1228                         struct nfsnode *np = VTONFS(vp);
 1229                         mtx_lock(&np->n_mtx);
 1230                         if (np->n_size && np->n_size < NFS_MAXPATHLEN)
 1231                                 len = np->n_size;
 1232                         mtx_unlock(&np->n_mtx);
 1233                 }
 1234                 nfsm_mtouio(uiop, len);
 1235         }
 1236         m_freem(mrep);
 1237 nfsmout:
 1238         return (error);
 1239 }
 1240 
 1241 /*
 1242  * nfs read rpc call
 1243  * Ditto above
 1244  */
 1245 int
 1246 nfs_readrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred)
 1247 {
 1248         u_int32_t *tl;
 1249         caddr_t bpos, dpos;
 1250         struct mbuf *mreq, *mrep, *md, *mb;
 1251         struct nfsmount *nmp;
 1252         int error = 0, len, retlen, tsiz, eof, attrflag;
 1253         int v3 = NFS_ISV3(vp);
 1254         int rsize;
 1255 
 1256 #ifndef nolint
 1257         eof = 0;
 1258 #endif
 1259         nmp = VFSTONFS(vp->v_mount);
 1260         tsiz = uiop->uio_resid;
 1261         mtx_lock(&nmp->nm_mtx);
 1262         if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize) {
 1263                 mtx_unlock(&nmp->nm_mtx);
 1264                 return (EFBIG);
 1265         }
 1266         rsize = nmp->nm_rsize;
 1267         mtx_unlock(&nmp->nm_mtx);
 1268         while (tsiz > 0) {
 1269                 nfsstats.rpccnt[NFSPROC_READ]++;
 1270                 len = (tsiz > rsize) ? rsize : tsiz;
 1271                 mreq = nfsm_reqhead(vp, NFSPROC_READ, NFSX_FH(v3) + NFSX_UNSIGNED * 3);
 1272                 mb = mreq;
 1273                 bpos = mtod(mb, caddr_t);
 1274                 nfsm_fhtom(vp, v3);
 1275                 tl = nfsm_build(u_int32_t *, NFSX_UNSIGNED * 3);
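                      /*
                       * v3 READ arguments: a 64-bit offset and a 32-bit
                       * count.  v2 uses a 32-bit offset, a count, and the
                       * unused "totalcount" word, which is sent as zero.
                       */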
 1276                 if (v3) {
 1277                         txdr_hyper(uiop->uio_offset, tl);
 1278                         *(tl + 2) = txdr_unsigned(len);
 1279                 } else {
 1280                         *tl++ = txdr_unsigned(uiop->uio_offset);
 1281                         *tl++ = txdr_unsigned(len);
 1282                         *tl = 0;
 1283                 }
 1284                 nfsm_request(vp, NFSPROC_READ, uiop->uio_td, cred);
 1285                 if (v3) {
 1286                         nfsm_postop_attr(vp, attrflag);
 1287                         if (error) {
 1288                                 m_freem(mrep);
 1289                                 goto nfsmout;
 1290                         }
 1291                         tl = nfsm_dissect(u_int32_t *, 2 * NFSX_UNSIGNED);
 1292                         eof = fxdr_unsigned(int, *(tl + 1));
 1293                 } else {
 1294                         nfsm_loadattr(vp, NULL);
 1295                 }
 1296                 nfsm_strsiz(retlen, rsize);
 1297                 nfsm_mtouio(uiop, retlen);
 1298                 m_freem(mrep);
 1299                 tsiz -= retlen;
 1300                 if (v3) {
 1301                         if (eof || retlen == 0) {
 1302                                 tsiz = 0;
 1303                         }
 1304                 } else if (retlen < len) {
 1305                         tsiz = 0;
 1306                 }
 1307         }
 1308 nfsmout:
 1309         return (error);
 1310 }
 1311 
 1312 /*
 1313  * nfs write call
 1314  */
 1315 int
 1316 nfs_writerpc(struct vnode *vp, struct uio *uiop, struct ucred *cred,
 1317              int *iomode, int *must_commit)
 1318 {
 1319         u_int32_t *tl;
 1320         int32_t backup;
 1321         caddr_t bpos, dpos;
 1322         struct mbuf *mreq, *mrep, *md, *mb;
 1323         struct nfsmount *nmp = VFSTONFS(vp->v_mount);
 1324         int error = 0, len, tsiz, wccflag = NFSV3_WCCRATTR, rlen, commit;
 1325         int v3 = NFS_ISV3(vp), committed = NFSV3WRITE_FILESYNC;
 1326         int wsize;
 1327         
 1328 #ifndef DIAGNOSTIC
 1329         if (uiop->uio_iovcnt != 1)
 1330                 panic("nfs: writerpc iovcnt > 1");
 1331 #endif
 1332         *must_commit = 0;
 1333         tsiz = uiop->uio_resid;
 1334         mtx_lock(&nmp->nm_mtx);
 1335         if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize) {
 1336                 mtx_unlock(&nmp->nm_mtx);               
 1337                 return (EFBIG);
 1338         }
 1339         wsize = nmp->nm_wsize;
 1340         mtx_unlock(&nmp->nm_mtx);
 1341         while (tsiz > 0) {
 1342                 nfsstats.rpccnt[NFSPROC_WRITE]++;
 1343                 len = (tsiz > wsize) ? wsize : tsiz;
 1344                 mreq = nfsm_reqhead(vp, NFSPROC_WRITE,
 1345                         NFSX_FH(v3) + 5 * NFSX_UNSIGNED + nfsm_rndup(len));
 1346                 mb = mreq;
 1347                 bpos = mtod(mb, caddr_t);
 1348                 nfsm_fhtom(vp, v3);
 1349                 if (v3) {
 1350                         tl = nfsm_build(u_int32_t *, 5 * NFSX_UNSIGNED);
 1351                         txdr_hyper(uiop->uio_offset, tl);
 1352                         tl += 2;
 1353                         *tl++ = txdr_unsigned(len);
 1354                         *tl++ = txdr_unsigned(*iomode);
 1355                         *tl = txdr_unsigned(len);
 1356                 } else {
 1357                         u_int32_t x;
 1358 
 1359                         tl = nfsm_build(u_int32_t *, 4 * NFSX_UNSIGNED);
 1360                         /* Set both "begin" and "current" to non-garbage. */
 1361                         x = txdr_unsigned((u_int32_t)uiop->uio_offset);
 1362                         *tl++ = x;      /* "begin offset" */
 1363                         *tl++ = x;      /* "current offset" */
 1364                         x = txdr_unsigned(len);
 1365                         *tl++ = x;      /* total to this offset */
 1366                         *tl = x;        /* size of this write */
 1367                 }
 1368                 nfsm_uiotom(uiop, len);
 1369                 nfsm_request(vp, NFSPROC_WRITE, uiop->uio_td, cred);
 1370                 if (v3) {
 1371                         wccflag = NFSV3_WCCCHK;
 1372                         nfsm_wcc_data(vp, wccflag);
 1373                         if (!error) {
 1374                                 tl = nfsm_dissect(u_int32_t *, 2 * NFSX_UNSIGNED
 1375                                         + NFSX_V3WRITEVERF);
 1376                                 rlen = fxdr_unsigned(int, *tl++);
 1377                                 if (rlen == 0) {
 1378                                         error = NFSERR_IO;
 1379                                         m_freem(mrep);
 1380                                         break;
 1381                                 } else if (rlen < len) {
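                                              /*
                                               * Short write: back the uio up
                                               * over the bytes the server did
                                               * not take so that they are
                                               * sent again on the next pass
                                               * of the loop.
                                               */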
 1382                                         backup = len - rlen;
 1383                                         uiop->uio_iov->iov_base =
 1384                                             (char *)uiop->uio_iov->iov_base -
 1385                                             backup;
 1386                                         uiop->uio_iov->iov_len += backup;
 1387                                         uiop->uio_offset -= backup;
 1388                                         uiop->uio_resid += backup;
 1389                                         len = rlen;
 1390                                 }
 1391                                 commit = fxdr_unsigned(int, *tl++);
 1392 
 1393                                 /*
 1394                                  * Return the lowest commitment level
 1395                                  * obtained by any of the RPCs.
 1396                                  */
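                                      /*
                                       * Equivalently, given the usual numeric
                                       * ordering of the NFSv3 stability
                                       * levels (UNSTABLE < DATASYNC <
                                       * FILESYNC), the two tests below keep
                                       * the minimum value seen so far:
                                       *
                                       *      if (commit < committed)
                                       *              committed = commit;
                                       */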
 1397                                 if (committed == NFSV3WRITE_FILESYNC)
 1398                                         committed = commit;
 1399                                 else if (committed == NFSV3WRITE_DATASYNC &&
 1400                                         commit == NFSV3WRITE_UNSTABLE)
 1401                                         committed = commit;
 1402                                 mtx_lock(&nmp->nm_mtx);
 1403                                 if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0){
 1404                                     bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
 1405                                         NFSX_V3WRITEVERF);
 1406                                     nmp->nm_state |= NFSSTA_HASWRITEVERF;
 1407                                 } else if (bcmp((caddr_t)tl,
 1408                                     (caddr_t)nmp->nm_verf, NFSX_V3WRITEVERF)) {
 1409                                     *must_commit = 1;
 1410                                     bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
 1411                                         NFSX_V3WRITEVERF);
 1412                                 }
 1413                                 mtx_unlock(&nmp->nm_mtx);
 1414                         }
 1415                 } else {
 1416                         nfsm_loadattr(vp, NULL);
 1417                 }
 1418                 if (wccflag) {
 1419                         mtx_lock(&(VTONFS(vp))->n_mtx);
 1420                         VTONFS(vp)->n_mtime = VTONFS(vp)->n_vattr.va_mtime;
 1421                         mtx_unlock(&(VTONFS(vp))->n_mtx);
 1422                 }
 1423                 m_freem(mrep);
 1424                 if (error)
 1425                         break;
 1426                 tsiz -= len;
 1427         }
 1428 nfsmout:
 1429         if (vp->v_mount->mnt_kern_flag & MNTK_ASYNC)
 1430                 committed = NFSV3WRITE_FILESYNC;
 1431         *iomode = committed;
 1432         if (error)
 1433                 uiop->uio_resid = tsiz;
 1434         return (error);
 1435 }
 1436 
 1437 /*
 1438  * nfs mknod rpc
 1439  * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the
 1440  * mode set to specify the file type and the size field for rdev.
 1441  */
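      /*
       * For example, a v2 character device ends up as a CREATE whose sa_mode
       * carries the file-type bits for a character device and whose sa_size
       * carries the encoded rdev, as built below.
       */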
 1442 static int
 1443 nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
 1444     struct vattr *vap)
 1445 {
 1446         struct nfsv2_sattr *sp;
 1447         u_int32_t *tl;
 1448         struct vnode *newvp = NULL;
 1449         struct nfsnode *np = NULL;
 1450         struct vattr vattr;
 1451         caddr_t bpos, dpos;
 1452         int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0;
 1453         struct mbuf *mreq, *mrep, *md, *mb;
 1454         u_int32_t rdev;
 1455         int v3 = NFS_ISV3(dvp);
 1456 
 1457         if (vap->va_type == VCHR || vap->va_type == VBLK)
 1458                 rdev = txdr_unsigned(vap->va_rdev);
 1459         else if (vap->va_type == VFIFO || vap->va_type == VSOCK)
 1460                 rdev = nfs_xdrneg1;
 1461         else {
 1462                 return (EOPNOTSUPP);
 1463         }
 1464         if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred)) != 0)
 1465                 return (error);
 1466         nfsstats.rpccnt[NFSPROC_MKNOD]++;
 1467         mreq = nfsm_reqhead(dvp, NFSPROC_MKNOD, NFSX_FH(v3) + 4 * NFSX_UNSIGNED +
 1468                 + nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
 1469         mb = mreq;
 1470         bpos = mtod(mb, caddr_t);
 1471         nfsm_fhtom(dvp, v3);
 1472         nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
 1473         if (v3) {
 1474                 tl = nfsm_build(u_int32_t *, NFSX_UNSIGNED);
 1475                 *tl++ = vtonfsv3_type(vap->va_type);
 1476                 nfsm_v3attrbuild(vap, FALSE);
 1477                 if (vap->va_type == VCHR || vap->va_type == VBLK) {
 1478                         tl = nfsm_build(u_int32_t *, 2 * NFSX_UNSIGNED);
 1479                         *tl++ = txdr_unsigned(major(vap->va_rdev));
 1480                         *tl = txdr_unsigned(minor(vap->va_rdev));
 1481                 }
 1482         } else {
 1483                 sp = nfsm_build(struct nfsv2_sattr *, NFSX_V2SATTR);
 1484                 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
 1485                 sp->sa_uid = nfs_xdrneg1;
 1486                 sp->sa_gid = nfs_xdrneg1;
 1487                 sp->sa_size = rdev;
 1488                 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
 1489                 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
 1490         }
 1491         nfsm_request(dvp, NFSPROC_MKNOD, cnp->cn_thread, cnp->cn_cred);
 1492         if (!error) {
 1493                 nfsm_mtofh(dvp, newvp, v3, gotvp);
 1494                 if (!gotvp) {
 1495                         if (newvp) {
 1496                                 vput(newvp);
 1497                                 newvp = NULL;
 1498                         }
 1499                         error = nfs_lookitup(dvp, cnp->cn_nameptr,
 1500                             cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread, &np);
 1501                         if (!error)
 1502                                 newvp = NFSTOV(np);
 1503                 }
 1504         }
 1505         if (v3)
 1506                 nfsm_wcc_data(dvp, wccflag);
 1507         m_freem(mrep);
 1508 nfsmout:
 1509         if (error) {
 1510                 if (newvp)
 1511                         vput(newvp);
 1512         } else {
 1513                 if (cnp->cn_flags & MAKEENTRY)
 1514                         cache_enter(dvp, newvp, cnp);
 1515                 *vpp = newvp;
 1516         }
 1517         mtx_lock(&(VTONFS(dvp))->n_mtx);
 1518         VTONFS(dvp)->n_flag |= NMODIFIED;
 1519         if (!wccflag) {
 1520                 VTONFS(dvp)->n_attrstamp = 0;
 1521                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
 1522         }
 1523         mtx_unlock(&(VTONFS(dvp))->n_mtx);
 1524         return (error);
 1525 }
 1526 
 1527 /*
 1528  * nfs mknod vop
 1529  * just call nfs_mknodrpc() to do the work.
 1530  */
 1531 /* ARGSUSED */
 1532 static int
 1533 nfs_mknod(struct vop_mknod_args *ap)
 1534 {
 1535         return (nfs_mknodrpc(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap));
 1536 }
 1537 
 1538 static u_long create_verf;
 1539 /*
 1540  * nfs file create call
 1541  */
 1542 static int
 1543 nfs_create(struct vop_create_args *ap)
 1544 {
 1545         struct vnode *dvp = ap->a_dvp;
 1546         struct vattr *vap = ap->a_vap;
 1547         struct componentname *cnp = ap->a_cnp;
 1548         struct nfsv2_sattr *sp;
 1549         u_int32_t *tl;
 1550         struct nfsnode *np = NULL;
 1551         struct vnode *newvp = NULL;
 1552         caddr_t bpos, dpos;
 1553         int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0, fmode = 0;
 1554         struct mbuf *mreq, *mrep, *md, *mb;
 1555         struct vattr vattr;
 1556         int v3 = NFS_ISV3(dvp);
 1557 
 1558         /*
 1559          * Oops, not for me..
 1560          */
 1561         if (vap->va_type == VSOCK)
 1562                 return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap));
 1563 
 1564         if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred)) != 0)
 1565                 return (error);
 1566         if (vap->va_vaflags & VA_EXCLUSIVE)
 1567                 fmode |= O_EXCL;
 1568 again:
 1569         nfsstats.rpccnt[NFSPROC_CREATE]++;
 1570         mreq = nfsm_reqhead(dvp, NFSPROC_CREATE, NFSX_FH(v3) + 2 * NFSX_UNSIGNED +
 1571                 nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
 1572         mb = mreq;
 1573         bpos = mtod(mb, caddr_t);
 1574         nfsm_fhtom(dvp, v3);
 1575         nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
 1576         if (v3) {
 1577                 tl = nfsm_build(u_int32_t *, NFSX_UNSIGNED);
 1578                 if (fmode & O_EXCL) {
 1579                         *tl = txdr_unsigned(NFSV3CREATE_EXCLUSIVE);
 1580                         tl = nfsm_build(u_int32_t *, NFSX_V3CREATEVERF);
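                              /*
                               * Build the 8-byte exclusive-create verifier
                               * from the host's primary IPv4 address (when
                               * one is configured) and a local counter, so
                               * the server can tell a retransmission of this
                               * request from an unrelated create of the same
                               * name.
                               */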
 1581 #ifdef INET
 1582                         CURVNET_SET(CRED_TO_VNET(cnp->cn_cred));
 1583                         IN_IFADDR_RLOCK();
 1584                         if (!TAILQ_EMPTY(&V_in_ifaddrhead))
 1585                                 *tl++ = IA_SIN(TAILQ_FIRST(&V_in_ifaddrhead))->sin_addr.s_addr;
 1586                         else
 1587 #endif
 1588                                 *tl++ = create_verf;
 1589 #ifdef INET
 1590                         IN_IFADDR_RUNLOCK();
 1591                         CURVNET_RESTORE();
 1592 #endif
 1593                         *tl = ++create_verf;
 1594                 } else {
 1595                         *tl = txdr_unsigned(NFSV3CREATE_UNCHECKED);
 1596                         nfsm_v3attrbuild(vap, FALSE);
 1597                 }
 1598         } else {
 1599                 sp = nfsm_build(struct nfsv2_sattr *, NFSX_V2SATTR);
 1600                 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
 1601                 sp->sa_uid = nfs_xdrneg1;
 1602                 sp->sa_gid = nfs_xdrneg1;
 1603                 sp->sa_size = 0;
 1604                 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
 1605                 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
 1606         }
 1607         nfsm_request(dvp, NFSPROC_CREATE, cnp->cn_thread, cnp->cn_cred);
 1608         if (!error) {
 1609                 nfsm_mtofh(dvp, newvp, v3, gotvp);
 1610                 if (!gotvp) {
 1611                         if (newvp) {
 1612                                 vput(newvp);
 1613                                 newvp = NULL;
 1614                         }
 1615                         error = nfs_lookitup(dvp, cnp->cn_nameptr,
 1616                             cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread, &np);
 1617                         if (!error)
 1618                                 newvp = NFSTOV(np);
 1619                 }
 1620         }
 1621         if (v3)
 1622                 nfsm_wcc_data(dvp, wccflag);
 1623         m_freem(mrep);
 1624 nfsmout:
 1625         if (error) {
 1626                 if (v3 && (fmode & O_EXCL) && error == NFSERR_NOTSUPP) {
 1627                         fmode &= ~O_EXCL;
 1628                         goto again;
 1629                 }
 1630                 if (newvp)
 1631                         vput(newvp);
 1632         } else if (v3 && (fmode & O_EXCL)) {
 1633                 /*
 1634                  * We are normally called with only a partially initialized
 1635          * VAP.  Since the NFSv3 spec says that the server may use the
 1636          * file attributes to store the verifier, the spec requires
 1637          * us to do a SETATTR RPC.  FreeBSD servers store the verifier
 1638          * in atime, but we can't really assume that all servers will,
 1639          * so we ensure that our SETATTR sets both atime and mtime.
 1640                  */
 1641                 if (vap->va_mtime.tv_sec == VNOVAL)
 1642                         vfs_timestamp(&vap->va_mtime);
 1643                 if (vap->va_atime.tv_sec == VNOVAL)
 1644                         vap->va_atime = vap->va_mtime;
 1645                 error = nfs_setattrrpc(newvp, vap, cnp->cn_cred);
 1646                 if (error)
 1647                         vput(newvp);
 1648         }
 1649         if (!error) {
 1650                 if (cnp->cn_flags & MAKEENTRY)
 1651                         cache_enter(dvp, newvp, cnp);
 1652                 *ap->a_vpp = newvp;
 1653         }
 1654         mtx_lock(&(VTONFS(dvp))->n_mtx);
 1655         VTONFS(dvp)->n_flag |= NMODIFIED;
 1656         if (!wccflag) {
 1657                 VTONFS(dvp)->n_attrstamp = 0;
 1658                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
 1659         }
 1660         mtx_unlock(&(VTONFS(dvp))->n_mtx);
 1661         return (error);
 1662 }
 1663 
 1664 /*
 1665  * nfs file remove call
 1666  * To try and make nfs semantics closer to ufs semantics, a file that has
 1667  * other processes using the vnode is renamed instead of removed and then
 1668  * removed later on the last close.
 1669  * - If v_usecount > 1
 1670  *       If a rename is not already in the works
 1671  *          call nfs_sillyrename() to set it up
 1672  *   else
 1673  *       do the remove rpc
 1674  */
 1675 static int
 1676 nfs_remove(struct vop_remove_args *ap)
 1677 {
 1678         struct vnode *vp = ap->a_vp;
 1679         struct vnode *dvp = ap->a_dvp;
 1680         struct componentname *cnp = ap->a_cnp;
 1681         struct nfsnode *np = VTONFS(vp);
 1682         int error = 0;
 1683         struct vattr vattr;
 1684 
 1685 #ifndef DIAGNOSTIC
 1686         if ((cnp->cn_flags & HASBUF) == 0)
 1687                 panic("nfs_remove: no name");
 1688         if (vrefcnt(vp) < 1)
 1689                 panic("nfs_remove: bad v_usecount");
 1690 #endif
 1691         if (vp->v_type == VDIR)
 1692                 error = EPERM;
 1693         else if (vrefcnt(vp) == 1 || (np->n_sillyrename &&
 1694             !VOP_GETATTR(vp, &vattr, cnp->cn_cred) && vattr.va_nlink > 1)) {
 1695                 /*
 1696                  * Purge the name cache so that the chance of a lookup for
 1697                  * the name succeeding while the remove is in progress is
 1698                  * minimized. Without node locking it can still happen, such
 1699                  * that an I/O op returns ESTALE, but since ESTALE can occur
 1700                  * anyway when another host removes the file, this is tolerable.
 1701                  */
 1702                 cache_purge(vp);
 1703                 /*
 1704                  * throw away biocache buffers, mainly to avoid
 1705                  * unnecessary delayed writes later.
 1706                  */
 1707                 error = nfs_vinvalbuf(vp, 0, cnp->cn_thread, 1);
 1708                 /* Do the rpc */
 1709                 if (error != EINTR && error != EIO)
 1710                         error = nfs_removerpc(dvp, cnp->cn_nameptr,
 1711                                 cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread);
 1712                 /*
 1713                  * Kludge City: If the first reply to the remove rpc is lost..
 1714                  *   the reply to the retransmitted request will be ENOENT
 1715                  *   since the file was in fact removed.
 1716                  *   Therefore, we cheat and return success.
 1717                  */
 1718                 if (error == ENOENT)
 1719                         error = 0;
 1720         } else if (!np->n_sillyrename)
 1721                 error = nfs_sillyrename(dvp, vp, cnp);
 1722         np->n_attrstamp = 0;
 1723         KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
 1724         return (error);
 1725 }
 1726 
 1727 /*
 1728  * nfs file remove rpc called from nfs_inactive
 1729  */
 1730 int
 1731 nfs_removeit(struct sillyrename *sp)
 1732 {
 1733         /*
 1734          * Make sure that the directory vnode is still valid.
 1735          * XXX we should lock sp->s_dvp here.
 1736          */
 1737         if (sp->s_dvp->v_type == VBAD)
 1738                 return (0);
 1739         return (nfs_removerpc(sp->s_dvp, sp->s_name, sp->s_namlen, sp->s_cred,
 1740                 NULL));
 1741 }
 1742 
 1743 /*
 1744  * Nfs remove rpc, called from nfs_remove() and nfs_removeit().
 1745  */
 1746 static int
 1747 nfs_removerpc(struct vnode *dvp, const char *name, int namelen,
 1748     struct ucred *cred, struct thread *td)
 1749 {
 1750         caddr_t bpos, dpos;
 1751         int error = 0, wccflag = NFSV3_WCCRATTR;
 1752         struct mbuf *mreq, *mrep, *md, *mb;
 1753         int v3 = NFS_ISV3(dvp);
 1754 
 1755         nfsstats.rpccnt[NFSPROC_REMOVE]++;
 1756         mreq = nfsm_reqhead(dvp, NFSPROC_REMOVE,
 1757                 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(namelen));
 1758         mb = mreq;
 1759         bpos = mtod(mb, caddr_t);
 1760         nfsm_fhtom(dvp, v3);
 1761         nfsm_strtom(name, namelen, NFS_MAXNAMLEN);
 1762         nfsm_request(dvp, NFSPROC_REMOVE, td, cred);
 1763         if (v3)
 1764                 nfsm_wcc_data(dvp, wccflag);
 1765         m_freem(mrep);
 1766 nfsmout:
 1767         mtx_lock(&(VTONFS(dvp))->n_mtx);
 1768         VTONFS(dvp)->n_flag |= NMODIFIED;
 1769         if (!wccflag) {
 1770                 VTONFS(dvp)->n_attrstamp = 0;
 1771                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
 1772         }
 1773         mtx_unlock(&(VTONFS(dvp))->n_mtx);
 1774         return (error);
 1775 }
 1776 
 1777 /*
 1778  * nfs file rename call
 1779  */
 1780 static int
 1781 nfs_rename(struct vop_rename_args *ap)
 1782 {
 1783         struct vnode *fvp = ap->a_fvp;
 1784         struct vnode *tvp = ap->a_tvp;
 1785         struct vnode *fdvp = ap->a_fdvp;
 1786         struct vnode *tdvp = ap->a_tdvp;
 1787         struct componentname *tcnp = ap->a_tcnp;
 1788         struct componentname *fcnp = ap->a_fcnp;
 1789         int error;
 1790 
 1791 #ifndef DIAGNOSTIC
 1792         if ((tcnp->cn_flags & HASBUF) == 0 ||
 1793             (fcnp->cn_flags & HASBUF) == 0)
 1794                 panic("nfs_rename: no name");
 1795 #endif
 1796         /* Check for cross-device rename */
 1797         if ((fvp->v_mount != tdvp->v_mount) ||
 1798             (tvp && (fvp->v_mount != tvp->v_mount))) {
 1799                 error = EXDEV;
 1800                 goto out;
 1801         }
 1802 
 1803         if (fvp == tvp) {
 1804                 nfs_printf("nfs_rename: fvp == tvp (can't happen)\n");
 1805                 error = 0;
 1806                 goto out;
 1807         }
 1808         if ((error = vn_lock(fvp, LK_EXCLUSIVE)) != 0)
 1809                 goto out;
 1810 
 1811         /*
 1812          * We have to flush B_DELWRI data prior to renaming
 1813          * the file.  If we don't, the delayed-write buffers
 1814          * can be flushed out later after the file has gone stale
 1815          * under NFSV3.  NFSV2 does not have this problem because
 1816          * ( as far as I can tell ) it flushes dirty buffers more
 1817          * often.
 1818          * 
 1819          * Skip the rename operation if the fsync fails; this can happen
 1820          * when the server's volume is full and we push out data that
 1821          * was written back to our cache earlier. Not checking for
 1822          * this condition can result in potential (silent) data loss.
 1823          */
 1824         error = VOP_FSYNC(fvp, MNT_WAIT, fcnp->cn_thread);
 1825         VOP_UNLOCK(fvp, 0);
 1826         if (!error && tvp)
 1827                 error = VOP_FSYNC(tvp, MNT_WAIT, tcnp->cn_thread);
 1828         if (error)
 1829                 goto out;
 1830 
 1831         /*
 1832          * If the tvp exists and is in use, sillyrename it before doing the
 1833          * rename of the new file over it.
 1834          * XXX Can't sillyrename a directory.
 1835          */
 1836         if (tvp && vrefcnt(tvp) > 1 && !VTONFS(tvp)->n_sillyrename &&
 1837                 tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp)) {
 1838                 vput(tvp);
 1839                 tvp = NULL;
 1840         }
 1841 
 1842         error = nfs_renamerpc(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen,
 1843                 tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred,
 1844                 tcnp->cn_thread);
 1845 
 1846         if (fvp->v_type == VDIR) {
 1847                 if (tvp != NULL && tvp->v_type == VDIR)
 1848                         cache_purge(tdvp);
 1849                 cache_purge(fdvp);
 1850         }
 1851 
 1852 out:
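              /*
               * Release the vnodes handed in by VOP_RENAME: tdvp and tvp
               * arrive locked and referenced and are vput (tdvp is only
               * vrele'd when it is the same vnode as tvp), while fdvp and
               * fvp arrive only referenced and are vrele'd.
               */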
 1853         if (tdvp == tvp)
 1854                 vrele(tdvp);
 1855         else
 1856                 vput(tdvp);
 1857         if (tvp)
 1858                 vput(tvp);
 1859         vrele(fdvp);
 1860         vrele(fvp);
 1861         /*
 1862          * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
 1863          */
 1864         if (error == ENOENT)
 1865                 error = 0;
 1866         return (error);
 1867 }
 1868 
 1869 /*
 1870  * nfs file rename rpc called from nfs_sillyrename() below
 1871  */
 1872 static int
 1873 nfs_renameit(struct vnode *sdvp, struct componentname *scnp,
 1874     struct sillyrename *sp)
 1875 {
 1876 
 1877         return (nfs_renamerpc(sdvp, scnp->cn_nameptr, scnp->cn_namelen, sdvp,
 1878             sp->s_name, sp->s_namlen, scnp->cn_cred, scnp->cn_thread));
 1879 }
 1880 
 1881 /*
 1882  * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit().
 1883  */
 1884 static int
 1885 nfs_renamerpc(struct vnode *fdvp, const char *fnameptr, int fnamelen,
 1886     struct vnode *tdvp, const char *tnameptr, int tnamelen, struct ucred *cred,
 1887     struct thread *td)
 1888 {
 1889         caddr_t bpos, dpos;
 1890         int error = 0, fwccflag = NFSV3_WCCRATTR, twccflag = NFSV3_WCCRATTR;
 1891         struct mbuf *mreq, *mrep, *md, *mb;
 1892         int v3 = NFS_ISV3(fdvp);
 1893 
 1894         nfsstats.rpccnt[NFSPROC_RENAME]++;
 1895         mreq = nfsm_reqhead(fdvp, NFSPROC_RENAME,
 1896                 (NFSX_FH(v3) + NFSX_UNSIGNED)*2 + nfsm_rndup(fnamelen) +
 1897                 nfsm_rndup(tnamelen));
 1898         mb = mreq;
 1899         bpos = mtod(mb, caddr_t);
 1900         nfsm_fhtom(fdvp, v3);
 1901         nfsm_strtom(fnameptr, fnamelen, NFS_MAXNAMLEN);
 1902         nfsm_fhtom(tdvp, v3);
 1903         nfsm_strtom(tnameptr, tnamelen, NFS_MAXNAMLEN);
 1904         nfsm_request(fdvp, NFSPROC_RENAME, td, cred);
 1905         if (v3) {
 1906                 nfsm_wcc_data(fdvp, fwccflag);
 1907                 nfsm_wcc_data(tdvp, twccflag);
 1908         }
 1909         m_freem(mrep);
 1910 nfsmout:
 1911         mtx_lock(&(VTONFS(fdvp))->n_mtx);
 1912         VTONFS(fdvp)->n_flag |= NMODIFIED;
 1913         mtx_unlock(&(VTONFS(fdvp))->n_mtx);
 1914         mtx_lock(&(VTONFS(tdvp))->n_mtx);
 1915         VTONFS(tdvp)->n_flag |= NMODIFIED;
 1916         mtx_unlock(&(VTONFS(tdvp))->n_mtx);
 1917         if (!fwccflag) {
 1918                 VTONFS(fdvp)->n_attrstamp = 0;
 1919                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(fdvp);
 1920         }
 1921         if (!twccflag) {
 1922                 VTONFS(tdvp)->n_attrstamp = 0;
 1923                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(tdvp);
 1924         }
 1925         return (error);
 1926 }
 1927 
 1928 /*
 1929  * nfs hard link create call
 1930  */
 1931 static int
 1932 nfs_link(struct vop_link_args *ap)
 1933 {
 1934         struct vnode *vp = ap->a_vp;
 1935         struct vnode *tdvp = ap->a_tdvp;
 1936         struct componentname *cnp = ap->a_cnp;
 1937         caddr_t bpos, dpos;
 1938         int error = 0, wccflag = NFSV3_WCCRATTR, attrflag = 0;
 1939         struct mbuf *mreq, *mrep, *md, *mb;
 1940         int v3;
 1941 
 1942         if (vp->v_mount != tdvp->v_mount) {
 1943                 return (EXDEV);
 1944         }
 1945 
 1946         /*
 1947          * Push all writes to the server, so that the attribute cache
 1948          * doesn't get "out of sync" with the server.
 1949          * XXX There should be a better way!
 1950          */
 1951         VOP_FSYNC(vp, MNT_WAIT, cnp->cn_thread);
 1952 
 1953         v3 = NFS_ISV3(vp);
 1954         nfsstats.rpccnt[NFSPROC_LINK]++;
 1955         mreq = nfsm_reqhead(vp, NFSPROC_LINK,
 1956                 NFSX_FH(v3)*2 + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
 1957         mb = mreq;
 1958         bpos = mtod(mb, caddr_t);
 1959         nfsm_fhtom(vp, v3);
 1960         nfsm_fhtom(tdvp, v3);
 1961         nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
 1962         nfsm_request(vp, NFSPROC_LINK, cnp->cn_thread, cnp->cn_cred);
 1963         if (v3) {
 1964                 nfsm_postop_attr(vp, attrflag);
 1965                 nfsm_wcc_data(tdvp, wccflag);
 1966         }
 1967         m_freem(mrep);
 1968 nfsmout:
 1969         mtx_lock(&(VTONFS(tdvp))->n_mtx);
 1970         VTONFS(tdvp)->n_flag |= NMODIFIED;
 1971         mtx_unlock(&(VTONFS(tdvp))->n_mtx);
 1972         if (!attrflag) {
 1973                 VTONFS(vp)->n_attrstamp = 0;
 1974                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
 1975         }
 1976         if (!wccflag) {
 1977                 VTONFS(tdvp)->n_attrstamp = 0;
 1978                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(tdvp);
 1979         }
 1980         return (error);
 1981 }
 1982 
 1983 /*
 1984  * nfs symbolic link create call
 1985  */
 1986 static int
 1987 nfs_symlink(struct vop_symlink_args *ap)
 1988 {
 1989         struct vnode *dvp = ap->a_dvp;
 1990         struct vattr *vap = ap->a_vap;
 1991         struct componentname *cnp = ap->a_cnp;
 1992         struct nfsv2_sattr *sp;
 1993         caddr_t bpos, dpos;
 1994         int slen, error = 0, wccflag = NFSV3_WCCRATTR, gotvp;
 1995         struct mbuf *mreq, *mrep, *md, *mb;
 1996         struct vnode *newvp = NULL;
 1997         int v3 = NFS_ISV3(dvp);
 1998 
 1999         nfsstats.rpccnt[NFSPROC_SYMLINK]++;
 2000         slen = strlen(ap->a_target);
 2001         mreq = nfsm_reqhead(dvp, NFSPROC_SYMLINK, NFSX_FH(v3) + 2*NFSX_UNSIGNED +
 2002             nfsm_rndup(cnp->cn_namelen) + nfsm_rndup(slen) + NFSX_SATTR(v3));
 2003         mb = mreq;
 2004         bpos = mtod(mb, caddr_t);
 2005         nfsm_fhtom(dvp, v3);
 2006         nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
 2007         if (v3) {
 2008                 nfsm_v3attrbuild(vap, FALSE);
 2009         }
 2010         nfsm_strtom(ap->a_target, slen, NFS_MAXPATHLEN);
 2011         if (!v3) {
 2012                 sp = nfsm_build(struct nfsv2_sattr *, NFSX_V2SATTR);
 2013                 sp->sa_mode = vtonfsv2_mode(VLNK, vap->va_mode);
 2014                 sp->sa_uid = nfs_xdrneg1;
 2015                 sp->sa_gid = nfs_xdrneg1;
 2016                 sp->sa_size = nfs_xdrneg1;
 2017                 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
 2018                 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
 2019         }
 2020 
 2021         /*
 2022          * Issue the NFS request and get the rpc response.
 2023          *
 2024          * Only NFSv3 responses returning an error of 0 actually return
 2025          * a file handle that can be converted into newvp without having
 2026          * to do an extra lookup rpc.
 2027          */
 2028         nfsm_request(dvp, NFSPROC_SYMLINK, cnp->cn_thread, cnp->cn_cred);
 2029         if (v3) {
 2030                 if (error == 0)
 2031                         nfsm_mtofh(dvp, newvp, v3, gotvp);
 2032                 nfsm_wcc_data(dvp, wccflag);
 2033         }
 2034 
 2035         /*
 2036          * Error exits from the nfsm_* macros jump to nfsmout below; mrep is also freed on those paths.
 2037          */
 2038 
 2039         m_freem(mrep);
 2040 nfsmout:
 2041 
 2042         /*
 2043          * If we do not have an error and we could not extract the newvp from
 2044          * the response due to the request being NFSv2, we have to do a
 2045          * lookup in order to obtain a newvp to return.
 2046          */
 2047         if (error == 0 && newvp == NULL) {
 2048                 struct nfsnode *np = NULL;
 2049 
 2050                 error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
 2051                     cnp->cn_cred, cnp->cn_thread, &np);
 2052                 if (!error)
 2053                         newvp = NFSTOV(np);
 2054         }
 2055         if (error) {
 2056                 if (newvp)
 2057                         vput(newvp);
 2058         } else {
 2059                 *ap->a_vpp = newvp;
 2060         }
 2061         mtx_lock(&(VTONFS(dvp))->n_mtx);
 2062         VTONFS(dvp)->n_flag |= NMODIFIED;
 2063         mtx_unlock(&(VTONFS(dvp))->n_mtx);
 2064         if (!wccflag) {
 2065                 VTONFS(dvp)->n_attrstamp = 0;
 2066                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
 2067         }
 2068         return (error);
 2069 }
 2070 
 2071 /*
 2072  * nfs make dir call
 2073  */
 2074 static int
 2075 nfs_mkdir(struct vop_mkdir_args *ap)
 2076 {
 2077         struct vnode *dvp = ap->a_dvp;
 2078         struct vattr *vap = ap->a_vap;
 2079         struct componentname *cnp = ap->a_cnp;
 2080         struct nfsv2_sattr *sp;
 2081         int len;
 2082         struct nfsnode *np = NULL;
 2083         struct vnode *newvp = NULL;
 2084         caddr_t bpos, dpos;
 2085         int error = 0, wccflag = NFSV3_WCCRATTR;
 2086         int gotvp = 0;
 2087         struct mbuf *mreq, *mrep, *md, *mb;
 2088         struct vattr vattr;
 2089         int v3 = NFS_ISV3(dvp);
 2090 
 2091         if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred)) != 0)
 2092                 return (error);
 2093         len = cnp->cn_namelen;
 2094         nfsstats.rpccnt[NFSPROC_MKDIR]++;
 2095         mreq = nfsm_reqhead(dvp, NFSPROC_MKDIR,
 2096           NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len) + NFSX_SATTR(v3));
 2097         mb = mreq;
 2098         bpos = mtod(mb, caddr_t);
 2099         nfsm_fhtom(dvp, v3);
 2100         nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
 2101         if (v3) {
 2102                 nfsm_v3attrbuild(vap, FALSE);
 2103         } else {
 2104                 sp = nfsm_build(struct nfsv2_sattr *, NFSX_V2SATTR);
 2105                 sp->sa_mode = vtonfsv2_mode(VDIR, vap->va_mode);
 2106                 sp->sa_uid = nfs_xdrneg1;
 2107                 sp->sa_gid = nfs_xdrneg1;
 2108                 sp->sa_size = nfs_xdrneg1;
 2109                 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
 2110                 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
 2111         }
 2112         nfsm_request(dvp, NFSPROC_MKDIR, cnp->cn_thread, cnp->cn_cred);
 2113         if (!error)
 2114                 nfsm_mtofh(dvp, newvp, v3, gotvp);
 2115         if (v3)
 2116                 nfsm_wcc_data(dvp, wccflag);
 2117         m_freem(mrep);
 2118 nfsmout:
 2119         mtx_lock(&(VTONFS(dvp))->n_mtx);
 2120         VTONFS(dvp)->n_flag |= NMODIFIED;
 2121         mtx_unlock(&(VTONFS(dvp))->n_mtx);
 2122         if (!wccflag) {
 2123                 VTONFS(dvp)->n_attrstamp = 0;
 2124                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
 2125         }
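              /*
               * If the reply did not yield a vnode for the new directory,
               * look it up by name.  Finding something other than a
               * directory there means the create raced with someone else,
               * so report EEXIST.
               */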
 2126         if (error == 0 && newvp == NULL) {
 2127                 error = nfs_lookitup(dvp, cnp->cn_nameptr, len, cnp->cn_cred,
 2128                         cnp->cn_thread, &np);
 2129                 if (!error) {
 2130                         newvp = NFSTOV(np);
 2131                         if (newvp->v_type != VDIR)
 2132                                 error = EEXIST;
 2133                 }
 2134         }
 2135         if (error) {
 2136                 if (newvp)
 2137                         vput(newvp);
 2138         } else
 2139                 *ap->a_vpp = newvp;
 2140         return (error);
 2141 }
 2142 
 2143 /*
 2144  * nfs remove directory call
 2145  */
 2146 static int
 2147 nfs_rmdir(struct vop_rmdir_args *ap)
 2148 {
 2149         struct vnode *vp = ap->a_vp;
 2150         struct vnode *dvp = ap->a_dvp;
 2151         struct componentname *cnp = ap->a_cnp;
 2152         caddr_t bpos, dpos;
 2153         int error = 0, wccflag = NFSV3_WCCRATTR;
 2154         struct mbuf *mreq, *mrep, *md, *mb;
 2155         int v3 = NFS_ISV3(dvp);
 2156 
 2157         if (dvp == vp)
 2158                 return (EINVAL);
 2159         nfsstats.rpccnt[NFSPROC_RMDIR]++;
 2160         mreq = nfsm_reqhead(dvp, NFSPROC_RMDIR,
 2161                 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
 2162         mb = mreq;
 2163         bpos = mtod(mb, caddr_t);
 2164         nfsm_fhtom(dvp, v3);
 2165         nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
 2166         nfsm_request(dvp, NFSPROC_RMDIR, cnp->cn_thread, cnp->cn_cred);
 2167         if (v3)
 2168                 nfsm_wcc_data(dvp, wccflag);
 2169         m_freem(mrep);
 2170 nfsmout:
 2171         mtx_lock(&(VTONFS(dvp))->n_mtx);
 2172         VTONFS(dvp)->n_flag |= NMODIFIED;
 2173         mtx_unlock(&(VTONFS(dvp))->n_mtx);
 2174         if (!wccflag) {
 2175                 VTONFS(dvp)->n_attrstamp = 0;
 2176                 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
 2177         }
 2178         cache_purge(dvp);
 2179         cache_purge(vp);
 2180         /*
 2181          * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry.
 2182          */
 2183         if (error == ENOENT)
 2184                 error = 0;
 2185         return (error);
 2186 }
 2187 
 2188 /*
 2189  * nfs readdir call
 2190  */
 2191 static int
 2192 nfs_readdir(struct vop_readdir_args *ap)
 2193 {
 2194         struct vnode *vp = ap->a_vp;
 2195         struct nfsnode *np = VTONFS(vp);
 2196         struct uio *uio = ap->a_uio;
 2197         int tresid, error = 0;
 2198         struct vattr vattr;
 2199         
 2200         if (vp->v_type != VDIR) 
 2201                 return(EPERM);
 2202 
 2203         /*
 2204          * First, check for hit on the EOF offset cache
 2205          */
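              /*
               * A hit means the caller is at or past the cached
               * end-of-directory offset, nothing has been modified locally,
               * and a fresh GETATTR shows an unchanged mtime, so EOF can be
               * reported without issuing any readdir RPCs.
               */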
 2206         if (np->n_direofoffset > 0 && uio->uio_offset >= np->n_direofoffset &&
 2207             (np->n_flag & NMODIFIED) == 0) {
 2208                 if (VOP_GETATTR(vp, &vattr, ap->a_cred) == 0) {
 2209                         mtx_lock(&np->n_mtx);
 2210                         if (!NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime)) {
 2211                                 mtx_unlock(&np->n_mtx);
 2212                                 nfsstats.direofcache_hits++;
 2213                                 goto out;
 2214                         } else
 2215                                 mtx_unlock(&np->n_mtx);
 2216                 }
 2217         }
 2218 
 2219         /*
 2220          * Call nfs_bioread() to do the real work.
 2221          */
 2222         tresid = uio->uio_resid;
 2223         error = nfs_bioread(vp, uio, 0, ap->a_cred);
 2224 
 2225         if (!error && uio->uio_resid == tresid) {
 2226                 nfsstats.direofcache_misses++;
 2227         }
 2228 out:
 2229         return (error);
 2230 }
 2231 
 2232 /*
 2233  * Readdir rpc call.
 2234  * Called from below the buffer cache by nfs_doio().
 2235  */
 2236 int
 2237 nfs_readdirrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred)
 2238 {
 2239         int len, left;
 2240         struct dirent *dp = NULL;
 2241         u_int32_t *tl;
 2242         caddr_t cp;
 2243         nfsuint64 *cookiep;
 2244         caddr_t bpos, dpos;
 2245         struct mbuf *mreq, *mrep, *md, *mb;
 2246         nfsuint64 cookie;
 2247         struct nfsmount *nmp = VFSTONFS(vp->v_mount);
 2248         struct nfsnode *dnp = VTONFS(vp);
 2249         u_quad_t fileno;
 2250         int error = 0, tlen, more_dirs = 1, blksiz = 0, bigenough = 1;
 2251         int attrflag;
 2252         int v3 = NFS_ISV3(vp);
 2253 
 2254 #ifndef DIAGNOSTIC
 2255         if (uiop->uio_iovcnt != 1 || (uiop->uio_offset & (DIRBLKSIZ - 1)) ||
 2256                 (uiop->uio_resid & (DIRBLKSIZ - 1)))
 2257                 panic("nfs readdirrpc bad uio");
 2258 #endif
 2259 
 2260         /*
 2261          * If there is no cookie, assume directory was stale.
 2262          */
 2263         nfs_dircookie_lock(dnp);
 2264         cookiep = nfs_getcookie(dnp, uiop->uio_offset, 0);
 2265         if (cookiep) {
 2266                 cookie = *cookiep;
 2267                 nfs_dircookie_unlock(dnp);
 2268         } else {
 2269                 nfs_dircookie_unlock(dnp);              
 2270                 return (NFSERR_BAD_COOKIE);
 2271         }
 2272 
 2273         /*
 2274          * Loop around doing readdir rpc's of size nm_readdirsize
 2275          * truncated to a multiple of DIRBLKSIZ.
 2276  * The stopping criterion is EOF or a full buffer.
 2277          */
 2278         while (more_dirs && bigenough) {
 2279                 nfsstats.rpccnt[NFSPROC_READDIR]++;
 2280                 mreq = nfsm_reqhead(vp, NFSPROC_READDIR, NFSX_FH(v3) +
 2281                         NFSX_READDIR(v3));
 2282                 mb = mreq;
 2283                 bpos = mtod(mb, caddr_t);
 2284                 nfsm_fhtom(vp, v3);
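                      /*
                       * v3 READDIR arguments: the 64-bit cookie, the cookie
                       * verifier returned by the previous reply, and the
                       * count.  v2 takes only a 32-bit cookie and the count.
                       */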
 2285                 if (v3) {
 2286                         tl = nfsm_build(u_int32_t *, 5 * NFSX_UNSIGNED);
 2287                         *tl++ = cookie.nfsuquad[0];
 2288                         *tl++ = cookie.nfsuquad[1];
 2289                         mtx_lock(&dnp->n_mtx);
 2290                         *tl++ = dnp->n_cookieverf.nfsuquad[0];
 2291                         *tl++ = dnp->n_cookieverf.nfsuquad[1];
 2292                         mtx_unlock(&dnp->n_mtx);
 2293                 } else {
 2294                         tl = nfsm_build(u_int32_t *, 2 * NFSX_UNSIGNED);
 2295                         *tl++ = cookie.nfsuquad[0];
 2296                 }
 2297                 *tl = txdr_unsigned(nmp->nm_readdirsize);
 2298                 nfsm_request(vp, NFSPROC_READDIR, uiop->uio_td, cred);
 2299                 if (v3) {
 2300                         nfsm_postop_attr(vp, attrflag);
 2301                         if (!error) {
 2302                                 tl = nfsm_dissect(u_int32_t *,
 2303                                     2 * NFSX_UNSIGNED);
 2304                                 mtx_lock(&dnp->n_mtx);
 2305                                 dnp->n_cookieverf.nfsuquad[0] = *tl++;
 2306                                 dnp->n_cookieverf.nfsuquad[1] = *tl;
 2307                                 mtx_unlock(&dnp->n_mtx);
 2308                         } else {
 2309                                 m_freem(mrep);
 2310                                 goto nfsmout;
 2311                         }
 2312                 }
 2313                 tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
 2314                 more_dirs = fxdr_unsigned(int, *tl);
 2315 
 2316                 /* loop thru the dir entries, doctoring them to 4bsd form */
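                      /*
                       * Each wire entry becomes a struct dirent packed into
                       * DIRBLKSIZ-sized chunks; when the next entry will not
                       * fit in the current chunk, the previous record's
                       * d_reclen is grown so that it pads out the rest of
                       * the chunk.
                       */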
 2317                 while (more_dirs && bigenough) {
 2318                         if (v3) {
 2319                                 tl = nfsm_dissect(u_int32_t *,
 2320                                     3 * NFSX_UNSIGNED);
 2321                                 fileno = fxdr_hyper(tl);
 2322                                 len = fxdr_unsigned(int, *(tl + 2));
 2323                         } else {
 2324                                 tl = nfsm_dissect(u_int32_t *,
 2325                                     2 * NFSX_UNSIGNED);
 2326                                 fileno = fxdr_unsigned(u_quad_t, *tl++);
 2327                                 len = fxdr_unsigned(int, *tl);
 2328                         }
 2329                         if (len <= 0 || len > NFS_MAXNAMLEN) {
 2330                                 error = EBADRPC;
 2331                                 m_freem(mrep);
 2332                                 goto nfsmout;
 2333                         }
 2334                         tlen = nfsm_rndup(len);
 2335                         if (tlen == len)
 2336                                 tlen += 4;      /* To ensure null termination */
 2337                         left = DIRBLKSIZ - blksiz;
 2338                         if ((tlen + DIRHDSIZ) > left) {
 2339                                 dp->d_reclen += left;
 2340                                 uiop->uio_iov->iov_base =
 2341                                     (char *)uiop->uio_iov->iov_base + left;
 2342                                 uiop->uio_iov->iov_len -= left;
 2343                                 uiop->uio_offset += left;
 2344                                 uiop->uio_resid -= left;
 2345                                 blksiz = 0;
 2346                         }
 2347                         if ((tlen + DIRHDSIZ) > uiop->uio_resid)
 2348                                 bigenough = 0;
 2349                         if (bigenough) {
 2350                                 dp = (struct dirent *)uiop->uio_iov->iov_base;
 2351                                 dp->d_fileno = (int)fileno;
 2352                                 dp->d_namlen = len;
 2353                                 dp->d_reclen = tlen + DIRHDSIZ;
 2354                                 dp->d_type = DT_UNKNOWN;
 2355                                 blksiz += dp->d_reclen;
 2356                                 if (blksiz == DIRBLKSIZ)
 2357                                         blksiz = 0;
 2358                                 uiop->uio_offset += DIRHDSIZ;
 2359                                 uiop->uio_resid -= DIRHDSIZ;
 2360                                 uiop->uio_iov->iov_base =
 2361                                     (char *)uiop->uio_iov->iov_base + DIRHDSIZ;
 2362                                 uiop->uio_iov->iov_len -= DIRHDSIZ;
 2363                                 nfsm_mtouio(uiop, len);
 2364                                 cp = uiop->uio_iov->iov_base;
 2365                                 tlen -= len;
 2366                                 *cp = '\0';     /* null terminate */
 2367                                 uiop->uio_iov->iov_base =
 2368                                     (char *)uiop->uio_iov->iov_base + tlen;
 2369                                 uiop->uio_iov->iov_len -= tlen;
 2370                                 uiop->uio_offset += tlen;
 2371                                 uiop->uio_resid -= tlen;
 2372                         } else
 2373                                 nfsm_adv(nfsm_rndup(len));
 2374                         if (v3) {
 2375                                 tl = nfsm_dissect(u_int32_t *,
 2376                                     3 * NFSX_UNSIGNED);
 2377                         } else {
 2378                                 tl = nfsm_dissect(u_int32_t *,
 2379                                     2 * NFSX_UNSIGNED);
 2380                         }
 2381                         if (bigenough) {
 2382                                 cookie.nfsuquad[0] = *tl++;
 2383                                 if (v3)
 2384                                         cookie.nfsuquad[1] = *tl++;
 2385                         } else if (v3)
 2386                                 tl += 2;
 2387                         else
 2388                                 tl++;
 2389                         more_dirs = fxdr_unsigned(int, *tl);
 2390                 }
 2391                 /*
 2392                  * If at end of rpc data, get the eof boolean
 2393                  */
 2394                 if (!more_dirs) {
 2395                         tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
 2396                         more_dirs = (fxdr_unsigned(int, *tl) == 0);
 2397                 }
 2398                 m_freem(mrep);
 2399         }
 2400         /*
 2401          * Fill last record, iff any, out to a multiple of DIRBLKSIZ
 2402          * by increasing d_reclen for the last record.
 2403          */
 2404         if (blksiz > 0) {
 2405                 left = DIRBLKSIZ - blksiz;
 2406                 dp->d_reclen += left;
 2407                 uiop->uio_iov->iov_base =
 2408                     (char *)uiop->uio_iov->iov_base + left;
 2409                 uiop->uio_iov->iov_len -= left;
 2410                 uiop->uio_offset += left;
 2411                 uiop->uio_resid -= left;
 2412         }
 2413 
 2414         /*
 2415          * We are now either at the end of the directory or have filled the
 2416          * block.
 2417          */
 2418         if (bigenough)
 2419                 dnp->n_direofoffset = uiop->uio_offset;
 2420         else {
 2421                 if (uiop->uio_resid > 0)
 2422                         nfs_printf("EEK! readdirrpc resid > 0\n");
 2423                 nfs_dircookie_lock(dnp);
 2424                 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 1);
 2425                 *cookiep = cookie;
 2426                 nfs_dircookie_unlock(dnp);
 2427         }
 2428 nfsmout:
 2429         return (error);
 2430 }
 2431 
 2432 /*
 2433  * NFS V3 readdir plus RPC. Used in place of nfs_readdirrpc().
 2434  */
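      /*
       * Besides the names themselves, a READDIRPLUS reply may carry post-op
       * attributes and a file handle for each entry, which lets the client
       * set up vnodes and caches without a separate lookup RPC per name.
       */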
 2435 int
 2436 nfs_readdirplusrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred)
 2437 {
 2438         int len, left;
 2439         struct dirent *dp;
 2440         u_int32_t *tl;
 2441         caddr_t cp;
 2442         struct vnode *newvp;
 2443         nfsuint64 *cookiep;
 2444         caddr_t bpos, dpos, dpossav1, dpossav2;
 2445         struct mbuf *mreq, *mrep, *md, *mb, *mdsav1, *mdsav2;
 2446         struct nameidata nami, *ndp = &nami;
 2447         struct componentname *cnp = &ndp->ni_cnd;
 2448         nfsuint64 cookie;
 2449         struct nfsmount *nmp = VFSTONFS(vp->v_mount);
 2450         struct nfsnode *dnp = VTONFS(vp), *np;
 2451         nfsfh_t *fhp;
 2452         u_quad_t fileno;
 2453         int error = 0, tlen, more_dirs = 1, blksiz = 0, doit, bigenough = 1, i;
 2454         int attrflag, fhsize;
 2455 
 2456 #ifndef nolint
 2457         dp = NULL;
 2458 #endif
 2459 #ifndef DIAGNOSTIC
 2460         if (uiop->uio_iovcnt != 1 || (uiop->uio_offset & (DIRBLKSIZ - 1)) ||
 2461                 (uiop->uio_resid & (DIRBLKSIZ - 1)))
 2462                 panic("nfs readdirplusrpc bad uio");
 2463 #endif
 2464         ndp->ni_dvp = vp;
 2465         newvp = NULLVP;
 2466 
 2467         /*
 2468          * If there is no cookie, assume directory was stale.
 2469          */
 2470         nfs_dircookie_lock(dnp);
 2471         cookiep = nfs_getcookie(dnp, uiop->uio_offset, 0);
 2472         if (cookiep) {
 2473                 cookie = *cookiep;
 2474                 nfs_dircookie_unlock(dnp);
 2475         } else {
 2476                 nfs_dircookie_unlock(dnp);
 2477                 return (NFSERR_BAD_COOKIE);
 2478         }
 2479         /*
 2480          * Loop around doing readdir rpc's of size nm_readdirsize
 2481          * truncated to a multiple of DIRBLKSIZ.
 2482  * The stopping criterion is EOF or a full buffer.
 2483          */
 2484         while (more_dirs && bigenough) {
 2485                 nfsstats.rpccnt[NFSPROC_READDIRPLUS]++;
 2486                 mreq = nfsm_reqhead(vp, NFSPROC_READDIRPLUS,
 2487                         NFSX_FH(1) + 6 * NFSX_UNSIGNED);
 2488                 mb = mreq;
 2489                 bpos = mtod(mb, caddr_t);
 2490                 nfsm_fhtom(vp, 1);
 2491                 tl = nfsm_build(u_int32_t *, 6 * NFSX_UNSIGNED);
 2492                 *tl++ = cookie.nfsuquad[0];
 2493                 *tl++ = cookie.nfsuquad[1];
 2494                 mtx_lock(&dnp->n_mtx);
 2495                 *tl++ = dnp->n_cookieverf.nfsuquad[0];
 2496                 *tl++ = dnp->n_cookieverf.nfsuquad[1];
 2497                 mtx_unlock(&dnp->n_mtx);
 2498                 *tl++ = txdr_unsigned(nmp->nm_readdirsize);
 2499                 *tl = txdr_unsigned(nmp->nm_rsize);
 2500                 nfsm_request(vp, NFSPROC_READDIRPLUS, uiop->uio_td, cred);
 2501                 nfsm_postop_attr(vp, attrflag);
 2502                 if (error) {
 2503                         m_freem(mrep);
 2504                         goto nfsmout;
 2505                 }
 2506                 tl = nfsm_dissect(u_int32_t *, 3 * NFSX_UNSIGNED);
 2507                 mtx_lock(&dnp->n_mtx);
 2508                 dnp->n_cookieverf.nfsuquad[0] = *tl++;
 2509                 dnp->n_cookieverf.nfsuquad[1] = *tl++;
 2510                 mtx_unlock(&dnp->n_mtx);
 2511                 more_dirs = fxdr_unsigned(int, *tl);
 2512 
 2513                 /* loop thru the dir entries, doctoring them to 4bsd form */
 2514                 while (more_dirs && bigenough) {
 2515                         tl = nfsm_dissect(u_int32_t *, 3 * NFSX_UNSIGNED);
 2516                         fileno = fxdr_hyper(tl);
 2517                         len = fxdr_unsigned(int, *(tl + 2));
 2518                         if (len <= 0 || len > NFS_MAXNAMLEN) {
 2519                                 error = EBADRPC;
 2520                                 m_freem(mrep);
 2521                                 goto nfsmout;
 2522                         }
 2523                         tlen = nfsm_rndup(len);
 2524                         if (tlen == len)
 2525                                 tlen += 4;      /* To ensure null termination*/
 2526                         left = DIRBLKSIZ - blksiz;
 2527                         if ((tlen + DIRHDSIZ) > left) {
 2528                                 dp->d_reclen += left;
 2529                                 uiop->uio_iov->iov_base =
 2530                                     (char *)uiop->uio_iov->iov_base + left;
 2531                                 uiop->uio_iov->iov_len -= left;
 2532                                 uiop->uio_offset += left;
 2533                                 uiop->uio_resid -= left;
 2534                                 blksiz = 0;
 2535                         }
 2536                         if ((tlen + DIRHDSIZ) > uiop->uio_resid)
 2537                                 bigenough = 0;
 2538                         if (bigenough) {
 2539                                 dp = (struct dirent *)uiop->uio_iov->iov_base;
 2540                                 dp->d_fileno = (int)fileno;
 2541                                 dp->d_namlen = len;
 2542                                 dp->d_reclen = tlen + DIRHDSIZ;
 2543                                 dp->d_type = DT_UNKNOWN;
 2544                                 blksiz += dp->d_reclen;
 2545                                 if (blksiz == DIRBLKSIZ)
 2546                                         blksiz = 0;
 2547                                 uiop->uio_offset += DIRHDSIZ;
 2548                                 uiop->uio_resid -= DIRHDSIZ;
 2549                                 uiop->uio_iov->iov_base =
 2550                                     (char *)uiop->uio_iov->iov_base + DIRHDSIZ;
 2551                                 uiop->uio_iov->iov_len -= DIRHDSIZ;
 2552                                 cnp->cn_nameptr = uiop->uio_iov->iov_base;
 2553                                 cnp->cn_namelen = len;
 2554                                 nfsm_mtouio(uiop, len);
 2555                                 cp = uiop->uio_iov->iov_base;
 2556                                 tlen -= len;
 2557                                 *cp = '\0';
 2558                                 uiop->uio_iov->iov_base =
 2559                                     (char *)uiop->uio_iov->iov_base + tlen;
 2560                                 uiop->uio_iov->iov_len -= tlen;
 2561                                 uiop->uio_offset += tlen;
 2562                                 uiop->uio_resid -= tlen;
 2563                         } else
 2564                                 nfsm_adv(nfsm_rndup(len));
 2565                         tl = nfsm_dissect(u_int32_t *, 3 * NFSX_UNSIGNED);
 2566                         if (bigenough) {
 2567                                 cookie.nfsuquad[0] = *tl++;
 2568                                 cookie.nfsuquad[1] = *tl++;
 2569                         } else
 2570                                 tl += 2;
 2571 
 2572                         /*
 2573                          * Since the attributes are before the file handle
 2574                          * (sigh), we must skip over the attributes and then
 2575                          * come back and get them.
 2576                          */
 2577                         attrflag = fxdr_unsigned(int, *tl);
 2578                         if (attrflag) {
 2579                             dpossav1 = dpos;
 2580                             mdsav1 = md;
 2581                             nfsm_adv(NFSX_V3FATTR);
 2582                             tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
 2583                             doit = fxdr_unsigned(int, *tl);
 2584                             /*
 2585                              * Skip loading the attrs for "..".  There is a
 2586                              * lock-order problem between loading the attrs
 2587                              * here and lookups of the directory currently
 2588                              * being read (in its parent), which can deadlock:
 2589                              * we would take the exclusive lock on ".." here
 2590                              * while holding the lock on the directory being
 2591                              * read, while a lookup holds the lock on ".." and
 2592                              * tries to take the lock on the directory being read.
 2593                              * 
 2594                              * There are other ways of fixing this; one would
 2595                              * be to do a trylock on the ".." vnode and skip
 2596                              * loading the attrs on ".." if it happens to be 
 2597                              * locked by another process. But skipping the
 2598                              * attr load on ".." seems the easiest option.
 2599                              */
 2600                             if (strcmp(dp->d_name, "..") == 0) {
 2601                                     doit = 0;
 2602                                     /*
 2603                                      * We've already skipped over the attrs;
 2604                                      * skip over the filehandle too, and store
 2605                                      * d_type as VDIR.
 2606                                      */
 2607                                     tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
 2608                                     i = fxdr_unsigned(int, *tl);
 2609                                     nfsm_adv(nfsm_rndup(i));
 2610                                     dp->d_type = IFTODT(VTTOIF(VDIR));
 2611                             }       
 2612                             if (doit) {
 2613                                 nfsm_getfh(fhp, fhsize, 1);
 2614                                 if (NFS_CMPFH(dnp, fhp, fhsize)) {
 2615                                     VREF(vp);
 2616                                     newvp = vp;
 2617                                     np = dnp;
 2618                                 } else {
 2619                                     error = nfs_nget(vp->v_mount, fhp,
 2620                                         fhsize, &np, LK_EXCLUSIVE);
 2621                                     if (error)
 2622                                         doit = 0;
 2623                                     else
 2624                                         newvp = NFSTOV(np);
 2625                                 }
 2626                             }
 2627                             if (doit && bigenough) {
 2628                                 dpossav2 = dpos;
 2629                                 dpos = dpossav1;
 2630                                 mdsav2 = md;
 2631                                 md = mdsav1;
 2632                                 nfsm_loadattr(newvp, NULL);
 2633                                 dpos = dpossav2;
 2634                                 md = mdsav2;
 2635                                 dp->d_type =
 2636                                     IFTODT(VTTOIF(np->n_vattr.va_type));
 2637                                 ndp->ni_vp = newvp;
 2638                                 /* Update n_ctime, so subsequent lookup doesn't purge entry */
 2639                                 np->n_ctime = np->n_vattr.va_ctime.tv_sec;
 2640                                 cache_enter(ndp->ni_dvp, ndp->ni_vp, cnp);
 2641                             }
 2642                         } else {
 2643                             /* Just skip over the file handle */
 2644                             tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
 2645                             i = fxdr_unsigned(int, *tl);
 2646                             if (i) {
 2647                                     tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
 2648                                     fhsize = fxdr_unsigned(int, *tl);
 2649                                     nfsm_adv(nfsm_rndup(fhsize));
 2650                             }
 2651                         }
 2652                         if (newvp != NULLVP) {
 2653                             if (newvp == vp)
 2654                                 vrele(newvp);
 2655                             else
 2656                                 vput(newvp);
 2657                             newvp = NULLVP;
 2658                         }
 2659                         tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
 2660                         more_dirs = fxdr_unsigned(int, *tl);
 2661                 }
 2662                 /*
 2663                  * If at end of rpc data, get the eof boolean
 2664                  */
 2665                 if (!more_dirs) {
 2666                         tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
 2667                         more_dirs = (fxdr_unsigned(int, *tl) == 0);
 2668                 }
 2669                 m_freem(mrep);
 2670         }
 2671         /*
 2672          * Fill the last record, if any, out to a multiple of DIRBLKSIZ
 2673          * by increasing d_reclen for the last record.
 2674          */
 2675         if (blksiz > 0) {
 2676                 left = DIRBLKSIZ - blksiz;
 2677                 dp->d_reclen += left;
 2678                 uiop->uio_iov->iov_base =
 2679                     (char *)uiop->uio_iov->iov_base + left;
 2680                 uiop->uio_iov->iov_len -= left;
 2681                 uiop->uio_offset += left;
 2682                 uiop->uio_resid -= left;
 2683         }
 2684 
 2685         /*
 2686          * We are now either at the end of the directory or have filled the
 2687          * block.
 2688          */
 2689         if (bigenough)
 2690                 dnp->n_direofoffset = uiop->uio_offset;
 2691         else {
 2692                 if (uiop->uio_resid > 0)
 2693                         nfs_printf("EEK! readdirplusrpc resid > 0\n");
 2694                 nfs_dircookie_lock(dnp);
 2695                 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 1);
 2696                 *cookiep = cookie;
 2697                 nfs_dircookie_unlock(dnp);
 2698         }
 2699 nfsmout:
 2700         if (newvp != NULLVP) {
 2701                 if (newvp == vp)
 2702                         vrele(newvp);
 2703                 else
 2704                         vput(newvp);
 2705                 newvp = NULLVP;
 2706         }
 2707         return (error);
 2708 }
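
/*
 * Illustrative, standalone sketch (not part of nfs_vnops.c): the entry
 * padding arithmetic used by the readdir loops above.  The EX_* values
 * below are assumptions standing in for DIRBLKSIZ, DIRHDSIZ and
 * nfsm_rndup(); they are not taken from this file.
 */
#include <stdio.h>

#define EX_DIRBLKSIZ    512                     /* assumed directory block size */
#define EX_DIRHDSIZ     8                       /* assumed fixed dirent header size */
#define EX_RNDUP(n)     (((n) + 3) & ~3)        /* round up to a 4-byte XDR boundary */

int
main(void)
{
        int len = 12;                           /* example name length */
        int tlen = EX_RNDUP(len);               /* name padded to 4 bytes */

        if (tlen == len)
                tlen += 4;                      /* leave room for a NUL terminator */
        printf("name %d bytes -> padded %d, record %d bytes\n",
            len, tlen, tlen + EX_DIRHDSIZ);

        /*
         * When (tlen + DIRHDSIZ) does not fit in what is left of the
         * current DIRBLKSIZ block, the loops above instead grow the
         * previous entry's d_reclen by the leftover bytes so records
         * never straddle a block boundary.
         */
        return (0);
}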
 2709 
 2710 /*
 2711  * Silly rename.  To make the stateless NFS filesystem look a little more
 2712  * like "ufs", a remove of an active vnode is translated into a rename to a
 2713  * funny-looking filename that is removed by nfs_inactive on the nfsnode.
 2714  * There is the potential for another process on a different client to
 2715  * create the same funny name between the time nfs_lookitup() fails and
 2716  * nfs_rename() completes, but...
 2717  */
 2718 static int
 2719 nfs_sillyrename(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
 2720 {
 2721         struct sillyrename *sp;
 2722         struct nfsnode *np;
 2723         int error;
 2724         short pid;
 2725         unsigned int lticks;
 2726 
 2727         cache_purge(dvp);
 2728         np = VTONFS(vp);
 2729 #ifndef DIAGNOSTIC
 2730         if (vp->v_type == VDIR)
 2731                 panic("nfs: sillyrename dir");
 2732 #endif
 2733         sp = malloc(sizeof (struct sillyrename),
 2734                 M_NFSREQ, M_WAITOK);
 2735         sp->s_cred = crhold(cnp->cn_cred);
 2736         sp->s_dvp = dvp;
 2737         sp->s_removeit = nfs_removeit;
 2738         VREF(dvp);
 2739 
 2740         /* 
 2741          * Fudge together a funny name.
 2742          * Changing the format of the funny name to accommodate more
 2743          * sillynames per directory.
 2744          * The name is now changed to .nfs.<ticks>.<pid>.4, where ticks is 
 2745          * CPU ticks since boot.
 2746          */
 2747         pid = cnp->cn_thread->td_proc->p_pid;
 2748         lticks = (unsigned int)ticks;
 2749         for ( ; ; ) {
 2750                 sp->s_namlen = sprintf(sp->s_name, 
 2751                                        ".nfs.%08x.%04x4.4", lticks, 
 2752                                        pid);
 2753                 if (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
 2754                                  cnp->cn_thread, NULL))
 2755                         break;
 2756                 lticks++;
 2757         }
 2758         error = nfs_renameit(dvp, cnp, sp);
 2759         if (error)
 2760                 goto bad;
 2761         error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
 2762                 cnp->cn_thread, &np);
 2763         np->n_sillyrename = sp;
 2764         return (0);
 2765 bad:
 2766         vrele(sp->s_dvp);
 2767         crfree(sp->s_cred);
 2768         free((caddr_t)sp, M_NFSREQ);
 2769         return (error);
 2770 }
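
/*
 * Illustrative, standalone sketch (not part of nfs_vnops.c): generating a
 * sillyrename-style name and retrying until it does not collide, as the
 * loop above does.  ex_name_exists() is a hypothetical stand-in for the
 * nfs_lookitup() probe; the tick and pid values are made up.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool
ex_name_exists(const char *name)
{
        /* Pretend the first candidate already exists so the loop retries. */
        return (strcmp(name, ".nfs.00000001.04d24.4") == 0);
}

int
main(void)
{
        unsigned int lticks = 1;                /* assumed ticks-since-boot value */
        unsigned int pid = 0x04d2;              /* assumed process id */
        char name[32];

        for (;;) {
                /* Same format string as the kernel code above. */
                snprintf(name, sizeof(name), ".nfs.%08x.%04x4.4", lticks, pid);
                if (!ex_name_exists(name))
                        break;
                lticks++;                       /* perturb the name and retry */
        }
        printf("silly name: %s\n", name);
        return (0);
}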
 2771 
 2772 /*
 2773  * Look up a file name and optionally either update the file handle or
 2774  * allocate an nfsnode, depending on the value of npp.
 2775  * npp == NULL  --> just do the lookup
 2776  * *npp == NULL --> allocate a new nfsnode and make sure attributes are
 2777  *                      handled too
 2778  * *npp != NULL --> update the file handle in the vnode
 2779  */
 2780 static int
 2781 nfs_lookitup(struct vnode *dvp, const char *name, int len, struct ucred *cred,
 2782     struct thread *td, struct nfsnode **npp)
 2783 {
 2784         struct vnode *newvp = NULL;
 2785         struct nfsnode *np, *dnp = VTONFS(dvp);
 2786         caddr_t bpos, dpos;
 2787         int error = 0, fhlen, attrflag;
 2788         struct mbuf *mreq, *mrep, *md, *mb;
 2789         nfsfh_t *nfhp;
 2790         int v3 = NFS_ISV3(dvp);
 2791 
 2792         nfsstats.rpccnt[NFSPROC_LOOKUP]++;
 2793         mreq = nfsm_reqhead(dvp, NFSPROC_LOOKUP,
 2794                 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
 2795         mb = mreq;
 2796         bpos = mtod(mb, caddr_t);
 2797         nfsm_fhtom(dvp, v3);
 2798         nfsm_strtom(name, len, NFS_MAXNAMLEN);
 2799         nfsm_request(dvp, NFSPROC_LOOKUP, td, cred);
 2800         if (npp && !error) {
 2801                 nfsm_getfh(nfhp, fhlen, v3);
 2802                 if (*npp) {
 2803                     np = *npp;
 2804                     if (np->n_fhsize > NFS_SMALLFH && fhlen <= NFS_SMALLFH) {
 2805                         free((caddr_t)np->n_fhp, M_NFSBIGFH);
 2806                         np->n_fhp = &np->n_fh;
 2807                     } else if (np->n_fhsize <= NFS_SMALLFH && fhlen>NFS_SMALLFH)
 2808                         np->n_fhp =(nfsfh_t *)malloc(fhlen, M_NFSBIGFH, M_WAITOK);
 2809                     bcopy((caddr_t)nfhp, (caddr_t)np->n_fhp, fhlen);
 2810                     np->n_fhsize = fhlen;
 2811                     newvp = NFSTOV(np);
 2812                 } else if (NFS_CMPFH(dnp, nfhp, fhlen)) {
 2813                     VREF(dvp);
 2814                     newvp = dvp;
 2815                 } else {
 2816                     error = nfs_nget(dvp->v_mount, nfhp, fhlen, &np, LK_EXCLUSIVE);
 2817                     if (error) {
 2818                         m_freem(mrep);
 2819                         return (error);
 2820                     }
 2821                     newvp = NFSTOV(np);
 2822                 }
 2823                 if (v3) {
 2824                         nfsm_postop_attr(newvp, attrflag);
 2825                         if (!attrflag && *npp == NULL) {
 2826                                 m_freem(mrep);
 2827                                 if (newvp == dvp)
 2828                                         vrele(newvp);
 2829                                 else
 2830                                         vput(newvp);
 2831                                 return (ENOENT);
 2832                         }
 2833                 } else
 2834                         nfsm_loadattr(newvp, NULL);
 2835         }
 2836         m_freem(mrep);
 2837 nfsmout:
 2838         if (npp && *npp == NULL) {
 2839                 if (error) {
 2840                         if (newvp) {
 2841                                 if (newvp == dvp)
 2842                                         vrele(newvp);
 2843                                 else
 2844                                         vput(newvp);
 2845                         }
 2846                 } else
 2847                         *npp = np;
 2848         }
 2849         return (error);
 2850 }
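
/*
 * Illustrative, standalone sketch (not part of nfs_vnops.c): the small
 * versus heap-allocated file-handle storage that nfs_lookitup() switches
 * between when it updates a handle.  This is a simplified version of the
 * kernel logic; EX_SMALLFH and struct ex_node are assumptions standing in
 * for NFS_SMALLFH and struct nfsnode.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define EX_SMALLFH 64                           /* assumed inline handle size */

struct ex_node {
        unsigned char fh_inline[EX_SMALLFH];    /* small handles are stored inline */
        unsigned char *fhp;                     /* points at fh_inline or heap space */
        int fhsize;
};

/* Install a new handle, using the inline buffer when it fits. */
static int
ex_set_fh(struct ex_node *np, const unsigned char *fh, int fhlen)
{
        if (np->fhp != np->fh_inline)
                free(np->fhp);                  /* drop any previous heap copy */
        if (fhlen <= EX_SMALLFH)
                np->fhp = np->fh_inline;
        else if ((np->fhp = malloc(fhlen)) == NULL)
                return (-1);
        memcpy(np->fhp, fh, fhlen);
        np->fhsize = fhlen;
        return (0);
}

int
main(void)
{
        struct ex_node n;
        unsigned char big[128] = { 0xab };

        n.fhp = n.fh_inline;
        n.fhsize = 0;
        if (ex_set_fh(&n, big, sizeof(big)) == 0)
                printf("fhsize %d, on heap? %d\n", n.fhsize,
                    n.fhp != n.fh_inline);
        if (n.fhp != n.fh_inline)
                free(n.fhp);
        return (0);
}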
 2851 
 2852 /*
 2853  * Nfs Version 3 commit rpc
 2854  */
 2855 int
 2856 nfs_commit(struct vnode *vp, u_quad_t offset, int cnt, struct ucred *cred,
 2857            struct thread *td)
 2858 {
 2859         u_int32_t *tl;
 2860         struct nfsmount *nmp = VFSTONFS(vp->v_mount);
 2861         caddr_t bpos, dpos;
 2862         int error = 0, wccflag = NFSV3_WCCRATTR;
 2863         struct mbuf *mreq, *mrep, *md, *mb;
 2864 
 2865         mtx_lock(&nmp->nm_mtx);
 2866         if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0) {
 2867                 mtx_unlock(&nmp->nm_mtx);
 2868                 return (0);
 2869         }
 2870         mtx_unlock(&nmp->nm_mtx);
 2871         nfsstats.rpccnt[NFSPROC_COMMIT]++;
 2872         mreq = nfsm_reqhead(vp, NFSPROC_COMMIT, NFSX_FH(1));
 2873         mb = mreq;
 2874         bpos = mtod(mb, caddr_t);
 2875         nfsm_fhtom(vp, 1);
 2876         tl = nfsm_build(u_int32_t *, 3 * NFSX_UNSIGNED);
 2877         txdr_hyper(offset, tl);
 2878         tl += 2;
 2879         *tl = txdr_unsigned(cnt);
 2880         nfsm_request(vp, NFSPROC_COMMIT, td, cred);
 2881         nfsm_wcc_data(vp, wccflag);
 2882         if (!error) {
 2883                 tl = nfsm_dissect(u_int32_t *, NFSX_V3WRITEVERF);
 2884                 if (bcmp((caddr_t)nmp->nm_verf, (caddr_t)tl,
 2885                         NFSX_V3WRITEVERF)) {
 2886                         bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
 2887                                 NFSX_V3WRITEVERF);
 2888                         error = NFSERR_STALEWRITEVERF;
 2889                 }
 2890         }
 2891         m_freem(mrep);
 2892 nfsmout:
 2893         return (error);
 2894 }
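
/*
 * Illustrative, standalone sketch (not part of nfs_vnops.c): the
 * stale-write-verifier check that nfs_commit() performs above.  The 8-byte
 * verifier size matches NFSX_V3WRITEVERF; struct ex_mount and the sample
 * values are assumptions for illustration.
 */
#include <stdio.h>
#include <string.h>

#define EX_VERFSIZE 8                           /* NFSv3 write verifier is 8 bytes */

struct ex_mount {
        unsigned char verf[EX_VERFSIZE];        /* verifier cached from the server */
};

/*
 * Returns nonzero when the verifier in a COMMIT reply differs from the
 * cached one, meaning the server likely rebooted and any uncommitted
 * writes must be sent again (the caller maps this to NFSERR_STALEWRITEVERF).
 */
static int
ex_verf_stale(struct ex_mount *nmp, const unsigned char *reply_verf)
{
        if (memcmp(nmp->verf, reply_verf, EX_VERFSIZE) != 0) {
                memcpy(nmp->verf, reply_verf, EX_VERFSIZE);     /* adopt the new verifier */
                return (1);
        }
        return (0);
}

int
main(void)
{
        struct ex_mount m = {{ 1, 2, 3, 4, 5, 6, 7, 8 }};
        unsigned char fresh[EX_VERFSIZE] = { 9, 9, 9, 9, 9, 9, 9, 9 };

        printf("stale: %d\n", ex_verf_stale(&m, fresh));        /* 1: server changed */
        printf("stale: %d\n", ex_verf_stale(&m, fresh));        /* 0: verifier now matches */
        return (0);
}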
 2895 
 2896 /*
 2897  * Strategy routine.
 2898  * For async requests when nfsiod(s) are running, queue the request by
 2899  * calling nfs_asyncio(); otherwise just call nfs_doio() to do the
 2900  * request.
 2901  */
 2902 static int
 2903 nfs_strategy(struct vop_strategy_args *ap)
 2904 {
 2905         struct buf *bp = ap->a_bp;
 2906         struct ucred *cr;
 2907 
 2908         KASSERT(!(bp->b_flags & B_DONE),
 2909             ("nfs_strategy: buffer %p unexpectedly marked B_DONE", bp));
 2910         BUF_ASSERT_HELD(bp);
 2911 
 2912         if (bp->b_iocmd == BIO_READ)
 2913                 cr = bp->b_rcred;
 2914         else
 2915                 cr = bp->b_wcred;
 2916 
 2917         /*
 2918          * If the op is asynchronous and an i/o daemon is waiting,
 2919          * queue the request, wake the daemon up and wait for completion;
 2920          * otherwise just do it ourselves.
 2921          */
 2922         if ((bp->b_flags & B_ASYNC) == 0 ||
 2923             nfs_asyncio(VFSTONFS(ap->a_vp->v_mount), bp, NOCRED, curthread))
 2924                 (void)nfs_doio(ap->a_vp, bp, cr, curthread);
 2925         return (0);
 2926 }
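
/*
 * Illustrative, standalone sketch (not part of nfs_vnops.c): the dispatch
 * decision nfs_strategy() makes above.  ex_queue_async() is a hypothetical
 * stand-in for nfs_asyncio() and ex_do_io() for nfs_doio(); the B_ASYNC
 * flag is modelled as a plain boolean.
 */
#include <stdbool.h>
#include <stdio.h>

static bool
ex_queue_async(void)
{
        return (false);                         /* pretend no nfsiod accepted the buffer */
}

static void
ex_do_io(void)
{
        printf("doing the I/O in the calling thread\n");
}

int
main(void)
{
        bool is_async = true;                   /* the buffer had B_ASYNC set */

        /*
         * Hand the buffer to an nfsiod only when the request is async and
         * a daemon takes it; in every other case fall back to synchronous
         * I/O, mirroring the short-circuit test in nfs_strategy().
         */
        if (!is_async || !ex_queue_async())
                ex_do_io();
        return (0);
}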
 2927 
 2928 /*
 2929  * fsync vnode op. Just call nfs_flush() with commit == 1.
 2930  */
 2931 /* ARGSUSED */
 2932 static int
 2933 nfs_fsync(struct vop_fsync_args *ap)
 2934 {
 2935 
 2936         return (nfs_flush(ap->a_vp, ap->a_waitfor, 1));
 2937 }
 2938 
 2939 /*
 2940  * Flush all the blocks associated with a vnode.
 2941  *      Walk through the buffer pool and push any dirty pages
 2942  *      associated with the vnode.
 2943  */
 2944 static int
 2945 nfs_flush(struct vnode *vp, int waitfor, int commit)
 2946 {
 2947         struct nfsnode *np = VTONFS(vp);
 2948         struct buf *bp;
 2949         int i;
 2950         struct buf *nbp;
 2951         struct nfsmount *nmp = VFSTONFS(vp->v_mount);
 2952         int error = 0, slptimeo = 0, slpflag = 0, retv, bvecpos;
 2953         int passone = 1;
 2954         u_quad_t off, endoff, toff;
 2955         struct ucred* wcred = NULL;
 2956         struct buf **bvec = NULL;
 2957         struct bufobj *bo;
 2958         struct thread *td = curthread;
 2959 #ifndef NFS_COMMITBVECSIZ
 2960 #define NFS_COMMITBVECSIZ       20
 2961 #endif
 2962         struct buf *bvec_on_stack[NFS_COMMITBVECSIZ];
 2963         int bvecsize = 0, bveccount;
 2964 
 2965         if (nmp->nm_flag & NFSMNT_INT)
 2966                 slpflag = NFS_PCATCH;
 2967         if (!commit)
 2968                 passone = 0;
 2969         bo = &vp->v_bufobj;
 2970         /*
 2971          * A b_flags == (B_DELWRI | B_NEEDCOMMIT) block has been written to the
 2972          * server, but has not been committed to stable storage on the server
 2973          * yet. On the first pass, the byte range is worked out and the commit
 2974          * rpc is done. On the second pass, nfs_writebp() is called to do the
 2975          * job.
 2976          */
 2977 again:
 2978         off = (u_quad_t)-1;
 2979         endoff = 0;
 2980         bvecpos = 0;
 2981         if (NFS_ISV3(vp) && commit) {
 2982                 if (bvec != NULL && bvec != bvec_on_stack)
 2983                         free(bvec, M_TEMP);
 2984                 /*
 2985                  * Count up how many buffers are waiting for a commit.
 2986                  */
 2987                 bveccount = 0;
 2988                 BO_LOCK(bo);
 2989                 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
 2990                         if (!BUF_ISLOCKED(bp) &&
 2991                             (bp->b_flags & (B_DELWRI | B_NEEDCOMMIT))
 2992                                 == (B_DELWRI | B_NEEDCOMMIT))
 2993                                 bveccount++;
 2994                 }
 2995                 /*
 2996                  * Allocate space to remember the list of bufs to commit.  It is
 2997                  * important to use M_NOWAIT here to avoid a race with nfs_write.
 2998                  * If we can't get memory (for whatever reason), we will end up
 2999                  * committing the buffers one-by-one in the loop below.
 3000                  */
 3001                 if (bveccount > NFS_COMMITBVECSIZ) {
 3002                         /*
 3003                          * Release the vnode interlock to avoid a lock
 3004                          * order reversal.
 3005                          */
 3006                         BO_UNLOCK(bo);
 3007                         bvec = (struct buf **)
 3008                                 malloc(bveccount * sizeof(struct buf *),
 3009                                        M_TEMP, M_NOWAIT);
 3010                         BO_LOCK(bo);
 3011                         if (bvec == NULL) {
 3012                                 bvec = bvec_on_stack;
 3013                                 bvecsize = NFS_COMMITBVECSIZ;
 3014                         } else
 3015                                 bvecsize = bveccount;
 3016                 } else {
 3017                         bvec = bvec_on_stack;
 3018                         bvecsize = NFS_COMMITBVECSIZ;
 3019                 }
 3020                 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
 3021                         if (bvecpos >= bvecsize)
 3022                                 break;
 3023                         if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) {
 3024                                 nbp = TAILQ_NEXT(bp, b_bobufs);
 3025                                 continue;
 3026                         }
 3027                         if ((bp->b_flags & (B_DELWRI | B_NEEDCOMMIT)) !=
 3028                             (B_DELWRI | B_NEEDCOMMIT)) {
 3029                                 BUF_UNLOCK(bp);
 3030                                 nbp = TAILQ_NEXT(bp, b_bobufs);
 3031                                 continue;
 3032                         }
 3033                         BO_UNLOCK(bo);
 3034                         bremfree(bp);
 3035                         /*
 3036                          * Work out if all buffers are using the same cred
 3037                          * so we can deal with them all with one commit.
 3038                          *
 3039                          * NOTE: we are not clearing B_DONE here, so we have
 3040                          * to do it later on in this routine if we intend to
 3041                          * initiate I/O on the bp.
 3042                          *
 3043                          * Note: to avoid loopback deadlocks, we do not
 3044                          * assign b_runningbufspace.
 3045                          */
 3046                         if (wcred == NULL)
 3047                                 wcred = bp->b_wcred;
 3048                         else if (wcred != bp->b_wcred)
 3049                                 wcred = NOCRED;
 3050                         vfs_busy_pages(bp, 1);
 3051 
 3052                         BO_LOCK(bo);
 3053                         /*
 3054                          * bp is protected by being locked, but nbp is not
 3055                          * and vfs_busy_pages() may sleep.  We have to
 3056                          * recalculate nbp.
 3057                          */
 3058                         nbp = TAILQ_NEXT(bp, b_bobufs);
 3059 
 3060                         /*
 3061                          * A list of these buffers is kept so that the
 3062                          * second loop knows which buffers have actually
 3063                          * been committed. This is necessary, since there
 3064                          * may be a race between the commit rpc and new
 3065                          * uncommitted writes on the file.
 3066                          */
 3067                         bvec[bvecpos++] = bp;
 3068                         toff = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
 3069                                 bp->b_dirtyoff;
 3070                         if (toff < off)
 3071                                 off = toff;
 3072                         toff += (u_quad_t)(bp->b_dirtyend - bp->b_dirtyoff);
 3073                         if (toff > endoff)
 3074                                 endoff = toff;
 3075                 }
 3076                 BO_UNLOCK(bo);
 3077         }
 3078         if (bvecpos > 0) {
 3079                 /*
 3080                  * Commit data on the server, as required.
 3081                  * If all bufs are using the same wcred, then use that with
 3082                  * one call for all of them, otherwise commit each one
 3083                  * separately.
 3084                  */
 3085                 if (wcred != NOCRED)
 3086                         retv = nfs_commit(vp, off, (int)(endoff - off),
 3087                                           wcred, td);
 3088                 else {
 3089                         retv = 0;
 3090                         for (i = 0; i < bvecpos; i++) {
 3091                                 off_t off, size;
 3092                                 bp = bvec[i];
 3093                                 off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
 3094                                         bp->b_dirtyoff;
 3095                                 size = (u_quad_t)(bp->b_dirtyend
 3096                                                   - bp->b_dirtyoff);
 3097                                 retv = nfs_commit(vp, off, (int)size,
 3098                                                   bp->b_wcred, td);
 3099                                 if (retv) break;
 3100                         }
 3101                 }
 3102 
 3103                 if (retv == NFSERR_STALEWRITEVERF)
 3104                         nfs_clearcommit(vp->v_mount);
 3105 
 3106                 /*
 3107                  * Now, either mark the blocks I/O done or mark the
 3108                  * blocks dirty, depending on whether the commit
 3109                  * succeeded.
 3110                  */
 3111                 for (i = 0; i < bvecpos; i++) {
 3112                         bp = bvec[i];
 3113                         bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
 3114                         if (retv) {
 3115                                 /*
 3116                                  * Error, leave B_DELWRI intact
 3117                                  */
 3118                                 vfs_unbusy_pages(bp);
 3119                                 brelse(bp);
 3120                         } else {
 3121                                 /*
 3122                                  * Success, remove B_DELWRI (bundirty()).
 3123                                  *
 3124                                  * b_dirtyoff/b_dirtyend seem to be NFS
 3125                                  * specific.  We should probably move that
 3126                                  * into bundirty(). XXX
 3127                                  */
 3128                                 bufobj_wref(bo);
 3129                                 bp->b_flags |= B_ASYNC;
 3130                                 bundirty(bp);
 3131                                 bp->b_flags &= ~B_DONE;
 3132                                 bp->b_ioflags &= ~BIO_ERROR;
 3133                                 bp->b_dirtyoff = bp->b_dirtyend = 0;
 3134                                 bufdone(bp);
 3135                         }
 3136                 }
 3137         }
 3138 
 3139         /*
 3140          * Start/do any write(s) that are required.
 3141          */
 3142 loop:
 3143         BO_LOCK(bo);
 3144         TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
 3145                 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) {
 3146                         if (waitfor != MNT_WAIT || passone)
 3147                                 continue;
 3148 
 3149                         error = BUF_TIMELOCK(bp,
 3150                             LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
 3151                             BO_MTX(bo), "nfsfsync", slpflag, slptimeo);
 3152                         if (error == 0) {
 3153                                 BUF_UNLOCK(bp);
 3154                                 goto loop;
 3155                         }
 3156                         if (error == ENOLCK) {
 3157                                 error = 0;
 3158                                 goto loop;
 3159                         }
 3160                         if (nfs_sigintr(nmp, td)) {
 3161                                 error = EINTR;
 3162                                 goto done;
 3163                         }
 3164                         if (slpflag & PCATCH) {
 3165                                 slpflag = 0;
 3166                                 slptimeo = 2 * hz;
 3167                         }
 3168                         goto loop;
 3169                 }
 3170                 if ((bp->b_flags & B_DELWRI) == 0)
 3171                         panic("nfs_fsync: not dirty");
 3172                 if ((passone || !commit) && (bp->b_flags & B_NEEDCOMMIT)) {
 3173                         BUF_UNLOCK(bp);
 3174                         continue;
 3175                 }
 3176                 BO_UNLOCK(bo);
 3177                 bremfree(bp);
 3178                 if (passone || !commit)
 3179                     bp->b_flags |= B_ASYNC;
 3180                 else
 3181                     bp->b_flags |= B_ASYNC;
 3182                 bwrite(bp);
 3183                 if (nfs_sigintr(nmp, td)) {
 3184                         error = EINTR;
 3185                         goto done;
 3186                 }
 3187                 goto loop;
 3188         }
 3189         if (passone) {
 3190                 passone = 0;
 3191                 BO_UNLOCK(bo);
 3192                 goto again;
 3193         }
 3194         if (waitfor == MNT_WAIT) {
 3195                 while (bo->bo_numoutput) {
 3196                         error = bufobj_wwait(bo, slpflag, slptimeo);
 3197                         if (error) {
 3198                             BO_UNLOCK(bo);
 3199                             error = nfs_sigintr(nmp, td);
 3200                             if (error)
 3201                                 goto done;
 3202                             if (slpflag & PCATCH) {
 3203                                 slpflag = 0;
 3204                                 slptimeo = 2 * hz;
 3205                             }
 3206                             BO_LOCK(bo);
 3207                         }
 3208                 }
 3209                 if (bo->bo_dirty.bv_cnt != 0 && commit) {
 3210                         BO_UNLOCK(bo);
 3211                         goto loop;
 3212                 }
 3213                 /*
 3214                  * Wait for all the async IO requests to drain
 3215                  */
 3216                 BO_UNLOCK(bo);
 3217                 mtx_lock(&np->n_mtx);
 3218                 while (np->n_directio_asyncwr > 0) {
 3219                         np->n_flag |= NFSYNCWAIT;
 3220                         error = nfs_msleep(td, (caddr_t)&np->n_directio_asyncwr,
 3221                                            &np->n_mtx, slpflag | (PRIBIO + 1), 
 3222                                            "nfsfsync", 0);
 3223                         if (error) {
 3224                                 if (nfs_sigintr(nmp, td)) {
 3225                                         mtx_unlock(&np->n_mtx);
 3226                                         error = EINTR;  
 3227                                         goto done;
 3228                                 }
 3229                         }
 3230                 }
 3231                 mtx_unlock(&np->n_mtx);
 3232         } else
 3233                 BO_UNLOCK(bo);
 3234         mtx_lock(&np->n_mtx);
 3235         if (np->n_flag & NWRITEERR) {
 3236                 error = np->n_error;
 3237                 np->n_flag &= ~NWRITEERR;
 3238         }
 3239         if (commit && bo->bo_dirty.bv_cnt == 0 &&
 3240             bo->bo_numoutput == 0 && np->n_directio_asyncwr == 0)
 3241                 np->n_flag &= ~NMODIFIED;
 3242         mtx_unlock(&np->n_mtx);
 3243 done:
 3244         if (bvec != NULL && bvec != bvec_on_stack)
 3245                 free(bvec, M_TEMP);
 3246         return (error);
 3247 }
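
/*
 * Illustrative, standalone sketch (not part of nfs_vnops.c): how the first
 * pass of nfs_flush() above collapses all commit-pending buffers into one
 * byte range for a single COMMIT RPC.  struct ex_buf and the sample
 * offsets are assumptions; the b_blkno * DEV_BSIZE scaling is treated as
 * already applied.
 */
#include <stdio.h>

struct ex_buf {
        unsigned long long start;               /* buffer start in bytes */
        int dirtyoff, dirtyend;                 /* dirty byte range within the buffer */
};

int
main(void)
{
        struct ex_buf bufs[] = {
                { 8192,   0, 4096 },
                {    0, 512, 1024 },
                { 4096, 100,  200 },
        };
        unsigned long long off = ~0ULL, endoff = 0, toff;
        size_t i;

        for (i = 0; i < sizeof(bufs) / sizeof(bufs[0]); i++) {
                toff = bufs[i].start + bufs[i].dirtyoff;
                if (toff < off)
                        off = toff;             /* lowest dirty byte seen so far */
                toff += bufs[i].dirtyend - bufs[i].dirtyoff;
                if (toff > endoff)
                        endoff = toff;          /* highest dirty byte seen so far */
        }
        /* One COMMIT for [off, endoff) covers every buffer in the list. */
        printf("commit offset %llu, count %llu\n", off, endoff - off);
        return (0);
}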
 3248 
 3249 /*
 3250  * NFS advisory byte-level locks.
 3251  */
 3252 static int
 3253 nfs_advlock(struct vop_advlock_args *ap)
 3254 {
 3255         struct vnode *vp = ap->a_vp;
 3256         u_quad_t size;
 3257         int error;
 3258 
 3259         error = vn_lock(vp, LK_SHARED);
 3260         if (error)
 3261                 return (error);
 3262         if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NOLOCKD) != 0) {
 3263                 size = VTONFS(vp)->n_size;
 3264                 VOP_UNLOCK(vp, 0);
 3265                 error = lf_advlock(ap, &(vp->v_lockf), size);
 3266         } else {
 3267                 if (nfs_advlock_p)
 3268                         error = nfs_advlock_p(ap);
 3269                 else
 3270                         error = ENOLCK;
 3271         }
 3272 
 3273         return (error);
 3274 }
 3275 
 3276 /*
 3277  * NFS advisory byte-level locks.
 3278  */
 3279 static int
 3280 nfs_advlockasync(struct vop_advlockasync_args *ap)
 3281 {
 3282         struct vnode *vp = ap->a_vp;
 3283         u_quad_t size;
 3284         int error;
 3285         
 3286         error = vn_lock(vp, LK_SHARED);
 3287         if (error)
 3288                 return (error);
 3289         if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NOLOCKD) != 0) {
 3290                 size = VTONFS(vp)->n_size;
 3291                 VOP_UNLOCK(vp, 0);
 3292                 error = lf_advlockasync(ap, &(vp->v_lockf), size);
 3293         } else {
 3294                 VOP_UNLOCK(vp, 0);
 3295                 error = EOPNOTSUPP;
 3296         }
 3297         return (error);
 3298 }
 3299 
 3300 /*
 3301  * Print out the contents of an nfsnode.
 3302  */
 3303 static int
 3304 nfs_print(struct vop_print_args *ap)
 3305 {
 3306         struct vnode *vp = ap->a_vp;
 3307         struct nfsnode *np = VTONFS(vp);
 3308 
 3309         nfs_printf("\tfileid %ld fsid 0x%x",
 3310            np->n_vattr.va_fileid, np->n_vattr.va_fsid);
 3311         if (vp->v_type == VFIFO)
 3312                 fifo_printinfo(vp);
 3313         printf("\n");
 3314         return (0);
 3315 }
 3316 
 3317 /*
 3318  * This is the "real" nfs::bwrite(struct buf*).
 3319  * We set B_CACHE if this is a VMIO buffer.
 3320  */
 3321 int
 3322 nfs_writebp(struct buf *bp, int force __unused, struct thread *td)
 3323 {
 3324         int s;
 3325         int oldflags = bp->b_flags;
 3326 #if 0
 3327         int retv = 1;
 3328         off_t off;
 3329 #endif
 3330 
 3331         BUF_ASSERT_HELD(bp);
 3332 
 3333         if (bp->b_flags & B_INVAL) {
 3334                 brelse(bp);
 3335                 return(0);
 3336         }
 3337 
 3338         bp->b_flags |= B_CACHE;
 3339 
 3340         /*
 3341          * Undirty the bp.  We will redirty it later if the I/O fails.
 3342          */
 3343 
 3344         s = splbio();
 3345         bundirty(bp);
 3346         bp->b_flags &= ~B_DONE;
 3347         bp->b_ioflags &= ~BIO_ERROR;
 3348         bp->b_iocmd = BIO_WRITE;
 3349 
 3350         bufobj_wref(bp->b_bufobj);
 3351         curthread->td_ru.ru_oublock++;
 3352         splx(s);
 3353 
 3354         /*
 3355          * Note: to avoid loopback deadlocks, we do not
 3356          * assign b_runningbufspace.
 3357          */
 3358         vfs_busy_pages(bp, 1);
 3359 
 3360         BUF_KERNPROC(bp);
 3361         bp->b_iooffset = dbtob(bp->b_blkno);
 3362         bstrategy(bp);
 3363 
 3364         if ((oldflags & B_ASYNC) == 0) {
 3365                 int rtval = bufwait(bp);
 3366 
 3367                 if (oldflags & B_DELWRI) {
 3368                         s = splbio();
 3369                         reassignbuf(bp);
 3370                         splx(s);
 3371                 }
 3372                 brelse(bp);
 3373                 return (rtval);
 3374         }
 3375 
 3376         return (0);
 3377 }
 3378 
 3379 /*
 3380  * nfs special file access vnode op.
 3381  * Essentially just get vattr and then imitate iaccess() since the device is
 3382  * local to the client.
 3383  */
 3384 static int
 3385 nfsspec_access(struct vop_access_args *ap)
 3386 {
 3387         struct vattr *vap;
 3388         struct ucred *cred = ap->a_cred;
 3389         struct vnode *vp = ap->a_vp;
 3390         accmode_t accmode = ap->a_accmode;
 3391         struct vattr vattr;
 3392         int error;
 3393 
 3394         /*
 3395          * Disallow write attempts on filesystems mounted read-only;
 3396          * unless the file is a socket, fifo, or a block or character
 3397          * device resident on the filesystem.
 3398          */
 3399         if ((accmode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
 3400                 switch (vp->v_type) {
 3401                 case VREG:
 3402                 case VDIR:
 3403                 case VLNK:
 3404                         return (EROFS);
 3405                 default:
 3406                         break;
 3407                 }
 3408         }
 3409         vap = &vattr;
 3410         error = VOP_GETATTR(vp, vap, cred);
 3411         if (error)
 3412                 goto out;
 3413         error  = vaccess(vp->v_type, vap->va_mode, vap->va_uid, vap->va_gid,
 3414                          accmode, cred, NULL);
 3415 out:
 3416         return (error);
 3417 }
 3418 
 3419 /*
 3420  * Read wrapper for fifos.
 3421  */
 3422 static int
 3423 nfsfifo_read(struct vop_read_args *ap)
 3424 {
 3425         struct nfsnode *np = VTONFS(ap->a_vp);
 3426         int error;
 3427 
 3428         /*
 3429          * Set access flag.
 3430          */
 3431         mtx_lock(&np->n_mtx);
 3432         np->n_flag |= NACC;
 3433         getnanotime(&np->n_atim);
 3434         mtx_unlock(&np->n_mtx);
 3435         error = fifo_specops.vop_read(ap);
 3436         return (error);
 3437 }
 3438 
 3439 /*
 3440  * Write wrapper for fifos.
 3441  */
 3442 static int
 3443 nfsfifo_write(struct vop_write_args *ap)
 3444 {
 3445         struct nfsnode *np = VTONFS(ap->a_vp);
 3446 
 3447         /*
 3448          * Set update flag.
 3449          */
 3450         mtx_lock(&np->n_mtx);
 3451         np->n_flag |= NUPD;
 3452         getnanotime(&np->n_mtim);
 3453         mtx_unlock(&np->n_mtx);
 3454         return(fifo_specops.vop_write(ap));
 3455 }
 3456 
 3457 /*
 3458  * Close wrapper for fifos.
 3459  *
 3460  * Update the times on the nfsnode then do fifo close.
 3461  */
 3462 static int
 3463 nfsfifo_close(struct vop_close_args *ap)
 3464 {
 3465         struct vnode *vp = ap->a_vp;
 3466         struct nfsnode *np = VTONFS(vp);
 3467         struct vattr vattr;
 3468         struct timespec ts;
 3469 
 3470         mtx_lock(&np->n_mtx);
 3471         if (np->n_flag & (NACC | NUPD)) {
 3472                 getnanotime(&ts);
 3473                 if (np->n_flag & NACC)
 3474                         np->n_atim = ts;
 3475                 if (np->n_flag & NUPD)
 3476                         np->n_mtim = ts;
 3477                 np->n_flag |= NCHG;
 3478                 if (vrefcnt(vp) == 1 &&
 3479                     (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
 3480                         VATTR_NULL(&vattr);
 3481                         if (np->n_flag & NACC)
 3482                                 vattr.va_atime = np->n_atim;
 3483                         if (np->n_flag & NUPD)
 3484                                 vattr.va_mtime = np->n_mtim;
 3485                         mtx_unlock(&np->n_mtx);
 3486                         (void)VOP_SETATTR(vp, &vattr, ap->a_cred);
 3487                         goto out;
 3488                 }
 3489         }
 3490         mtx_unlock(&np->n_mtx);
 3491 out:
 3492         return (fifo_specops.vop_close(ap));
 3493 }
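
/*
 * Illustrative, standalone sketch (not part of nfs_vnops.c): the deferred
 * timestamp handling used by the fifo wrappers above.  Reads and writes
 * only set NACC/NUPD-style flags and record a timestamp; the close hook
 * then pushes the saved times out in one update.  The ex_* names and the
 * use of time() are assumptions for illustration.
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct ex_fifonode {
        bool acc, upd;                          /* pending access/update marks */
        time_t atim, mtim;                      /* deferred timestamps */
};

static void
ex_read(struct ex_fifonode *np)
{
        np->acc = true;                         /* like NACC in nfsfifo_read() */
        np->atim = time(NULL);
}

static void
ex_write(struct ex_fifonode *np)
{
        np->upd = true;                         /* like NUPD in nfsfifo_write() */
        np->mtim = time(NULL);
}

static void
ex_close(struct ex_fifonode *np)
{
        if (np->acc || np->upd)
                printf("flush times: atime %ld mtime %ld\n",
                    (long)(np->acc ? np->atim : 0),
                    (long)(np->upd ? np->mtim : 0));
}

int
main(void)
{
        struct ex_fifonode n = { false, false, 0, 0 };

        ex_read(&n);
        ex_write(&n);
        ex_close(&n);                           /* one setattr-style update at close */
        return (0);
}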
 3494 
 3495 /*
 3496  * Just call nfs_writebp() with the force argument set to 1.
 3497  *
 3498  * NOTE: B_DONE may or may not be set in a_bp on call.
 3499  */
 3500 static int
 3501 nfs_bwrite(struct buf *bp)
 3502 {
 3503 
 3504         return (nfs_writebp(bp, 1, curthread));
 3505 }
 3506 
 3507 struct buf_ops buf_ops_nfs = {
 3508         .bop_name       =       "buf_ops_nfs",
 3509         .bop_write      =       nfs_bwrite,
 3510         .bop_strategy   =       bufstrategy,
 3511         .bop_sync       =       bufsync,
 3512         .bop_bdflush    =       bufbdflush,
 3513 };
