FreeBSD/Linux Kernel Cross Reference
sys/fs/nfsclient/nfs_clport.c


    1 /*-
    2  * Copyright (c) 1989, 1993
    3  *      The Regents of the University of California.  All rights reserved.
    4  *
    5  * This code is derived from software contributed to Berkeley by
    6  * Rick Macklem at The University of Guelph.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  * 4. Neither the name of the University nor the names of its contributors
   17  *    may be used to endorse or promote products derived from this software
   18  *    without specific prior written permission.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   30  * SUCH DAMAGE.
   31  *
   32  */
   33 
   34 #include <sys/cdefs.h>
   35 __FBSDID("$FreeBSD$");
   36 
   37 #include "opt_inet6.h"
   38 
   39 /*
   40  * generally, I don't like #includes inside .h files, but it seems to
   41  * be the easiest way to handle the port.
   42  */
   43 #include <sys/hash.h>
   44 #include <fs/nfs/nfsport.h>
   45 #include <netinet/if_ether.h>
   46 #include <net/if_types.h>
   47 
   48 extern u_int32_t newnfs_true, newnfs_false, newnfs_xdrneg1;
   49 extern struct vop_vector newnfs_vnodeops;
   50 extern struct vop_vector newnfs_fifoops;
   51 extern uma_zone_t newnfsnode_zone;
   52 extern struct buf_ops buf_ops_newnfs;
   53 extern int ncl_pbuf_freecnt;
   54 extern short nfsv4_cbport;
   55 extern int nfscl_enablecallb;
   56 extern int nfs_numnfscbd;
   57 extern int nfscl_inited;
   58 struct mtx nfs_clstate_mutex;
   59 struct mtx ncl_iod_mutex;
   60 NFSDLOCKMUTEX;
   61 
   62 extern void (*ncl_call_invalcaches)(struct vnode *);
   63 
   64 /*
   65  * Comparison function for vfs_hash functions.
   66  */
   67 int
   68 newnfs_vncmpf(struct vnode *vp, void *arg)
   69 {
   70         struct nfsfh *nfhp = (struct nfsfh *)arg;
   71         struct nfsnode *np = VTONFS(vp);
   72 
   73         if (np->n_fhp->nfh_len != nfhp->nfh_len ||
   74             NFSBCMP(np->n_fhp->nfh_fh, nfhp->nfh_fh, nfhp->nfh_len))
   75                 return (1);
   76         return (0);
   77 }
   78 
   79 /*
   80  * Look up a vnode/nfsnode by file handle.
   81  * Callers must check for mount points!!
   82  * In all cases, a pointer to a
   83  * nfsnode structure is returned.
    84  * This variant takes a "struct nfsfh *" as its third argument and consumes
    85  * that structure, either by hanging it off the nfsnode or FREEing it.
   86  */
   87 int
   88 nfscl_nget(struct mount *mntp, struct vnode *dvp, struct nfsfh *nfhp,
   89     struct componentname *cnp, struct thread *td, struct nfsnode **npp,
   90     void *stuff, int lkflags)
   91 {
   92         struct nfsnode *np, *dnp;
   93         struct vnode *vp, *nvp;
   94         struct nfsv4node *newd, *oldd;
   95         int error;
   96         u_int hash;
   97         struct nfsmount *nmp;
   98 
   99         nmp = VFSTONFS(mntp);
  100         dnp = VTONFS(dvp);
  101         *npp = NULL;
  102 
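              /*
               * Hash the file handle bytes with 32-bit FNV-1; this value is the
               * vfs_hash key used both for the lookup below and for the
               * vfs_hash_insert() later in this function.
               */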
  103         hash = fnv_32_buf(nfhp->nfh_fh, nfhp->nfh_len, FNV1_32_INIT);
  104 
  105         error = vfs_hash_get(mntp, hash, lkflags,
  106             td, &nvp, newnfs_vncmpf, nfhp);
  107         if (error == 0 && nvp != NULL) {
  108                 /*
  109                  * I believe there is a slight chance that vgonel() could
  110                  * get called on this vnode between when NFSVOPLOCK() drops
  111                  * the VI_LOCK() and vget() acquires it again, so that it
  112                  * hasn't yet had v_usecount incremented. If this were to
  113                  * happen, the VI_DOOMED flag would be set, so check for
  114                  * that here. Since we now have the v_usecount incremented,
  115                  * we should be ok until we vrele() it, if the VI_DOOMED
  116                  * flag isn't set now.
  117                  */
  118                 VI_LOCK(nvp);
  119                 if ((nvp->v_iflag & VI_DOOMED)) {
  120                         VI_UNLOCK(nvp);
  121                         vrele(nvp);
  122                         error = ENOENT;
  123                 } else {
  124                         VI_UNLOCK(nvp);
  125                 }
  126         }
  127         if (error) {
  128                 FREE((caddr_t)nfhp, M_NFSFH);
  129                 return (error);
  130         }
  131         if (nvp != NULL) {
  132                 np = VTONFS(nvp);
  133                 /*
  134                  * For NFSv4, check to see if it is the same name and
  135                  * replace the name, if it is different.
  136                  */
  137                 oldd = newd = NULL;
  138                 if ((nmp->nm_flag & NFSMNT_NFSV4) && np->n_v4 != NULL &&
  139                     nvp->v_type == VREG &&
  140                     (np->n_v4->n4_namelen != cnp->cn_namelen ||
  141                      NFSBCMP(cnp->cn_nameptr, NFS4NODENAME(np->n_v4),
  142                      cnp->cn_namelen) ||
  143                      dnp->n_fhp->nfh_len != np->n_v4->n4_fhlen ||
  144                      NFSBCMP(dnp->n_fhp->nfh_fh, np->n_v4->n4_data,
  145                      dnp->n_fhp->nfh_len))) {
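                          /*
                           * Allocate the replacement nfsv4node without holding the
                           * node mutex, since M_WAITOK may sleep, and then re-check
                           * the name and directory FH under NFSLOCKNODE() before
                           * swapping it in.
                           */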
  146                     MALLOC(newd, struct nfsv4node *,
  147                         sizeof (struct nfsv4node) + dnp->n_fhp->nfh_len +
   148                         cnp->cn_namelen - 1, M_NFSV4NODE, M_WAITOK);
  149                     NFSLOCKNODE(np);
  150                     if (newd != NULL && np->n_v4 != NULL && nvp->v_type == VREG
  151                         && (np->n_v4->n4_namelen != cnp->cn_namelen ||
  152                          NFSBCMP(cnp->cn_nameptr, NFS4NODENAME(np->n_v4),
  153                          cnp->cn_namelen) ||
  154                          dnp->n_fhp->nfh_len != np->n_v4->n4_fhlen ||
  155                          NFSBCMP(dnp->n_fhp->nfh_fh, np->n_v4->n4_data,
  156                          dnp->n_fhp->nfh_len))) {
  157                         oldd = np->n_v4;
  158                         np->n_v4 = newd;
  159                         newd = NULL;
  160                         np->n_v4->n4_fhlen = dnp->n_fhp->nfh_len;
  161                         np->n_v4->n4_namelen = cnp->cn_namelen;
  162                         NFSBCOPY(dnp->n_fhp->nfh_fh, np->n_v4->n4_data,
  163                             dnp->n_fhp->nfh_len);
  164                         NFSBCOPY(cnp->cn_nameptr, NFS4NODENAME(np->n_v4),
  165                             cnp->cn_namelen);
  166                     }
  167                     NFSUNLOCKNODE(np);
  168                 }
  169                 if (newd != NULL)
  170                         FREE((caddr_t)newd, M_NFSV4NODE);
  171                 if (oldd != NULL)
  172                         FREE((caddr_t)oldd, M_NFSV4NODE);
  173                 *npp = np;
  174                 FREE((caddr_t)nfhp, M_NFSFH);
  175                 return (0);
  176         }
  177 
  178         /*
  179          * Allocate before getnewvnode since doing so afterward
  180          * might cause a bogus v_data pointer to get dereferenced
  181          * elsewhere if zalloc should block.
  182          */
  183         np = uma_zalloc(newnfsnode_zone, M_WAITOK | M_ZERO);
  184 
  185         error = getnewvnode("newnfs", mntp, &newnfs_vnodeops, &nvp);
  186         if (error) {
  187                 uma_zfree(newnfsnode_zone, np);
  188                 FREE((caddr_t)nfhp, M_NFSFH);
  189                 return (error);
  190         }
  191         vp = nvp;
  192         KASSERT(vp->v_bufobj.bo_bsize != 0, ("nfscl_nget: bo_bsize == 0"));
  193         vp->v_bufobj.bo_ops = &buf_ops_newnfs;
  194         vp->v_data = np;
  195         np->n_vnode = vp;
  196         /* 
  197          * Initialize the mutex even if the vnode is going to be a loser.
  198          * This simplifies the logic in reclaim, which can then unconditionally
  199          * destroy the mutex (in the case of the loser, or if hash_insert
  200          * happened to return an error no special casing is needed).
  201          */
  202         mtx_init(&np->n_mtx, "NEWNFSnode lock", NULL, MTX_DEF | MTX_DUPOK);
  203 
  204         /* 
  205          * Are we getting the root? If so, make sure the vnode flags
  206          * are correct 
  207          */
  208         if ((nfhp->nfh_len == nmp->nm_fhsize) &&
  209             !bcmp(nfhp->nfh_fh, nmp->nm_fh, nfhp->nfh_len)) {
  210                 if (vp->v_type == VNON)
  211                         vp->v_type = VDIR;
  212                 vp->v_vflag |= VV_ROOT;
  213         }
  214         
  215         np->n_fhp = nfhp;
  216         /*
  217          * For NFSv4, we have to attach the directory file handle and
  218          * file name, so that Open Ops can be done later.
  219          */
  220         if (nmp->nm_flag & NFSMNT_NFSV4) {
  221                 MALLOC(np->n_v4, struct nfsv4node *, sizeof (struct nfsv4node)
  222                     + dnp->n_fhp->nfh_len + cnp->cn_namelen - 1, M_NFSV4NODE,
  223                     M_WAITOK);
  224                 np->n_v4->n4_fhlen = dnp->n_fhp->nfh_len;
  225                 np->n_v4->n4_namelen = cnp->cn_namelen;
  226                 NFSBCOPY(dnp->n_fhp->nfh_fh, np->n_v4->n4_data,
  227                     dnp->n_fhp->nfh_len);
  228                 NFSBCOPY(cnp->cn_nameptr, NFS4NODENAME(np->n_v4),
  229                     cnp->cn_namelen);
  230         } else {
  231                 np->n_v4 = NULL;
  232         }
  233 
  234         /*
  235          * NFS supports recursive and shared locking.
  236          */
  237         VN_LOCK_AREC(vp);
  238         VN_LOCK_ASHARE(vp);
  239         lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
  240         error = insmntque(vp, mntp);
  241         if (error != 0) {
  242                 *npp = NULL;
  243                 mtx_destroy(&np->n_mtx);
  244                 FREE((caddr_t)nfhp, M_NFSFH);
  245                 if (np->n_v4 != NULL)
  246                         FREE((caddr_t)np->n_v4, M_NFSV4NODE);
  247                 uma_zfree(newnfsnode_zone, np);
  248                 return (error);
  249         }
  250         error = vfs_hash_insert(vp, hash, lkflags, 
  251             td, &nvp, newnfs_vncmpf, nfhp);
  252         if (error)
  253                 return (error);
  254         if (nvp != NULL) {
  255                 *npp = VTONFS(nvp);
  256                 /* vfs_hash_insert() vput()'s the losing vnode */
  257                 return (0);
  258         }
  259         *npp = np;
  260 
  261         return (0);
  262 }
  263 
  264 /*
   265  * Another variant of nfs_nget(). This one is only used by reopen. It
  266  * takes almost the same args as nfs_nget(), but only succeeds if an entry
  267  * exists in the cache. (Since files should already be "open" with a
  268  * vnode ref cnt on the node when reopen calls this, it should always
  269  * succeed.)
  270  * Also, don't get a vnode lock, since it may already be locked by some
  271  * other process that is handling it. This is ok, since all other threads
  272  * on the client are blocked by the nfsc_lock being exclusively held by the
  273  * caller of this function.
  274  */
  275 int
  276 nfscl_ngetreopen(struct mount *mntp, u_int8_t *fhp, int fhsize,
  277     struct thread *td, struct nfsnode **npp)
  278 {
  279         struct vnode *nvp;
  280         u_int hash;
  281         struct nfsfh *nfhp;
  282         int error;
  283 
  284         *npp = NULL;
  285         /* For forced dismounts, just return error. */
  286         if ((mntp->mnt_kern_flag & MNTK_UNMOUNTF))
  287                 return (EINTR);
  288         MALLOC(nfhp, struct nfsfh *, sizeof (struct nfsfh) + fhsize,
  289             M_NFSFH, M_WAITOK);
  290         bcopy(fhp, &nfhp->nfh_fh[0], fhsize);
  291         nfhp->nfh_len = fhsize;
  292 
  293         hash = fnv_32_buf(fhp, fhsize, FNV1_32_INIT);
  294 
  295         /*
  296          * First, try to get the vnode locked, but don't block for the lock.
  297          */
  298         error = vfs_hash_get(mntp, hash, (LK_EXCLUSIVE | LK_NOWAIT), td, &nvp,
  299             newnfs_vncmpf, nfhp);
  300         if (error == 0 && nvp != NULL) {
  301                 NFSVOPUNLOCK(nvp, 0);
  302         } else if (error == EBUSY) {
  303                 /*
  304                  * The LK_EXCLOTHER lock type tells nfs_lock1() to not try
  305                  * and lock the vnode, but just get a v_usecount on it.
  306                  * LK_NOWAIT is set so that when vget() returns ENOENT,
  307                  * vfs_hash_get() fails instead of looping.
  308                  * If this succeeds, it is safe so long as a vflush() with
  309                  * FORCECLOSE has not been done. Since the Renew thread is
  310                  * stopped and the MNTK_UNMOUNTF flag is set before doing
  311                  * a vflush() with FORCECLOSE, we should be ok here.
  312                  */
  313                 if ((mntp->mnt_kern_flag & MNTK_UNMOUNTF))
  314                         error = EINTR;
  315                 else
  316                         error = vfs_hash_get(mntp, hash,
  317                             (LK_EXCLOTHER | LK_NOWAIT), td, &nvp,
  318                             newnfs_vncmpf, nfhp);
  319         }
  320         FREE(nfhp, M_NFSFH);
  321         if (error)
  322                 return (error);
  323         if (nvp != NULL) {
  324                 *npp = VTONFS(nvp);
  325                 return (0);
  326         }
  327         return (EINVAL);
  328 }
  329 
  330 /*
  331  * Load the attribute cache (that lives in the nfsnode entry) with
   332  * the attributes of the second argument and,
   333  * iff vaper is not NULL,
   334  *    copy the attributes to *vaper.
  335  * Similar to nfs_loadattrcache(), except the attributes are passed in
  336  * instead of being parsed out of the mbuf list.
  337  */
  338 int
  339 nfscl_loadattrcache(struct vnode **vpp, struct nfsvattr *nap, void *nvaper,
  340     void *stuff, int writeattr, int dontshrink)
  341 {
  342         struct vnode *vp = *vpp;
  343         struct vattr *vap, *nvap = &nap->na_vattr, *vaper = nvaper;
  344         struct nfsnode *np;
  345         struct nfsmount *nmp;
  346         struct timespec mtime_save;
  347 
  348         /*
  349          * If v_type == VNON it is a new node, so fill in the v_type,
  350          * n_mtime fields. Check to see if it represents a special 
  351          * device, and if so, check for a possible alias. Once the
  352          * correct vnode has been obtained, fill in the rest of the
  353          * information.
  354          */
  355         np = VTONFS(vp);
  356         NFSLOCKNODE(np);
  357         if (vp->v_type != nvap->va_type) {
  358                 vp->v_type = nvap->va_type;
  359                 if (vp->v_type == VFIFO)
  360                         vp->v_op = &newnfs_fifoops;
  361                 np->n_mtime = nvap->va_mtime;
  362         }
  363         nmp = VFSTONFS(vp->v_mount);
  364         vap = &np->n_vattr.na_vattr;
  365         mtime_save = vap->va_mtime;
  366         if (writeattr) {
  367                 np->n_vattr.na_filerev = nap->na_filerev;
  368                 np->n_vattr.na_size = nap->na_size;
  369                 np->n_vattr.na_mtime = nap->na_mtime;
  370                 np->n_vattr.na_ctime = nap->na_ctime;
  371                 np->n_vattr.na_fsid = nap->na_fsid;
  372                 np->n_vattr.na_mode = nap->na_mode;
  373         } else {
  374                 NFSBCOPY((caddr_t)nap, (caddr_t)&np->n_vattr,
  375                     sizeof (struct nfsvattr));
  376         }
  377 
  378         /*
  379          * For NFSv4, if the node's fsid is not equal to the mount point's
  380          * fsid, return the low order 32bits of the node's fsid. This
  381          * allows getcwd(3) to work. There is a chance that the fsid might
  382          * be the same as a local fs, but since this is in an NFS mount
  383          * point, I don't think that will cause any problems?
  384          */
  385         if (NFSHASNFSV4(nmp) && NFSHASHASSETFSID(nmp) &&
  386             (nmp->nm_fsid[0] != np->n_vattr.na_filesid[0] ||
  387              nmp->nm_fsid[1] != np->n_vattr.na_filesid[1])) {
  388                 /*
  389                  * va_fsid needs to be set to some value derived from
  390                  * np->n_vattr.na_filesid that is not equal
  391                  * vp->v_mount->mnt_stat.f_fsid[0], so that it changes
  392                  * from the value used for the top level server volume
  393                  * in the mounted subtree.
  394                  */
  395                 if (vp->v_mount->mnt_stat.f_fsid.val[0] !=
  396                     (uint32_t)np->n_vattr.na_filesid[0])
  397                         vap->va_fsid = (uint32_t)np->n_vattr.na_filesid[0];
  398                 else
  399                         vap->va_fsid = (uint32_t)hash32_buf(
  400                             np->n_vattr.na_filesid, 2 * sizeof(uint64_t), 0);
  401         } else
  402                 vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
  403         np->n_attrstamp = time_second;
  404         if (vap->va_size != np->n_size) {
  405                 if (vap->va_type == VREG) {
  406                         if (dontshrink && vap->va_size < np->n_size) {
  407                                 /*
  408                                  * We've been told not to shrink the file;
  409                                  * zero np->n_attrstamp to indicate that
  410                                  * the attributes are stale.
  411                                  */
  412                                 vap->va_size = np->n_size;
  413                                 np->n_attrstamp = 0;
  414                         } else if (np->n_flag & NMODIFIED) {
  415                                 /*
  416                                  * We've modified the file: Use the larger
  417                                  * of our size, and the server's size.
  418                                  */
  419                                 if (vap->va_size < np->n_size) {
  420                                         vap->va_size = np->n_size;
  421                                 } else {
  422                                         np->n_size = vap->va_size;
  423                                         np->n_flag |= NSIZECHANGED;
  424                                 }
  425                         } else {
  426                                 np->n_size = vap->va_size;
  427                                 np->n_flag |= NSIZECHANGED;
  428                         }
  429                         vnode_pager_setsize(vp, np->n_size);
  430                 } else {
  431                         np->n_size = vap->va_size;
  432                 }
  433         }
  434         /*
  435          * The following checks are added to prevent a race between (say)
  436          * a READDIR+ and a WRITE. 
  437          * READDIR+, WRITE requests sent out.
  438          * READDIR+ resp, WRITE resp received on client.
  439          * However, the WRITE resp was handled before the READDIR+ resp
  440          * causing the post op attrs from the write to be loaded first
  441          * and the attrs from the READDIR+ to be loaded later. If this 
  442          * happens, we have stale attrs loaded into the attrcache.
  443          * We detect this by for the mtime moving back. We invalidate the 
  444          * attrcache when this happens.
  445          */
  446         if (timespeccmp(&mtime_save, &vap->va_mtime, >))
   447                 /* The mtime went backwards; mark the cached attributes stale. */
  448                 np->n_attrstamp = 0;
  449         if (vaper != NULL) {
  450                 NFSBCOPY((caddr_t)vap, (caddr_t)vaper, sizeof(*vap));
  451                 if (np->n_flag & NCHG) {
  452                         if (np->n_flag & NACC)
  453                                 vaper->va_atime = np->n_atim;
  454                         if (np->n_flag & NUPD)
  455                                 vaper->va_mtime = np->n_mtim;
  456                 }
  457         }
  458         NFSUNLOCKNODE(np);
  459         return (0);
  460 }
  461 
  462 /*
  463  * Fill in the client id name. For these bytes:
  464  * 1 - they must be unique
  465  * 2 - they should be persistent across client reboots
  466  * 1 is more critical than 2
  467  * Use the mount point's unique id plus either the uuid or, if that
  468  * isn't set, random junk.
  469  */
  470 void
  471 nfscl_fillclid(u_int64_t clval, char *uuid, u_int8_t *cp, u_int16_t idlen)
  472 {
  473         int uuidlen;
  474 
  475         /*
  476          * First, put in the 64bit mount point identifier.
  477          */
  478         if (idlen >= sizeof (u_int64_t)) {
  479                 NFSBCOPY((caddr_t)&clval, cp, sizeof (u_int64_t));
  480                 cp += sizeof (u_int64_t);
  481                 idlen -= sizeof (u_int64_t);
  482         }
  483 
  484         /*
  485          * If uuid is non-zero length, use it.
  486          */
  487         uuidlen = strlen(uuid);
  488         if (uuidlen > 0 && idlen >= uuidlen) {
  489                 NFSBCOPY(uuid, cp, uuidlen);
  490                 cp += uuidlen;
  491                 idlen -= uuidlen;
  492         }
  493 
  494         /*
  495          * This only normally happens if the uuid isn't set.
  496          */
  497         while (idlen > 0) {
  498                 *cp++ = (u_int8_t)(arc4random() % 256);
  499                 idlen--;
  500         }
  501 }
  502 
  503 /*
  504  * Fill in a lock owner name. For now, pid + the process's creation time.
  505  */
  506 void
  507 nfscl_filllockowner(void *id, u_int8_t *cp, int flags)
  508 {
  509         union {
  510                 u_int32_t       lval;
  511                 u_int8_t        cval[4];
  512         } tl;
  513         struct proc *p;
  514 
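              /*
               * For F_POSIX locks, the owner name is the pid followed by the
               * process start time (tv_sec, then tv_usec), each emitted as the
               * four bytes of a host-order 32-bit word via the union above.
               * nfscl_procdoesntexist() decodes the same layout.
               */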
  515         if (id == NULL) {
  516                 printf("NULL id\n");
  517                 bzero(cp, NFSV4CL_LOCKNAMELEN);
  518                 return;
  519         }
  520         if ((flags & F_POSIX) != 0) {
  521                 p = (struct proc *)id;
  522                 tl.lval = p->p_pid;
  523                 *cp++ = tl.cval[0];
  524                 *cp++ = tl.cval[1];
  525                 *cp++ = tl.cval[2];
  526                 *cp++ = tl.cval[3];
  527                 tl.lval = p->p_stats->p_start.tv_sec;
  528                 *cp++ = tl.cval[0];
  529                 *cp++ = tl.cval[1];
  530                 *cp++ = tl.cval[2];
  531                 *cp++ = tl.cval[3];
  532                 tl.lval = p->p_stats->p_start.tv_usec;
  533                 *cp++ = tl.cval[0];
  534                 *cp++ = tl.cval[1];
  535                 *cp++ = tl.cval[2];
  536                 *cp = tl.cval[3];
  537         } else if ((flags & F_FLOCK) != 0) {
  538                 bcopy(&id, cp, sizeof(id));
  539                 bzero(&cp[sizeof(id)], NFSV4CL_LOCKNAMELEN - sizeof(id));
  540         } else {
  541                 printf("nfscl_filllockowner: not F_POSIX or F_FLOCK\n");
  542                 bzero(cp, NFSV4CL_LOCKNAMELEN);
  543         }
  544 }
  545 
  546 /*
  547  * Find the parent process for the thread passed in as an argument.
  548  * If none exists, return NULL, otherwise return a thread for the parent.
  549  * (Can be any of the threads, since it is only used for td->td_proc.)
  550  */
  551 NFSPROC_T *
  552 nfscl_getparent(struct thread *td)
  553 {
  554         struct proc *p;
  555         struct thread *ptd;
  556 
  557         if (td == NULL)
  558                 return (NULL);
  559         p = td->td_proc;
  560         if (p->p_pid == 0)
  561                 return (NULL);
  562         p = p->p_pptr;
  563         if (p == NULL)
  564                 return (NULL);
  565         ptd = TAILQ_FIRST(&p->p_threads);
  566         return (ptd);
  567 }
  568 
  569 /*
  570  * Start up the renew kernel thread.
  571  */
  572 static void
  573 start_nfscl(void *arg)
  574 {
  575         struct nfsclclient *clp;
  576         struct thread *td;
  577 
  578         clp = (struct nfsclclient *)arg;
  579         td = TAILQ_FIRST(&clp->nfsc_renewthread->p_threads);
  580         nfscl_renewthread(clp, td);
  581         kproc_exit(0);
  582 }
  583 
  584 void
  585 nfscl_start_renewthread(struct nfsclclient *clp)
  586 {
  587 
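              /*
               * Create a kernel process for this client; its first thread runs
               * nfscl_renewthread() via start_nfscl() above.
               */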
  588         kproc_create(start_nfscl, (void *)clp, &clp->nfsc_renewthread, 0, 0,
  589             "nfscl");
  590 }
  591 
  592 /*
  593  * Handle wcc_data.
  594  * For NFSv4, it assumes that nfsv4_wccattr() was used to set up the getattr
  595  * as the first Op after PutFH.
  596  * (For NFSv4, the postop attributes are after the Op, so they can't be
  597  *  parsed here. A separate call to nfscl_postop_attr() is required.)
  598  */
  599 int
  600 nfscl_wcc_data(struct nfsrv_descript *nd, struct vnode *vp,
  601     struct nfsvattr *nap, int *flagp, int *wccflagp, void *stuff)
  602 {
  603         u_int32_t *tl;
  604         struct nfsnode *np = VTONFS(vp);
  605         struct nfsvattr nfsva;
  606         int error = 0;
  607 
  608         if (wccflagp != NULL)
  609                 *wccflagp = 0;
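              /*
               * For NFSv3, the wcc_data pre-op attributes carry the file's
               * mtime from before the operation; *wccflagp is set when that
               * mtime still matches the cached n_mtime, meaning no other
               * client modified the file since our cached copy.
               */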
  610         if (nd->nd_flag & ND_NFSV3) {
  611                 *flagp = 0;
  612                 NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
  613                 if (*tl == newnfs_true) {
  614                         NFSM_DISSECT(tl, u_int32_t *, 6 * NFSX_UNSIGNED);
  615                         if (wccflagp != NULL) {
  616                                 mtx_lock(&np->n_mtx);
  617                                 *wccflagp = (np->n_mtime.tv_sec ==
  618                                     fxdr_unsigned(u_int32_t, *(tl + 2)) &&
  619                                     np->n_mtime.tv_nsec ==
  620                                     fxdr_unsigned(u_int32_t, *(tl + 3)));
  621                                 mtx_unlock(&np->n_mtx);
  622                         }
  623                 }
  624                 error = nfscl_postop_attr(nd, nap, flagp, stuff);
  625         } else if ((nd->nd_flag & (ND_NOMOREDATA | ND_NFSV4 | ND_V4WCCATTR))
  626             == (ND_NFSV4 | ND_V4WCCATTR)) {
  627                 error = nfsv4_loadattr(nd, NULL, &nfsva, NULL,
  628                     NULL, 0, NULL, NULL, NULL, NULL, NULL, 0,
  629                     NULL, NULL, NULL, NULL, NULL);
  630                 if (error)
  631                         return (error);
  632                 /*
  633                  * Get rid of Op# and status for next op.
  634                  */
  635                 NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
  636                 if (*++tl)
  637                         nd->nd_flag |= ND_NOMOREDATA;
  638                 if (wccflagp != NULL &&
  639                     nfsva.na_vattr.va_mtime.tv_sec != 0) {
  640                         mtx_lock(&np->n_mtx);
  641                         *wccflagp = (np->n_mtime.tv_sec ==
  642                             nfsva.na_vattr.va_mtime.tv_sec &&
  643                             np->n_mtime.tv_nsec ==
   644                             nfsva.na_vattr.va_mtime.tv_nsec);
  645                         mtx_unlock(&np->n_mtx);
  646                 }
  647         }
  648 nfsmout:
  649         return (error);
  650 }
  651 
  652 /*
  653  * Get postop attributes.
  654  */
  655 int
  656 nfscl_postop_attr(struct nfsrv_descript *nd, struct nfsvattr *nap, int *retp,
  657     void *stuff)
  658 {
  659         u_int32_t *tl;
  660         int error = 0;
  661 
  662         *retp = 0;
  663         if (nd->nd_flag & ND_NOMOREDATA)
  664                 return (error);
  665         if (nd->nd_flag & ND_NFSV3) {
  666                 NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
  667                 *retp = fxdr_unsigned(int, *tl);
  668         } else if (nd->nd_flag & ND_NFSV4) {
  669                 /*
  670                  * For NFSv4, the postop attr are at the end, so no point
  671                  * in looking if nd_repstat != 0.
  672                  */
  673                 if (!nd->nd_repstat) {
  674                         NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
  675                         if (*(tl + 1))
  676                                 /* should never happen since nd_repstat != 0 */
  677                                 nd->nd_flag |= ND_NOMOREDATA;
  678                         else
  679                                 *retp = 1;
  680                 }
  681         } else if (!nd->nd_repstat) {
  682                 /* For NFSv2, the attributes are here iff nd_repstat == 0 */
  683                 *retp = 1;
  684         }
  685         if (*retp) {
  686                 error = nfsm_loadattr(nd, nap);
  687                 if (error)
  688                         *retp = 0;
  689         }
  690 nfsmout:
  691         return (error);
  692 }
  693 
  694 /*
   695  * Fill in the settable attributes. The NFSSATTR_FULL flag indicates whether
   696  * to fill them all in or just mode and time.
  697  */
  698 void
  699 nfscl_fillsattr(struct nfsrv_descript *nd, struct vattr *vap,
  700     struct vnode *vp, int flags, u_int32_t rdev)
  701 {
  702         u_int32_t *tl;
  703         struct nfsv2_sattr *sp;
  704         nfsattrbit_t attrbits;
  705 
  706         switch (nd->nd_flag & (ND_NFSV2 | ND_NFSV3 | ND_NFSV4)) {
  707         case ND_NFSV2:
  708                 NFSM_BUILD(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
  709                 if (vap->va_mode == (mode_t)VNOVAL)
  710                         sp->sa_mode = newnfs_xdrneg1;
  711                 else
  712                         sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
  713                 if (vap->va_uid == (uid_t)VNOVAL)
  714                         sp->sa_uid = newnfs_xdrneg1;
  715                 else
  716                         sp->sa_uid = txdr_unsigned(vap->va_uid);
  717                 if (vap->va_gid == (gid_t)VNOVAL)
  718                         sp->sa_gid = newnfs_xdrneg1;
  719                 else
  720                         sp->sa_gid = txdr_unsigned(vap->va_gid);
  721                 if (flags & NFSSATTR_SIZE0)
  722                         sp->sa_size = 0;
  723                 else if (flags & NFSSATTR_SIZENEG1)
  724                         sp->sa_size = newnfs_xdrneg1;
  725                 else if (flags & NFSSATTR_SIZERDEV)
  726                         sp->sa_size = txdr_unsigned(rdev);
  727                 else
  728                         sp->sa_size = txdr_unsigned(vap->va_size);
  729                 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
  730                 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
  731                 break;
  732         case ND_NFSV3:
  733                 if (vap->va_mode != (mode_t)VNOVAL) {
  734                         NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
  735                         *tl++ = newnfs_true;
  736                         *tl = txdr_unsigned(vap->va_mode);
  737                 } else {
  738                         NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
  739                         *tl = newnfs_false;
  740                 }
  741                 if ((flags & NFSSATTR_FULL) && vap->va_uid != (uid_t)VNOVAL) {
  742                         NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
  743                         *tl++ = newnfs_true;
  744                         *tl = txdr_unsigned(vap->va_uid);
  745                 } else {
  746                         NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
  747                         *tl = newnfs_false;
  748                 }
  749                 if ((flags & NFSSATTR_FULL) && vap->va_gid != (gid_t)VNOVAL) {
  750                         NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
  751                         *tl++ = newnfs_true;
  752                         *tl = txdr_unsigned(vap->va_gid);
  753                 } else {
  754                         NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
  755                         *tl = newnfs_false;
  756                 }
  757                 if ((flags & NFSSATTR_FULL) && vap->va_size != VNOVAL) {
  758                         NFSM_BUILD(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
  759                         *tl++ = newnfs_true;
  760                         txdr_hyper(vap->va_size, tl);
  761                 } else {
  762                         NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
  763                         *tl = newnfs_false;
  764                 }
  765                 if (vap->va_atime.tv_sec != VNOVAL) {
  766                         if ((vap->va_vaflags & VA_UTIMES_NULL) == 0) {
  767                                 NFSM_BUILD(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
  768                                 *tl++ = txdr_unsigned(NFSV3SATTRTIME_TOCLIENT);
  769                                 txdr_nfsv3time(&vap->va_atime, tl);
  770                         } else {
  771                                 NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
  772                                 *tl = txdr_unsigned(NFSV3SATTRTIME_TOSERVER);
  773                         }
  774                 } else {
  775                         NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
  776                         *tl = txdr_unsigned(NFSV3SATTRTIME_DONTCHANGE);
  777                 }
  778                 if (vap->va_mtime.tv_sec != VNOVAL) {
  779                         if ((vap->va_vaflags & VA_UTIMES_NULL) == 0) {
  780                                 NFSM_BUILD(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
  781                                 *tl++ = txdr_unsigned(NFSV3SATTRTIME_TOCLIENT);
  782                                 txdr_nfsv3time(&vap->va_mtime, tl);
  783                         } else {
  784                                 NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
  785                                 *tl = txdr_unsigned(NFSV3SATTRTIME_TOSERVER);
  786                         }
  787                 } else {
  788                         NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
  789                         *tl = txdr_unsigned(NFSV3SATTRTIME_DONTCHANGE);
  790                 }
  791                 break;
  792         case ND_NFSV4:
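                      /*
                       * For NFSv4, build a bitmap of the attributes being set and
                       * let nfsv4_fillattr() XDR-encode them.
                       */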
  793                 NFSZERO_ATTRBIT(&attrbits);
  794                 if (vap->va_mode != (mode_t)VNOVAL)
  795                         NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_MODE);
  796                 if ((flags & NFSSATTR_FULL) && vap->va_uid != (uid_t)VNOVAL)
  797                         NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_OWNER);
  798                 if ((flags & NFSSATTR_FULL) && vap->va_gid != (gid_t)VNOVAL)
  799                         NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_OWNERGROUP);
  800                 if ((flags & NFSSATTR_FULL) && vap->va_size != VNOVAL)
  801                         NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_SIZE);
  802                 if (vap->va_atime.tv_sec != VNOVAL)
  803                         NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_TIMEACCESSSET);
  804                 if (vap->va_mtime.tv_sec != VNOVAL)
  805                         NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_TIMEMODIFYSET);
  806                 (void) nfsv4_fillattr(nd, vp->v_mount, vp, NULL, vap, NULL, 0,
  807                     &attrbits, NULL, NULL, 0, 0, 0, 0, (uint64_t)0);
  808                 break;
   809         }
  810 }
  811 
  812 /*
  813  * nfscl_request() - mostly a wrapper for newnfs_request().
  814  */
  815 int
  816 nfscl_request(struct nfsrv_descript *nd, struct vnode *vp, NFSPROC_T *p,
  817     struct ucred *cred, void *stuff)
  818 {
  819         int ret, vers;
  820         struct nfsmount *nmp;
  821 
  822         nmp = VFSTONFS(vp->v_mount);
  823         if (nd->nd_flag & ND_NFSV4)
  824                 vers = NFS_VER4;
  825         else if (nd->nd_flag & ND_NFSV3)
  826                 vers = NFS_VER3;
  827         else
  828                 vers = NFS_VER2;
  829         ret = newnfs_request(nd, nmp, NULL, &nmp->nm_sockreq, vp, p, cred,
  830                 NFS_PROG, vers, NULL, 1, NULL);
  831         return (ret);
  832 }
  833 
  834 /*
   835  * Fill in this BSD's variant of struct statfs using the nfsstatfs info.
  836  */
  837 void
  838 nfscl_loadsbinfo(struct nfsmount *nmp, struct nfsstatfs *sfp, void *statfs)
  839 {
  840         struct statfs *sbp = (struct statfs *)statfs;
  841 
  842         if (nmp->nm_flag & (NFSMNT_NFSV3 | NFSMNT_NFSV4)) {
  843                 sbp->f_bsize = NFS_FABLKSIZE;
  844                 sbp->f_blocks = sfp->sf_tbytes / NFS_FABLKSIZE;
  845                 sbp->f_bfree = sfp->sf_fbytes / NFS_FABLKSIZE;
  846                 /*
  847                  * Although sf_abytes is uint64_t and f_bavail is int64_t,
  848                  * the value after dividing by NFS_FABLKSIZE is small
  849                  * enough that it will fit in 63bits, so it is ok to
  850                  * assign it to f_bavail without fear that it will become
  851                  * negative.
  852                  */
  853                 sbp->f_bavail = sfp->sf_abytes / NFS_FABLKSIZE;
  854                 sbp->f_files = sfp->sf_tfiles;
  855                 /* Since f_ffree is int64_t, clip it to 63bits. */
  856                 if (sfp->sf_ffiles > INT64_MAX)
  857                         sbp->f_ffree = INT64_MAX;
  858                 else
  859                         sbp->f_ffree = sfp->sf_ffiles;
  860         } else if ((nmp->nm_flag & NFSMNT_NFSV4) == 0) {
  861                 /*
  862                  * The type casts to (int32_t) ensure that this code is
  863                  * compatible with the old NFS client, in that it will
  864                  * propagate bit31 to the high order bits. This may or may
  865                  * not be correct for NFSv2, but since it is a legacy
  866                  * environment, I'd rather retain backwards compatibility.
  867                  */
  868                 sbp->f_bsize = (int32_t)sfp->sf_bsize;
  869                 sbp->f_blocks = (int32_t)sfp->sf_blocks;
  870                 sbp->f_bfree = (int32_t)sfp->sf_bfree;
  871                 sbp->f_bavail = (int32_t)sfp->sf_bavail;
  872                 sbp->f_files = 0;
  873                 sbp->f_ffree = 0;
  874         }
  875 }
  876 
  877 /*
  878  * Use the fsinfo stuff to update the mount point.
  879  */
  880 void
  881 nfscl_loadfsinfo(struct nfsmount *nmp, struct nfsfsinfo *fsp)
  882 {
  883 
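              /*
               * Clamp the write size to the server's preferred and maximum
               * transfer sizes, rounded to a multiple of NFS_FABLKSIZE; the
               * read and readdir sizes below get the same treatment, with
               * readdir rounded to NFS_DIRBLKSIZ.
               */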
  884         if ((nmp->nm_wsize == 0 || fsp->fs_wtpref < nmp->nm_wsize) &&
  885             fsp->fs_wtpref >= NFS_FABLKSIZE)
  886                 nmp->nm_wsize = (fsp->fs_wtpref + NFS_FABLKSIZE - 1) &
  887                     ~(NFS_FABLKSIZE - 1);
  888         if (fsp->fs_wtmax < nmp->nm_wsize && fsp->fs_wtmax > 0) {
  889                 nmp->nm_wsize = fsp->fs_wtmax & ~(NFS_FABLKSIZE - 1);
  890                 if (nmp->nm_wsize == 0)
  891                         nmp->nm_wsize = fsp->fs_wtmax;
  892         }
  893         if (nmp->nm_wsize < NFS_FABLKSIZE)
  894                 nmp->nm_wsize = NFS_FABLKSIZE;
  895         if ((nmp->nm_rsize == 0 || fsp->fs_rtpref < nmp->nm_rsize) &&
  896             fsp->fs_rtpref >= NFS_FABLKSIZE)
  897                 nmp->nm_rsize = (fsp->fs_rtpref + NFS_FABLKSIZE - 1) &
  898                     ~(NFS_FABLKSIZE - 1);
  899         if (fsp->fs_rtmax < nmp->nm_rsize && fsp->fs_rtmax > 0) {
  900                 nmp->nm_rsize = fsp->fs_rtmax & ~(NFS_FABLKSIZE - 1);
  901                 if (nmp->nm_rsize == 0)
  902                         nmp->nm_rsize = fsp->fs_rtmax;
  903         }
  904         if (nmp->nm_rsize < NFS_FABLKSIZE)
  905                 nmp->nm_rsize = NFS_FABLKSIZE;
  906         if ((nmp->nm_readdirsize == 0 || fsp->fs_dtpref < nmp->nm_readdirsize)
  907             && fsp->fs_dtpref >= NFS_DIRBLKSIZ)
  908                 nmp->nm_readdirsize = (fsp->fs_dtpref + NFS_DIRBLKSIZ - 1) &
  909                     ~(NFS_DIRBLKSIZ - 1);
  910         if (fsp->fs_rtmax < nmp->nm_readdirsize && fsp->fs_rtmax > 0) {
  911                 nmp->nm_readdirsize = fsp->fs_rtmax & ~(NFS_DIRBLKSIZ - 1);
  912                 if (nmp->nm_readdirsize == 0)
  913                         nmp->nm_readdirsize = fsp->fs_rtmax;
  914         }
  915         if (nmp->nm_readdirsize < NFS_DIRBLKSIZ)
  916                 nmp->nm_readdirsize = NFS_DIRBLKSIZ;
  917         if (fsp->fs_maxfilesize > 0 &&
  918             fsp->fs_maxfilesize < nmp->nm_maxfilesize)
  919                 nmp->nm_maxfilesize = fsp->fs_maxfilesize;
  920         nmp->nm_mountp->mnt_stat.f_iosize = newnfs_iosize(nmp);
  921         nmp->nm_state |= NFSSTA_GOTFSINFO;
  922 }
  923 
  924 /*
   925  * Get a pointer to my IP address and return it.
  926  * Return NULL if you can't find one.
  927  */
  928 u_int8_t *
  929 nfscl_getmyip(struct nfsmount *nmp, int *isinet6p)
  930 {
  931         struct sockaddr_in sad, *sin;
  932         struct rtentry *rt;
  933         u_int8_t *retp = NULL;
  934         static struct in_addr laddr;
  935 
  936         *isinet6p = 0;
  937         /*
   938          * Look up a route for the destination address.
  939          */
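              /*
               * The matched interface address is copied into static storage
               * (laddr or laddr6), so the returned pointer remains valid after
               * the route entry has been released.
               */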
  940         if (nmp->nm_nam->sa_family == AF_INET) {
  941                 bzero(&sad, sizeof (sad));
  942                 sin = (struct sockaddr_in *)nmp->nm_nam;
  943                 sad.sin_family = AF_INET;
  944                 sad.sin_len = sizeof (struct sockaddr_in);
  945                 sad.sin_addr.s_addr = sin->sin_addr.s_addr;
  946                 CURVNET_SET(CRED_TO_VNET(nmp->nm_sockreq.nr_cred));
  947                 rt = rtalloc1_fib((struct sockaddr *)&sad, 0, 0UL,
  948                      curthread->td_proc->p_fibnum);
  949                 if (rt != NULL) {
  950                         if (rt->rt_ifp != NULL &&
  951                             rt->rt_ifa != NULL &&
  952                             ((rt->rt_ifp->if_flags & IFF_LOOPBACK) == 0) &&
  953                             rt->rt_ifa->ifa_addr->sa_family == AF_INET) {
  954                                 sin = (struct sockaddr_in *)
  955                                     rt->rt_ifa->ifa_addr;
  956                                 laddr.s_addr = sin->sin_addr.s_addr;
  957                                 retp = (u_int8_t *)&laddr;
  958                         }
  959                         RTFREE_LOCKED(rt);
  960                 }
  961                 CURVNET_RESTORE();
  962 #ifdef INET6
  963         } else if (nmp->nm_nam->sa_family == AF_INET6) {
  964                 struct sockaddr_in6 sad6, *sin6;
  965                 static struct in6_addr laddr6;
  966 
  967                 bzero(&sad6, sizeof (sad6));
  968                 sin6 = (struct sockaddr_in6 *)nmp->nm_nam;
  969                 sad6.sin6_family = AF_INET6;
  970                 sad6.sin6_len = sizeof (struct sockaddr_in6);
  971                 sad6.sin6_addr = sin6->sin6_addr;
  972                 CURVNET_SET(CRED_TO_VNET(nmp->nm_sockreq.nr_cred));
  973                 rt = rtalloc1_fib((struct sockaddr *)&sad6, 0, 0UL,
  974                      curthread->td_proc->p_fibnum);
  975                 if (rt != NULL) {
  976                         if (rt->rt_ifp != NULL &&
  977                             rt->rt_ifa != NULL &&
  978                             ((rt->rt_ifp->if_flags & IFF_LOOPBACK) == 0) &&
  979                             rt->rt_ifa->ifa_addr->sa_family == AF_INET6) {
  980                                 sin6 = (struct sockaddr_in6 *)
  981                                     rt->rt_ifa->ifa_addr;
  982                                 laddr6 = sin6->sin6_addr;
  983                                 retp = (u_int8_t *)&laddr6;
  984                                 *isinet6p = 1;
  985                         }
  986                         RTFREE_LOCKED(rt);
  987                 }
  988                 CURVNET_RESTORE();
  989 #endif
  990         }
  991         return (retp);
  992 }
  993 
  994 /*
  995  * Copy NFS uid, gids from the cred structure.
  996  */
  997 void
  998 newnfs_copyincred(struct ucred *cr, struct nfscred *nfscr)
  999 {
 1000         int i;
 1001 
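              /*
               * Copy at most NFS_MAXGRPS + 1 group entries: cr_groups[0] is the
               * effective gid and the rest are the supplementary groups.
               */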
 1002         KASSERT(cr->cr_ngroups >= 0,
 1003             ("newnfs_copyincred: negative cr_ngroups"));
 1004         nfscr->nfsc_uid = cr->cr_uid;
 1005         nfscr->nfsc_ngroups = MIN(cr->cr_ngroups, NFS_MAXGRPS + 1);
 1006         for (i = 0; i < nfscr->nfsc_ngroups; i++)
 1007                 nfscr->nfsc_groups[i] = cr->cr_groups[i];
 1008 }
 1009 
 1010 
 1011 /*
 1012  * Do any client specific initialization.
 1013  */
 1014 void
 1015 nfscl_init(void)
 1016 {
 1017         static int inited = 0;
 1018 
 1019         if (inited)
 1020                 return;
 1021         inited = 1;
 1022         nfscl_inited = 1;
 1023         ncl_pbuf_freecnt = nswbuf / 2 + 1;
 1024 }
 1025 
 1026 /*
 1027  * Check each of the attributes to be set, to ensure they aren't already
 1028  * the correct value. Disable setting ones already correct.
 1029  */
 1030 int
 1031 nfscl_checksattr(struct vattr *vap, struct nfsvattr *nvap)
 1032 {
 1033 
 1034         if (vap->va_mode != (mode_t)VNOVAL) {
 1035                 if (vap->va_mode == nvap->na_mode)
 1036                         vap->va_mode = (mode_t)VNOVAL;
 1037         }
 1038         if (vap->va_uid != (uid_t)VNOVAL) {
 1039                 if (vap->va_uid == nvap->na_uid)
 1040                         vap->va_uid = (uid_t)VNOVAL;
 1041         }
 1042         if (vap->va_gid != (gid_t)VNOVAL) {
 1043                 if (vap->va_gid == nvap->na_gid)
 1044                         vap->va_gid = (gid_t)VNOVAL;
 1045         }
 1046         if (vap->va_size != VNOVAL) {
 1047                 if (vap->va_size == nvap->na_size)
 1048                         vap->va_size = VNOVAL;
 1049         }
 1050 
 1051         /*
 1052          * We are normally called with only a partially initialized
  1053          * VAP.  Since the NFSv3 spec says that the server may use the
  1054          * file attributes to store the verifier, the spec requires
  1055          * us to do a SETATTR RPC. FreeBSD servers store the verifier
  1056          * in atime, but we can't really assume that all servers will,
  1057          * so we ensure that our SETATTR sets both atime and mtime.
 1058          */
 1059         if (vap->va_mtime.tv_sec == VNOVAL)
 1060                 vfs_timestamp(&vap->va_mtime);
 1061         if (vap->va_atime.tv_sec == VNOVAL)
 1062                 vap->va_atime = vap->va_mtime;
 1063         return (1);
 1064 }
 1065 
 1066 /*
 1067  * Map nfsv4 errors to errno.h errors.
 1068  * The uid and gid arguments are only used for NFSERR_BADOWNER and that
 1069  * error should only be returned for the Open, Create and Setattr Ops.
 1070  * As such, most calls can just pass in 0 for those arguments.
 1071  */
 1072 APPLESTATIC int
 1073 nfscl_maperr(struct thread *td, int error, uid_t uid, gid_t gid)
 1074 {
 1075         struct proc *p;
 1076 
 1077         if (error < 10000)
 1078                 return (error);
 1079         if (td != NULL)
 1080                 p = td->td_proc;
 1081         else
 1082                 p = NULL;
 1083         switch (error) {
 1084         case NFSERR_BADOWNER:
 1085                 tprintf(p, LOG_INFO,
 1086                     "No name and/or group mapping for uid,gid:(%d,%d)\n",
 1087                     uid, gid);
 1088                 return (EPERM);
 1089         case NFSERR_STALECLIENTID:
 1090         case NFSERR_STALESTATEID:
 1091         case NFSERR_EXPIRED:
 1092         case NFSERR_BADSTATEID:
 1093                 printf("nfsv4 recover err returned %d\n", error);
 1094                 return (EIO);
 1095         case NFSERR_BADHANDLE:
 1096         case NFSERR_SERVERFAULT:
 1097         case NFSERR_BADTYPE:
 1098         case NFSERR_FHEXPIRED:
 1099         case NFSERR_RESOURCE:
 1100         case NFSERR_MOVED:
 1101         case NFSERR_NOFILEHANDLE:
 1102         case NFSERR_MINORVERMISMATCH:
 1103         case NFSERR_OLDSTATEID:
 1104         case NFSERR_BADSEQID:
 1105         case NFSERR_LEASEMOVED:
 1106         case NFSERR_RECLAIMBAD:
 1107         case NFSERR_BADXDR:
 1108         case NFSERR_BADCHAR:
 1109         case NFSERR_BADNAME:
 1110         case NFSERR_OPILLEGAL:
 1111                 printf("nfsv4 client/server protocol prob err=%d\n",
 1112                     error);
 1113                 return (EIO);
 1114         default:
 1115                 tprintf(p, LOG_INFO, "nfsv4 err=%d\n", error);
 1116                 return (EIO);
  1117         }
 1118 }
 1119 
 1120 /*
 1121  * Locate a process by number; return only "live" processes -- i.e., neither
 1122  * zombies nor newly born but incompletely initialized processes.  By not
 1123  * returning processes in the PRS_NEW state, we allow callers to avoid
 1124  * testing for that condition to avoid dereferencing p_ucred, et al.
  1125  * Identical to pfind() in kern_proc.c, except it assumes the list is
 1126  * already locked.
 1127  */
 1128 static struct proc *
 1129 pfind_locked(pid_t pid)
 1130 {
 1131         struct proc *p;
 1132 
 1133         LIST_FOREACH(p, PIDHASH(pid), p_hash)
 1134                 if (p->p_pid == pid) {
 1135                         PROC_LOCK(p);
 1136                         if (p->p_state == PRS_NEW) {
 1137                                 PROC_UNLOCK(p);
 1138                                 p = NULL;
 1139                         }
 1140                         break;
 1141                 }
 1142         return (p);
 1143 }
 1144 
 1145 /*
 1146  * Check to see if the process for this owner exists. Return 1 if it doesn't
 1147  * and 0 otherwise.
 1148  */
 1149 int
 1150 nfscl_procdoesntexist(u_int8_t *own)
 1151 {
 1152         union {
 1153                 u_int32_t       lval;
 1154                 u_int8_t        cval[4];
 1155         } tl;
 1156         struct proc *p;
 1157         pid_t pid;
 1158         int ret = 0;
 1159 
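              /*
               * Decode the owner string built by nfscl_filllockowner(): the
               * first four bytes are the pid and the remaining eight bytes are
               * the recorded process start time (tv_sec, then tv_usec).
               */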
 1160         tl.cval[0] = *own++;
 1161         tl.cval[1] = *own++;
 1162         tl.cval[2] = *own++;
 1163         tl.cval[3] = *own++;
 1164         pid = tl.lval;
 1165         p = pfind_locked(pid);
 1166         if (p == NULL)
 1167                 return (1);
 1168         if (p->p_stats == NULL) {
 1169                 PROC_UNLOCK(p);
 1170                 return (0);
 1171         }
 1172         tl.cval[0] = *own++;
 1173         tl.cval[1] = *own++;
 1174         tl.cval[2] = *own++;
 1175         tl.cval[3] = *own++;
 1176         if (tl.lval != p->p_stats->p_start.tv_sec) {
 1177                 ret = 1;
 1178         } else {
 1179                 tl.cval[0] = *own++;
 1180                 tl.cval[1] = *own++;
 1181                 tl.cval[2] = *own++;
 1182                 tl.cval[3] = *own;
 1183                 if (tl.lval != p->p_stats->p_start.tv_usec)
 1184                         ret = 1;
 1185         }
 1186         PROC_UNLOCK(p);
 1187         return (ret);
 1188 }
 1189 
 1190 /*
 1191  * - nfs pseudo system call for the client
 1192  */
 1193 /*
 1194  * MPSAFE
 1195  */
 1196 static int
 1197 nfssvc_nfscl(struct thread *td, struct nfssvc_args *uap)
 1198 {
 1199         struct file *fp;
 1200         struct nfscbd_args nfscbdarg;
 1201         struct nfsd_nfscbd_args nfscbdarg2;
 1202         int error;
 1203 
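              /*
               * NFSSVC_CBADDSOCK registers a socket passed in by the NFSv4
               * callback daemon (nfscbd), while NFSSVC_NFSCBD enters the
               * daemon's kernel service loop via nfscbd_nfsd().
               */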
 1204         if (uap->flag & NFSSVC_CBADDSOCK) {
 1205                 error = copyin(uap->argp, (caddr_t)&nfscbdarg, sizeof(nfscbdarg));
 1206                 if (error)
 1207                         return (error);
 1208                 if ((error = fget(td, nfscbdarg.sock, &fp)) != 0) {
 1209                         return (error);
 1210                 }
 1211                 if (fp->f_type != DTYPE_SOCKET) {
 1212                         fdrop(fp, td);
 1213                         return (EPERM);
 1214                 }
 1215                 error = nfscbd_addsock(fp);
 1216                 fdrop(fp, td);
 1217                 if (!error && nfscl_enablecallb == 0) {
 1218                         nfsv4_cbport = nfscbdarg.port;
 1219                         nfscl_enablecallb = 1;
 1220                 }
 1221         } else if (uap->flag & NFSSVC_NFSCBD) {
 1222                 if (uap->argp == NULL) 
 1223                         return (EINVAL);
 1224                 error = copyin(uap->argp, (caddr_t)&nfscbdarg2,
 1225                     sizeof(nfscbdarg2));
 1226                 if (error)
 1227                         return (error);
 1228                 error = nfscbd_nfsd(td, &nfscbdarg2);
 1229         } else {
 1230                 error = EINVAL;
 1231         }
 1232         return (error);
 1233 }
 1234 
 1235 extern int (*nfsd_call_nfscl)(struct thread *, struct nfssvc_args *);
 1236 
 1237 /*
 1238  * Called once to initialize data structures...
 1239  */
 1240 static int
 1241 nfscl_modevent(module_t mod, int type, void *data)
 1242 {
 1243         int error = 0;
 1244         static int loaded = 0;
 1245 
 1246         switch (type) {
 1247         case MOD_LOAD:
 1248                 if (loaded)
 1249                         return (0);
 1250                 newnfs_portinit();
 1251                 mtx_init(&nfs_clstate_mutex, "nfs_clstate_mutex", NULL,
 1252                     MTX_DEF);
 1253                 mtx_init(&ncl_iod_mutex, "ncl_iod_mutex", NULL, MTX_DEF);
 1254                 nfscl_init();
 1255                 NFSD_LOCK();
 1256                 nfsrvd_cbinit(0);
 1257                 NFSD_UNLOCK();
 1258                 ncl_call_invalcaches = ncl_invalcaches;
 1259                 nfsd_call_nfscl = nfssvc_nfscl;
 1260                 loaded = 1;
 1261                 break;
 1262 
 1263         case MOD_UNLOAD:
 1264                 if (nfs_numnfscbd != 0) {
 1265                         error = EBUSY;
 1266                         break;
 1267                 }
 1268 
 1269                 /*
 1270                  * XXX: Unloading of nfscl module is unsupported.
 1271                  */
 1272 #if 0
 1273                 ncl_call_invalcaches = NULL;
 1274                 nfsd_call_nfscl = NULL;
 1275                 /* and get rid of the mutexes */
 1276                 mtx_destroy(&nfs_clstate_mutex);
 1277                 mtx_destroy(&ncl_iod_mutex);
 1278                 loaded = 0;
 1279                 break;
 1280 #else
 1281                 /* FALLTHROUGH */
 1282 #endif
 1283         default:
 1284                 error = EOPNOTSUPP;
 1285                 break;
 1286         }
 1287         return error;
 1288 }
 1289 static moduledata_t nfscl_mod = {
 1290         "nfscl",
 1291         nfscl_modevent,
 1292         NULL,
 1293 };
 1294 DECLARE_MODULE(nfscl, nfscl_mod, SI_SUB_VFS, SI_ORDER_FIRST);
 1295 
 1296 /* So that loader and kldload(2) can find us, wherever we are.. */
 1297 MODULE_VERSION(nfscl, 1);
 1298 MODULE_DEPEND(nfscl, nfscommon, 1, 1, 1);
 1299 MODULE_DEPEND(nfscl, krpc, 1, 1, 1);
 1300 MODULE_DEPEND(nfscl, nfssvc, 1, 1, 1);
 1301 MODULE_DEPEND(nfscl, nfslock, 1, 1, 1);
 1302 
