
FreeBSD/Linux Kernel Cross Reference
sys/nfsclient/nfs_socket.c


    1 /*-
    2  * Copyright (c) 1989, 1991, 1993, 1995
    3  *      The Regents of the University of California.  All rights reserved.
    4  *
    5  * This code is derived from software contributed to Berkeley by
    6  * Rick Macklem at The University of Guelph.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  * 4. Neither the name of the University nor the names of its contributors
   17  *    may be used to endorse or promote products derived from this software
   18  *    without specific prior written permission.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   30  * SUCH DAMAGE.
   31  *
   32  *      @(#)nfs_socket.c        8.5 (Berkeley) 3/30/95
   33  */
   34 
   35 #include <sys/cdefs.h>
   36 __FBSDID("$FreeBSD$");
   37 
   38 /*
   39  * Socket operations for use by nfs
   40  */
   41 
   42 #include "opt_inet6.h"
   43 
   44 #include <sys/param.h>
   45 #include <sys/systm.h>
   46 #include <sys/kernel.h>
   47 #include <sys/lock.h>
   48 #include <sys/malloc.h>
   49 #include <sys/mbuf.h>
   50 #include <sys/mount.h>
   51 #include <sys/mutex.h>
   52 #include <sys/proc.h>
   53 #include <sys/protosw.h>
   54 #include <sys/signalvar.h>
   55 #include <sys/syscallsubr.h>
   56 #include <sys/socket.h>
   57 #include <sys/socketvar.h>
   58 #include <sys/sysctl.h>
   59 #include <sys/syslog.h>
   60 #include <sys/vnode.h>
   61 
   62 #include <netinet/in.h>
   63 #include <netinet/tcp.h>
   64 
   65 #include <rpc/rpcclnt.h>
   66 
   67 #include <nfs/rpcv2.h>
   68 #include <nfs/nfsproto.h>
   69 #include <nfsclient/nfs.h>
   70 #include <nfs/xdr_subs.h>
   71 #include <nfsclient/nfsm_subs.h>
   72 #include <nfsclient/nfsmount.h>
   73 #include <nfsclient/nfsnode.h>
   74 
   75 #include <nfs4client/nfs4.h>
   76 
   77 #define TRUE    1
   78 #define FALSE   0
   79 
   80 extern u_int32_t nfs_xid;
   81 extern struct mtx nfs_xid_mtx;
   82 
   83 static int      nfs_realign_test;
   84 static int      nfs_realign_count;
   85 static int      nfs_bufpackets = 4;
   86 static int      nfs_reconnects;
   87 static int      nfs3_jukebox_delay = 10;
   88 static int      nfs_skip_wcc_data_onerr = 1;
   89 
   90 SYSCTL_DECL(_vfs_nfs);
   91 
    92 SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_test, CTLFLAG_RW, &nfs_realign_test, 0, "number of realignment tests done on replies");
    93 SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_count, CTLFLAG_RW, &nfs_realign_count, 0, "number of replies that needed realignment");
    94 SYSCTL_INT(_vfs_nfs, OID_AUTO, bufpackets, CTLFLAG_RW, &nfs_bufpackets, 0, "number of packets the socket buffers are sized to hold");
   95 SYSCTL_INT(_vfs_nfs, OID_AUTO, reconnects, CTLFLAG_RD, &nfs_reconnects, 0,
   96     "number of times the nfs client has had to reconnect");
   97 SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs3_jukebox_delay, CTLFLAG_RW, &nfs3_jukebox_delay, 0,
   98            "number of seconds to delay a retry after receiving EJUKEBOX");
    99 SYSCTL_INT(_vfs_nfs, OID_AUTO, skip_wcc_data_onerr, CTLFLAG_RW, &nfs_skip_wcc_data_onerr, 0, "disregard wcc data in replies that carry an NFS error");
  100 
  101 /*
  102  * A congestion window for outstanding rpcs is maintained per mount
  103  * point. The cwnd size is adjusted in roughly the way that
  104  * Van Jacobson, "Congestion Avoidance and Control", in Proceedings of
  105  * SIGCOMM '88, ACM, August 1988,
  106  * describes for TCP: the cwnd is chopped in half on a retransmit timeout
  107  * and incremented by 1/cwnd when each rpc reply is received while a full
  108  * cwnd of rpcs is in progress.
  109  * (The sent count and cwnd are scaled for integer arithmetic.)
  110  * Variants of "slow start" were tried and found to be too much of a
  111  * performance hit (average rtt 3 times larger), which I suspect is due
  112  * to the large rtt that nfs rpcs have.
  113  */
  114 #define NFS_CWNDSCALE   256
  115 #define NFS_MAXCWND     (NFS_CWNDSCALE * 32)
  116 #define NFS_NBACKOFF    8
  117 static int nfs_backoff[NFS_NBACKOFF] = { 2, 4, 8, 16, 32, 64, 128, 256, };
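      /*
       * Illustrative arithmetic (not part of the original source): with
       * NFS_CWNDSCALE == 256, a window of 4 outstanding rpcs is stored
       * as nm_cwnd == 1024.  A reply arriving while a full window is in
       * progress grows it by (256 * 256 + 512) / 1024 == 64, i.e. 1/4
       * of an rpc, so one full window of replies opens the window by
       * roughly one rpc.  A retransmit timeout halves nm_cwnd, and
       * nfs_backoff[] above holds the escalating multipliers used by
       * the retransmit timer (which is outside this section).
       */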
  118 struct callout  nfs_callout;
  119 
  120 static int      nfs_msg(struct thread *, const char *, const char *, int);
  121 static int      nfs_realign(struct mbuf **pm, int hsiz);
  122 static int      nfs_reply(struct nfsreq *);
  123 static void     nfs_softterm(struct nfsreq *rep);
  124 static int      nfs_reconnect(struct nfsreq *rep);
  125 static void     nfs_clnt_tcp_soupcall(struct socket *so, void *arg, int waitflag);
  126 static void     nfs_clnt_udp_soupcall(struct socket *so, void *arg, int waitflag);
  127 
  128 extern struct mtx nfs_reqq_mtx;
  129 
  130 /*
  131  * RTT estimator
  132  */
  133 
  134 static enum nfs_rto_timer_t nfs_proct[NFS_NPROCS] = {
  135         NFS_DEFAULT_TIMER,      /* NULL */
  136         NFS_GETATTR_TIMER,      /* GETATTR */
  137         NFS_DEFAULT_TIMER,      /* SETATTR */
  138         NFS_LOOKUP_TIMER,       /* LOOKUP */
  139         NFS_GETATTR_TIMER,      /* ACCESS */
  140         NFS_READ_TIMER,         /* READLINK */
  141         NFS_READ_TIMER,         /* READ */
  142         NFS_WRITE_TIMER,        /* WRITE */
  143         NFS_DEFAULT_TIMER,      /* CREATE */
  144         NFS_DEFAULT_TIMER,      /* MKDIR */
  145         NFS_DEFAULT_TIMER,      /* SYMLINK */
  146         NFS_DEFAULT_TIMER,      /* MKNOD */
  147         NFS_DEFAULT_TIMER,      /* REMOVE */
  148         NFS_DEFAULT_TIMER,      /* RMDIR */
  149         NFS_DEFAULT_TIMER,      /* RENAME */
  150         NFS_DEFAULT_TIMER,      /* LINK */
  151         NFS_READ_TIMER,         /* READDIR */
  152         NFS_READ_TIMER,         /* READDIRPLUS */
  153         NFS_DEFAULT_TIMER,      /* FSSTAT */
  154         NFS_DEFAULT_TIMER,      /* FSINFO */
  155         NFS_DEFAULT_TIMER,      /* PATHCONF */
  156         NFS_DEFAULT_TIMER,      /* COMMIT */
  157         NFS_DEFAULT_TIMER,      /* NOOP */
  158 };
  159 
  160 /*
  161  * Choose the correct RTT timer for this NFS procedure.
  162  */
  163 static inline enum nfs_rto_timer_t
  164 nfs_rto_timer(u_int32_t procnum)
  165 {
  166         return nfs_proct[procnum];
  167 }
  168 
  169 /*
  170  * Initialize the RTT estimator state for a new mount point.
  171  */
  172 static void
  173 nfs_init_rtt(struct nfsmount *nmp)
  174 {
  175         int i;
  176 
  177         for (i = 0; i < NFS_MAX_TIMER; i++)
  178                 nmp->nm_srtt[i] = NFS_INITRTT;
  179         for (i = 0; i < NFS_MAX_TIMER; i++)
  180                 nmp->nm_sdrtt[i] = 0;
  181 }
  182 
  183 /*
  184  * Update a mount point's RTT estimator state using data from the
  185  * passed-in request.
  186  * 
  187  * Use a gain of 0.125 on the mean and a gain of 0.25 on the deviation.
  188  *
  189  * NB: Since the timer resolution of NFS_HZ is so coarse, it can often
  190  * result in r_rtt == 0. Since r_rtt == N means that the actual RTT is
  191  * between N + dt and N + 2 - dt ticks, add 1 before calculating the
  192  * update values.
  193  */
  194 static void
  195 nfs_update_rtt(struct nfsreq *rep)
  196 {
  197         int t1 = rep->r_rtt + 1;
  198         int index = nfs_rto_timer(rep->r_procnum) - 1;
  199         int *srtt = &rep->r_nmp->nm_srtt[index];
  200         int *sdrtt = &rep->r_nmp->nm_sdrtt[index];
  201 
  202         t1 -= *srtt >> 3;
  203         *srtt += t1;
  204         if (t1 < 0)
  205                 t1 = -t1;
  206         t1 -= *sdrtt >> 2;
  207         *sdrtt += t1;
  208 }
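      /*
       * Illustrative walk-through (not part of the original source):
       * *srtt holds the smoothed rtt scaled by 8 and *sdrtt the mean
       * deviation scaled by 4.  With *srtt == 16 (2 ticks) and a reply
       * measured at r_rtt == 3 (so t1 == 4): t1 -= 16 >> 3 leaves an
       * error of 2 and *srtt becomes 18 (2.25 ticks); then with
       * *sdrtt == 4, t1 -= 4 >> 2 leaves 1 and *sdrtt becomes 5.
       */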
  209 
  210 /*
  211  * Estimate RTO for an NFS RPC sent via an unreliable datagram.
  212  *
  213  * Use the mean and mean deviation of RTT for the appropriate type
  214  * of RPC for the frequent RPCs and a default for the others.
  215  * The justification for doing "other" this way is that these RPCs
  216  * happen so infrequently that the timer estimate would probably be stale.
  217  * Also, since many of these RPCs are non-idempotent, a conservative
  218  * timeout is desired.
  219  *
  220  * getattr, lookup - A+2D
  221  * read, write     - A+4D
  222  * other           - nm_timeo
  223  */
  224 static int
  225 nfs_estimate_rto(struct nfsmount *nmp, u_int32_t procnum)
  226 {
  227         enum nfs_rto_timer_t timer = nfs_rto_timer(procnum);
  228         int index = timer - 1;
  229         int rto;
  230 
  231         switch (timer) {
  232         case NFS_GETATTR_TIMER:
  233         case NFS_LOOKUP_TIMER:
  234                 rto = ((nmp->nm_srtt[index] + 3) >> 2) +
  235                                 ((nmp->nm_sdrtt[index] + 1) >> 1);
  236                 break;
  237         case NFS_READ_TIMER:
  238         case NFS_WRITE_TIMER:
  239                 rto = ((nmp->nm_srtt[index] + 7) >> 3) +
  240                                 (nmp->nm_sdrtt[index] + 1);
  241                 break;
  242         default:
  243                 rto = nmp->nm_timeo;
  244                 return (rto);
  245         }
  246 
  247         if (rto < NFS_MINRTO)
  248                 rto = NFS_MINRTO;
  249         else if (rto > NFS_MAXRTO)
  250                 rto = NFS_MAXRTO;
  251 
  252         return (rto);
  253 }
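      /*
       * Illustrative arithmetic (not part of the original source): with
       * nm_srtt[index] == 32 and nm_sdrtt[index] == 8, a getattr/lookup
       * rto is ((32 + 3) >> 2) + ((8 + 1) >> 1) == 8 + 4 == 12 ticks,
       * and a read/write rto is ((32 + 7) >> 3) + (8 + 1) == 4 + 9 == 13
       * ticks, each then clamped to [NFS_MINRTO, NFS_MAXRTO].
       */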
  254 
  255 
  256 /*
  257  * Initialize sockets and congestion for a new NFS connection.
  258  * We do not free the sockaddr on error.
  259  */
  260 int
  261 nfs_connect(struct nfsmount *nmp, struct nfsreq *rep)
  262 {
  263         struct socket *so;
  264         int error, rcvreserve, sndreserve;
  265         int pktscale;
  266         struct sockaddr *saddr;
  267         struct thread *td = &thread0; /* only used for socreate and sobind */
  268 
  269         if (nmp->nm_sotype == SOCK_STREAM) {
  270                 mtx_lock(&nmp->nm_mtx);
  271                 nmp->nm_nfstcpstate.flags |= NFS_TCP_EXPECT_RPCMARKER;
  272                 nmp->nm_nfstcpstate.rpcresid = 0;
  273                 mtx_unlock(&nmp->nm_mtx);
  274         }       
  275         nmp->nm_so = NULL;
  276         saddr = nmp->nm_nam;
  277         error = socreate(saddr->sa_family, &nmp->nm_so, nmp->nm_sotype,
  278                 nmp->nm_soproto, nmp->nm_mountp->mnt_cred, td);
  279         if (error)
  280                 goto bad;
  281         so = nmp->nm_so;
  282         nmp->nm_soflags = so->so_proto->pr_flags;
  283 
  284         /*
  285          * Some servers require that the client port be a reserved port number.
  286          */
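              /*
               * Background (not part of the original source): selecting
               * the "low" port range makes the bind below choose a
               * privileged port; the second sosetopt() then restores
               * the default range once the socket is bound.
               */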
  287         if (nmp->nm_flag & NFSMNT_RESVPORT) {
  288                 struct sockopt sopt;
  289                 int ip, ip2, len;
  290                 struct sockaddr_in6 ssin;
  291                 struct sockaddr *sa;
  292 
  293                 bzero(&sopt, sizeof sopt);
   294                 switch (saddr->sa_family) {
  295                 case AF_INET:
  296                         sopt.sopt_level = IPPROTO_IP;
  297                         sopt.sopt_name = IP_PORTRANGE;
  298                         ip = IP_PORTRANGE_LOW;
  299                         ip2 = IP_PORTRANGE_DEFAULT;
  300                         len = sizeof (struct sockaddr_in);
  301                         break;
  302 #ifdef INET6
  303                 case AF_INET6:
  304                         sopt.sopt_level = IPPROTO_IPV6;
  305                         sopt.sopt_name = IPV6_PORTRANGE;
  306                         ip = IPV6_PORTRANGE_LOW;
  307                         ip2 = IPV6_PORTRANGE_DEFAULT;
  308                         len = sizeof (struct sockaddr_in6);
  309                         break;
  310 #endif
  311                 default:
  312                         goto noresvport;
  313                 }
  314                 sa = (struct sockaddr *)&ssin;
  315                 bzero(sa, len);
  316                 sa->sa_len = len;
  317                 sa->sa_family = saddr->sa_family;
  318                 sopt.sopt_dir = SOPT_SET;
  319                 sopt.sopt_val = (void *)&ip;
  320                 sopt.sopt_valsize = sizeof(ip);
  321                 error = sosetopt(so, &sopt);
  322                 if (error)
  323                         goto bad;
  324                 error = sobind(so, sa, td);
  325                 if (error)
  326                         goto bad;
  327                 ip = ip2;
  328                 error = sosetopt(so, &sopt);
  329                 if (error)
  330                         goto bad;
  331         noresvport: ;
  332         }
  333 
  334         /*
  335          * Protocols that do not require connections may be optionally left
  336          * unconnected for servers that reply from a port other than NFS_PORT.
  337          */
  338         mtx_lock(&nmp->nm_mtx);
  339         if (nmp->nm_flag & NFSMNT_NOCONN) {
  340                 if (nmp->nm_soflags & PR_CONNREQUIRED) {
  341                         error = ENOTCONN;
  342                         mtx_unlock(&nmp->nm_mtx);
  343                         goto bad;
  344                 } else
  345                         mtx_unlock(&nmp->nm_mtx);
  346         } else {
  347                 mtx_unlock(&nmp->nm_mtx);
  348                 error = soconnect(so, nmp->nm_nam, td);
  349                 if (error)
  350                         goto bad;
  351 
  352                 /*
  353                  * Wait for the connection to complete. Cribbed from the
  354                  * connect system call but with the wait timing out so
  355                  * that interruptible mounts don't hang here for a long time.
  356                  */
  357                 SOCK_LOCK(so);
  358                 while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
  359                         (void) msleep(&so->so_timeo, SOCK_MTX(so),
  360                             PSOCK, "nfscon", 2 * hz);
  361                         if ((so->so_state & SS_ISCONNECTING) &&
  362                             so->so_error == 0 && rep &&
  363                             (error = nfs_sigintr(nmp, rep, rep->r_td)) != 0) {
  364                                 so->so_state &= ~SS_ISCONNECTING;
  365                                 SOCK_UNLOCK(so);
  366                                 goto bad;
  367                         }
  368                 }
  369                 if (so->so_error) {
  370                         error = so->so_error;
  371                         so->so_error = 0;
  372                         SOCK_UNLOCK(so);
  373                         goto bad;
  374                 }
  375                 SOCK_UNLOCK(so);
  376         }
  377         so->so_rcv.sb_timeo = 12 * hz;
  378         if (nmp->nm_sotype == SOCK_STREAM)
  379                 so->so_snd.sb_timeo = 1 * hz;   /* 1s snd timeout for NFS/TCP */
  380         else
  381                 so->so_snd.sb_timeo = 5 * hz;
  382 
  383         /*
  384          * Get buffer reservation size from sysctl, but impose reasonable
  385          * limits.
  386          */
  387         pktscale = nfs_bufpackets;
  388         if (pktscale < 2)
  389                 pktscale = 2;
  390         if (pktscale > 64)
  391                 pktscale = 64;
  392         mtx_lock(&nmp->nm_mtx);
  393         if (nmp->nm_sotype == SOCK_DGRAM) {
  394                 sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * pktscale;
  395                 rcvreserve = (max(nmp->nm_rsize, nmp->nm_readdirsize) +
  396                     NFS_MAXPKTHDR) * pktscale;
  397         } else if (nmp->nm_sotype == SOCK_SEQPACKET) {
  398                 sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * pktscale;
  399                 rcvreserve = (max(nmp->nm_rsize, nmp->nm_readdirsize) +
  400                     NFS_MAXPKTHDR) * pktscale;
  401         } else {
  402                 if (nmp->nm_sotype != SOCK_STREAM)
  403                         panic("nfscon sotype");
  404                 if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
  405                         struct sockopt sopt;
  406                         int val;
  407 
  408                         bzero(&sopt, sizeof sopt);
  409                         sopt.sopt_dir = SOPT_SET;
  410                         sopt.sopt_level = SOL_SOCKET;
  411                         sopt.sopt_name = SO_KEEPALIVE;
  412                         sopt.sopt_val = &val;
  413                         sopt.sopt_valsize = sizeof val;
  414                         val = 1;
  415                         mtx_unlock(&nmp->nm_mtx);
  416                         sosetopt(so, &sopt);
  417                         mtx_lock(&nmp->nm_mtx);
  418                 }
  419                 if (so->so_proto->pr_protocol == IPPROTO_TCP) {
  420                         struct sockopt sopt;
  421                         int val;
  422 
  423                         bzero(&sopt, sizeof sopt);
  424                         sopt.sopt_dir = SOPT_SET;
  425                         sopt.sopt_level = IPPROTO_TCP;
  426                         sopt.sopt_name = TCP_NODELAY;
  427                         sopt.sopt_val = &val;
  428                         sopt.sopt_valsize = sizeof val;
  429                         val = 1;
  430                         mtx_unlock(&nmp->nm_mtx);
  431                         sosetopt(so, &sopt);
  432                         mtx_lock(&nmp->nm_mtx);
  433                 }
  434                 sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR +
  435                     sizeof (u_int32_t)) * pktscale;
  436                 rcvreserve = (nmp->nm_rsize + NFS_MAXPKTHDR +
  437                     sizeof (u_int32_t)) * pktscale;
  438         }
  439         mtx_unlock(&nmp->nm_mtx);
  440         error = soreserve(so, sndreserve, rcvreserve);
  441         if (error)
  442                 goto bad;
  443         SOCKBUF_LOCK(&so->so_rcv);
  444         so->so_rcv.sb_flags |= SB_NOINTR;
  445         so->so_upcallarg = (caddr_t)nmp;
  446         if (so->so_type == SOCK_STREAM)
  447                 so->so_upcall = nfs_clnt_tcp_soupcall;
  448         else    
  449                 so->so_upcall = nfs_clnt_udp_soupcall;
  450         so->so_rcv.sb_flags |= SB_UPCALL;
  451         SOCKBUF_UNLOCK(&so->so_rcv);
  452         SOCKBUF_LOCK(&so->so_snd);
  453         so->so_snd.sb_flags |= SB_NOINTR;
  454         SOCKBUF_UNLOCK(&so->so_snd);
  455 
  456         mtx_lock(&nmp->nm_mtx);
  457         /* Initialize other non-zero congestion variables */
  458         nfs_init_rtt(nmp);
  459         nmp->nm_cwnd = NFS_MAXCWND / 2;     /* Initial send window */
  460         nmp->nm_sent = 0;
  461         nmp->nm_timeouts = 0;
  462         mtx_unlock(&nmp->nm_mtx);
  463         return (0);
  464 
  465 bad:
  466         nfs_disconnect(nmp);
  467         return (error);
  468 }
  469 
  470 static void
  471 nfs_wakup_reconnectors(struct nfsmount *nmp)
  472 {
  473         KASSERT(mtx_owned(&nmp->nm_mtx), ("NFS mnt lock not owned !"));
  474         if (--nmp->nm_nfstcpstate.sock_send_inprog == 0 &&
  475             (nmp->nm_nfstcpstate.flags & NFS_TCP_WAIT_WRITE_DRAIN)) {
  476                 nmp->nm_nfstcpstate.flags &= ~NFS_TCP_WAIT_WRITE_DRAIN;
  477                 wakeup((caddr_t)&nmp->nm_nfstcpstate.sock_send_inprog);
  478         }
  479 }
  480 
  481 /*
  482  * Reconnect routine:
  483  * Called when a connection is broken on a reliable protocol.
  484  * - clean up the old socket
  485  * - nfs_connect() again
  486  * - set R_MUSTRESEND for all outstanding requests on mount point
  487  * If this fails the mount point is DEAD!
  488  * nb: Must be called with the mount point's nm_mtx held; the KASSERT below checks this.
  489  */
  490 static int
  491 nfs_reconnect(struct nfsreq *rep)
  492 {
  493         struct nfsreq *rp;
  494         struct nfsmount *nmp = rep->r_nmp;
  495         int error;
  496         int slpflag = 0;
  497 
  498         KASSERT(mtx_owned(&nmp->nm_mtx), ("NFS mnt lock not owned !"));
  499         if (nmp->nm_flag & NFSMNT_INT)
  500                 slpflag = PCATCH;
  501         /*
  502          * Wait for any pending writes to this socket to drain (or timeout).
  503          */
  504         while (nmp->nm_nfstcpstate.sock_send_inprog > 0) {
  505                 nmp->nm_nfstcpstate.flags |= NFS_TCP_WAIT_WRITE_DRAIN;
  506                 error = msleep((caddr_t)&nmp->nm_nfstcpstate.sock_send_inprog,
  507                                &nmp->nm_mtx, slpflag | (PZERO - 1), "nfscon", 0);               
  508         }
  509         /*
  510          * Grab the nfs_connect_lock to serialize connects.
  511          * After grabbing the lock, check whether a reconnect is still
  512          * necessary, or if someone else beat us to the connect.
  513          */
  514         error = nfs_connect_lock(rep);
  515         if (error)
  516                 goto unlock_exit;
  517         if ((nmp->nm_nfstcpstate.flags & NFS_TCP_FORCE_RECONNECT) == 0)
  518                 goto unlock_exit;
  519         else
  520                 mtx_unlock(&nmp->nm_mtx);
  521 
  522         nfs_reconnects++;
  523         nfs_disconnect(nmp);
  524         while ((error = nfs_connect(nmp, rep)) != 0) {
  525                 if (error == ERESTART)
  526                         error = EINTR;
  527                 if (error == EIO || error == EINTR) {
  528                         mtx_lock(&nmp->nm_mtx);
  529                         goto unlock_exit;
  530                 }
  531                 (void) tsleep(&lbolt, PSOCK, "nfscon", 0);
  532         }
  533 
  534         /*
  535          * Clear the FORCE_RECONNECT flag only after the connect
  536          * succeeds, to prevent races between multiple processes
  537          * waiting on the mountpoint while the connection is being
  538          * torn down. The first one to acquire the sndlock
  539          * retries the connection. The others block on the sndlock
  540          * until the connection is established successfully, and
  541          * then re-transmit the request.
  542          */
  543         mtx_lock(&nmp->nm_mtx);
  544         nmp->nm_nfstcpstate.flags &= ~NFS_TCP_FORCE_RECONNECT;
  545         nmp->nm_nfstcpstate.rpcresid = 0;
  546         mtx_unlock(&nmp->nm_mtx);       
  547 
  548         /*
  549          * Loop through outstanding request list and fix up all requests
  550          * on old socket.
  551          */
  552         mtx_lock(&nfs_reqq_mtx);
  553         TAILQ_FOREACH(rp, &nfs_reqq, r_chain) {
  554                 if (rp->r_nmp == nmp) {
  555                         mtx_lock(&rp->r_mtx);                   
  556                         rp->r_flags |= R_MUSTRESEND;
  557                         mtx_unlock(&rp->r_mtx);
  558                 }
  559         }
  560         mtx_unlock(&nfs_reqq_mtx);
  561         mtx_lock(&nmp->nm_mtx);
  562 unlock_exit:
  563         nfs_connect_unlock(rep);
  564         mtx_unlock(&nmp->nm_mtx);               
  565         return (error);
  566 }
  567 
  568 /*
  569  * NFS disconnect. Clean up and unlink.
  570  */
  571 void
  572 nfs_disconnect(struct nfsmount *nmp)
  573 {
  574         struct socket *so;
  575 
  576         mtx_lock(&nmp->nm_mtx);
  577         if (nmp->nm_so) {
  578                 so = nmp->nm_so;
  579                 nmp->nm_so = NULL;
  580                 mtx_unlock(&nmp->nm_mtx);
  581                 SOCKBUF_LOCK(&so->so_rcv);
  582                 so->so_upcallarg = NULL;
  583                 so->so_upcall = NULL;
  584                 so->so_rcv.sb_flags &= ~SB_UPCALL;
  585                 SOCKBUF_UNLOCK(&so->so_rcv);
  586                 soshutdown(so, SHUT_WR);
  587                 soclose(so);
  588         } else
  589                 mtx_unlock(&nmp->nm_mtx);
  590 }
  591 
  592 void
  593 nfs_safedisconnect(struct nfsmount *nmp)
  594 {
  595         struct nfsreq dummyreq;
  596 
  597         bzero(&dummyreq, sizeof(dummyreq));
  598         dummyreq.r_nmp = nmp;
  599         nfs_disconnect(nmp);
  600 }
  601 
  602 /*
  603  * This is the nfs send routine. For connection-based socket types, it
  604  * must be called with an nfs_sndlock() on the socket.
  605  * - return EINTR if the RPC is terminated, 0 otherwise
  606  * - set R_MUSTRESEND if the send fails for any reason
  607  * - do any cleanup required by recoverable socket errors (?)
  608  */
  609 int
  610 nfs_send(struct socket *so, struct sockaddr *nam, struct mbuf *top,
  611     struct nfsreq *rep)
  612 {
  613         struct sockaddr *sendnam;
  614         int error, error2, soflags, flags;
  615 
  616         KASSERT(rep, ("nfs_send: called with rep == NULL"));
  617 
  618         error = nfs_sigintr(rep->r_nmp, rep, rep->r_td);
  619         if (error) {
  620                 m_freem(top);
  621                 return (error);
  622         }
  623         mtx_lock(&rep->r_nmp->nm_mtx);
  624         mtx_lock(&rep->r_mtx);
  625         if ((so = rep->r_nmp->nm_so) == NULL) {
  626                 rep->r_flags |= R_MUSTRESEND;
  627                 mtx_unlock(&rep->r_mtx);
  628                 mtx_unlock(&rep->r_nmp->nm_mtx);
  629                 m_freem(top);
  630                 return (EPIPE);
  631         }
  632         rep->r_flags &= ~R_MUSTRESEND;
  633         soflags = rep->r_nmp->nm_soflags;
  634         mtx_unlock(&rep->r_mtx);
  635         mtx_unlock(&rep->r_nmp->nm_mtx);
  636 
  637         if ((soflags & PR_CONNREQUIRED) || (so->so_state & SS_ISCONNECTED))
  638                 sendnam = NULL;
  639         else
  640                 sendnam = nam;
  641         if (so->so_type == SOCK_SEQPACKET)
  642                 flags = MSG_EOR;
  643         else
  644                 flags = 0;
  645 
  646         error = sosend(so, sendnam, 0, top, 0, flags, curthread /*XXX*/);
  647         if (error == ENOBUFS && so->so_type == SOCK_DGRAM) {
  648                 error = 0;
  649                 mtx_lock(&rep->r_mtx);
  650                 rep->r_flags |= R_MUSTRESEND;
  651                 mtx_unlock(&rep->r_mtx);
  652         }
  653 
  654         if (error) {
  655                 /*
  656                  * Don't report EPIPE errors on nfs sockets.
  657                  * These can be due to idle tcp mounts which will be closed
  658                  * by NetApp, Solaris, etc. servers if left idle too long.
  659                  */
  660                 if (error != EPIPE) {
  661                         log(LOG_INFO, "nfs send error %d for server %s\n",
  662                             error,
  663                             rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
  664                 }
  665                 /*
  666                  * Deal with errors for the client side.
  667                  */
  668                 error2 = NFS_SIGREP(rep);
  669                 if (error2)
  670                         error = error2;
  671                 else {
  672                         mtx_lock(&rep->r_mtx);
  673                         rep->r_flags |= R_MUSTRESEND;
  674                         mtx_unlock(&rep->r_mtx);
  675                 }
  676 
  677                 /*
  678                  * Handle any recoverable (soft) socket errors here. (?)
  679                  * Make EWOULDBLOCK a recoverable error; we'll rexmit from nfs_timer().
  680                  */
  681                 if (error != EINTR && error != ERESTART && error != EIO && error != EPIPE)
  682                         error = 0;
  683         }
  684         return (error);
  685 }
  686 
  687 static int
  688 nfs_reply(struct nfsreq *rep)
  689 {
  690         struct socket *so;
  691         struct mbuf *m;
  692         int error = 0, sotype, slpflag;
  693         struct nfsmount *nmp = rep->r_nmp;
  694         
  695         sotype = nmp->nm_sotype;
  696         /*
  697          * For reliable protocols, lock against other senders/receivers
  698          * in case a reconnect is necessary.
  699          */
  700         if (sotype != SOCK_DGRAM) {
  701 tryagain:
  702                 mtx_lock(&nmp->nm_mtx);
  703                 mtx_lock(&rep->r_mtx);
  704                 if (rep->r_mrep) {
  705                         mtx_unlock(&rep->r_mtx);
  706                         mtx_unlock(&nmp->nm_mtx);
  707                         return (0);
  708                 }
  709                 if (rep->r_flags & R_SOFTTERM) {
  710                         mtx_unlock(&rep->r_mtx);
  711                         mtx_unlock(&nmp->nm_mtx);
  712                         return (EINTR);
  713                 }
  714                 so = nmp->nm_so;
  715                 if (!so || 
  716                     (nmp->nm_nfstcpstate.flags & NFS_TCP_FORCE_RECONNECT)) {
  717                         mtx_unlock(&rep->r_mtx);
  718                         nmp->nm_nfstcpstate.flags |= NFS_TCP_FORCE_RECONNECT;
  719                         error = nfs_reconnect(rep);
  720                         if (error)
  721                                 return (error);
  722                         goto tryagain;
  723                 }
  724                 while (rep->r_flags & R_MUSTRESEND) {
  725                         mtx_unlock(&rep->r_mtx);
  726                         nmp->nm_nfstcpstate.sock_send_inprog++;
  727                         mtx_unlock(&nmp->nm_mtx);
  728                         m = m_copym(rep->r_mreq, 0, M_COPYALL, M_WAIT);
  729                         nfsstats.rpcretries++;
  730                         error = nfs_send(so, nmp->nm_nam, m, rep);
  731                         if (error) {
  732                                 mtx_lock(&nmp->nm_mtx);
  733                                 nfs_wakup_reconnectors(nmp);
  734                                 if (!(error == EINTR || error == ERESTART)) {
  735                                         nmp->nm_nfstcpstate.flags |= NFS_TCP_FORCE_RECONNECT;
  736                                         error = nfs_reconnect(rep);
  737                                 } else
  738                                         mtx_unlock(&nmp->nm_mtx);
  739                                 if (error)
  740                                         return (error);
  741                                 goto tryagain;
  742                         } else {
  743                                 mtx_lock(&nmp->nm_mtx);
  744                                 nfs_wakup_reconnectors(nmp);
  745                                 mtx_lock(&rep->r_mtx);
  746                         }
  747                 }
  748                 mtx_unlock(&rep->r_mtx);
  749                 mtx_unlock(&nmp->nm_mtx);
  750         }
  751         slpflag = 0;
  752         mtx_lock(&nmp->nm_mtx);
  753         if (nmp->nm_flag & NFSMNT_INT)
  754                 slpflag = PCATCH;
  755         mtx_unlock(&nmp->nm_mtx);
  756         mtx_lock(&rep->r_mtx);
  757         while ((rep->r_mrep == NULL) && (error == 0) && 
  758                ((rep->r_flags & R_SOFTTERM) == 0) &&
  759                ((sotype == SOCK_DGRAM) || ((rep->r_flags & R_MUSTRESEND) == 0)))
  760                 error = msleep((caddr_t)rep, &rep->r_mtx, 
  761                                slpflag | (PZERO - 1), "nfsreq", 0);
  762         if (error == EINTR || error == ERESTART) {
  763                 /* NFS operations aren't restartable. Map ERESTART to EINTR */
  764                 mtx_unlock(&rep->r_mtx);
  765                 return (EINTR);
  766         }
  767         if (rep->r_flags & R_SOFTTERM) {
  768                 /* Request was terminated because we exceeded the retries (soft mount) */
  769                 mtx_unlock(&rep->r_mtx);
  770                 return (ETIMEDOUT);
  771         }
  772         mtx_unlock(&rep->r_mtx);
  773         if (sotype == SOCK_STREAM) {
  774                 mtx_lock(&nmp->nm_mtx);
  775                 mtx_lock(&rep->r_mtx);
  776                 if (((nmp->nm_nfstcpstate.flags & NFS_TCP_FORCE_RECONNECT) || 
  777                      (rep->r_flags & R_MUSTRESEND))) {
  778                         mtx_unlock(&rep->r_mtx);
  779                         mtx_unlock(&nmp->nm_mtx);       
  780                         goto tryagain;
  781                 } else {
  782                         mtx_unlock(&rep->r_mtx);
  783                         mtx_unlock(&nmp->nm_mtx);       
  784                 }
  785         }
  786         return (error);
  787 }
  788 
  789 /*
  790  * XXX TO DO
  791  * Make nfs_realign() non-blocking. Also make nfsm_dissect() non-blocking.
  792  */
  793 static void
  794 nfs_clnt_match_xid(struct socket *so, 
  795                    struct nfsmount *nmp, 
  796                    struct mbuf *mrep)
  797 {
  798         struct mbuf *md;
  799         caddr_t dpos;
  800         u_int32_t rxid, *tl;
  801         struct nfsreq *rep;
  802         int error;
  803         
  804         /*
  805          * Search for any mbufs that are not a multiple of 4 bytes long
  806          * or with m_data not longword aligned.
  807          * These could cause pointer alignment problems, so copy them to
  808          * well aligned mbufs.
  809          */
  810         if (nfs_realign(&mrep, 5 * NFSX_UNSIGNED) == ENOMEM) {
  811                 m_freem(mrep);
  812                 nfsstats.rpcinvalid++;
  813                 return;
  814         }
  815         
  816         /*
  817          * Get the xid and check that it is an rpc reply
  818          */
  819         md = mrep;
  820         dpos = mtod(md, caddr_t);
  821         tl = nfsm_dissect_nonblock(u_int32_t *, 2*NFSX_UNSIGNED);
  822         rxid = *tl++;
  823         if (*tl != rpc_reply) {
  824                 m_freem(mrep);
  825 nfsmout:
  826                 nfsstats.rpcinvalid++;
  827                 return;
  828         }
  829 
  830         mtx_lock(&nfs_reqq_mtx);
  831         /*
  832          * Loop through the request list to match up the reply.
  833          * If there is no match, just drop the datagram.
  834          */
  835         TAILQ_FOREACH(rep, &nfs_reqq, r_chain) {
  836                 mtx_lock(&nmp->nm_mtx);
  837                 mtx_lock(&rep->r_mtx);
  838                 if (rep->r_mrep == NULL && rxid == rep->r_xid) {
  839                         /* Found it.. */
  840                         rep->r_mrep = mrep;
  841                         rep->r_md = md;
  842                         rep->r_dpos = dpos;
  843                         /*
  844                          * Update congestion window.
  845                          * Do the additive increase of
  846                          * one rpc/rtt.
  847                          */
  848                         if (nmp->nm_cwnd <= nmp->nm_sent) {
  849                                 nmp->nm_cwnd +=
  850                                         (NFS_CWNDSCALE * NFS_CWNDSCALE +
  851                                          (nmp->nm_cwnd >> 1)) / nmp->nm_cwnd;
  852                                 if (nmp->nm_cwnd > NFS_MAXCWND)
  853                                         nmp->nm_cwnd = NFS_MAXCWND;
  854                         }       
  855                         if (rep->r_flags & R_SENT) {
  856                                 rep->r_flags &= ~R_SENT;
  857                                 nmp->nm_sent -= NFS_CWNDSCALE;
  858                         }
  859                         if (rep->r_flags & R_TIMING)
  860                                 nfs_update_rtt(rep);
  861                         nmp->nm_timeouts = 0;
  862                         wakeup((caddr_t)rep);
  863                         mtx_unlock(&rep->r_mtx);
  864                         mtx_unlock(&nmp->nm_mtx);
  865                         break;
  866                 }
  867                 mtx_unlock(&rep->r_mtx);
  868                 mtx_unlock(&nmp->nm_mtx);
  869         }
  870         /*
  871          * If the reply was not matched to any request, drop it.
  872          * (The owner of a matched request was woken up above.)
  873          */
  874         if (rep == NULL) {
  875                 nfsstats.rpcunexpected++;
  876                 m_freem(mrep);
  877         }
  878         mtx_unlock(&nfs_reqq_mtx);
  879 }
  880 
  881 static void
  882 nfs_mark_for_reconnect(struct nfsmount *nmp)
  883 {
  884         struct nfsreq *rp;
  885 
  886         mtx_lock(&nmp->nm_mtx);
  887         nmp->nm_nfstcpstate.flags |= NFS_TCP_FORCE_RECONNECT;
  888         mtx_unlock(&nmp->nm_mtx);
  889         /*
  890          * Wake up all processes that are waiting for replies
  891          * on this mount point. One of them will do the reconnect.
  892          */
  893         mtx_lock(&nfs_reqq_mtx);
  894         TAILQ_FOREACH(rp, &nfs_reqq, r_chain) {
  895                 if (rp->r_nmp == nmp) {
  896                         mtx_lock(&rp->r_mtx);
  897                         rp->r_flags |= R_MUSTRESEND;
  898                         wakeup((caddr_t)rp);
  899                         mtx_unlock(&rp->r_mtx);
  900                 }
  901         }
  902         mtx_unlock(&nfs_reqq_mtx);
  903 }
  904 
  905 static int
  906 nfstcp_readable(struct socket *so, int bytes)
  907 {
  908         int retval;
  909         
  910         SOCKBUF_LOCK(&so->so_rcv);
  911         retval = (so->so_rcv.sb_cc >= (bytes) ||
  912                   (so->so_rcv.sb_state & SBS_CANTRCVMORE) ||
  913                   so->so_error);
  914         SOCKBUF_UNLOCK(&so->so_rcv);
  915         return (retval);
  916 }
  917 
  918 #define nfstcp_marker_readable(so)      nfstcp_readable(so, sizeof(u_int32_t))
  919 
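      /*
       * Copy up to len bytes from the mbuf chain mp into buf.  Returns
       * the number of bytes that could not be copied, i.e. 0 when the
       * chain held at least len bytes.
       */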
  920 static int
  921 nfs_copy_len(struct mbuf *mp, char *buf, int len)
  922 {
  923         while (len > 0 && mp != NULL) {
  924                 int copylen = min(len, mp->m_len);
  925                 
  926                 bcopy(mp->m_data, buf, copylen);
  927                 buf += copylen;
  928                 len -= copylen;
  929                 mp = mp->m_next;
  930         }
  931         return (len);
  932 }
  933 
  934 static void
  935 nfs_clnt_tcp_soupcall(struct socket *so, void *arg, int waitflag)
  936 {
  937         struct nfsmount *nmp = (struct nfsmount *)arg;
  938         struct mbuf *mp = NULL;
  939         struct uio auio;
  940         int error;
  941         u_int32_t len;
  942         int rcvflg;
  943 
  944         /*
  945          * Don't pull any more data from the socket if we've marked the
  946          * mountpoint for reconnect.
  947          */
  948         mtx_lock(&nmp->nm_mtx);
  949         if (nmp->nm_nfstcpstate.flags & NFS_TCP_FORCE_RECONNECT) {
  950                 mtx_unlock(&nmp->nm_mtx);               
  951                 return;
  952         } else                  
  953                 mtx_unlock(&nmp->nm_mtx);
  954         auio.uio_td = curthread;
  955         auio.uio_segflg = UIO_SYSSPACE;
  956         auio.uio_rw = UIO_READ;
  957         for ( ; ; ) {
  958                 mtx_lock(&nmp->nm_mtx);
  959                 if (nmp->nm_nfstcpstate.flags & NFS_TCP_EXPECT_RPCMARKER) {
  960                         int resid;
  961 
  962                         mtx_unlock(&nmp->nm_mtx);
  963                         if (!nfstcp_marker_readable(so)) {
  964                                 /* Marker is not readable */
  965                                 return;
  966                         }
  967                         auio.uio_resid = sizeof(u_int32_t);
  968                         auio.uio_iov = NULL;
  969                         auio.uio_iovcnt = 0;
  970                         mp = NULL;
  971                         rcvflg = (MSG_DONTWAIT | MSG_SOCALLBCK);
  972                         error =  soreceive(so, (struct sockaddr **)0, &auio,
  973                             &mp, (struct mbuf **)0, &rcvflg);
  974                         /*
  975                          * We've already tested that the socket is readable. Two
  976                          * cases here: we either read 0 bytes (the peer closed
  977                          * the connection), or got some other error. In both
  978                          * cases, we tear down the connection.
  979                          */
  980                         if (error || auio.uio_resid > 0) {
  981                                 if (error && error != ECONNRESET) {
  982                                         log(LOG_ERR, 
  983                                             "nfs/tcp clnt: Error %d reading socket, tearing down TCP connection\n",
  984                                             error);
  985                                 }
  986                                 goto mark_reconnect;
  987                         }
  988                         if (mp == NULL)
  989                                 panic("nfs_clnt_tcp_soupcall: Got empty mbuf chain from sorecv\n");
  990                         /*
  991                          * Sigh. We can't do the obvious thing here (which would
  992                          * be to have soreceive copy the length from mbufs for us).
  993                          * Calling uiomove() from the context of a socket callback
  994                          * (even for kernel-kernel copies) leads to LORs (since
  995                          * we hold network locks at this point).
  996                          */
  997                         if ((resid = nfs_copy_len(mp, (char *)&len, 
  998                                                   sizeof(u_int32_t)))) {
  999                                 log(LOG_ERR, "%s (%d) from nfs server %s\n",
 1000                                     "Bad RPC HDR length",
 1001                                     (int)(sizeof(u_int32_t) - resid),
 1002                                     nmp->nm_mountp->mnt_stat.f_mntfromname);
 1003                                 goto mark_reconnect;
 1004                         }                               
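                              /*
                               * The 4-byte record mark carries the RPC
                               * last-fragment flag in its high bit and the
                               * fragment length in the low 31 bits, hence
                               * the masking below.
                               */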
 1005                         len = ntohl(len) & ~0x80000000;
 1006                         m_freem(mp);
 1007                         /*
 1008                          * This is SERIOUS! We are out of sync with the sender
 1009                          * and forcing a disconnect/reconnect is all I can do.
 1010                          */
 1011                         if (len > NFS_MAXPACKET || len == 0) {
 1012                                 log(LOG_ERR, "%s (%d) from nfs server %s\n",
 1013                                     "impossible packet length",
 1014                                     len,
 1015                                     nmp->nm_mountp->mnt_stat.f_mntfromname);
 1016                                 goto mark_reconnect;
 1017                         }
 1018                         mtx_lock(&nmp->nm_mtx);
 1019                         nmp->nm_nfstcpstate.rpcresid = len;
 1020                         nmp->nm_nfstcpstate.flags &= ~(NFS_TCP_EXPECT_RPCMARKER);
 1021                         mtx_unlock(&nmp->nm_mtx);
 1022                 } else
 1023                         mtx_unlock(&nmp->nm_mtx);
 1024 
 1025                 /* 
 1026                  * Processed RPC marker or no RPC marker to process. 
 1027                  * Pull in and process data.
 1028                  */
 1029                 mtx_lock(&nmp->nm_mtx);
 1030                 if (nmp->nm_nfstcpstate.rpcresid > 0) {
 1031                         mtx_unlock(&nmp->nm_mtx);
 1032                         if (!nfstcp_readable(so, nmp->nm_nfstcpstate.rpcresid)) {
 1033                                 /* All data not readable */
 1034                                 return;
 1035                         }
 1036                         auio.uio_resid = nmp->nm_nfstcpstate.rpcresid;
 1037                         auio.uio_iov = NULL;
 1038                         auio.uio_iovcnt = 0;
 1039                         mp = NULL;
 1040                         rcvflg = (MSG_DONTWAIT | MSG_SOCALLBCK);
 1041                         error =  soreceive(so, (struct sockaddr **)0, &auio,
 1042                             &mp, (struct mbuf **)0, &rcvflg);
 1043                         if (error || auio.uio_resid > 0) {
 1044                                 if (error && error != ECONNRESET) {
 1045                                         log(LOG_ERR, 
 1046                                             "nfs/tcp clnt: Error %d reading socket, tearing down TCP connection\n",
 1047                                             error);
 1048                                 }
 1049                                 goto mark_reconnect;                            
 1050                         }
 1051                         if (mp == NULL)
 1052                                 panic("nfs_clnt_tcp_soupcall: Got empty mbuf chain from sorecv\n");
 1053                         mtx_lock(&nmp->nm_mtx);
 1054                         nmp->nm_nfstcpstate.rpcresid = 0;
 1055                         nmp->nm_nfstcpstate.flags |= NFS_TCP_EXPECT_RPCMARKER;
 1056                         mtx_unlock(&nmp->nm_mtx);
 1057                         /* We got the entire RPC reply. Match XIDs and wake up requestor */
 1058                         nfs_clnt_match_xid(so, nmp, mp);
 1059                 } else
 1060                         mtx_unlock(&nmp->nm_mtx);
 1061         }
 1062 
 1063 mark_reconnect:
 1064         nfs_mark_for_reconnect(nmp);
 1065 }
 1066 
 1067 static void
 1068 nfs_clnt_udp_soupcall(struct socket *so, void *arg, int waitflag)
 1069 {
 1070         struct nfsmount *nmp = (struct nfsmount *)arg;
 1071         struct uio auio;
 1072         struct mbuf *mp = NULL;
 1073         struct mbuf *control = NULL;
 1074         int error, rcvflag;
 1075 
 1077         auio.uio_td = curthread;
 1078         rcvflag = MSG_DONTWAIT;
 1079         auio.uio_resid = 1000000000;
 1080         do {
 1081                 mp = control = NULL;
 1082                 error = soreceive(so, NULL, &auio, &mp, &control, &rcvflag);
 1083                 if (control)
 1084                         m_freem(control);
 1085                 if (mp)
 1086                         nfs_clnt_match_xid(so, nmp, mp);
 1087         } while (mp && !error);
 1088 }
 1089 
 1090 /*
 1091  * nfs_request - goes something like this
 1092  *      - fill in request struct
 1093  *      - links it into list
 1094  *      - calls nfs_send() for first transmit
 1095  *      - calls nfs_reply() to get the reply
 1096  *      - break down rpc header and return with nfs reply pointed to
 1097  *        by mrep or error
 1098  * nb: always frees up mreq mbuf list
 1099  */
 1100 int
 1101 nfs_request(struct vnode *vp, struct mbuf *mrest, int procnum,
 1102     struct thread *td, struct ucred *cred, struct mbuf **mrp,
 1103     struct mbuf **mdp, caddr_t *dposp)
 1104 {
 1105         struct mbuf *mrep, *m2;
 1106         struct nfsreq *rep;
 1107         u_int32_t *tl;
 1108         int i;
 1109         struct nfsmount *nmp;
 1110         struct mbuf *m, *md, *mheadend;
 1111         time_t waituntil;
 1112         caddr_t dpos;
 1113         int error = 0, mrest_len, auth_len, auth_type;
 1114         struct timeval now;
 1115         u_int32_t *xidp;
 1116 
 1117         /* Reject requests while attempting a forced unmount. */
 1118         if (vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF) {
 1119                 m_freem(mrest);
 1120                 return (ESTALE);
 1121         }
 1122         nmp = VFSTONFS(vp->v_mount);
 1123         if ((nmp->nm_flag & NFSMNT_NFSV4) != 0)
 1124                 return (nfs4_request(vp, mrest, procnum, td, cred, mrp, mdp, dposp));
 1125         MALLOC(rep, struct nfsreq *, sizeof(struct nfsreq), M_NFSREQ, M_WAITOK);
 1126         bzero(rep, sizeof(struct nfsreq));
 1127         rep->r_nmp = nmp;
 1128         rep->r_vp = vp;
 1129         rep->r_td = td;
 1130         rep->r_procnum = procnum;
 1131         mtx_init(&rep->r_mtx, "NFSrep lock", NULL, MTX_DEF);
 1132 
 1133         getmicrouptime(&now);
 1134         rep->r_lastmsg = now.tv_sec -
 1135             ((nmp->nm_tprintf_delay) - (nmp->nm_tprintf_initial_delay));
 1136         mrest_len = m_length(mrest, NULL);
 1137 
 1138         /*
 1139          * Get the RPC header with authorization.
 1140          */
 1141         auth_type = RPCAUTH_UNIX;
 1142         if (cred->cr_ngroups < 1)
 1143                 panic("nfsreq nogrps");
 1144         auth_len = ((((cred->cr_ngroups - 1) > nmp->nm_numgrps) ?
 1145                 nmp->nm_numgrps : (cred->cr_ngroups - 1)) << 2) +
 1146                 5 * NFSX_UNSIGNED;
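              /*
               * Illustrative arithmetic (not part of the original
               * source): with NFSX_UNSIGNED == 4, a credential carrying
               * two supplementary groups (cr_ngroups == 3) within the
               * nm_numgrps limit gives auth_len = (2 << 2) + 5 * 4 ==
               * 28 bytes.
               */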
 1147         m = nfsm_rpchead(cred, nmp->nm_flag, procnum, auth_type, auth_len,
 1148              mrest, mrest_len, &mheadend, &xidp);
 1149 
 1150         /*
 1151          * For stream protocols, insert a Sun RPC Record Mark.
 1152          */
 1153         if (nmp->nm_sotype == SOCK_STREAM) {
 1154                 M_PREPEND(m, NFSX_UNSIGNED, M_TRYWAIT);
 1155                 *mtod(m, u_int32_t *) = htonl(0x80000000 |
 1156                          (m->m_pkthdr.len - NFSX_UNSIGNED));
 1157         }
 1158         rep->r_mreq = m;
 1159         rep->r_xid = *xidp;
 1160 tryagain:
 1161         if (nmp->nm_flag & NFSMNT_SOFT)
 1162                 rep->r_retry = nmp->nm_retry;
 1163         else
 1164                 rep->r_retry = NFS_MAXREXMIT + 1;       /* past clip limit */
 1165         rep->r_rtt = rep->r_rexmit = 0;
 1166         if (nfs_rto_timer(procnum) != NFS_DEFAULT_TIMER)
 1167                 rep->r_flags = R_TIMING;
 1168         else
 1169                 rep->r_flags = 0;
 1170         rep->r_mrep = NULL;
 1171 
 1172         /*
 1173          * Do the client side RPC.
 1174          */
 1175         nfsstats.rpcrequests++;
 1176         /*
 1177          * Chain request into list of outstanding requests. Be sure
 1178          * to put it LAST so timer finds oldest requests first.
 1179          */
 1180         mtx_lock(&nfs_reqq_mtx);
 1181         if (TAILQ_EMPTY(&nfs_reqq))
 1182                 callout_reset(&nfs_callout, nfs_ticks, nfs_timer, NULL);
 1183         TAILQ_INSERT_TAIL(&nfs_reqq, rep, r_chain);
 1184         mtx_unlock(&nfs_reqq_mtx);
 1185 
 1186         /*
 1187          * If backing off another request or avoiding congestion, don't
 1188          * send this one now but let timer do it. If not timing a request,
 1189          * do it now.
 1190          */
 1191         mtx_lock(&nmp->nm_mtx);
 1192         if (nmp->nm_so && 
 1193             (((nmp->nm_sotype == SOCK_STREAM) && !(nmp->nm_nfstcpstate.flags & NFS_TCP_FORCE_RECONNECT)) || 
 1194              (nmp->nm_flag & NFSMNT_DUMBTIMR) || nmp->nm_sent < nmp->nm_cwnd)) {
 1195                 if (nmp->nm_sotype == SOCK_STREAM)
 1196                         nmp->nm_nfstcpstate.sock_send_inprog++;
 1197                 mtx_unlock(&nmp->nm_mtx);
 1198                 m2 = m_copym(m, 0, M_COPYALL, M_TRYWAIT);
 1199                 error = nfs_send(nmp->nm_so, nmp->nm_nam, m2, rep);
 1200                 mtx_lock(&nmp->nm_mtx);
 1201                 mtx_lock(&rep->r_mtx);
 1202                 /* 
 1203                  * nfs_timer() could've re-transmitted the request if we ended up
 1204                  * blocking on nfs_send() too long, so check for R_SENT here.
 1205                  */
 1206                 if (!error && (rep->r_flags & (R_SENT | R_MUSTRESEND)) == 0) {
 1207                         nmp->nm_sent += NFS_CWNDSCALE;
 1208                         rep->r_flags |= R_SENT;
 1209                 }
 1210                 mtx_unlock(&rep->r_mtx);
 1211                 if (nmp->nm_sotype == SOCK_STREAM)
 1212                         nfs_wakup_reconnectors(rep->r_nmp);
 1213                 mtx_unlock(&nmp->nm_mtx);
 1214         } else {
 1215                 mtx_unlock(&nmp->nm_mtx);
 1216                 rep->r_rtt = -1;
 1217         }
 1218 
 1219         /*
 1220          * Wait for the reply from our send or the timer's.
 1221          */
 1222         if (!error || error == EPIPE)
 1223                 error = nfs_reply(rep);
 1224 
 1225         /*
 1226          * nfs_timer() may be in the process of re-transmitting this request.
 1227          * nfs_timer() drops the nfs_reqq_mtx before the pru_send() (to avoid LORs).
 1228          * Wait till nfs_timer() completes the re-transmission. When the reply 
 1229          * comes back, it will be discarded (since the req struct for it no longer 
 1230          * exists).
 1231          */
 1232 wait_for_pinned_req:
 1233         mtx_lock(&rep->r_mtx);
 1234         while (rep->r_flags & R_PIN_REQ) {
 1235                 msleep((caddr_t)&rep->r_flags, &rep->r_mtx, 
 1236                        (PZERO - 1), "nfsrxmt", 0);
 1237         }
 1238         mtx_unlock(&rep->r_mtx);
 1239 
 1240         mtx_lock(&nfs_reqq_mtx);
 1241         /* Have to check for R_PIN_REQ after grabbing nfs_reqq_mtx again */
 1242         mtx_lock(&rep->r_mtx);
 1243         if (rep->r_flags & R_PIN_REQ) {
 1244                 mtx_unlock(&rep->r_mtx);
 1245                 mtx_unlock(&nfs_reqq_mtx);
 1246                 goto wait_for_pinned_req;
 1247         } else
 1248                 mtx_unlock(&rep->r_mtx);
 1249         /* RPC done (timer not active, request not pinned), unlink the request */
 1250         TAILQ_REMOVE(&nfs_reqq, rep, r_chain);
 1251         if (TAILQ_EMPTY(&nfs_reqq))
 1252                 callout_stop(&nfs_callout);
 1253         mtx_unlock(&nfs_reqq_mtx);
 1254 
 1255         /*
 1256          * Decrement the outstanding request count.
 1257          */
 1258         mtx_lock(&rep->r_mtx);
 1259         if (rep->r_flags & R_SENT) {
 1260                 rep->r_flags &= ~R_SENT;        /* paranoia */
 1261                 mtx_unlock(&rep->r_mtx);
 1262                 mtx_lock(&nmp->nm_mtx);
 1263                 nmp->nm_sent -= NFS_CWNDSCALE;
 1264                 mtx_unlock(&nmp->nm_mtx);
 1265         } else
 1266                 mtx_unlock(&rep->r_mtx);
 1267 
 1268         /*
 1269          * If there was a successful reply and a tprintf msg was
 1270          * printed earlier, tprintf a response.
 1271          */
 1272         if (!error) {
 1273                 nfs_up(rep, nmp, rep->r_td, "is alive again", NFSSTA_TIMEO);
 1274         }
 1275         mrep = rep->r_mrep;
 1276         md = rep->r_md;
 1277         dpos = rep->r_dpos;
 1278         if (error) {
 1279                 /*
 1280                  * If we got interrupted by a signal in nfs_reply(), there's
 1281                  * a very small window where the reply could've come in before
 1282                  * this process got scheduled in. To handle that case, we need 
 1283                  * to free the reply if it was delivered.
 1284                  */
 1285                 if (rep->r_mrep != NULL)
 1286                         m_freem(rep->r_mrep);
 1287                 m_freem(rep->r_mreq);
 1288                 mtx_destroy(&rep->r_mtx);
 1289                 free((caddr_t)rep, M_NFSREQ);
 1290                 return (error);
 1291         }
 1292 
 1293         if (rep->r_mrep == NULL)
 1294                 panic("nfs_request: rep->r_mrep shouldn't be NULL if no error");
 1295 
 1296         /*
 1297          * Break down the RPC header and check that it is OK.
 1298          */
 1299         tl = nfsm_dissect(u_int32_t *, 3 * NFSX_UNSIGNED);
 1300         if (*tl++ == rpc_msgdenied) {
 1301                 if (*tl == rpc_mismatch)
 1302                         error = EOPNOTSUPP;
 1303                 else
 1304                         error = EACCES;
 1305                 m_freem(mrep);
 1306                 m_freem(rep->r_mreq);
 1307                 mtx_destroy(&rep->r_mtx);
 1308                 free((caddr_t)rep, M_NFSREQ);
 1309                 return (error);
 1310         }
 1311 
 1312         /*
 1313          * Just throw away any verifier (i.e., Kerberos etc.).
 1314          */
 1315         i = fxdr_unsigned(int, *tl++);          /* verf type */
 1316         i = fxdr_unsigned(int32_t, *tl);        /* len */
 1317         if (i > 0)
 1318                 nfsm_adv(nfsm_rndup(i));
 1319         tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
 1320         /* 0 == ok */
 1321         if (*tl == 0) {
 1322                 tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
 1323                 if (*tl != 0) {
 1324                         error = fxdr_unsigned(int, *tl);
 1325                         if ((nmp->nm_flag & NFSMNT_NFSV3) &&
 1326                                 error == NFSERR_TRYLATER) {
 1327                                 m_freem(mrep);
 1328                                 error = 0;
 1329                                 waituntil = time_second + nfs3_jukebox_delay;
 1330                                 while (time_second < waituntil) {
 1331                                         (void) tsleep(&lbolt, PSOCK, "nqnfstry", 0);
 1332                                 }
 1333                                 mtx_lock(&nfs_xid_mtx);
 1334                                 if (++nfs_xid == 0)
 1335                                         nfs_xid++;
 1336                                 rep->r_xid = *xidp = txdr_unsigned(nfs_xid);
 1337                                 mtx_unlock(&nfs_xid_mtx);
 1338                                 goto tryagain;
 1339                         }
 1340 
 1341                         /*
 1342                          * If the File Handle was stale, invalidate the
 1343                          * lookup cache, just in case.
 1344                          */
 1345                         if (error == ESTALE)
 1346                                 cache_purge(vp);
 1347                         /*
 1348                          * Skip wcc data on NFS errors for now. NetApp filers return corrupt
 1349                          * postop attrs in the wcc data for NFS err EROFS. Not sure if they 
 1350          * could return corrupt postop attrs for other errors as well.
 1351                          */
 1352                         if ((nmp->nm_flag & NFSMNT_NFSV3) && !nfs_skip_wcc_data_onerr) {
 1353                                 *mrp = mrep;
 1354                                 *mdp = md;
 1355                                 *dposp = dpos;
 1356                                 error |= NFSERR_RETERR;
 1357                         } else
 1358                                 m_freem(mrep);
 1359                         m_freem(rep->r_mreq);
 1360                         mtx_destroy(&rep->r_mtx);
 1361                         free((caddr_t)rep, M_NFSREQ);
 1362                         return (error);
 1363                 }
 1364 
 1365                 *mrp = mrep;
 1366                 *mdp = md;
 1367                 *dposp = dpos;
 1368                 m_freem(rep->r_mreq);
 1369                 mtx_destroy(&rep->r_mtx);
 1370                 free((caddr_t)rep, M_NFSREQ);
 1371                 return (0);
 1372         }
 1373         m_freem(mrep);
 1374         error = EPROTONOSUPPORT;
 1375 nfsmout:
 1376         m_freem(rep->r_mreq);
 1377         mtx_destroy(&rep->r_mtx);
 1378         free((caddr_t)rep, M_NFSREQ);
 1379         return (error);
 1380 }
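      /*
       * A note on the NFSERR_TRYLATER ("jukebox") path above: a v3 server
       * answering TRYLATER makes the client sleep on lbolt until time_second
       * reaches waituntil (nfs3_jukebox_delay seconds later), assign a fresh
       * xid under nfs_xid_mtx (the counter skips 0), and jump back to
       * tryagain to retry the RPC under the new xid.
       */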
 1381 
 1382 /*
 1383  * NFS timer routine.
 1384  * Scan the nfsreq list and retransmit any requests that have timed out.
 1385  * To avoid retransmission attempts on STREAM sockets (in the future), make
 1386  * sure to set the r_retry field to 0 (which implies nm_retry == 0).
 1387  * 
 1388  * The nfs reqq lock cannot be held while we do the pru_send() because of a
 1389  * lock ordering violation. The NFS client socket callback acquires 
 1390  * inp_lock->nfsreq mutex and pru_send acquires inp_lock. So we drop the 
 1391  * reqq mutex (and reacquire it after the pru_send()). The req structure
 1392  * (for the rexmit) is prevented from being removed by the R_PIN_REQ flag.
 1393  */
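      /*
       * A minimal sketch of the pin-drop-send-reacquire sequence described
       * above (illustrative only; the real code is inlined in nfs_timer()
       * below):
       *
       *      mtx_lock(&rep->r_mtx);
       *      rep->r_flags |= R_PIN_REQ;   (keep nfs_request() from freeing rep)
       *      mtx_unlock(&rep->r_mtx);
       *      mtx_unlock(&nfs_reqq_mtx);   (drop before pru_send() to avoid the LOR)
       *      error = (*so->so_proto->pr_usrreqs->pru_send)(so, 0, m, NULL,
       *          NULL, curthread);
       *      mtx_lock(&nfs_reqq_mtx);
       *      mtx_lock(&rep->r_mtx);
       *      rep->r_flags &= ~R_PIN_REQ;  (unpin and wake any waiter)
       *      wakeup(&rep->r_flags);
       *      mtx_unlock(&rep->r_mtx);
       */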
 1394 void
 1395 nfs_timer(void *arg)
 1396 {
 1397         struct nfsreq *rep;
 1398         struct mbuf *m;
 1399         struct socket *so;
 1400         struct nfsmount *nmp;
 1401         int timeo;
 1402         int error;
 1403         struct timeval now;
 1404 
 1405         getmicrouptime(&now);
 1406         mtx_lock(&nfs_reqq_mtx);
 1407         TAILQ_FOREACH(rep, &nfs_reqq, r_chain) {
 1408                 nmp = rep->r_nmp;
 1409                 mtx_lock(&rep->r_mtx);
 1410                 if (rep->r_mrep || (rep->r_flags & R_SOFTTERM)) {
 1411                         mtx_unlock(&rep->r_mtx);                        
 1412                         continue;
 1413                 } else {
 1414                         /*
 1415                          * Terminate request if force-unmount in progress.
 1416                          * Note that NFS could have vfs_busy'ed the mount,
 1417                          * causing the unmount to wait for the mnt_lock, making
 1418                          * this bit of logic necessary.
 1419                          */
 1420                         if (rep->r_nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF) {
 1421                                 nfs_softterm(rep);
 1422                                 mtx_unlock(&rep->r_mtx);
 1423                                 continue;
 1424                         }                               
 1425                         mtx_unlock(&rep->r_mtx);                        
 1426                 }
 1427                 if (nfs_sigintr(nmp, rep, rep->r_td))
 1428                         continue;
 1429                 mtx_lock(&nmp->nm_mtx);
 1430                 mtx_lock(&rep->r_mtx);
 1431                 if (nmp->nm_tprintf_initial_delay != 0 &&
 1432                     (rep->r_rexmit > 2 || (rep->r_flags & R_RESENDERR)) &&
 1433                     rep->r_lastmsg + nmp->nm_tprintf_delay < now.tv_sec) {
 1434                         rep->r_lastmsg = now.tv_sec;
 1435                         /*
 1436                          * Pin down the request and drop locks for the acquisition
 1437                          * of Giant from tprintf() in nfs_down().
 1438                          */
 1439                         rep->r_flags |= R_PIN_REQ;
 1440                         mtx_unlock(&rep->r_mtx);
 1441                         mtx_unlock(&nmp->nm_mtx);
 1442                         mtx_unlock(&nfs_reqq_mtx);
 1443                         nfs_down(rep, nmp, rep->r_td, "not responding",
 1444                                  0, NFSSTA_TIMEO);
 1445                         mtx_lock(&nfs_reqq_mtx);
 1446                         mtx_lock(&nmp->nm_mtx);
 1447                         mtx_lock(&rep->r_mtx);
 1448                         rep->r_flags &= ~R_PIN_REQ;
 1449                         wakeup((caddr_t)&rep->r_flags);
 1450                 }
 1451                 if (rep->r_rtt >= 0) {
 1452                         rep->r_rtt++;
 1453                         if (nmp->nm_flag & NFSMNT_DUMBTIMR)
 1454                                 timeo = nmp->nm_timeo;
 1455                         else
 1456                                 timeo = nfs_estimate_rto(nmp, rep->r_procnum);
 1457                         if (nmp->nm_timeouts > 0)
 1458                                 timeo *= nfs_backoff[nmp->nm_timeouts - 1];
 1459                         if (rep->r_rtt <= timeo) {
 1460                                 mtx_unlock(&rep->r_mtx);
 1461                                 mtx_unlock(&nmp->nm_mtx);
 1462                                 continue;
 1463                         }
 1464                         if (nmp->nm_timeouts < NFS_NBACKOFF)
 1465                                 nmp->nm_timeouts++;
 1466                 }
 1467                 if (rep->r_rexmit >= rep->r_retry) {    /* too many */
 1468                         nfsstats.rpctimeouts++;
 1469                         nfs_softterm(rep);
 1470                         mtx_unlock(&rep->r_mtx);
 1471                         mtx_unlock(&nmp->nm_mtx);
 1472                         continue;
 1473                 }
 1474                 if (nmp->nm_sotype != SOCK_DGRAM) {
 1475                         if (++rep->r_rexmit > NFS_MAXREXMIT)
 1476                                 rep->r_rexmit = NFS_MAXREXMIT;
 1477                         /*
 1478                          * For NFS/TCP, setting R_MUSTRESEND and waking up 
 1479                          * the requester will cause the request to be   
 1480                          * retransmitted (in nfs_reply()), re-connecting
 1481                          * if necessary.
 1482                          */
 1483                         rep->r_flags |= R_MUSTRESEND;
 1484                         wakeup((caddr_t)rep);
 1485                         rep->r_rtt = 0;
 1486                         mtx_unlock(&rep->r_mtx);
 1487                         mtx_unlock(&nmp->nm_mtx);
 1488                         continue;
 1489                 }
 1490                 if ((so = nmp->nm_so) == NULL) {
 1491                         mtx_unlock(&rep->r_mtx);
 1492                         mtx_unlock(&nmp->nm_mtx);
 1493                         continue;
 1494                 }
 1495                 /*
 1496                  * If there is enough space and the window allows,
 1497                  *      resend it.
 1498                  * Set r_rtt to -1 in case we fail to send it now.
 1499                  */
 1500                 rep->r_rtt = -1;
 1501                 if (sbspace(&so->so_snd) >= rep->r_mreq->m_pkthdr.len &&
 1502                     ((nmp->nm_flag & NFSMNT_DUMBTIMR) || (rep->r_flags & R_SENT) ||
 1503                      nmp->nm_sent < nmp->nm_cwnd)) {
 1504                         mtx_unlock(&rep->r_mtx);
 1505                         mtx_unlock(&nmp->nm_mtx);
 1506                         if ((m = m_copym(rep->r_mreq, 0, M_COPYALL, M_DONTWAIT))) {
 1507                                 /*
 1508                                  * Mark the request to indicate that an XMIT
 1509                                  * is in progress, to prevent the req structure
 1510                                  * from being removed in nfs_request().
 1511                                  */
 1512                                 mtx_lock(&rep->r_mtx);
 1513                                 rep->r_flags |= R_PIN_REQ;
 1514                                 mtx_unlock(&rep->r_mtx);
 1515                                 mtx_unlock(&nfs_reqq_mtx);
 1516                                 if ((nmp->nm_flag & NFSMNT_NOCONN) == 0)
 1517                                         error = (*so->so_proto->pr_usrreqs->pru_send)
 1518                                                 (so, 0, m, NULL, NULL, curthread);
 1519                                 else    
 1520                                         error = (*so->so_proto->pr_usrreqs->pru_send)
 1521                                                 (so, 0, m, nmp->nm_nam, NULL, 
 1522                                                  curthread);
 1523                                 mtx_lock(&nfs_reqq_mtx);
 1524                                 mtx_lock(&nmp->nm_mtx);
 1525                                 mtx_lock(&rep->r_mtx);
 1526                                 rep->r_flags &= ~R_PIN_REQ;
 1527                                 wakeup((caddr_t)&rep->r_flags);
 1528                                 if (error) {
 1529                                         if (NFSIGNORE_SOERROR(nmp->nm_soflags, error))
 1530                                                 so->so_error = 0;
 1531                                         rep->r_flags |= R_RESENDERR;
 1532                                 } else {
 1533                                         /*
 1534                                          * Iff first send, start timing;
 1535                                          * else turn timing off, back off the
 1536                                          * timer and halve the congestion window.
 1537                                          */
 1538                                         rep->r_flags &= ~R_RESENDERR;
 1539                                         if (rep->r_flags & R_SENT) {
 1540                                                 rep->r_flags &= ~R_TIMING;
 1541                                                 if (++rep->r_rexmit > NFS_MAXREXMIT)
 1542                                                         rep->r_rexmit = NFS_MAXREXMIT;
 1543                                                 nmp->nm_cwnd >>= 1;
 1544                                                 if (nmp->nm_cwnd < NFS_CWNDSCALE)
 1545                                                         nmp->nm_cwnd = NFS_CWNDSCALE;
 1546                                                 nfsstats.rpcretries++;
 1547                                         } else {
 1548                                                 rep->r_flags |= R_SENT;
 1549                                                 nmp->nm_sent += NFS_CWNDSCALE;
 1550                                         }
 1551                                         rep->r_rtt = 0;
 1552                                 }
 1553                                 mtx_unlock(&rep->r_mtx);
 1554                                 mtx_unlock(&nmp->nm_mtx);
 1555                         }
 1556                 } else {
 1557                         mtx_unlock(&rep->r_mtx);
 1558                         mtx_unlock(&nmp->nm_mtx);
 1559                 }
 1560         }
 1561         mtx_unlock(&nfs_reqq_mtx);
 1562         callout_reset(&nfs_callout, nfs_ticks, nfs_timer, NULL);
 1563 }
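      /*
       * A worked example of the backoff above (illustrative; assumes the
       * conventional NFS_CWNDSCALE of 256): a mount whose nm_cwnd has grown
       * to 4096 (16 request credits) and then suffers repeated
       * retransmissions is halved on each one, 4096 -> 2048 -> 1024 -> 512
       * -> 256, at which point it is clamped to NFS_CWNDSCALE (a single
       * outstanding request) until successful replies open the window again.
       */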
 1564 
 1565 /*
 1566  * Mark all of an nfs mount's outstanding requests with R_SOFTTERM and
 1567  * wait for all requests to complete. This is used by forced unmounts
 1568  * to terminate any outstanding RPCs.
 1569  */
 1570 int
 1571 nfs_nmcancelreqs(struct nfsmount *nmp)
 1572 {
 1574         struct nfsreq *req;
 1575         int i;
 1576 
 1577         mtx_lock(&nfs_reqq_mtx);
 1578         TAILQ_FOREACH(req, &nfs_reqq, r_chain) {
 1579                 mtx_lock(&req->r_mtx);
 1580                 if (nmp != req->r_nmp || req->r_mrep != NULL ||
 1581                     (req->r_flags & R_SOFTTERM)) {
 1582                         mtx_unlock(&req->r_mtx);                        
 1583                         continue;
 1584                 }
 1585                 nfs_softterm(req);
 1586                 mtx_unlock(&req->r_mtx);
 1587         }
 1588         mtx_unlock(&nfs_reqq_mtx);
 1589 
 1590         for (i = 0; i < 30; i++) {
 1591                 mtx_lock(&nfs_reqq_mtx);
 1592                 TAILQ_FOREACH(req, &nfs_reqq, r_chain) {
 1593                         if (nmp == req->r_nmp)
 1594                                 break;
 1595                 }
 1596                 mtx_unlock(&nfs_reqq_mtx);
 1597                 if (req == NULL)
 1598                         return (0);
 1599                 tsleep(&lbolt, PSOCK, "nfscancel", 0);
 1600         }
 1601         return (EBUSY);
 1602 }
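      /*
       * A note on the polling loop above (illustrative arithmetic): tsleep()
       * on lbolt wakes up roughly once per second, so the 30-iteration poll
       * gives outstanding RPCs on the order of 30 seconds to drain before
       * the forced unmount gives up and EBUSY is returned.
       */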
 1603 
 1604 /*
 1605  * Flag a request as being about to terminate (due to NFSMNT_INT/NFSMNT_SOFT).
 1606  * The nm_sent count is decremented now to avoid deadlocks when the process in
 1607  * soreceive() hasn't yet managed to send its own request.
 1608  */
 1609 
 1610 static void
 1611 nfs_softterm(struct nfsreq *rep)
 1612 {
 1613         KASSERT(mtx_owned(&rep->r_mtx), ("NFS req lock not owned !"));
 1614         rep->r_flags |= R_SOFTTERM;
 1615         if (rep->r_flags & R_SENT) {
 1616                 rep->r_nmp->nm_sent -= NFS_CWNDSCALE;
 1617                 rep->r_flags &= ~R_SENT;
 1618         }
 1619         /* 
 1620          * Request terminated; wake up the blocked process so that
 1621          * EINTR can be returned.
 1622          */
 1623         wakeup((caddr_t)rep);
 1624 }
 1625 
 1626 /*
 1627  * Any signal that can interrupt an NFS operation in an intr mount
 1628  * should be added to this set. SIGSTOP and SIGKILL cannot be masked.
 1629  */
 1630 int nfs_sig_set[] = {
 1631         SIGINT,
 1632         SIGTERM,
 1633         SIGHUP,
 1634         SIGKILL,
 1635         SIGSTOP,
 1636         SIGQUIT
 1637 };
 1638 
 1639 /*
 1640  * Check to see if one of the signals in our subset is pending on
 1641  * the process (in an intr mount).
 1642  */
 1643 static int
 1644 nfs_sig_pending(sigset_t set)
 1645 {
 1646         int i;
 1647         
 1648         for (i = 0 ; i < sizeof(nfs_sig_set)/sizeof(int) ; i++)
 1649                 if (SIGISMEMBER(set, nfs_sig_set[i]))
 1650                         return (1);
 1651         return (0);
 1652 }
 1653  
 1654 /*
 1655  * The set/restore sigmask functions are used to (temporarily) overwrite
 1656  * the process p_sigmask during an RPC call (for example). These are also
 1657  * used in other places in the NFS client that might tsleep().
 1658  */
 1659 void
 1660 nfs_set_sigmask(struct thread *td, sigset_t *oldset)
 1661 {
 1662         sigset_t newset;
 1663         int i;
 1664         struct proc *p;
 1665         
 1666         SIGFILLSET(newset);
 1667         if (td == NULL)
 1668                 td = curthread; /* XXX */
 1669         p = td->td_proc;
 1670         /* Remove the NFS set of signals from newset */
 1671         PROC_LOCK(p);
 1672         mtx_lock(&p->p_sigacts->ps_mtx);
 1673         for (i = 0 ; i < sizeof(nfs_sig_set)/sizeof(int) ; i++) {
 1674                 /*
 1675                  * But make sure we leave the ones already masked
 1676                  * by the process, i.e., remove the signal from the
 1677                  * temporary signal mask only if it wasn't already
 1678                  * in p_sigmask.
 1679                  */
 1680                 if (!SIGISMEMBER(td->td_sigmask, nfs_sig_set[i]) &&
 1681                     !SIGISMEMBER(p->p_sigacts->ps_sigignore, nfs_sig_set[i]))
 1682                         SIGDELSET(newset, nfs_sig_set[i]);
 1683         }
 1684         mtx_unlock(&p->p_sigacts->ps_mtx);
 1685         PROC_UNLOCK(p);
 1686         kern_sigprocmask(td, SIG_SETMASK, &newset, oldset, 0);
 1687 }
 1688 
 1689 void
 1690 nfs_restore_sigmask(struct thread *td, sigset_t *set)
 1691 {
 1692         if (td == NULL)
 1693                 td = curthread; /* XXX */
 1694         kern_sigprocmask(td, SIG_SETMASK, set, NULL, 0);
 1695 }
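      /*
       * A minimal usage sketch (illustrative only; ident, timo and the
       * wmesg string are hypothetical): callers bracket an interruptible
       * sleep with the pair above so that only the signals in nfs_sig_set
       * can interrupt it:
       *
       *      sigset_t oldset;
       *      int error;
       *
       *      nfs_set_sigmask(td, &oldset);
       *      error = tsleep(ident, PSOCK | PCATCH, "nfsslp", timo);
       *      nfs_restore_sigmask(td, &oldset);
       */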
 1696 
 1697 /*
 1698  * NFS wrapper for msleep() that installs a new p_sigmask and restores the
 1699  * old one after msleep() returns.
 1700  */
 1701 int
 1702 nfs_msleep(struct thread *td, void *ident, struct mtx *mtx, int priority, char *wmesg, int timo)
 1703 {
 1704         sigset_t oldset;
 1705         int error;
 1706 
 1708         if ((priority & PCATCH) == 0)
 1709                 return (msleep(ident, mtx, priority, wmesg, timo));
 1710         if (td == NULL)
 1711                 td = curthread; /* XXX */
 1712         nfs_set_sigmask(td, &oldset);
 1713         error = msleep(ident, mtx, priority, wmesg, timo);
 1714         nfs_restore_sigmask(td, &oldset);
 1716         return (error);
 1717 }
 1718 
 1719 /*
 1720  * Test for a termination condition pending on the process.
 1721  * This is used for NFSMNT_INT mounts.
 1722  */
 1723 int
 1724 nfs_sigintr(struct nfsmount *nmp, struct nfsreq *rep, struct thread *td)
 1725 {
 1726         struct proc *p;
 1727         sigset_t tmpset;
 1728         
 1729         if ((nmp->nm_flag & NFSMNT_NFSV4) != 0)
 1730                 return nfs4_sigintr(nmp, rep, td);
 1731         if (rep) {
 1732                 mtx_lock(&rep->r_mtx);
 1733                 if (rep->r_flags & R_SOFTTERM) {
 1734                         mtx_unlock(&rep->r_mtx);
 1735                         return (EIO);
 1736                 } else
 1737                         mtx_unlock(&rep->r_mtx);
 1738         }
 1739         /* Terminate all requests while attempting a forced unmount. */
 1740         if (nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF)
 1741                 return (EIO);
 1742         if (!(nmp->nm_flag & NFSMNT_INT))
 1743                 return (0);
 1744         if (td == NULL)
 1745                 return (0);
 1746         p = td->td_proc;
 1747         PROC_LOCK(p);
 1748         tmpset = p->p_siglist;
 1749         SIGSETOR(tmpset, td->td_siglist);
 1750         SIGSETNAND(tmpset, td->td_sigmask);
 1751         mtx_lock(&p->p_sigacts->ps_mtx);
 1752         SIGSETNAND(tmpset, p->p_sigacts->ps_sigignore);
 1753         mtx_unlock(&p->p_sigacts->ps_mtx);
 1754         if ((SIGNOTEMPTY(p->p_siglist) || SIGNOTEMPTY(td->td_siglist))
 1755             && nfs_sig_pending(tmpset)) {
 1756                 PROC_UNLOCK(p);
 1757                 return (EINTR);
 1758         }
 1759         PROC_UNLOCK(p);
 1760         return (0);
 1761 }
 1762 
 1763 /*
 1764  * Lock a socket against others.
 1765  * Necessary for STREAM sockets to ensure an entire RPC request/reply is
 1766  * sent or received atomically, and also to avoid race conditions between
 1767  * processes with NFS requests in progress when a reconnect is necessary.
 1768  */
 1769 int
 1770 nfs_connect_lock(struct nfsreq *rep)
 1771 {
 1772         int *statep = &rep->r_nmp->nm_state;
 1773         struct thread *td;
 1774         int error, slpflag = 0, slptimeo = 0;
 1775 
 1776         td = rep->r_td;
 1777         if (rep->r_nmp->nm_flag & NFSMNT_INT)
 1778                 slpflag = PCATCH;
 1779         while (*statep & NFSSTA_SNDLOCK) {
 1780                 error = nfs_sigintr(rep->r_nmp, rep, td);
 1781                 if (error) {
 1782                         return (error);
 1783                 }
 1784                 *statep |= NFSSTA_WANTSND;
 1785                 (void) msleep(statep, &rep->r_nmp->nm_mtx,
 1786                               slpflag | (PZERO - 1), "nfsndlck", slptimeo);
 1787                 if (slpflag == PCATCH) {
 1788                         slpflag = 0;
 1789                         slptimeo = 2 * hz;
 1790                 }
 1791         }
 1792         *statep |= NFSSTA_SNDLOCK;
 1793         return (0);
 1794 }
 1795 
 1796 /*
 1797  * Unlock the stream socket for others.
 1798  */
 1799 void
 1800 nfs_connect_unlock(struct nfsreq *rep)
 1801 {
 1802         int *statep = &rep->r_nmp->nm_state;
 1803 
 1804         if ((*statep & NFSSTA_SNDLOCK) == 0)
 1805                 panic("nfs sndunlock");
 1806         *statep &= ~NFSSTA_SNDLOCK;
 1807         if (*statep & NFSSTA_WANTSND) {
 1808                 *statep &= ~NFSSTA_WANTSND;
 1809                 wakeup(statep);
 1810         }
 1811 }
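      /*
       * A minimal usage sketch (illustrative pseudocode, not the exact
       * caller in this file): senders take the send lock around the socket
       * I/O, with nm_mtx held on entry because nfs_connect_lock() msleeps
       * on it, and always release the lock afterwards:
       *
       *      mtx_lock(&rep->r_nmp->nm_mtx);
       *      error = nfs_connect_lock(rep);
       *      mtx_unlock(&rep->r_nmp->nm_mtx);
       *      if (error == 0) {
       *              ... transmit the request on the nfsmount's socket ...
       *              nfs_connect_unlock(rep);
       *      }
       */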
 1812 
 1813 /*
 1814  *      nfs_realign:
 1815  *
 1816  *      Check for badly aligned mbuf data and realign by copying the unaligned
 1817  *      portion of the data into a new mbuf chain and freeing the portions
 1818  *      of the old chain that were replaced.
 1819  *
 1820  *      We cannot simply realign the data within the existing mbuf chain
 1821  *      because the underlying buffers may contain other rpc commands and
 1822  *      we cannot afford to overwrite them.
 1823  *
 1824  *      We would prefer to avoid this situation entirely.  The situation does
 1825  *      not occur with NFS/UDP and is supposed to occur only occasionally
 1826  *      with TCP.  Use vfs.nfs.realign_count and realign_test to check this.
 1827  *
 1828  */
 1829 static int
 1830 nfs_realign(struct mbuf **pm, int hsiz)
 1831 {
 1832         struct mbuf *m;
 1833         struct mbuf *n = NULL;
 1834         int off = 0;
 1835 
 1836         ++nfs_realign_test;
 1837         while ((m = *pm) != NULL) {
 1838                 if ((m->m_len & 0x3) || (mtod(m, intptr_t) & 0x3)) {
 1839                         MGET(n, M_DONTWAIT, MT_DATA);
 1840                         if (n == NULL)
 1841                                 return (ENOMEM);
 1842                         if (m->m_len >= MINCLSIZE) {
 1843                                 MCLGET(n, M_DONTWAIT);
 1844                                 if (n->m_ext.ext_buf == NULL) {
 1845                                         m_freem(n);
 1846                                         return (ENOMEM);
 1847                                 }
 1848                         }
 1849                         n->m_len = 0;
 1850                         break;
 1851                 }
 1852                 pm = &m->m_next;
 1853         }
 1854         /*
 1855          * If n is non-NULL, loop on m copying data, then replace the
 1856          * portion of the chain that had to be realigned.
 1857          */
 1858         if (n != NULL) {
 1859                 ++nfs_realign_count;
 1860                 while (m) {
 1861                         m_copyback(n, off, m->m_len, mtod(m, caddr_t));
 1862                         off += m->m_len;
 1863                         m = m->m_next;
 1864                 }
 1865                 m_freem(*pm);
 1866                 *pm = n;
 1867         }
 1868         return (0);
 1869 }
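      /*
       * A worked example of the alignment test above (illustrative): the
       * check (m->m_len & 0x3) || (mtod(m, intptr_t) & 0x3) flags any mbuf
       * whose length or data pointer is not a multiple of 4.  If, say, a
       * previous RPC record consumed 2 bytes into an mbuf, the next record
       * starts with mtod(m, intptr_t) & 0x3 == 2, so the remainder of the
       * chain is copied into a fresh, naturally aligned mbuf (or cluster)
       * before the fixed-size XDR words are dissected.
       */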
 1870 
 1871 
 1872 static int
 1873 nfs_msg(struct thread *td, const char *server, const char *msg, int error)
 1874 {
 1875         struct proc *p;
 1876 
 1877         p = td ? td->td_proc : NULL;
 1878         if (error) {
 1879                 tprintf(p, LOG_INFO, "nfs server %s: %s, error %d\n", server,
 1880                     msg, error);
 1881         } else {
 1882                 tprintf(p, LOG_INFO, "nfs server %s: %s\n", server, msg);
 1883         }
 1884         return (0);
 1885 }
 1886 
 1887 void
 1888 nfs_down(struct nfsreq *rep, struct nfsmount *nmp, struct thread *td,
 1889     const char *msg, int error, int flags)
 1890 {
 1895         if (nmp == NULL)
 1896                 return;
 1897         mtx_lock(&nmp->nm_mtx);
 1898         if ((flags & NFSSTA_TIMEO) && !(nmp->nm_state & NFSSTA_TIMEO)) {
 1899                 nmp->nm_state |= NFSSTA_TIMEO;
 1900                 mtx_unlock(&nmp->nm_mtx);
 1901                 vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
 1902                     VQ_NOTRESP, 0);
 1903         } else
 1904                 mtx_unlock(&nmp->nm_mtx);
 1905 #ifdef NFSSTA_LOCKTIMEO
 1906         mtx_lock(&nmp->nm_mtx);
 1907         if ((flags & NFSSTA_LOCKTIMEO) && !(nmp->nm_state & NFSSTA_LOCKTIMEO)) {
 1908                 nmp->nm_state |= NFSSTA_LOCKTIMEO;
 1909                 mtx_unlock(&nmp->nm_mtx);
 1910                 vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
 1911                     VQ_NOTRESPLOCK, 0);
 1912         } else
 1913                 mtx_unlock(&nmp->nm_mtx);
 1914 #endif
 1915         if (rep != NULL) {
 1916                 mtx_lock(&rep->r_mtx);
 1917                 rep->r_flags |= R_TPRINTFMSG;
 1918                 mtx_unlock(&rep->r_mtx);
 1919         }
 1920         nfs_msg(td, nmp->nm_mountp->mnt_stat.f_mntfromname, msg, error);
 1921 }
 1922 
 1923 void
 1924 nfs_up(struct nfsreq *rep, struct nfsmount *nmp, struct thread *td,
 1925     const char *msg, int flags)
 1926 {
 1931         if (nmp == NULL || rep == NULL)
 1932                 return;
 1933         mtx_lock(&rep->r_mtx);
 1934         if ((rep->r_flags & R_TPRINTFMSG) != 0) {
 1935                 mtx_unlock(&rep->r_mtx);
 1936                 nfs_msg(td, nmp->nm_mountp->mnt_stat.f_mntfromname, msg, 0);
 1937         } else
 1938                 mtx_unlock(&rep->r_mtx);
 1939 
 1940         mtx_lock(&nmp->nm_mtx);
 1941         if ((flags & NFSSTA_TIMEO) && (nmp->nm_state & NFSSTA_TIMEO)) {
 1942                 nmp->nm_state &= ~NFSSTA_TIMEO;
 1943                 mtx_unlock(&nmp->nm_mtx);
 1944                 vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
 1945                     VQ_NOTRESP, 1);
 1946         } else
 1947                 mtx_unlock(&nmp->nm_mtx);
 1948         
 1949 #ifdef NFSSTA_LOCKTIMEO
 1950         mtx_lock(&nmp->nm_mtx);
 1951         if ((flags & NFSSTA_LOCKTIMEO) && (nmp->nm_state & NFSSTA_LOCKTIMEO)) {
 1952                 nmp->nm_state &= ~NFSSTA_LOCKTIMEO;
 1953                 mtx_unlock(&nmp->nm_mtx);
 1954                 vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
 1955                     VQ_NOTRESPLOCK, 1);
 1956         } else
 1957                 mtx_unlock(&nmp->nm_mtx);
 1958 #endif
 1959 }
