FreeBSD/Linux Kernel Cross Reference
sys/nfsclient/nfs_socket.c

    1 /*-
    2  * Copyright (c) 1989, 1991, 1993, 1995
    3  *      The Regents of the University of California.  All rights reserved.
    4  *
    5  * This code is derived from software contributed to Berkeley by
    6  * Rick Macklem at The University of Guelph.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  * 4. Neither the name of the University nor the names of its contributors
   17  *    may be used to endorse or promote products derived from this software
   18  *    without specific prior written permission.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   30  * SUCH DAMAGE.
   31  *
   32  *      @(#)nfs_socket.c        8.5 (Berkeley) 3/30/95
   33  */
   34 
   35 #include <sys/cdefs.h>
   36 __FBSDID("$FreeBSD: releng/6.4/sys/nfsclient/nfs_socket.c 176838 2008-03-05 20:04:16Z jhb $");
   37 
   38 /*
   39  * Socket operations for use by nfs
   40  */
   41 
   42 #include "opt_inet6.h"
   43 
   44 #include <sys/param.h>
   45 #include <sys/systm.h>
   46 #include <sys/kernel.h>
   47 #include <sys/lock.h>
   48 #include <sys/malloc.h>
   49 #include <sys/mbuf.h>
   50 #include <sys/mount.h>
   51 #include <sys/mutex.h>
   52 #include <sys/proc.h>
   53 #include <sys/protosw.h>
   54 #include <sys/signalvar.h>
   55 #include <sys/syscallsubr.h>
   56 #include <sys/socket.h>
   57 #include <sys/socketvar.h>
   58 #include <sys/sysctl.h>
   59 #include <sys/syslog.h>
   60 #include <sys/vnode.h>
   61 
   62 #include <netinet/in.h>
   63 #include <netinet/tcp.h>
   64 
   65 #include <rpc/rpcclnt.h>
   66 
   67 #include <nfs/rpcv2.h>
   68 #include <nfs/nfsproto.h>
   69 #include <nfsclient/nfs.h>
   70 #include <nfs/xdr_subs.h>
   71 #include <nfsclient/nfsm_subs.h>
   72 #include <nfsclient/nfsmount.h>
   73 #include <nfsclient/nfsnode.h>
   74 
   75 #include <nfs4client/nfs4.h>
   76 
   77 #define TRUE    1
   78 #define FALSE   0
   79 
   80 static int      nfs_realign_test;
   81 static int      nfs_realign_count;
   82 static int      nfs_bufpackets = 4;
   83 static int      nfs_reconnects;
   84 static int      nfs3_jukebox_delay = 10;
    85 static int      nfs_skip_wcc_data_onerr = 1;
   86 
   87 SYSCTL_DECL(_vfs_nfs);
   88 
    89 SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_test, CTLFLAG_RW, &nfs_realign_test, 0, "Number of realign tests done");
    90 SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_count, CTLFLAG_RW, &nfs_realign_count, 0, "Number of mbuf realignments done");
    91 SYSCTL_INT(_vfs_nfs, OID_AUTO, bufpackets, CTLFLAG_RW, &nfs_bufpackets, 0, "Buffer reservation size 2 < x < 64");
   92 SYSCTL_INT(_vfs_nfs, OID_AUTO, reconnects, CTLFLAG_RD, &nfs_reconnects, 0,
   93     "number of times the nfs client has had to reconnect");
   94 SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs3_jukebox_delay, CTLFLAG_RW, &nfs3_jukebox_delay, 0,
   95     "number of seconds to delay a retry after receiving EJUKEBOX");
    96 SYSCTL_INT(_vfs_nfs, OID_AUTO, skip_wcc_data_onerr, CTLFLAG_RW, &nfs_skip_wcc_data_onerr, 0, "Disable wcc data processing on NFS errors");
   97 
   98 /*
    99  * There is a congestion window for outstanding rpcs maintained per mount
   100  * point. The cwnd size is adjusted roughly as described for TCP in:
   101  * Van Jacobson, "Congestion Avoidance and Control", Proceedings of
   102  * SIGCOMM '88, ACM, August 1988.
   103  * The cwnd size is chopped in half on a retransmit timeout
   104  * and incremented by 1/cwnd when each rpc reply is received and a full cwnd
   105  * of rpcs is in progress.
   106  * (The sent count and cwnd are scaled for integer arith.)
   107  * Variants of "slow start" were tried and were found to be too much of a
   108  * performance hit (ave. rtt 3 times larger); I suspect due to the large
   109  * rtt that nfs rpcs have.
   110  */
  111 #define NFS_CWNDSCALE   256
  112 #define NFS_MAXCWND     (NFS_CWNDSCALE * 32)
  113 #define NFS_NBACKOFF    8
  114 static int nfs_backoff[NFS_NBACKOFF] = { 2, 4, 8, 16, 32, 64, 128, 256, };
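
/*
 * A minimal illustrative sketch (not compiled; not part of the original
 * file) of the scaled congestion window arithmetic.  nm_cwnd and nm_sent
 * count rpcs in units of NFS_CWNDSCALE, so the additive increase done
 * when a reply arrives (see nfs_clnt_match_xid() below) adds
 * NFS_CWNDSCALE * NFS_CWNDSCALE / cwnd scaled units, i.e. roughly
 * 1/cwnd of an rpc per reply; a full window of replies grows the window
 * by about one rpc, and a retransmit timeout halves it.
 */
#if 0
static void
example_cwnd_update(struct nfsmount *nmp)
{
	/* Additive increase, as done when an rpc reply is matched. */
	nmp->nm_cwnd += (NFS_CWNDSCALE * NFS_CWNDSCALE +
	    (nmp->nm_cwnd >> 1)) / nmp->nm_cwnd;
	if (nmp->nm_cwnd > NFS_MAXCWND)
		nmp->nm_cwnd = NFS_MAXCWND;

	/*
	 * Multiplicative decrease on a retransmit timeout (a floor of
	 * one rpc is assumed for this sketch).
	 */
	nmp->nm_cwnd >>= 1;
	if (nmp->nm_cwnd < NFS_CWNDSCALE)
		nmp->nm_cwnd = NFS_CWNDSCALE;
}
#endif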
  115 struct callout  nfs_callout;
  116 
  117 static int      nfs_msg(struct thread *, const char *, const char *, int);
  118 static int      nfs_realign(struct mbuf **pm, int hsiz);
  119 static int      nfs_reply(struct nfsreq *);
  120 static void     nfs_softterm(struct nfsreq *rep);
  121 static int      nfs_reconnect(struct nfsreq *rep);
   122 static void     nfs_clnt_tcp_soupcall(struct socket *so, void *arg, int waitflag);
   123 static void     nfs_clnt_udp_soupcall(struct socket *so, void *arg, int waitflag);
   124 static void     wakeup_nfsreq(struct nfsreq *req);
  125 
  126 extern struct mtx nfs_reqq_mtx;
  127 extern struct mtx nfs_reply_mtx;
  128 
  129 /*
  130  * RTT estimator
  131  */
  132 
  133 static enum nfs_rto_timer_t nfs_proct[NFS_NPROCS] = {
  134         NFS_DEFAULT_TIMER,      /* NULL */
  135         NFS_GETATTR_TIMER,      /* GETATTR */
  136         NFS_DEFAULT_TIMER,      /* SETATTR */
  137         NFS_LOOKUP_TIMER,       /* LOOKUP */
  138         NFS_GETATTR_TIMER,      /* ACCESS */
  139         NFS_READ_TIMER,         /* READLINK */
  140         NFS_READ_TIMER,         /* READ */
  141         NFS_WRITE_TIMER,        /* WRITE */
  142         NFS_DEFAULT_TIMER,      /* CREATE */
  143         NFS_DEFAULT_TIMER,      /* MKDIR */
  144         NFS_DEFAULT_TIMER,      /* SYMLINK */
  145         NFS_DEFAULT_TIMER,      /* MKNOD */
  146         NFS_DEFAULT_TIMER,      /* REMOVE */
  147         NFS_DEFAULT_TIMER,      /* RMDIR */
  148         NFS_DEFAULT_TIMER,      /* RENAME */
  149         NFS_DEFAULT_TIMER,      /* LINK */
  150         NFS_READ_TIMER,         /* READDIR */
  151         NFS_READ_TIMER,         /* READDIRPLUS */
  152         NFS_DEFAULT_TIMER,      /* FSSTAT */
  153         NFS_DEFAULT_TIMER,      /* FSINFO */
  154         NFS_DEFAULT_TIMER,      /* PATHCONF */
  155         NFS_DEFAULT_TIMER,      /* COMMIT */
  156         NFS_DEFAULT_TIMER,      /* NOOP */
  157 };
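
/*
 * Note: nfs_proct[] is indexed by NFS procedure number, so the entries
 * above must remain in NFSPROC_* order and cover all NFS_NPROCS slots.
 */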
  158 
  159 /*
  160  * Choose the correct RTT timer for this NFS procedure.
  161  */
  162 static inline enum nfs_rto_timer_t
  163 nfs_rto_timer(u_int32_t procnum)
  164 {
  165         return nfs_proct[procnum];
  166 }
  167 
  168 /*
  169  * Initialize the RTT estimator state for a new mount point.
  170  */
  171 static void
  172 nfs_init_rtt(struct nfsmount *nmp)
  173 {
  174         int i;
  175 
  176         for (i = 0; i < NFS_MAX_TIMER; i++)
  177                 nmp->nm_srtt[i] = NFS_INITRTT;
  178         for (i = 0; i < NFS_MAX_TIMER; i++)
  179                 nmp->nm_sdrtt[i] = 0;
  180 }
  181 
  182 /*
  183  * Update a mount point's RTT estimator state using data from the
  184  * passed-in request.
  185  * 
  186  * Use a gain of 0.125 on the mean and a gain of 0.25 on the deviation.
  187  *
   188  * NB: Since the timer resolution of NFS_HZ is so coarse, it can often
  189  * result in r_rtt == 0. Since r_rtt == N means that the actual RTT is
  190  * between N + dt and N + 2 - dt ticks, add 1 before calculating the
  191  * update values.
  192  */
  193 static void
  194 nfs_update_rtt(struct nfsreq *rep)
  195 {
  196         int t1 = rep->r_rtt + 1;
  197         int index = nfs_rto_timer(rep->r_procnum) - 1;
  198         int *srtt = &rep->r_nmp->nm_srtt[index];
  199         int *sdrtt = &rep->r_nmp->nm_sdrtt[index];
  200 
  201         t1 -= *srtt >> 3;
  202         *srtt += t1;
  203         if (t1 < 0)
  204                 t1 = -t1;
  205         t1 -= *sdrtt >> 2;
  206         *sdrtt += t1;
  207 }
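
/*
 * A minimal userland sketch (illustrative only; not part of the original
 * file) of the fixed point arithmetic above: nm_srtt holds the smoothed
 * RTT scaled by 8 (gain 1/8 on the mean) and nm_sdrtt holds the smoothed
 * deviation scaled by 4 (gain 1/4), so no floating point is needed.
 */
#if 0
static void
example_rtt_update(int *srtt, int *sdrtt, int measured)
{
	int err;

	err = measured - (*srtt >> 3);	/* *srtt stores 8 * mean rtt */
	*srtt += err;			/* the mean moves by err / 8 */
	if (err < 0)
		err = -err;
	err -= *sdrtt >> 2;		/* *sdrtt stores 4 * mean dev */
	*sdrtt += err;			/* the deviation moves by |err| / 4 */
}
#endif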
  208 
  209 /*
  210  * Estimate RTO for an NFS RPC sent via an unreliable datagram.
  211  *
  212  * Use the mean and mean deviation of RTT for the appropriate type
  213  * of RPC for the frequent RPCs and a default for the others.
  214  * The justification for doing "other" this way is that these RPCs
   215  * happen so infrequently that the timer estimate would probably be stale.
  216  * Also, since many of these RPCs are non-idempotent, a conservative
  217  * timeout is desired.
  218  *
  219  * getattr, lookup - A+2D
  220  * read, write     - A+4D
  221  * other           - nm_timeo
  222  */
  223 static int
  224 nfs_estimate_rto(struct nfsmount *nmp, u_int32_t procnum)
  225 {
  226         enum nfs_rto_timer_t timer = nfs_rto_timer(procnum);
  227         int index = timer - 1;
  228         int rto;
  229 
  230         switch (timer) {
  231         case NFS_GETATTR_TIMER:
  232         case NFS_LOOKUP_TIMER:
  233                 rto = ((nmp->nm_srtt[index] + 3) >> 2) +
  234                                 ((nmp->nm_sdrtt[index] + 1) >> 1);
  235                 break;
  236         case NFS_READ_TIMER:
  237         case NFS_WRITE_TIMER:
  238                 rto = ((nmp->nm_srtt[index] + 7) >> 3) +
  239                                 (nmp->nm_sdrtt[index] + 1);
  240                 break;
  241         default:
  242                 rto = nmp->nm_timeo;
  243                 return (rto);
  244         }
  245 
  246         if (rto < NFS_MINRTO)
  247                 rto = NFS_MINRTO;
  248         else if (rto > NFS_MAXRTO)
  249                 rto = NFS_MAXRTO;
  250 
  251         return (rto);
  252 }
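
/*
 * Worked example of the arithmetic above (values invented for
 * illustration): with nm_srtt[index] == 64 and nm_sdrtt[index] == 16,
 * a GETATTR gets rto = ((64 + 3) >> 2) + ((16 + 1) >> 1) = 16 + 8 = 24
 * ticks, and a READ gets rto = ((64 + 7) >> 3) + (16 + 1) = 8 + 17 = 25
 * ticks, each then clamped to the [NFS_MINRTO, NFS_MAXRTO] range.
 */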
  253 
  254 
  255 /*
  256  * Initialize sockets and congestion for a new NFS connection.
   257  * The sockaddr is not freed on error.
  258  */
  259 int
  260 nfs_connect(struct nfsmount *nmp, struct nfsreq *rep)
  261 {
  262         struct socket *so;
  263         int error, rcvreserve, sndreserve;
  264         int pktscale;
  265         struct sockaddr *saddr;
  266         struct ucred *origcred;
  267         struct thread *td = curthread;
  268 
  269         /*
  270          * We need to establish the socket using the credentials of
  271          * the mountpoint.  Some parts of this process (such as
   272          * sobind() and soconnect()) will use the current thread's
  273          * credential instead of the socket credential.  To work
  274          * around this, temporarily change the current thread's
  275          * credential to that of the mountpoint.
  276          *
  277          * XXX: It would be better to explicitly pass the correct
  278          * credential to sobind() and soconnect().
  279          */
  280         origcred = td->td_ucred;
  281         td->td_ucred = nmp->nm_mountp->mnt_cred;
  282 
  283         NET_ASSERT_GIANT();
  284 
  285         if (nmp->nm_sotype == SOCK_STREAM) {
  286                 mtx_lock(&nmp->nm_nfstcpstate.mtx);
  287                 nmp->nm_nfstcpstate.flags |= NFS_TCP_EXPECT_RPCMARKER;
  288                 nmp->nm_nfstcpstate.rpcresid = 0;
  289                 mtx_unlock(&nmp->nm_nfstcpstate.mtx);
  290         }       
  291         nmp->nm_so = NULL;
  292         saddr = nmp->nm_nam;
  293         error = socreate(saddr->sa_family, &nmp->nm_so, nmp->nm_sotype,
  294                 nmp->nm_soproto, nmp->nm_mountp->mnt_cred, td);
  295         if (error)
  296                 goto bad;
  297         so = nmp->nm_so;
  298         nmp->nm_soflags = so->so_proto->pr_flags;
  299 
  300         /*
  301          * Some servers require that the client port be a reserved port number.
  302          */
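	/*
	 * To get one, the code below switches the socket to the low
	 * (reserved) port range, binds with port 0 so the kernel picks
	 * a reserved port, and then restores the default port range.
	 */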
  303         if (nmp->nm_flag & NFSMNT_RESVPORT) {
  304                 struct sockopt sopt;
  305                 int ip, ip2, len;
  306                 struct sockaddr_in6 ssin;
  307                 struct sockaddr *sa;
  308 
  309                 bzero(&sopt, sizeof sopt);
  310                 switch(saddr->sa_family) {
  311                 case AF_INET:
  312                         sopt.sopt_level = IPPROTO_IP;
  313                         sopt.sopt_name = IP_PORTRANGE;
  314                         ip = IP_PORTRANGE_LOW;
  315                         ip2 = IP_PORTRANGE_DEFAULT;
  316                         len = sizeof (struct sockaddr_in);
  317                         break;
  318 #ifdef INET6
  319                 case AF_INET6:
  320                         sopt.sopt_level = IPPROTO_IPV6;
  321                         sopt.sopt_name = IPV6_PORTRANGE;
  322                         ip = IPV6_PORTRANGE_LOW;
  323                         ip2 = IPV6_PORTRANGE_DEFAULT;
  324                         len = sizeof (struct sockaddr_in6);
  325                         break;
  326 #endif
  327                 default:
  328                         goto noresvport;
  329                 }
  330                 sa = (struct sockaddr *)&ssin;
  331                 bzero(sa, len);
  332                 sa->sa_len = len;
  333                 sa->sa_family = saddr->sa_family;
  334                 sopt.sopt_dir = SOPT_SET;
  335                 sopt.sopt_val = (void *)&ip;
  336                 sopt.sopt_valsize = sizeof(ip);
  337                 error = sosetopt(so, &sopt);
  338                 if (error)
  339                         goto bad;
  340                 error = sobind(so, sa, td);
  341                 if (error)
  342                         goto bad;
  343                 ip = ip2;
  344                 error = sosetopt(so, &sopt);
  345                 if (error)
  346                         goto bad;
  347         noresvport: ;
  348         }
  349 
  350         /*
  351          * Protocols that do not require connections may be optionally left
  352          * unconnected for servers that reply from a port other than NFS_PORT.
  353          */
  354         if (nmp->nm_flag & NFSMNT_NOCONN) {
  355                 if (nmp->nm_soflags & PR_CONNREQUIRED) {
  356                         error = ENOTCONN;
  357                         goto bad;
  358                 }
  359         } else {
  360                 error = soconnect(so, nmp->nm_nam, td);
  361                 if (error)
  362                         goto bad;
  363 
  364                 /*
  365                  * Wait for the connection to complete. Cribbed from the
  366                  * connect system call but with the wait timing out so
  367                  * that interruptible mounts don't hang here for a long time.
  368                  */
  369                 SOCK_LOCK(so);
  370                 while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
  371                         (void) msleep(&so->so_timeo, SOCK_MTX(so),
  372                             PSOCK, "nfscon", 2 * hz);
  373                         if ((so->so_state & SS_ISCONNECTING) &&
  374                             so->so_error == 0 && rep &&
  375                             (error = nfs_sigintr(nmp, rep, rep->r_td)) != 0) {
  376                                 so->so_state &= ~SS_ISCONNECTING;
  377                                 SOCK_UNLOCK(so);
  378                                 goto bad;
  379                         }
  380                 }
  381                 if (so->so_error) {
  382                         error = so->so_error;
  383                         so->so_error = 0;
  384                         SOCK_UNLOCK(so);
  385                         goto bad;
  386                 }
  387                 SOCK_UNLOCK(so);
  388         }
  389         so->so_rcv.sb_timeo = 12 * hz;
  390         so->so_snd.sb_timeo = 5 * hz;
  391 
  392         /*
  393          * Get buffer reservation size from sysctl, but impose reasonable
  394          * limits.
  395          */
  396         pktscale = nfs_bufpackets;
  397         if (pktscale < 2)
  398                 pktscale = 2;
  399         if (pktscale > 64)
  400                 pktscale = 64;
  401 
  402         if (nmp->nm_sotype == SOCK_DGRAM) {
  403                 sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * pktscale;
  404                 rcvreserve = (max(nmp->nm_rsize, nmp->nm_readdirsize) +
  405                     NFS_MAXPKTHDR) * pktscale;
  406         } else if (nmp->nm_sotype == SOCK_SEQPACKET) {
  407                 sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * pktscale;
  408                 rcvreserve = (max(nmp->nm_rsize, nmp->nm_readdirsize) +
  409                     NFS_MAXPKTHDR) * pktscale;
  410         } else {
  411                 if (nmp->nm_sotype != SOCK_STREAM)
  412                         panic("nfscon sotype");
  413                 if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
  414                         struct sockopt sopt;
  415                         int val;
  416 
  417                         bzero(&sopt, sizeof sopt);
  418                         sopt.sopt_dir = SOPT_SET;
  419                         sopt.sopt_level = SOL_SOCKET;
  420                         sopt.sopt_name = SO_KEEPALIVE;
  421                         sopt.sopt_val = &val;
  422                         sopt.sopt_valsize = sizeof val;
  423                         val = 1;
  424                         sosetopt(so, &sopt);
  425                 }
  426                 if (so->so_proto->pr_protocol == IPPROTO_TCP) {
  427                         struct sockopt sopt;
  428                         int val;
  429 
  430                         bzero(&sopt, sizeof sopt);
  431                         sopt.sopt_dir = SOPT_SET;
  432                         sopt.sopt_level = IPPROTO_TCP;
  433                         sopt.sopt_name = TCP_NODELAY;
  434                         sopt.sopt_val = &val;
  435                         sopt.sopt_valsize = sizeof val;
  436                         val = 1;
  437                         sosetopt(so, &sopt);
  438                 }
  439                 sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR +
  440                     sizeof (u_int32_t)) * pktscale;
  441                 rcvreserve = (nmp->nm_rsize + NFS_MAXPKTHDR +
  442                     sizeof (u_int32_t)) * pktscale;
  443         }
  444         error = soreserve(so, sndreserve, rcvreserve);
  445         if (error)
  446                 goto bad;
  447         SOCKBUF_LOCK(&so->so_rcv);
  448         so->so_rcv.sb_flags |= SB_NOINTR;
  449         so->so_upcallarg = (caddr_t)nmp;
  450         if (so->so_type == SOCK_STREAM)
  451                 so->so_upcall = nfs_clnt_tcp_soupcall;
  452         else    
  453                 so->so_upcall = nfs_clnt_udp_soupcall;
  454         so->so_rcv.sb_flags |= SB_UPCALL;
  455         SOCKBUF_UNLOCK(&so->so_rcv);
  456         SOCKBUF_LOCK(&so->so_snd);
  457         so->so_snd.sb_flags |= SB_NOINTR;
  458         SOCKBUF_UNLOCK(&so->so_snd);
  459 
  460         /* Restore current thread's credentials. */
  461         td->td_ucred = origcred;
  462 
  463         /* Initialize other non-zero congestion variables */
  464         nfs_init_rtt(nmp);
  465         nmp->nm_cwnd = NFS_MAXCWND / 2;     /* Initial send window */
  466         nmp->nm_sent = 0;
  467         nmp->nm_timeouts = 0;
  468         return (0);
  469 
  470 bad:
  471         /* Restore current thread's credentials. */
  472         td->td_ucred = origcred;
  473 
  474         nfs_disconnect(nmp);
  475         return (error);
  476 }
  477 
  478 /*
  479  * Reconnect routine:
  480  * Called when a connection is broken on a reliable protocol.
  481  * - clean up the old socket
  482  * - nfs_connect() again
  483  * - set R_MUSTRESEND for all outstanding requests on mount point
  484  * If this fails the mount point is DEAD!
  485  * nb: Must be called with the nfs_sndlock() set on the mount point.
  486  */
  487 static int
  488 nfs_reconnect(struct nfsreq *rep)
  489 {
  490         struct nfsreq *rp;
  491         struct nfsmount *nmp = rep->r_nmp;
  492         int error;
  493 
  494         nfs_reconnects++;
  495         nfs_disconnect(nmp);
  496         while ((error = nfs_connect(nmp, rep)) != 0) {
  497                 if (error == ERESTART)
  498                         error = EINTR;
  499                 if (error == EIO || error == EINTR)
  500                         return (error);
  501                 (void) tsleep(&lbolt, PSOCK, "nfscon", 0);
  502         }
  503 
  504         /*
  505          * Clear the FORCE_RECONNECT flag only after the connect 
  506          * succeeds. To prevent races between multiple processes 
  507          * waiting on the mountpoint where the connection is being
  508          * torn down. The first one to acquire the sndlock will 
  509          * retry the connection. The others block on the sndlock
  510          * until the connection is established successfully, and 
  511          * then re-transmit the request.
  512          */
  513         mtx_lock(&nmp->nm_nfstcpstate.mtx);
  514         nmp->nm_nfstcpstate.flags &= ~NFS_TCP_FORCE_RECONNECT;
  515         mtx_unlock(&nmp->nm_nfstcpstate.mtx);   
  516 
  517         /*
  518          * Loop through outstanding request list and fix up all requests
  519          * on old socket.
  520          */
  521         mtx_lock(&nfs_reqq_mtx);
  522         TAILQ_FOREACH(rp, &nfs_reqq, r_chain) {
  523                 if (rp->r_nmp == nmp)
  524                         rp->r_flags |= R_MUSTRESEND;
  525         }
  526         mtx_unlock(&nfs_reqq_mtx);
  527         return (0);
  528 }
  529 
  530 /*
  531  * NFS disconnect. Clean up and unlink.
  532  */
  533 void
  534 nfs_disconnect(struct nfsmount *nmp)
  535 {
  536         struct socket *so;
  537 
  538         NET_ASSERT_GIANT();
  539 
  540         if (nmp->nm_so) {
  541                 so = nmp->nm_so;
  542                 nmp->nm_so = NULL;
  543                 SOCKBUF_LOCK(&so->so_rcv);
  544                 so->so_upcallarg = NULL;
  545                 so->so_upcall = NULL;
  546                 so->so_rcv.sb_flags &= ~SB_UPCALL;
  547                 SOCKBUF_UNLOCK(&so->so_rcv);
  548                 soshutdown(so, SHUT_WR);
  549                 soclose(so);
  550         }
  551 }
  552 
  553 void
  554 nfs_safedisconnect(struct nfsmount *nmp)
  555 {
  556         struct nfsreq dummyreq;
  557 
  558         bzero(&dummyreq, sizeof(dummyreq));
  559         dummyreq.r_nmp = nmp;
  560         nfs_disconnect(nmp);
  561 }
  562 
  563 /*
  564  * This is the nfs send routine. For connection based socket types, it
  565  * must be called with an nfs_sndlock() on the socket.
  566  * - return EINTR if the RPC is terminated, 0 otherwise
  567  * - set R_MUSTRESEND if the send fails for any reason
  568  * - do any cleanup required by recoverable socket errors (?)
  569  */
  570 int
  571 nfs_send(struct socket *so, struct sockaddr *nam, struct mbuf *top,
  572     struct nfsreq *rep)
  573 {
  574         struct sockaddr *sendnam;
  575         int error, error2, soflags, flags;
  576 
  577         NET_ASSERT_GIANT();
  578 
  579         KASSERT(rep, ("nfs_send: called with rep == NULL"));
  580 
  581         error = nfs_sigintr(rep->r_nmp, rep, rep->r_td);
  582         if (error) {
  583                 m_freem(top);
  584                 return (error);
  585         }
  586         if ((so = rep->r_nmp->nm_so) == NULL) {
  587                 rep->r_flags |= R_MUSTRESEND;
  588                 m_freem(top);
  589                 return (0);
  590         }
  591         rep->r_flags &= ~R_MUSTRESEND;
  592         soflags = rep->r_nmp->nm_soflags;
  593 
  594         if ((soflags & PR_CONNREQUIRED) || (so->so_state & SS_ISCONNECTED))
  595                 sendnam = NULL;
  596         else
  597                 sendnam = nam;
  598         if (so->so_type == SOCK_SEQPACKET)
  599                 flags = MSG_EOR;
  600         else
  601                 flags = 0;
  602 
  603         error = so->so_proto->pr_usrreqs->pru_sosend(so, sendnam, 0, top, 0,
  604                                                      flags, curthread /*XXX*/);
  605         if (error == ENOBUFS && so->so_type == SOCK_DGRAM) {
  606                 error = 0;
  607                 rep->r_flags |= R_MUSTRESEND;
  608         }
  609 
  610         if (error) {
  611                 /*
  612                  * Don't report EPIPE errors on nfs sockets.
  613                  * These can be due to idle tcp mounts which will be closed by
  614                  * netapp, solaris, etc. if left idle too long.
  615                  */
  616                 if (error != EPIPE) {
  617                         log(LOG_INFO, "nfs send error %d for server %s\n",
  618                             error,
  619                             rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
  620                 }
  621                 /*
  622                  * Deal with errors for the client side.
  623                  */
  624                 error2 = NFS_SIGREP(rep);
  625                 if (error2)
  626                         error = error2;
  627                 else
  628                         rep->r_flags |= R_MUSTRESEND;
  629 
  630                 /*
  631                  * Handle any recoverable (soft) socket errors here. (?)
  632                  * Make EWOULDBLOCK a recoverable error, we'll rexmit from nfs_timer().
  633                  */
  634                 if (error != EINTR && error != ERESTART && error != EIO && error != EPIPE)
  635                         error = 0;
  636         }
  637         return (error);
  638 }
  639 
  640 int
  641 nfs_reply(struct nfsreq *rep)
  642 {
   643         struct socket *so;
   644         struct mbuf *m;
  645         int error = 0, sotype, slpflag;
  646 
  647         NET_ASSERT_GIANT();
  648 
  649         sotype = rep->r_nmp->nm_sotype;
  650         /*
  651          * For reliable protocols, lock against other senders/receivers
  652          * in case a reconnect is necessary.
  653          */
  654         if (sotype != SOCK_DGRAM) {
  655                 error = nfs_sndlock(rep);
  656                 if (error)
  657                         return (error);
  658 tryagain:
  659                 if (rep->r_mrep) {
  660                         nfs_sndunlock(rep);
  661                         return (0);
  662                 }
  663                 if (rep->r_flags & R_SOFTTERM) {
  664                         nfs_sndunlock(rep);
  665                         return (EINTR);
  666                 }
  667                 so = rep->r_nmp->nm_so;
  668                 mtx_lock(&rep->r_nmp->nm_nfstcpstate.mtx);
  669                 if (!so || 
  670                     (rep->r_nmp->nm_nfstcpstate.flags & NFS_TCP_FORCE_RECONNECT)) {
  671                         mtx_unlock(&rep->r_nmp->nm_nfstcpstate.mtx);
  672                         error = nfs_reconnect(rep);
  673                         if (error) {
  674                                 nfs_sndunlock(rep);
  675                                 return (error);
  676                         }
  677                         goto tryagain;
  678                 } else
  679                         mtx_unlock(&rep->r_nmp->nm_nfstcpstate.mtx);
  680                 while (rep->r_flags & R_MUSTRESEND) {
   681                         m = m_copym(rep->r_mreq, 0, M_COPYALL, M_TRYWAIT);
  682                         nfsstats.rpcretries++;
  683                         error = nfs_send(so, rep->r_nmp->nm_nam, m, rep);
  684                         if (error) {
  685                                 if (error == EINTR || error == ERESTART ||
  686                                     (error = nfs_reconnect(rep)) != 0) {
  687                                         nfs_sndunlock(rep);
  688                                         return (error);
  689                                 }
  690                                 goto tryagain;
  691                         }
  692                 }
  693                 nfs_sndunlock(rep);
  694         }
  695         slpflag = 0;
  696         if (rep->r_nmp->nm_flag & NFSMNT_INT)
  697                 slpflag = PCATCH;
  698         mtx_lock(&nfs_reply_mtx);
  699         while ((rep->r_mrep == NULL) && (error == 0) && 
  700                ((rep->r_flags & R_SOFTTERM) == 0) &&
  701                ((sotype == SOCK_DGRAM) || ((rep->r_flags & R_MUSTRESEND) == 0)))
  702                 error = msleep((caddr_t)rep, &nfs_reply_mtx, 
  703                                slpflag | (PZERO - 1), "nfsreq", 0);
  704         mtx_unlock(&nfs_reply_mtx);
  705         if (error == EINTR || error == ERESTART)
  706                 /* NFS operations aren't restartable. Map ERESTART to EINTR */
  707                 return (EINTR);
  708         if (rep->r_flags & R_SOFTTERM)
  709                 /* Request was terminated because we exceeded the retries (soft mount) */
  710                 return (ETIMEDOUT);
  711         if (sotype == SOCK_STREAM) {
  712                 mtx_lock(&rep->r_nmp->nm_nfstcpstate.mtx);
  713                 if (((rep->r_nmp->nm_nfstcpstate.flags & NFS_TCP_FORCE_RECONNECT) || 
  714                      (rep->r_flags & R_MUSTRESEND))) {
  715                         mtx_unlock(&rep->r_nmp->nm_nfstcpstate.mtx);    
  716                         error = nfs_sndlock(rep);
  717                         if (error)
  718                                 return (error);
  719                         goto tryagain;
  720                 } else
  721                         mtx_unlock(&rep->r_nmp->nm_nfstcpstate.mtx);
  722         }
  723         return (error);
  724 }
  725 
  726 /*
  727  * XXX TO DO
  728  * Make nfs_realign() non-blocking. Also make nfsm_dissect() nonblocking.
  729  */
  730 static void
  731 nfs_clnt_match_xid(struct socket *so, 
  732                    struct nfsmount *nmp, 
  733                    struct mbuf *mrep)
  734 {
  735         struct mbuf *md;
  736         caddr_t dpos;
  737         u_int32_t rxid, *tl;
  738         struct nfsreq *rep;
  739         int error;
  740         
  741         /*
  742          * Search for any mbufs that are not a multiple of 4 bytes long
  743          * or with m_data not longword aligned.
  744          * These could cause pointer alignment problems, so copy them to
  745          * well aligned mbufs.
  746          */
  747         if (nfs_realign(&mrep, 5 * NFSX_UNSIGNED) == ENOMEM) {
  748                 m_freem(mrep);
  749                 nfsstats.rpcinvalid++;
  750                 return;
  751         }
  752         
  753         /*
  754          * Get the xid and check that it is an rpc reply
  755          */
  756         md = mrep;
  757         dpos = mtod(md, caddr_t);
  758         tl = nfsm_dissect_nonblock(u_int32_t *, 2*NFSX_UNSIGNED);
  759         rxid = *tl++;
  760         if (*tl != rpc_reply) {
  761                 m_freem(mrep);
  762 nfsmout:
  763                 nfsstats.rpcinvalid++;
  764                 return;
  765         }
  766 
  767         mtx_lock(&nfs_reqq_mtx);
  768         /*
  769          * Loop through the request list to match up the reply
  770          * Iff no match, just drop the datagram
  771          */
  772         TAILQ_FOREACH(rep, &nfs_reqq, r_chain) {
  773                 if (rep->r_mrep == NULL && rxid == rep->r_xid) {
  774                         /* Found it.. */
  775                         rep->r_mrep = mrep;
  776                         rep->r_md = md;
  777                         rep->r_dpos = dpos;
  778                         /*
  779                          * Update congestion window.
  780                          * Do the additive increase of
  781                          * one rpc/rtt.
  782                          */
  783                         if (nmp->nm_cwnd <= nmp->nm_sent) {
  784                                 nmp->nm_cwnd +=
  785                                         (NFS_CWNDSCALE * NFS_CWNDSCALE +
  786                                          (nmp->nm_cwnd >> 1)) / nmp->nm_cwnd;
  787                                 if (nmp->nm_cwnd > NFS_MAXCWND)
  788                                         nmp->nm_cwnd = NFS_MAXCWND;
  789                         }       
  790                         if (rep->r_flags & R_SENT) {
  791                                 rep->r_flags &= ~R_SENT;
  792                                 nmp->nm_sent -= NFS_CWNDSCALE;
  793                         }
  794                         if (rep->r_flags & R_TIMING)
  795                                 nfs_update_rtt(rep);
  796                         nmp->nm_timeouts = 0;
  797                         break;
  798                 }
  799         }
  800         /*
  801          * If not matched to a request, drop it.
  802          * If it's mine, wake up requestor.
  803          */
   804         if (rep == NULL) {
  805                 nfsstats.rpcunexpected++;
  806                 m_freem(mrep);
  807         } else
  808                 wakeup_nfsreq(rep);
  809         mtx_unlock(&nfs_reqq_mtx);
  810 }
  811 
  812 /* 
  813  * The wakeup of the requestor should be done under the mutex
  814  * to avoid potential missed wakeups.
  815  */
  816 static void 
  817 wakeup_nfsreq(struct nfsreq *req)
  818 {
  819         mtx_lock(&nfs_reply_mtx);
  820         wakeup((caddr_t)req);
  821         mtx_unlock(&nfs_reply_mtx);     
  822 }
  823 
  824 static void
  825 nfs_mark_for_reconnect(struct nfsmount *nmp)
  826 {
  827         struct nfsreq *rp;
  828 
  829         mtx_lock(&nmp->nm_nfstcpstate.mtx);
  830         nmp->nm_nfstcpstate.flags |= NFS_TCP_FORCE_RECONNECT;
  831         mtx_unlock(&nmp->nm_nfstcpstate.mtx);
   832         /*
   833          * Wake up all processes that are waiting for replies
   834          * on this mount point. One of them does the reconnect.
   835          */
  836         mtx_lock(&nfs_reqq_mtx);
  837         TAILQ_FOREACH(rp, &nfs_reqq, r_chain) {
  838                 if (rp->r_nmp == nmp) {
  839                         rp->r_flags |= R_MUSTRESEND;
  840                         wakeup_nfsreq(rp);
  841                 }
  842         }
  843         mtx_unlock(&nfs_reqq_mtx);
  844 }
  845 
  846 static int
  847 nfstcp_readable(struct socket *so, int bytes)
  848 {
  849         int retval;
  850         
  851         SOCKBUF_LOCK(&so->so_rcv);
  852         retval = (so->so_rcv.sb_cc >= (bytes) ||
  853                   (so->so_rcv.sb_state & SBS_CANTRCVMORE) ||
  854                   so->so_error);
  855         SOCKBUF_UNLOCK(&so->so_rcv);
  856         return (retval);
  857 }
  858 
  859 #define nfstcp_marker_readable(so)      nfstcp_readable(so, sizeof(u_int32_t))
  860 
  861 static int
  862 nfs_copy_len(struct mbuf *mp, char *buf, int len)
  863 {
  864         while (len > 0 && mp != NULL) {
  865                 int copylen = min(len, mp->m_len);
  866                 
  867                 bcopy(mp->m_data, buf, copylen);
  868                 buf += copylen;
  869                 len -= copylen;
  870                 mp = mp->m_next;
  871         }
  872         return (len);
  873 }
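
/*
 * A sketch (illustrative only; the helper name and the EBADRPC choice
 * are invented here) of how nfs_copy_len() is used below: pull the
 * 4-byte RPC record mark out of an mbuf chain without calling
 * uiomove().  A non-zero return from nfs_copy_len() means the chain
 * was shorter than the requested length.
 */
#if 0
static int
example_read_marker(struct mbuf *mp, u_int32_t *lenp)
{
	u_int32_t mark;

	if (nfs_copy_len(mp, (char *)&mark, sizeof(mark)) != 0)
		return (EBADRPC);	/* chain too short */
	/* High bit flags the last fragment; low 31 bits are the length. */
	*lenp = ntohl(mark) & ~0x80000000;
	return (0);
}
#endif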
  874 
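/*
 * The TCP upcall below is a two-state machine driven by nm_nfstcpstate:
 * while NFS_TCP_EXPECT_RPCMARKER is set it reads the 4-byte RPC record
 * mark and stores the fragment length in rpcresid; while rpcresid > 0
 * it reads that many bytes of reply, hands them to nfs_clnt_match_xid(),
 * and goes back to expecting a marker.
 */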
  875 static void
  876 nfs_clnt_tcp_soupcall(struct socket *so, void *arg, int waitflag)
  877 {
  878         struct nfsmount *nmp = (struct nfsmount *)arg;
  879         struct mbuf *mp = NULL;
  880         struct uio auio;
  881         int error;
  882         u_int32_t len;
  883         int rcvflg;
  884 
   885         /*
   886          * Don't pull any more data from the socket if we've marked the
   887          * mountpoint for reconnect.
   888          */
  889         mtx_lock(&nmp->nm_nfstcpstate.mtx);
  890         if (nmp->nm_nfstcpstate.flags & NFS_TCP_FORCE_RECONNECT) {
  891                 mtx_unlock(&nmp->nm_nfstcpstate.mtx);           
  892                 return;
  893         } else                  
  894                 mtx_unlock(&nmp->nm_nfstcpstate.mtx);
  895         auio.uio_td = curthread;
  896         auio.uio_segflg = UIO_SYSSPACE;
  897         auio.uio_rw = UIO_READ;
  898         for ( ; ; ) {
  899                 if (nmp->nm_nfstcpstate.flags & NFS_TCP_EXPECT_RPCMARKER) {
  900                         int resid;
  901 
  902                         if (!nfstcp_marker_readable(so)) {
  903                                 /* Marker is not readable */
  904                                 return;
  905                         }
  906                         auio.uio_resid = sizeof(u_int32_t);
  907                         auio.uio_iov = NULL;
  908                         auio.uio_iovcnt = 0;
  909                         mp = NULL;
  910                         rcvflg = (MSG_DONTWAIT | MSG_SOCALLBCK);
  911                         error =  so->so_proto->pr_usrreqs->pru_soreceive
  912                                 (so, (struct sockaddr **)0,
  913                                  &auio, &mp, (struct mbuf **)0, &rcvflg);
   914                         /*
   915                          * We've already tested that the socket is readable. Two
   916                          * cases here: we either read 0 bytes (the peer closed
   917                          * the connection), or got some other error. In both
   918                          * cases, we tear down the connection.
   919                          */
  920                         if (error || auio.uio_resid > 0) {
  921                                 if (error && error != ECONNRESET) {
  922                                         log(LOG_ERR, 
  923                                             "nfs/tcp clnt: Error %d reading socket, tearing down TCP connection\n",
  924                                             error);
  925                                 }
  926                                 goto mark_reconnect;
  927                         }
  928                         if (mp == NULL)
  929                                 panic("nfs_clnt_tcp_soupcall: Got empty mbuf chain from sorecv\n");
  930                         /*
  931                          * Sigh. We can't do the obvious thing here (which would
  932                          * be to have soreceive copy the length from mbufs for us).
  933                          * Calling uiomove() from the context of a socket callback
  934                          * (even for kernel-kernel copies) leads to LORs (since
  935                          * we hold network locks at this point).
  936                          */
  937                         if ((resid = nfs_copy_len(mp, (char *)&len, 
  938                                                   sizeof(u_int32_t)))) {
  939                                 log(LOG_ERR, "%s (%d) from nfs server %s\n",
  940                                     "Bad RPC HDR length",
  941                                     (int)(sizeof(u_int32_t) - resid),
  942                                     nmp->nm_mountp->mnt_stat.f_mntfromname);
  943                                 goto mark_reconnect;
  944                         }                               
  945                         len = ntohl(len) & ~0x80000000;
  946                         m_freem(mp);
  947                         /*
  948                          * This is SERIOUS! We are out of sync with the sender
  949                          * and forcing a disconnect/reconnect is all I can do.
  950                          */
  951                         if (len > NFS_MAXPACKET || len == 0) {
  952                                 log(LOG_ERR, "%s (%d) from nfs server %s\n",
  953                                     "impossible packet length",
  954                                     len,
  955                                     nmp->nm_mountp->mnt_stat.f_mntfromname);
  956                                 goto mark_reconnect;
  957                         }
  958                         nmp->nm_nfstcpstate.rpcresid = len;
  959                         nmp->nm_nfstcpstate.flags &= ~(NFS_TCP_EXPECT_RPCMARKER);
  960                 }
  961                 /* 
  962                  * Processed RPC marker or no RPC marker to process. 
  963                  * Pull in and process data.
  964                  */
  965                 if (nmp->nm_nfstcpstate.rpcresid > 0) {
  966                         if (!nfstcp_readable(so, nmp->nm_nfstcpstate.rpcresid)) {
  967                                 /* All data not readable */
  968                                 return;
  969                         }
  970                         auio.uio_resid = nmp->nm_nfstcpstate.rpcresid;
  971                         auio.uio_iov = NULL;
  972                         auio.uio_iovcnt = 0;
  973                         mp = NULL;
  974                         rcvflg = (MSG_DONTWAIT | MSG_SOCALLBCK);
  975                         error =  so->so_proto->pr_usrreqs->pru_soreceive
  976                                 (so, (struct sockaddr **)0,
  977                                  &auio, &mp, (struct mbuf **)0, &rcvflg);
  978                         if (error || auio.uio_resid > 0) {
  979                                 if (error && error != ECONNRESET) {
  980                                         log(LOG_ERR, 
  981                                             "nfs/tcp clnt: Error %d reading socket, tearing down TCP connection\n",
  982                                             error);
  983                                 }
  984                                 goto mark_reconnect;                            
  985                         }
  986                         if (mp == NULL)
  987                                 panic("nfs_clnt_tcp_soupcall: Got empty mbuf chain from sorecv\n");
  988                         nmp->nm_nfstcpstate.rpcresid = 0;
  989                         nmp->nm_nfstcpstate.flags |= NFS_TCP_EXPECT_RPCMARKER;
  990                         /* We got the entire RPC reply. Match XIDs and wake up requestor */
  991                         nfs_clnt_match_xid(so, nmp, mp);
  992                 }
  993         }
  994 
  995 mark_reconnect:
  996         nfs_mark_for_reconnect(nmp);
  997 }
  998 
  999 static void
 1000 nfs_clnt_udp_soupcall(struct socket *so, void *arg, int waitflag)
 1001 {
 1002         struct nfsmount *nmp = (struct nfsmount *)arg;
 1003         struct uio auio;
 1004         struct mbuf *mp = NULL;
 1005         struct mbuf *control = NULL;
 1006         int error, rcvflag;
 1007 
  1009         auio.uio_td = curthread;
  1010         rcvflag = MSG_DONTWAIT;
  1011         auio.uio_resid = 1000000000;
 1012         do {
 1013                 mp = control = NULL;
 1014                 error = so->so_proto->pr_usrreqs->pru_soreceive(so,
 1015                                         NULL, &auio, &mp,
 1016                                         &control, &rcvflag);
 1017                 if (control)
 1018                         m_freem(control);
 1019                 if (mp)
 1020                         nfs_clnt_match_xid(so, nmp, mp);
 1021         } while (mp && !error);
 1022 }
 1023 
 1024 /*
 1025  * nfs_request - goes something like this
 1026  *      - fill in request struct
 1027  *      - links it into list
 1028  *      - calls nfs_send() for first transmit
 1029  *      - calls nfs_receive() to get reply
 1030  *      - break down rpc header and return with nfs reply pointed to
 1031  *        by mrep or error
 1032  * nb: always frees up mreq mbuf list
 1033  */
 1034 int
 1035 nfs_request(struct vnode *vp, struct mbuf *mrest, int procnum,
 1036     struct thread *td, struct ucred *cred, struct mbuf **mrp,
 1037     struct mbuf **mdp, caddr_t *dposp)
 1038 {
 1039         struct mbuf *mrep, *m2;
 1040         struct nfsreq *rep;
 1041         u_int32_t *tl;
 1042         int i;
 1043         struct nfsmount *nmp;
 1044         struct mbuf *m, *md, *mheadend;
 1045         time_t waituntil;
 1046         caddr_t dpos;
 1047         int s, error = 0, mrest_len, auth_len, auth_type;
 1048         struct timeval now;
 1049         u_int32_t *xidp;
 1050 
 1051         /* Reject requests while attempting a forced unmount. */
 1052         if (vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF) {
 1053                 m_freem(mrest);
 1054                 return (ESTALE);
 1055         }
 1056         nmp = VFSTONFS(vp->v_mount);
 1057         if ((nmp->nm_flag & NFSMNT_NFSV4) != 0)
  1058                 return (nfs4_request(vp, mrest, procnum, td, cred, mrp, mdp, dposp));
 1059         MALLOC(rep, struct nfsreq *, sizeof(struct nfsreq), M_NFSREQ, M_WAITOK);
 1060         rep->r_mrep = rep->r_md = NULL;
 1061         rep->r_nmp = nmp;
 1062         rep->r_vp = vp;
 1063         rep->r_td = td;
 1064         rep->r_procnum = procnum;
 1065 
 1066         getmicrouptime(&now);
 1067         rep->r_lastmsg = now.tv_sec -
 1068             ((nmp->nm_tprintf_delay) - (nmp->nm_tprintf_initial_delay));
 1069         mrest_len = m_length(mrest, NULL);
 1070 
 1071         /*
 1072          * Get the RPC header with authorization.
 1073          */
 1074         auth_type = RPCAUTH_UNIX;
 1075         if (cred->cr_ngroups < 1)
 1076                 panic("nfsreq nogrps");
 1077         auth_len = ((((cred->cr_ngroups - 1) > nmp->nm_numgrps) ?
 1078                 nmp->nm_numgrps : (cred->cr_ngroups - 1)) << 2) +
 1079                 5 * NFSX_UNSIGNED;
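	/*
	 * The AUTH_UNIX credential body is stamp, machine name (sent as
	 * a zero-length string), uid, gid, and a gid count, i.e.
	 * 5 * NFSX_UNSIGNED bytes, plus 4 bytes (hence the << 2) for
	 * each supplementary group, clamped to nm_numgrps.
	 */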
 1080         m = nfsm_rpchead(cred, nmp->nm_flag, procnum, auth_type, auth_len,
 1081              mrest, mrest_len, &mheadend, &xidp);
 1082 
 1083         /*
 1084          * For stream protocols, insert a Sun RPC Record Mark.
 1085          */
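	/*
	 * The mark is one word: the 0x80000000 bit flags the last
	 * fragment of the record and the low 31 bits carry the fragment
	 * length (standard RPC record marking, RFC 1831).
	 */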
 1086         if (nmp->nm_sotype == SOCK_STREAM) {
 1087                 M_PREPEND(m, NFSX_UNSIGNED, M_TRYWAIT);
 1088                 *mtod(m, u_int32_t *) = htonl(0x80000000 |
 1089                          (m->m_pkthdr.len - NFSX_UNSIGNED));
 1090         }
 1091         rep->r_mreq = m;
 1092         rep->r_xid = *xidp;
 1093 tryagain:
 1094         if (nmp->nm_flag & NFSMNT_SOFT)
 1095                 rep->r_retry = nmp->nm_retry;
 1096         else
 1097                 rep->r_retry = NFS_MAXREXMIT + 1;       /* past clip limit */
 1098         rep->r_rtt = rep->r_rexmit = 0;
 1099         if (nfs_rto_timer(procnum) != NFS_DEFAULT_TIMER)
 1100                 rep->r_flags = R_TIMING;
 1101         else
 1102                 rep->r_flags = 0;
 1103         rep->r_mrep = NULL;
 1104 
 1105         /*
 1106          * Do the client side RPC.
 1107          */
 1108         nfsstats.rpcrequests++;
 1109         /*
 1110          * Chain request into list of outstanding requests. Be sure
 1111          * to put it LAST so timer finds oldest requests first.
 1112          */
 1113         s = splsoftclock();
 1114         mtx_lock(&nfs_reqq_mtx);
 1115         if (TAILQ_EMPTY(&nfs_reqq))
 1116                 callout_reset(&nfs_callout, nfs_ticks, nfs_timer, NULL);
 1117         TAILQ_INSERT_TAIL(&nfs_reqq, rep, r_chain);
 1118         mtx_unlock(&nfs_reqq_mtx);
 1119 
 1120         /*
 1121          * If backing off another request or avoiding congestion, don't
 1122          * send this one now but let timer do it. If not timing a request,
 1123          * do it now.
 1124          */
 1125         if (nmp->nm_so && (nmp->nm_sotype != SOCK_DGRAM ||
 1126                 (nmp->nm_flag & NFSMNT_DUMBTIMR) ||
 1127                 nmp->nm_sent < nmp->nm_cwnd)) {
 1128                 splx(s);
 1129                 error = nfs_sndlock(rep);
 1130                 if (!error) {
 1131                         m2 = m_copym(m, 0, M_COPYALL, M_TRYWAIT);
 1132                         error = nfs_send(nmp->nm_so, nmp->nm_nam, m2, rep);
 1133                         nfs_sndunlock(rep);
 1134                 }
 1135                 mtx_lock(&nfs_reqq_mtx);
 1136                 /* 
 1137                  * nfs_timer() could've re-transmitted the request if we ended up
 1138                  * blocking on nfs_send() too long, so check for R_SENT here.
 1139                  */
 1140                 if (!error && (rep->r_flags & (R_SENT | R_MUSTRESEND)) == 0) {
 1141                         nmp->nm_sent += NFS_CWNDSCALE;
 1142                         rep->r_flags |= R_SENT;
 1143                 }
 1144                 mtx_unlock(&nfs_reqq_mtx);
 1145         } else {
 1146                 splx(s);
 1147                 rep->r_rtt = -1;
 1148         }
 1149 
 1150         /*
 1151          * Wait for the reply from our send or the timer's.
 1152          */
 1153         if (!error || error == EPIPE)
 1154                 error = nfs_reply(rep);
 1155 
 1156         /*
 1157          * RPC done, unlink the request.
 1158          */
 1159         s = splsoftclock();
 1160         mtx_lock(&nfs_reqq_mtx);
 1161         TAILQ_REMOVE(&nfs_reqq, rep, r_chain);
 1162         if (TAILQ_EMPTY(&nfs_reqq))
 1163                 callout_stop(&nfs_callout);
 1164         /*
 1165          * Decrement the outstanding request count.
 1166          */
 1167         if (rep->r_flags & R_SENT) {
 1168                 rep->r_flags &= ~R_SENT;        /* paranoia */
 1169                 nmp->nm_sent -= NFS_CWNDSCALE;
 1170         }
 1171         mtx_unlock(&nfs_reqq_mtx);
 1172         splx(s);
 1173 
 1174         /*
  1175          * If there was a successful reply and a tprintf msg has been
  1176          * printed, tprintf a response.
 1177          */
 1178         if (!error) {
 1179                 mtx_lock(&Giant);
 1180                 nfs_up(rep, nmp, rep->r_td, "is alive again", NFSSTA_TIMEO);
 1181                 mtx_unlock(&Giant);
 1182         }
 1183         mrep = rep->r_mrep;
 1184         md = rep->r_md;
 1185         dpos = rep->r_dpos;
 1186         if (error) {
 1187                 /*
 1188                  * If we got interrupted by a signal in nfs_reply(), there's
 1189                  * a very small window where the reply could've come in before
 1190                  * this process got scheduled in. To handle that case, we need 
 1191                  * to free the reply if it was delivered.
 1192                  */
 1193                 if (rep->r_mrep != NULL)
 1194                         m_freem(rep->r_mrep);
 1195                 m_freem(rep->r_mreq);
 1196                 free((caddr_t)rep, M_NFSREQ);
 1197                 return (error);
 1198         }
 1199 
 1200         if (rep->r_mrep == NULL)
 1201                 panic("nfs_request: rep->r_mrep shouldn't be NULL if no error\n");
 1202 
 1203         /*
 1204          * break down the rpc header and check if ok
 1205          */
 1206         tl = nfsm_dissect(u_int32_t *, 3 * NFSX_UNSIGNED);
 1207         if (*tl++ == rpc_msgdenied) {
 1208                 if (*tl == rpc_mismatch)
 1209                         error = EOPNOTSUPP;
 1210                 else
 1211                         error = EACCES;
 1212                 m_freem(mrep);
 1213                 m_freem(rep->r_mreq);
 1214                 free((caddr_t)rep, M_NFSREQ);
 1215                 return (error);
 1216         }
 1217 
 1218         /*
  1219          * Just throw away any verifier (i.e., Kerberos etc).
 1220          */
 1221         i = fxdr_unsigned(int, *tl++);          /* verf type */
 1222         i = fxdr_unsigned(int32_t, *tl);        /* len */
 1223         if (i > 0)
 1224                 nfsm_adv(nfsm_rndup(i));
 1225         tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
 1226         /* 0 == ok */
 1227         if (*tl == 0) {
 1228                 tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
 1229                 if (*tl != 0) {
 1230                         error = fxdr_unsigned(int, *tl);
 1231                         if ((nmp->nm_flag & NFSMNT_NFSV3) &&
 1232                                 error == NFSERR_TRYLATER) {
 1233                                 m_freem(mrep);
 1234                                 error = 0;
 1235                                 waituntil = time_second + nfs3_jukebox_delay;
 1236                                 while (time_second < waituntil)
 1237                                         (void) tsleep(&lbolt,
 1238                                                 PSOCK, "nqnfstry", 0);
 1239                                 rep->r_xid = *xidp = txdr_unsigned(nfs_xid_gen());
 1240                                 goto tryagain;
 1241                         }
 1242 
 1243                         /*
 1244                          * If the File Handle was stale, invalidate the
 1245                          * lookup cache, just in case.
 1246                          */
 1247                         if (error == ESTALE)
 1248                                 cache_purge(vp);
 1249                         /*
 1250                          * Skip wcc data on NFS errors for now. NetApp filers return corrupt
 1251                          * postop attrs in the wcc data for NFS err EROFS. Not sure if they 
  1252                          * could return corrupt postop attrs for other errors.
 1253                          */
 1254                         if ((nmp->nm_flag & NFSMNT_NFSV3) && !nfs_skip_wcc_data_onerr) {
 1255                                 *mrp = mrep;
 1256                                 *mdp = md;
 1257                                 *dposp = dpos;
 1258                                 error |= NFSERR_RETERR;
 1259                         } else
 1260                                 m_freem(mrep);
 1261                         m_freem(rep->r_mreq);
 1262                         free((caddr_t)rep, M_NFSREQ);
 1263                         return (error);
 1264                 }
 1265 
 1266                 *mrp = mrep;
 1267                 *mdp = md;
 1268                 *dposp = dpos;
 1269                 m_freem(rep->r_mreq);
 1270                 FREE((caddr_t)rep, M_NFSREQ);
 1271                 return (0);
 1272         }
 1273         m_freem(mrep);
 1274         error = EPROTONOSUPPORT;
 1275 nfsmout:
 1276         m_freem(rep->r_mreq);
 1277         free((caddr_t)rep, M_NFSREQ);
 1278         return (error);
 1279 }
 1280 
 1281 /*
  1282  * NFS timer routine.
  1283  * Scan the nfsreq list and retransmit any requests that have timed out.
 1284  * To avoid retransmission attempts on STREAM sockets (in the future) make
 1285  * sure to set the r_retry field to 0 (implies nm_retry == 0).
 1286  * 
 1287  * XXX - 
 1288  * For now, since we don't register MPSAFE callouts for the NFS client -
 1289  * softclock() acquires Giant before calling us. That prevents req entries
 1290  * from being removed from the list (from nfs_request()). But we still 
 1291  * acquire the nfs reqq mutex to make sure the state of individual req
 1292  * entries is not modified from RPC reply handling (from socket callback)
 1293  * while nfs_timer is walking the list of reqs.
 1294  * The nfs reqq lock cannot be held while we do the pru_send() because of a
 1295  * lock ordering violation. The NFS client socket callback acquires 
 1296  * inp_lock->nfsreq mutex and pru_send acquires inp_lock. So we drop the 
 1297  * reqq mutex (and reacquire it after the pru_send()). This won't work
 1298  * when we move to fine grained locking for NFS. When we get to that point, 
 1299  * a rewrite of nfs_timer() will be needed.
 1300  */
 1301 void
 1302 nfs_timer(void *arg)
 1303 {
 1304         struct nfsreq *rep;
 1305         struct mbuf *m;
 1306         struct socket *so;
 1307         struct nfsmount *nmp;
 1308         int timeo;
 1309         int s, error;
 1310         struct timeval now;
 1311 
 1312         getmicrouptime(&now);
 1313         s = splnet();
 1314         mtx_lock(&Giant);       /* nfs_down -> tprintf */
 1315         mtx_lock(&nfs_reqq_mtx);
 1316         TAILQ_FOREACH(rep, &nfs_reqq, r_chain) {
 1317                 nmp = rep->r_nmp;
 1318                 if (rep->r_mrep || (rep->r_flags & R_SOFTTERM))
 1319                         continue;
 1320                 if (nfs_sigintr(nmp, rep, rep->r_td))
 1321                         continue;
 1322                 if (nmp->nm_tprintf_initial_delay != 0 &&
 1323                     (rep->r_rexmit > 2 || (rep->r_flags & R_RESENDERR)) &&
 1324                     rep->r_lastmsg + nmp->nm_tprintf_delay < now.tv_sec) {
 1325                         rep->r_lastmsg = now.tv_sec;
 1326                         nfs_down(rep, nmp, rep->r_td, "not responding",
 1327                             0, NFSSTA_TIMEO);
 1328 #if 0
 1329                         if (!(nmp->nm_state & NFSSTA_MOUNTED)) {
 1330                                 /* we're not yet completely mounted and */
 1331                                 /* we can't complete an RPC, so we fail */
 1332                                 nfsstats.rpctimeouts++;
 1333                                 nfs_softterm(rep);
 1334                                 continue;
 1335                         }
 1336 #endif
 1337                 }
 1338                 if (rep->r_rtt >= 0) {
 1339                         rep->r_rtt++;
 1340                         if (nmp->nm_flag & NFSMNT_DUMBTIMR)
 1341                                 timeo = nmp->nm_timeo;
 1342                         else
 1343                                 timeo = nfs_estimate_rto(nmp, rep->r_procnum);
 1344                         if (nmp->nm_timeouts > 0)
 1345                                 timeo *= nfs_backoff[nmp->nm_timeouts - 1];
 1346                         if (rep->r_rtt <= timeo)
 1347                                 continue;
 1348                         if (nmp->nm_timeouts < NFS_NBACKOFF)
 1349                                 nmp->nm_timeouts++;
 1350                 }
 1351                 if (rep->r_rexmit >= rep->r_retry) {    /* too many */
 1352                         nfsstats.rpctimeouts++;
 1353                         nfs_softterm(rep);
 1354                         continue;
 1355                 }
 1356                 if (nmp->nm_sotype != SOCK_DGRAM) {
 1357                         if (++rep->r_rexmit > NFS_MAXREXMIT)
 1358                                 rep->r_rexmit = NFS_MAXREXMIT;
 1359                         /*
 1360                          * For NFS/TCP, setting R_MUSTRESEND and waking up 
 1361                          * the requester will cause the request to be   
 1362                          * retransmitted (in nfs_reply()), re-connecting
 1363                          * if necessary.
 1364                          */
 1365                         rep->r_flags |= R_MUSTRESEND;
 1366                         wakeup_nfsreq(rep);
 1367                         rep->r_rtt = 0;
 1368                         continue;
 1369                 }
 1370                 if ((so = nmp->nm_so) == NULL)
 1371                         continue;
 1372                 /*
 1373                  * If there is enough space and the window allows..
 1374                  *      Resend it
 1375                  * Set r_rtt to -1 in case we fail to send it now.
 1376                  */
 1377                 rep->r_rtt = -1;
 1378                 if (sbspace(&so->so_snd) >= rep->r_mreq->m_pkthdr.len &&
 1379                    ((nmp->nm_flag & NFSMNT_DUMBTIMR) ||
 1380                     (rep->r_flags & R_SENT) ||
 1381                     nmp->nm_sent < nmp->nm_cwnd) &&
 1382                    (m = m_copym(rep->r_mreq, 0, M_COPYALL, M_DONTWAIT))){
 1383                         mtx_unlock(&nfs_reqq_mtx);
 1384                         if ((nmp->nm_flag & NFSMNT_NOCONN) == 0)
 1385                             error = (*so->so_proto->pr_usrreqs->pru_send)
 1386                                     (so, 0, m, NULL, NULL, curthread);
 1387                         else
 1388                             error = (*so->so_proto->pr_usrreqs->pru_send)
 1389                                     (so, 0, m, nmp->nm_nam, NULL, curthread);
 1390                         mtx_lock(&nfs_reqq_mtx);
 1391                         if (error) {
 1392                                 if (NFSIGNORE_SOERROR(nmp->nm_soflags, error))
 1393                                         so->so_error = 0;
 1394                                 rep->r_flags |= R_RESENDERR;
 1395                         } else {
 1396                                 /*
 1397                                  * Iff first send, start timing
 1398                                  * else turn timing off, backoff timer
 1399                                  * and divide congestion window by 2.
 1400                                  */
 1401                                 rep->r_flags &= ~R_RESENDERR;
 1402                                 if (rep->r_flags & R_SENT) {
 1403                                         rep->r_flags &= ~R_TIMING;
 1404                                         if (++rep->r_rexmit > NFS_MAXREXMIT)
 1405                                                 rep->r_rexmit = NFS_MAXREXMIT;
 1406                                         nmp->nm_cwnd >>= 1;
 1407                                         if (nmp->nm_cwnd < NFS_CWNDSCALE)
 1408                                                 nmp->nm_cwnd = NFS_CWNDSCALE;
 1409                                         nfsstats.rpcretries++;
 1410                                 } else {
 1411                                         rep->r_flags |= R_SENT;
 1412                                         nmp->nm_sent += NFS_CWNDSCALE;
 1413                                 }
 1414                                 rep->r_rtt = 0;
 1415                         }
 1416                 }
 1417         }
 1418         mtx_unlock(&nfs_reqq_mtx);
 1419         mtx_unlock(&Giant);     /* nfs_down -> tprintf */
 1420         splx(s);
 1421         callout_reset(&nfs_callout, nfs_ticks, nfs_timer, NULL);
 1422 }
 1423 
 1424 /*
 1425  * Mark all of an nfs mount's outstanding requests with R_SOFTTERM and
 1426  * wait for all requests to complete. This is used by forced unmounts
 1427  * to terminate any outstanding RPCs.
 1428  */
 1429 int
 1430 nfs_nmcancelreqs(nmp)
 1431         struct nfsmount *nmp;
 1432 {
 1433         struct nfsreq *req;
 1434         int i, s;
 1435 
 1436         s = splnet();
 1437         mtx_lock(&nfs_reqq_mtx);
 1438         TAILQ_FOREACH(req, &nfs_reqq, r_chain) {
 1439                 if (nmp != req->r_nmp || req->r_mrep != NULL ||
 1440                     (req->r_flags & R_SOFTTERM))
 1441                         continue;
 1442                 nfs_softterm(req);
 1443         }
 1444         mtx_unlock(&nfs_reqq_mtx);
 1445         splx(s);
 1446 
 1447         for (i = 0; i < 30; i++) {
 1448                 s = splnet();
 1449                 mtx_lock(&nfs_reqq_mtx);
 1450                 TAILQ_FOREACH(req, &nfs_reqq, r_chain) {
 1451                         if (nmp == req->r_nmp)
 1452                                 break;
 1453                 }
 1454                 mtx_unlock(&nfs_reqq_mtx);
 1455                 splx(s);
 1456                 if (req == NULL)
 1457                         return (0);
 1458                 tsleep(&lbolt, PSOCK, "nfscancel", 0);
 1459         }
 1460         return (EBUSY);
 1461 }
 1462 
 1463 /*
 1464  * Flag a request as being about to terminate (due to NFSMNT_INT/NFSMNT_SOFT).
 1465  * The nm_send count is decremented now to avoid deadlocks when the process in
 1466  * soreceive() hasn't yet managed to send its own request.
 1467  */
 1468 
 1469 static void
 1470 nfs_softterm(struct nfsreq *rep)
 1471 {
 1472 
 1473         rep->r_flags |= R_SOFTTERM;
 1474         if (rep->r_flags & R_SENT) {
 1475                 rep->r_nmp->nm_sent -= NFS_CWNDSCALE;
 1476                 rep->r_flags &= ~R_SENT;
 1477         }
 1478         /* 
 1479          * Request terminated, wakeup the blocked process, so that we
 1480          * can return EINTR back.
 1481          */
 1482         wakeup_nfsreq(rep);
 1483 }
 1484 
 1485 /*
 1486  * Any signal that can interrupt an NFS operation in an intr mount
 1487  * should be added to this set. SIGSTOP and SIGKILL cannot be masked.
 1488  */
 1489 int nfs_sig_set[] = {
 1490         SIGINT,
 1491         SIGTERM,
 1492         SIGHUP,
 1493         SIGKILL,
 1494         SIGSTOP,
 1495         SIGQUIT
 1496 };
 1497 
 1498 /*
 1499  * Check to see if one of the signals in our subset is pending on
 1500  * the process (in an intr mount).
 1501  */
 1502 static int
 1503 nfs_sig_pending(sigset_t set)
 1504 {
 1505         int i;
 1506         
 1507         for (i = 0 ; i < sizeof(nfs_sig_set)/sizeof(int) ; i++)
 1508                 if (SIGISMEMBER(set, nfs_sig_set[i]))
 1509                         return (1);
 1510         return (0);
 1511 }
 1512  
 1513 /*
 1514  * The set/restore sigmask functions are used to (temporarily) overwrite
 1515  * the process p_sigmask during an RPC call (for example). These are also
 1516  * used in other places in the NFS client that might tsleep().
 1517  */
 1518 void
 1519 nfs_set_sigmask(struct thread *td, sigset_t *oldset)
 1520 {
 1521         sigset_t newset;
 1522         int i;
 1523         struct proc *p;
 1524         
 1525         SIGFILLSET(newset);
 1526         if (td == NULL)
 1527                 td = curthread; /* XXX */
 1528         p = td->td_proc;
 1529         /* Remove the NFS set of signals from newset */
 1530         PROC_LOCK(p);
 1531         mtx_lock(&p->p_sigacts->ps_mtx);
 1532         for (i = 0 ; i < sizeof(nfs_sig_set)/sizeof(int) ; i++) {
 1533                 /*
 1534                  * But make sure we leave the ones already masked
 1535                  * by the process, ie. remove the signal from the
 1536                  * temporary signalmask only if it wasn't already
 1537                  * in p_sigmask.
 1538                  */
 1539                 if (!SIGISMEMBER(td->td_sigmask, nfs_sig_set[i]) &&
 1540                     !SIGISMEMBER(p->p_sigacts->ps_sigignore, nfs_sig_set[i]))
 1541                         SIGDELSET(newset, nfs_sig_set[i]);
 1542         }
 1543         mtx_unlock(&p->p_sigacts->ps_mtx);
 1544         PROC_UNLOCK(p);
 1545         kern_sigprocmask(td, SIG_SETMASK, &newset, oldset, 0);
 1546 }
 1547 
 1548 void
 1549 nfs_restore_sigmask(struct thread *td, sigset_t *set)
 1550 {
 1551         if (td == NULL)
 1552                 td = curthread; /* XXX */
 1553         kern_sigprocmask(td, SIG_SETMASK, set, NULL, 0);
 1554 }
 1555 
 1556 /*
 1557  * NFS wrapper to msleep(), that shoves a new p_sigmask and restores the
 1558  * old one after msleep() returns.
 1559  */
 1560 int
 1561 nfs_msleep(struct thread *td, void *ident, struct mtx *mtx, int priority, char *wmesg, int timo)
 1562 {
 1563         sigset_t oldset;
 1564         int error;
 1565         struct proc *p;
 1566         
 1567         if ((priority & PCATCH) == 0)
 1568                 return msleep(ident, mtx, priority, wmesg, timo);
 1569         if (td == NULL)
 1570                 td = curthread; /* XXX */
 1571         nfs_set_sigmask(td, &oldset);
 1572         error = msleep(ident, mtx, priority, wmesg, timo);
 1573         nfs_restore_sigmask(td, &oldset);
 1574         p = td->td_proc;
 1575         return (error);
 1576 }
 1577 
 1578 /*
 1579  * NFS wrapper to tsleep(), that shoves a new p_sigmask and restores the
 1580  * old one after tsleep() returns.
 1581  */
 1582 int
 1583 nfs_tsleep(struct thread *td, void *ident, int priority, char *wmesg, int timo)
 1584 {
 1585         sigset_t oldset;
 1586         int error;
 1587         struct proc *p;
 1588         
 1589         if ((priority & PCATCH) == 0)
 1590                 return tsleep(ident, priority, wmesg, timo);
 1591         if (td == NULL)
 1592                 td = curthread; /* XXX */
 1593         nfs_set_sigmask(td, &oldset);
 1594         error = tsleep(ident, priority, wmesg, timo);
 1595         nfs_restore_sigmask(td, &oldset);
 1596         p = td->td_proc;
 1597         return (error);
 1598 }
 1599 
 1600 /*
 1601  * Test for a termination condition pending on the process.
 1602  * This is used for NFSMNT_INT mounts.
 1603  */
 1604 int
 1605 nfs_sigintr(struct nfsmount *nmp, struct nfsreq *rep, struct thread *td)
 1606 {
 1607         struct proc *p;
 1608         sigset_t tmpset;
 1609 
 1610         if ((nmp->nm_flag & NFSMNT_NFSV4) != 0)
 1611                 return nfs4_sigintr(nmp, rep, td);
 1612         if (rep && (rep->r_flags & R_SOFTTERM))
 1613                 return (EIO);
 1614         /* Terminate all requests while attempting a forced unmount. */
 1615         if (nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF)
 1616                 return (EIO);
 1617         if (!(nmp->nm_flag & NFSMNT_INT))
 1618                 return (0);
 1619         if (td == NULL)
 1620                 return (0);
 1621 
 1622         p = td->td_proc;
 1623         PROC_LOCK(p);
 1624         tmpset = p->p_siglist;
 1625         SIGSETOR(tmpset, td->td_siglist);
 1626         SIGSETNAND(tmpset, td->td_sigmask);
 1627         mtx_lock(&p->p_sigacts->ps_mtx);
 1628         SIGSETNAND(tmpset, p->p_sigacts->ps_sigignore);
 1629         mtx_unlock(&p->p_sigacts->ps_mtx);
 1630         if ((SIGNOTEMPTY(p->p_siglist) || SIGNOTEMPTY(td->td_siglist))
 1631             && nfs_sig_pending(tmpset)) {
 1632                 PROC_UNLOCK(p);
 1633                 return (EINTR);
 1634         }
 1635         PROC_UNLOCK(p);
 1636 
 1637         return (0);
 1638 }
 1639 
 1640 /*
 1641  * Lock a socket against others.
 1642  * Necessary for STREAM sockets to ensure you get an entire rpc request/reply
 1643  * and also to avoid race conditions between the processes with nfs requests
 1644  * in progress when a reconnect is necessary.
 1645  */
 1646 int
 1647 nfs_sndlock(struct nfsreq *rep)
 1648 {
 1649         int *statep = &rep->r_nmp->nm_state;
 1650         struct thread *td;
 1651         int error, slpflag = 0, slptimeo = 0;
 1652 
 1653         td = rep->r_td;
 1654         if (rep->r_nmp->nm_flag & NFSMNT_INT)
 1655                 slpflag = PCATCH;
 1656         while (*statep & NFSSTA_SNDLOCK) {
 1657                 error = nfs_sigintr(rep->r_nmp, rep, td);
 1658                 if (error)
 1659                         return (error);
 1660                 *statep |= NFSSTA_WANTSND;
 1661                 (void) tsleep(statep, slpflag | (PZERO - 1),
 1662                         "nfsndlck", slptimeo);
 1663                 if (slpflag == PCATCH) {
 1664                         slpflag = 0;
 1665                         slptimeo = 2 * hz;
 1666                 }
 1667         }
 1668         *statep |= NFSSTA_SNDLOCK;
 1669         return (0);
 1670 }
 1671 
 1672 /*
 1673  * Unlock the stream socket for others.
 1674  */
 1675 void
 1676 nfs_sndunlock(struct nfsreq *rep)
 1677 {
 1678         int *statep = &rep->r_nmp->nm_state;
 1679 
 1680         if ((*statep & NFSSTA_SNDLOCK) == 0)
 1681                 panic("nfs sndunlock");
 1682         *statep &= ~NFSSTA_SNDLOCK;
 1683         if (*statep & NFSSTA_WANTSND) {
 1684                 *statep &= ~NFSSTA_WANTSND;
 1685                 wakeup(statep);
 1686         }
 1687 }
 1688 
 1689 /*
 1690  *      nfs_realign:
 1691  *
 1692  *      Check for badly aligned mbuf data and realign by copying the unaligned
 1693  *      portion of the data into a new mbuf chain and freeing the portions
 1694  *      of the old chain that were replaced.
 1695  *
 1696  *      We cannot simply realign the data within the existing mbuf chain
 1697  *      because the underlying buffers may contain other rpc commands and
 1698  *      we cannot afford to overwrite them.
 1699  *
 1700  *      We would prefer to avoid this situation entirely.  The situation does
 1701  *      not occur with NFS/UDP and is supposed to only occassionally occur
 1702  *      with TCP.  Use vfs.nfs.realign_count and realign_test to check this.
 1703  *
 1704  */
 1705 static int
 1706 nfs_realign(struct mbuf **pm, int hsiz)
 1707 {
 1708         struct mbuf *m;
 1709         struct mbuf *n = NULL;
 1710         int off = 0;
 1711 
 1712         ++nfs_realign_test;
 1713         while ((m = *pm) != NULL) {
 1714                 if ((m->m_len & 0x3) || (mtod(m, intptr_t) & 0x3)) {
 1715                         MGET(n, M_DONTWAIT, MT_DATA);
 1716                         if (n == NULL)
 1717                                 return (ENOMEM);
 1718                         if (m->m_len >= MINCLSIZE) {
 1719                                 MCLGET(n, M_DONTWAIT);
 1720                                 if (n->m_ext.ext_buf == NULL) {
 1721                                         m_freem(n);
 1722                                         return (ENOMEM);
 1723                                 }
 1724                         }
 1725                         n->m_len = 0;
 1726                         break;
 1727                 }
 1728                 pm = &m->m_next;
 1729         }
 1730         /*
 1731          * If n is non-NULL, loop on m copying data, then replace the
 1732          * portion of the chain that had to be realigned.
 1733          */
 1734         if (n != NULL) {
 1735                 ++nfs_realign_count;
 1736                 while (m) {
 1737                         m_copyback(n, off, m->m_len, mtod(m, caddr_t));
 1738                         off += m->m_len;
 1739                         m = m->m_next;
 1740                 }
 1741                 m_freem(*pm);
 1742                 *pm = n;
 1743         }
 1744         return (0);
 1745 }
 1746 
 1747 
 1748 static int
 1749 nfs_msg(struct thread *td, const char *server, const char *msg, int error)
 1750 {
 1751         struct proc *p;
 1752 
 1753         GIANT_REQUIRED; /* tprintf */
 1754 
 1755         p = td ? td->td_proc : NULL;
 1756         if (error) {
 1757                 tprintf(p, LOG_INFO, "nfs server %s: %s, error %d\n", server,
 1758                     msg, error);
 1759         } else {
 1760                 tprintf(p, LOG_INFO, "nfs server %s: %s\n", server, msg);
 1761         }
 1762         return (0);
 1763 }
 1764 
 1765 void
 1766 nfs_down(rep, nmp, td, msg, error, flags)
 1767         struct nfsreq *rep;
 1768         struct nfsmount *nmp;
 1769         struct thread *td;
 1770         const char *msg;
 1771         int error, flags;
 1772 {
 1773 
 1774         GIANT_REQUIRED; /* nfs_msg */
 1775 
 1776         if (nmp == NULL)
 1777                 return;
 1778         if ((flags & NFSSTA_TIMEO) && !(nmp->nm_state & NFSSTA_TIMEO)) {
 1779                 vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
 1780                     VQ_NOTRESP, 0);
 1781                 nmp->nm_state |= NFSSTA_TIMEO;
 1782         }
 1783 #ifdef NFSSTA_LOCKTIMEO
 1784         if ((flags & NFSSTA_LOCKTIMEO) && !(nmp->nm_state & NFSSTA_LOCKTIMEO)) {
 1785                 vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
 1786                     VQ_NOTRESPLOCK, 0);
 1787                 nmp->nm_state |= NFSSTA_LOCKTIMEO;
 1788         }
 1789 #endif
 1790         if (rep)
 1791                 rep->r_flags |= R_TPRINTFMSG;
 1792         nfs_msg(td, nmp->nm_mountp->mnt_stat.f_mntfromname, msg, error);
 1793 }
 1794 
 1795 void
 1796 nfs_up(rep, nmp, td, msg, flags)
 1797         struct nfsreq *rep;
 1798         struct nfsmount *nmp;
 1799         struct thread *td;
 1800         const char *msg;
 1801         int flags;
 1802 {
 1803 
 1804         GIANT_REQUIRED; /* nfs_msg */
 1805 
 1806         if (nmp == NULL)
 1807                 return;
 1808         if ((rep == NULL) || (rep->r_flags & R_TPRINTFMSG) != 0)
 1809                 nfs_msg(td, nmp->nm_mountp->mnt_stat.f_mntfromname, msg, 0);
 1810         if ((flags & NFSSTA_TIMEO) && (nmp->nm_state & NFSSTA_TIMEO)) {
 1811                 nmp->nm_state &= ~NFSSTA_TIMEO;
 1812                 vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
 1813                     VQ_NOTRESP, 1);
 1814         }
 1815 #ifdef NFSSTA_LOCKTIMEO
 1816         if ((flags & NFSSTA_LOCKTIMEO) && (nmp->nm_state & NFSSTA_LOCKTIMEO)) {
 1817                 nmp->nm_state &= ~NFSSTA_LOCKTIMEO;
 1818                 vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
 1819                     VQ_NOTRESPLOCK, 1);
 1820         }
 1821 #endif
 1822 }
 1823 

Cache object: dbc24f754b4f4331df9e07776bd465fc


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.