FreeBSD/Linux Kernel Cross Reference
sys/nfsclient/nfs_socket.c

    1 /*-
    2  * Copyright (c) 1989, 1991, 1993, 1995
    3  *      The Regents of the University of California.  All rights reserved.
    4  *
    5  * This code is derived from software contributed to Berkeley by
    6  * Rick Macklem at The University of Guelph.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  * 4. Neither the name of the University nor the names of its contributors
   17  *    may be used to endorse or promote products derived from this software
   18  *    without specific prior written permission.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   30  * SUCH DAMAGE.
   31  *
   32  *      @(#)nfs_socket.c        8.5 (Berkeley) 3/30/95
   33  */
   34 
   35 #include <sys/cdefs.h>
   36 __FBSDID("$FreeBSD: releng/6.1/sys/nfsclient/nfs_socket.c 158179 2006-04-30 16:44:43Z cvs2svn $");
   37 
   38 /*
   39  * Socket operations for use by nfs
   40  */
   41 
   42 #include "opt_inet6.h"
   43 
   44 #include <sys/param.h>
   45 #include <sys/systm.h>
   46 #include <sys/kernel.h>
   47 #include <sys/lock.h>
   48 #include <sys/malloc.h>
   49 #include <sys/mbuf.h>
   50 #include <sys/mount.h>
   51 #include <sys/mutex.h>
   52 #include <sys/proc.h>
   53 #include <sys/protosw.h>
   54 #include <sys/signalvar.h>
   55 #include <sys/syscallsubr.h>
   56 #include <sys/socket.h>
   57 #include <sys/socketvar.h>
   58 #include <sys/sysctl.h>
   59 #include <sys/syslog.h>
   60 #include <sys/vnode.h>
   61 
   62 #include <netinet/in.h>
   63 #include <netinet/tcp.h>
   64 
   65 #include <rpc/rpcclnt.h>
   66 
   67 #include <nfs/rpcv2.h>
   68 #include <nfs/nfsproto.h>
   69 #include <nfsclient/nfs.h>
   70 #include <nfs/xdr_subs.h>
   71 #include <nfsclient/nfsm_subs.h>
   72 #include <nfsclient/nfsmount.h>
   73 #include <nfsclient/nfsnode.h>
   74 
   75 #include <nfs4client/nfs4.h>
   76 
   77 #define TRUE    1
   78 #define FALSE   0
   79 
   80 extern u_int32_t nfs_xid;
   81 
   82 /*
    83  * Estimate rto for an nfs rpc sent via an unreliable datagram.
   84  * Use the mean and mean deviation of rtt for the appropriate type of rpc
   85  * for the frequent rpcs and a default for the others.
   86  * The justification for doing "other" this way is that these rpcs
    87  * happen so infrequently that timer estimates would probably be stale.
   88  * Also, since many of these rpcs are
   89  * non-idempotent, a conservative timeout is desired.
   90  * getattr, lookup - A+2D
   91  * read, write     - A+4D
   92  * other           - nm_timeo
   93  */
   94 #define NFS_RTO(n, t) \
   95         ((t) == 0 ? (n)->nm_timeo : \
   96          ((t) < 3 ? \
   97           (((((n)->nm_srtt[t-1] + 3) >> 2) + (n)->nm_sdrtt[t-1] + 1) >> 1) : \
   98           ((((n)->nm_srtt[t-1] + 7) >> 3) + (n)->nm_sdrtt[t-1] + 1)))
   99 #define NFS_SRTT(r)     (r)->r_nmp->nm_srtt[proct[(r)->r_procnum] - 1]
  100 #define NFS_SDRTT(r)    (r)->r_nmp->nm_sdrtt[proct[(r)->r_procnum] - 1]
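       /*
        * Editorial sketch (not part of the original file): how the scaled
        * timers above combine.  nm_srtt[] is kept scaled by 8 and
        * nm_sdrtt[] by 4 (see the gain-0.125/0.25 updates in
        * nfs_clnt_match_xid() below), so for a getattr rpc (timer class 1)
        * with smoothed rtt A = 3 ticks (nm_srtt[0] == 24) and deviation
        * D = 2 ticks (nm_sdrtt[0] == 8):
        *
        *   NFS_RTO(nmp, 1) = (((24 + 3) >> 2) + 8 + 1) >> 1
        *                   = (6 + 8 + 1) >> 1 = 7 ticks,
        *
        * which is the advertised A+2D.  The read/write branch keeps the
        * full deviation ((((srtt) + 7) >> 3) + sdrtt + 1), giving
        * roughly A+4D.
        */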
  101 
  102 /*
  103  * Defines which timer to use for the procnum.
  104  * 0 - default
  105  * 1 - getattr
  106  * 2 - lookup
  107  * 3 - read
  108  * 4 - write
  109  */
  110 static int proct[NFS_NPROCS] = {
  111         0, 1, 0, 2, 1, 3, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 0, 0, 0, 0, 0,
  112 };
  113 
  114 static int      nfs_realign_test;
  115 static int      nfs_realign_count;
  116 static int      nfs_bufpackets = 4;
  117 static int      nfs_reconnects;
  118 static int      nfs3_jukebox_delay = 10;
  119 
  120 SYSCTL_DECL(_vfs_nfs);
  121 
  122 SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_test, CTLFLAG_RW, &nfs_realign_test, 0, "");
  123 SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_count, CTLFLAG_RW, &nfs_realign_count, 0, "");
  124 SYSCTL_INT(_vfs_nfs, OID_AUTO, bufpackets, CTLFLAG_RW, &nfs_bufpackets, 0, "");
  125 SYSCTL_INT(_vfs_nfs, OID_AUTO, reconnects, CTLFLAG_RD, &nfs_reconnects, 0,
  126     "number of times the nfs client has had to reconnect");
  127 SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs3_jukebox_delay, CTLFLAG_RW, &nfs3_jukebox_delay, 0,
  128     "number of seconds to delay a retry after receiving EJUKEBOX");
  129 
  130 
  131 /*
  132  * There is a congestion window for outstanding rpcs maintained per mount
  133  * point. The cwnd size is adjusted in roughly the way that:
   134  * Van Jacobson, Congestion Avoidance and Control, In "Proceedings of
  135  * SIGCOMM '88". ACM, August 1988.
  136  * describes for TCP. The cwnd size is chopped in half on a retransmit timeout
  137  * and incremented by 1/cwnd when each rpc reply is received and a full cwnd
  138  * of rpcs is in progress.
  139  * (The sent count and cwnd are scaled for integer arith.)
  140  * Variants of "slow start" were tried and were found to be too much of a
  141  * performance hit (ave. rtt 3 times larger),
  142  * I suspect due to the large rtt that nfs rpcs have.
  143  */
  144 #define NFS_CWNDSCALE   256
  145 #define NFS_MAXCWND     (NFS_CWNDSCALE * 32)
  146 #define NFS_NBACKOFF    8
  147 static int nfs_backoff[NFS_NBACKOFF] = { 2, 4, 8, 16, 32, 64, 128, 256, };
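       /*
        * Editorial sketch (not in the original source): with
        * NFS_CWNDSCALE == 256, one rpc "slot" in the window is 256 units
        * and NFS_MAXCWND allows 32 outstanding rpcs.  Each reply that
        * arrives while a full window is in flight makes
        * nfs_clnt_match_xid() below do
        *
        *   nm_cwnd += (NFS_CWNDSCALE * NFS_CWNDSCALE +
        *       (nm_cwnd >> 1)) / nm_cwnd;
        *
        * e.g. nm_cwnd == 4096 (16 rpcs) grows by (65536 + 2048) / 4096
        * == 16 units per reply, i.e. about one extra rpc slot per window
        * of replies (additive increase).  A retransmit timeout in
        * nfs_timer() halves nm_cwnd (multiplicative decrease), clamped
        * below at one slot and above at NFS_MAXCWND.
        */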
  148 struct callout  nfs_callout;
  149 
  150 static int      nfs_msg(struct thread *, const char *, const char *, int);
  151 static int      nfs_realign(struct mbuf **pm, int hsiz);
  152 static int      nfs_reply(struct nfsreq *);
  153 static void     nfs_softterm(struct nfsreq *rep);
  154 static int      nfs_reconnect(struct nfsreq *rep);
  155 static void nfs_clnt_tcp_soupcall(struct socket *so, void *arg, int waitflag);
  156 static void nfs_clnt_udp_soupcall(struct socket *so, void *arg, int waitflag);
  157 static void wakeup_nfsreq(struct nfsreq *req);
  158 
  159 extern struct mtx nfs_reqq_mtx;
  160 extern struct mtx nfs_reply_mtx;
  161 
  162 /*
  163  * Initialize sockets and congestion for a new NFS connection.
  164  * We do not free the sockaddr if error.
  165  */
  166 int
  167 nfs_connect(struct nfsmount *nmp, struct nfsreq *rep)
  168 {
  169         struct socket *so;
  170         int error, rcvreserve, sndreserve;
  171         int pktscale;
  172         struct sockaddr *saddr;
  173         struct thread *td = &thread0; /* only used for socreate and sobind */
  174 
  175         NET_ASSERT_GIANT();
  176 
  177         if (nmp->nm_sotype == SOCK_STREAM) {
  178                 mtx_lock(&nmp->nm_nfstcpstate.mtx);
  179                 nmp->nm_nfstcpstate.flags |= NFS_TCP_EXPECT_RPCMARKER;
  180                 nmp->nm_nfstcpstate.rpcresid = 0;
  181                 mtx_unlock(&nmp->nm_nfstcpstate.mtx);
  182         }       
  183         nmp->nm_so = NULL;
  184         saddr = nmp->nm_nam;
  185         error = socreate(saddr->sa_family, &nmp->nm_so, nmp->nm_sotype,
  186                 nmp->nm_soproto, nmp->nm_mountp->mnt_cred, td);
  187         if (error)
  188                 goto bad;
  189         so = nmp->nm_so;
  190         nmp->nm_soflags = so->so_proto->pr_flags;
  191 
  192         /*
  193          * Some servers require that the client port be a reserved port number.
  194          */
  195         if (nmp->nm_flag & NFSMNT_RESVPORT) {
  196                 struct sockopt sopt;
  197                 int ip, ip2, len;
  198                 struct sockaddr_in6 ssin;
  199                 struct sockaddr *sa;
  200 
  201                 bzero(&sopt, sizeof sopt);
  202                 switch(saddr->sa_family) {
  203                 case AF_INET:
  204                         sopt.sopt_level = IPPROTO_IP;
  205                         sopt.sopt_name = IP_PORTRANGE;
  206                         ip = IP_PORTRANGE_LOW;
  207                         ip2 = IP_PORTRANGE_DEFAULT;
  208                         len = sizeof (struct sockaddr_in);
  209                         break;
  210 #ifdef INET6
  211                 case AF_INET6:
  212                         sopt.sopt_level = IPPROTO_IPV6;
  213                         sopt.sopt_name = IPV6_PORTRANGE;
  214                         ip = IPV6_PORTRANGE_LOW;
  215                         ip2 = IPV6_PORTRANGE_DEFAULT;
  216                         len = sizeof (struct sockaddr_in6);
  217                         break;
  218 #endif
  219                 default:
  220                         goto noresvport;
  221                 }
  222                 sa = (struct sockaddr *)&ssin;
  223                 bzero(sa, len);
  224                 sa->sa_len = len;
  225                 sa->sa_family = saddr->sa_family;
  226                 sopt.sopt_dir = SOPT_SET;
  227                 sopt.sopt_val = (void *)&ip;
  228                 sopt.sopt_valsize = sizeof(ip);
  229                 error = sosetopt(so, &sopt);
  230                 if (error)
  231                         goto bad;
  232                 error = sobind(so, sa, td);
  233                 if (error)
  234                         goto bad;
  235                 ip = ip2;
  236                 error = sosetopt(so, &sopt);
  237                 if (error)
  238                         goto bad;
  239         noresvport: ;
  240         }
  241 
  242         /*
  243          * Protocols that do not require connections may be optionally left
  244          * unconnected for servers that reply from a port other than NFS_PORT.
  245          */
  246         if (nmp->nm_flag & NFSMNT_NOCONN) {
  247                 if (nmp->nm_soflags & PR_CONNREQUIRED) {
  248                         error = ENOTCONN;
  249                         goto bad;
  250                 }
  251         } else {
  252                 error = soconnect(so, nmp->nm_nam, td);
  253                 if (error)
  254                         goto bad;
  255 
  256                 /*
  257                  * Wait for the connection to complete. Cribbed from the
  258                  * connect system call but with the wait timing out so
  259                  * that interruptible mounts don't hang here for a long time.
  260                  */
  261                 SOCK_LOCK(so);
  262                 while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
  263                         (void) msleep(&so->so_timeo, SOCK_MTX(so),
  264                             PSOCK, "nfscon", 2 * hz);
  265                         if ((so->so_state & SS_ISCONNECTING) &&
  266                             so->so_error == 0 && rep &&
  267                             (error = nfs_sigintr(nmp, rep, rep->r_td)) != 0) {
  268                                 so->so_state &= ~SS_ISCONNECTING;
  269                                 SOCK_UNLOCK(so);
  270                                 goto bad;
  271                         }
  272                 }
  273                 if (so->so_error) {
  274                         error = so->so_error;
  275                         so->so_error = 0;
  276                         SOCK_UNLOCK(so);
  277                         goto bad;
  278                 }
  279                 SOCK_UNLOCK(so);
  280         }
  281         so->so_rcv.sb_timeo = 12 * hz;
  282         so->so_snd.sb_timeo = 5 * hz;
  283 
  284         /*
  285          * Get buffer reservation size from sysctl, but impose reasonable
  286          * limits.
  287          */
  288         pktscale = nfs_bufpackets;
  289         if (pktscale < 2)
  290                 pktscale = 2;
  291         if (pktscale > 64)
  292                 pktscale = 64;
  293 
  294         if (nmp->nm_sotype == SOCK_DGRAM) {
  295                 sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * pktscale;
  296                 rcvreserve = (max(nmp->nm_rsize, nmp->nm_readdirsize) +
  297                     NFS_MAXPKTHDR) * pktscale;
  298         } else if (nmp->nm_sotype == SOCK_SEQPACKET) {
  299                 sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * pktscale;
  300                 rcvreserve = (max(nmp->nm_rsize, nmp->nm_readdirsize) +
  301                     NFS_MAXPKTHDR) * pktscale;
  302         } else {
  303                 if (nmp->nm_sotype != SOCK_STREAM)
  304                         panic("nfscon sotype");
  305                 if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
  306                         struct sockopt sopt;
  307                         int val;
  308 
  309                         bzero(&sopt, sizeof sopt);
  310                         sopt.sopt_dir = SOPT_SET;
  311                         sopt.sopt_level = SOL_SOCKET;
  312                         sopt.sopt_name = SO_KEEPALIVE;
  313                         sopt.sopt_val = &val;
  314                         sopt.sopt_valsize = sizeof val;
  315                         val = 1;
  316                         sosetopt(so, &sopt);
  317                 }
  318                 if (so->so_proto->pr_protocol == IPPROTO_TCP) {
  319                         struct sockopt sopt;
  320                         int val;
  321 
  322                         bzero(&sopt, sizeof sopt);
  323                         sopt.sopt_dir = SOPT_SET;
  324                         sopt.sopt_level = IPPROTO_TCP;
  325                         sopt.sopt_name = TCP_NODELAY;
  326                         sopt.sopt_val = &val;
  327                         sopt.sopt_valsize = sizeof val;
  328                         val = 1;
  329                         sosetopt(so, &sopt);
  330                 }
  331                 sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR +
  332                     sizeof (u_int32_t)) * pktscale;
  333                 rcvreserve = (nmp->nm_rsize + NFS_MAXPKTHDR +
  334                     sizeof (u_int32_t)) * pktscale;
  335         }
  336         error = soreserve(so, sndreserve, rcvreserve);
  337         if (error)
  338                 goto bad;
  339         SOCKBUF_LOCK(&so->so_rcv);
  340         so->so_rcv.sb_flags |= SB_NOINTR;
  341         so->so_upcallarg = (caddr_t)nmp;
  342         if (so->so_type == SOCK_STREAM)
  343                 so->so_upcall = nfs_clnt_tcp_soupcall;
  344         else    
  345                 so->so_upcall = nfs_clnt_udp_soupcall;
  346         so->so_rcv.sb_flags |= SB_UPCALL;
  347         SOCKBUF_UNLOCK(&so->so_rcv);
  348         SOCKBUF_LOCK(&so->so_snd);
  349         so->so_snd.sb_flags |= SB_NOINTR;
  350         SOCKBUF_UNLOCK(&so->so_snd);
  351 
  352         /* Initialize other non-zero congestion variables */
  353         nmp->nm_srtt[0] = nmp->nm_srtt[1] = nmp->nm_srtt[2] =
  354                 nmp->nm_srtt[3] = (NFS_TIMEO << 3);
  355         nmp->nm_sdrtt[0] = nmp->nm_sdrtt[1] = nmp->nm_sdrtt[2] =
  356                 nmp->nm_sdrtt[3] = 0;
  357         nmp->nm_cwnd = NFS_MAXCWND / 2;     /* Initial send window */
  358         nmp->nm_sent = 0;
  359         nmp->nm_timeouts = 0;
  360         return (0);
  361 
  362 bad:
  363         nfs_disconnect(nmp);
  364         return (error);
  365 }
  366 
  367 /*
  368  * Reconnect routine:
  369  * Called when a connection is broken on a reliable protocol.
  370  * - clean up the old socket
  371  * - nfs_connect() again
  372  * - set R_MUSTRESEND for all outstanding requests on mount point
  373  * If this fails the mount point is DEAD!
  374  * nb: Must be called with the nfs_sndlock() set on the mount point.
  375  */
  376 static int
  377 nfs_reconnect(struct nfsreq *rep)
  378 {
  379         struct nfsreq *rp;
  380         struct nfsmount *nmp = rep->r_nmp;
  381         int error;
  382 
  383         nfs_reconnects++;
  384         nfs_disconnect(nmp);
  385         while ((error = nfs_connect(nmp, rep)) != 0) {
  386                 if (error == ERESTART)
  387                         error = EINTR;
  388                 if (error == EIO || error == EINTR)
  389                         return (error);
  390                 (void) tsleep(&lbolt, PSOCK, "nfscon", 0);
  391         }
  392 
  393         /*
   394  * Clear the FORCE_RECONNECT flag only after the connect
   395  * succeeds, to prevent races between multiple processes
  396          * waiting on the mountpoint where the connection is being
  397          * torn down. The first one to acquire the sndlock will 
  398          * retry the connection. The others block on the sndlock
  399          * until the connection is established successfully, and 
  400          * then re-transmit the request.
  401          */
  402         mtx_lock(&nmp->nm_nfstcpstate.mtx);
  403         nmp->nm_nfstcpstate.flags &= ~NFS_TCP_FORCE_RECONNECT;
  404         mtx_unlock(&nmp->nm_nfstcpstate.mtx);   
  405 
  406         /*
  407          * Loop through outstanding request list and fix up all requests
  408          * on old socket.
  409          */
  410         mtx_lock(&nfs_reqq_mtx);
  411         TAILQ_FOREACH(rp, &nfs_reqq, r_chain) {
  412                 if (rp->r_nmp == nmp)
  413                         rp->r_flags |= R_MUSTRESEND;
  414         }
  415         mtx_unlock(&nfs_reqq_mtx);
  416         return (0);
  417 }
  418 
  419 /*
  420  * NFS disconnect. Clean up and unlink.
  421  */
  422 void
  423 nfs_disconnect(struct nfsmount *nmp)
  424 {
  425         struct socket *so;
  426 
  427         NET_ASSERT_GIANT();
  428 
  429         if (nmp->nm_so) {
  430                 so = nmp->nm_so;
  431                 nmp->nm_so = NULL;
  432                 SOCKBUF_LOCK(&so->so_rcv);
  433                 so->so_upcallarg = NULL;
  434                 so->so_upcall = NULL;
  435                 so->so_rcv.sb_flags &= ~SB_UPCALL;
  436                 SOCKBUF_UNLOCK(&so->so_rcv);
  437                 soshutdown(so, SHUT_WR);
  438                 soclose(so);
  439         }
  440 }
  441 
  442 void
  443 nfs_safedisconnect(struct nfsmount *nmp)
  444 {
  445         struct nfsreq dummyreq;
  446 
  447         bzero(&dummyreq, sizeof(dummyreq));
  448         dummyreq.r_nmp = nmp;
  449         nfs_disconnect(nmp);
  450 }
  451 
  452 /*
  453  * This is the nfs send routine. For connection based socket types, it
  454  * must be called with an nfs_sndlock() on the socket.
  455  * - return EINTR if the RPC is terminated, 0 otherwise
  456  * - set R_MUSTRESEND if the send fails for any reason
  457  * - do any cleanup required by recoverable socket errors (?)
  458  */
  459 int
  460 nfs_send(struct socket *so, struct sockaddr *nam, struct mbuf *top,
  461     struct nfsreq *rep)
  462 {
  463         struct sockaddr *sendnam;
  464         int error, error2, soflags, flags;
  465 
  466         NET_ASSERT_GIANT();
  467 
  468         KASSERT(rep, ("nfs_send: called with rep == NULL"));
  469 
  470         error = nfs_sigintr(rep->r_nmp, rep, rep->r_td);
  471         if (error) {
  472                 m_freem(top);
  473                 return (error);
  474         }
  475         if ((so = rep->r_nmp->nm_so) == NULL) {
  476                 rep->r_flags |= R_MUSTRESEND;
  477                 m_freem(top);
  478                 return (0);
  479         }
  480         rep->r_flags &= ~R_MUSTRESEND;
  481         soflags = rep->r_nmp->nm_soflags;
  482 
  483         if ((soflags & PR_CONNREQUIRED) || (so->so_state & SS_ISCONNECTED))
  484                 sendnam = NULL;
  485         else
  486                 sendnam = nam;
  487         if (so->so_type == SOCK_SEQPACKET)
  488                 flags = MSG_EOR;
  489         else
  490                 flags = 0;
  491 
  492         error = so->so_proto->pr_usrreqs->pru_sosend(so, sendnam, 0, top, 0,
  493                                                      flags, curthread /*XXX*/);
  494         if (error == ENOBUFS && so->so_type == SOCK_DGRAM) {
  495                 error = 0;
  496                 rep->r_flags |= R_MUSTRESEND;
  497         }
  498 
  499         if (error) {
  500                 /*
  501                  * Don't report EPIPE errors on nfs sockets.
  502                  * These can be due to idle tcp mounts which will be closed by
  503                  * netapp, solaris, etc. if left idle too long.
  504                  */
  505                 if (error != EPIPE) {
  506                         log(LOG_INFO, "nfs send error %d for server %s\n",
  507                             error,
  508                             rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
  509                 }
  510                 /*
  511                  * Deal with errors for the client side.
  512                  */
  513                 error2 = NFS_SIGREP(rep);
  514                 if (error2)
  515                         error = error2;
  516                 else
  517                         rep->r_flags |= R_MUSTRESEND;
  518 
  519                 /*
  520                  * Handle any recoverable (soft) socket errors here. (?)
  521                  */
  522                 if (error != EINTR && error != ERESTART && error != EIO &&
  523                         error != EWOULDBLOCK && error != EPIPE)
  524                         error = 0;
  525         }
  526         return (error);
  527 }
  528 
  529 int
  530 nfs_reply(struct nfsreq *rep)
  531 {
  532         register struct socket *so;
  533         register struct mbuf *m;
  534         int error = 0, sotype, slpflag;
  535 
  536         NET_ASSERT_GIANT();
  537 
  538         sotype = rep->r_nmp->nm_sotype;
  539         /*
  540          * For reliable protocols, lock against other senders/receivers
  541          * in case a reconnect is necessary.
  542          */
  543         if (sotype != SOCK_DGRAM) {
  544                 error = nfs_sndlock(rep);
  545                 if (error)
  546                         return (error);
  547 tryagain:
  548                 if (rep->r_mrep) {
  549                         nfs_sndunlock(rep);
  550                         return (0);
  551                 }
  552                 if (rep->r_flags & R_SOFTTERM) {
  553                         nfs_sndunlock(rep);
  554                         return (EINTR);
  555                 }
  556                 so = rep->r_nmp->nm_so;
  557                 mtx_lock(&rep->r_nmp->nm_nfstcpstate.mtx);
  558                 if (!so || 
  559                     (rep->r_nmp->nm_nfstcpstate.flags & NFS_TCP_FORCE_RECONNECT)) {
  560                         mtx_unlock(&rep->r_nmp->nm_nfstcpstate.mtx);
  561                         error = nfs_reconnect(rep);
  562                         if (error) {
  563                                 nfs_sndunlock(rep);
  564                                 return (error);
  565                         }
  566                         goto tryagain;
  567                 } else
  568                         mtx_unlock(&rep->r_nmp->nm_nfstcpstate.mtx);
  569                 while (rep->r_flags & R_MUSTRESEND) {
  570                         m = m_copym(rep->r_mreq, 0, M_COPYALL, M_WAIT);
  571                         nfsstats.rpcretries++;
  572                         error = nfs_send(so, rep->r_nmp->nm_nam, m, rep);
  573                         if (error) {
  574                                 if (error == EINTR || error == ERESTART ||
  575                                     (error = nfs_reconnect(rep)) != 0) {
  576                                         nfs_sndunlock(rep);
  577                                         return (error);
  578                                 }
  579                                 goto tryagain;
  580                         }
  581                 }
  582                 nfs_sndunlock(rep);
  583         }
  584         slpflag = 0;
  585         if (rep->r_nmp->nm_flag & NFSMNT_INT)
  586                 slpflag = PCATCH;
  587         mtx_lock(&nfs_reply_mtx);
  588         while ((rep->r_mrep == NULL) && (error == 0) && 
  589                ((rep->r_flags & R_SOFTTERM) == 0) &&
  590                ((sotype == SOCK_DGRAM) || ((rep->r_flags & R_MUSTRESEND) == 0)))
  591                 error = msleep((caddr_t)rep, &nfs_reply_mtx, 
  592                                slpflag | (PZERO - 1), "nfsreq", 0);
  593         mtx_unlock(&nfs_reply_mtx);
  594         if (error == EINTR || error == ERESTART)
  595                 /* NFS operations aren't restartable. Map ERESTART to EINTR */
  596                 return (EINTR);
  597         if (rep->r_flags & R_SOFTTERM)
  598                 /* Request was terminated because we exceeded the retries (soft mount) */
  599                 return (ETIMEDOUT);
  600         if (sotype == SOCK_STREAM) {
  601                 mtx_lock(&rep->r_nmp->nm_nfstcpstate.mtx);
  602                 if (((rep->r_nmp->nm_nfstcpstate.flags & NFS_TCP_FORCE_RECONNECT) || 
  603                      (rep->r_flags & R_MUSTRESEND))) {
  604                         mtx_unlock(&rep->r_nmp->nm_nfstcpstate.mtx);    
  605                         error = nfs_sndlock(rep);
  606                         if (error)
  607                                 return (error);
  608                         goto tryagain;
  609                 } else
  610                         mtx_unlock(&rep->r_nmp->nm_nfstcpstate.mtx);
  611         }
  612         return (error);
  613 }
  614 
  615 /*
  616  * XXX TO DO
  617  * Make nfs_realign() non-blocking. Also make nfsm_dissect() nonblocking.
  618  */
  619 static void
  620 nfs_clnt_match_xid(struct socket *so, 
  621                    struct nfsmount *nmp, 
  622                    struct mbuf *mrep)
  623 {
  624         struct mbuf *md;
  625         caddr_t dpos;
  626         u_int32_t rxid, *tl;
  627         struct nfsreq *rep;
  628         register int32_t t1;
  629         int error;
  630         
  631         /*
  632          * Search for any mbufs that are not a multiple of 4 bytes long
  633          * or with m_data not longword aligned.
  634          * These could cause pointer alignment problems, so copy them to
  635          * well aligned mbufs.
  636          */
  637         if (nfs_realign(&mrep, 5 * NFSX_UNSIGNED) == ENOMEM) {
  638                 m_freem(mrep);
  639                 nfsstats.rpcinvalid++;
  640                 return;
  641         }
  642         
  643         /*
  644          * Get the xid and check that it is an rpc reply
  645          */
  646         md = mrep;
  647         dpos = mtod(md, caddr_t);
  648         tl = nfsm_dissect_nonblock(u_int32_t *, 2*NFSX_UNSIGNED);
  649         rxid = *tl++;
  650         if (*tl != rpc_reply) {
  651                 m_freem(mrep);
  652 nfsmout:
  653                 nfsstats.rpcinvalid++;
  654                 return;
  655         }
  656 
  657         mtx_lock(&nfs_reqq_mtx);
  658         /*
  659          * Loop through the request list to match up the reply
   660          * If no match, just drop the datagram.
  661          */
  662         TAILQ_FOREACH(rep, &nfs_reqq, r_chain) {
  663                 if (rep->r_mrep == NULL && rxid == rep->r_xid) {
  664                         /* Found it.. */
  665                         rep->r_mrep = mrep;
  666                         rep->r_md = md;
  667                         rep->r_dpos = dpos;
  668                         /*
  669                          * Update congestion window.
  670                          * Do the additive increase of
  671                          * one rpc/rtt.
  672                          */
  673                         if (nmp->nm_cwnd <= nmp->nm_sent) {
  674                                 nmp->nm_cwnd +=
  675                                         (NFS_CWNDSCALE * NFS_CWNDSCALE +
  676                                          (nmp->nm_cwnd >> 1)) / nmp->nm_cwnd;
  677                                 if (nmp->nm_cwnd > NFS_MAXCWND)
  678                                         nmp->nm_cwnd = NFS_MAXCWND;
  679                         }       
  680                         if (rep->r_flags & R_SENT) {
  681                                 rep->r_flags &= ~R_SENT;
  682                                 nmp->nm_sent -= NFS_CWNDSCALE;
  683                         }
  684                         /*
  685                          * Update rtt using a gain of 0.125 on the mean
  686                          * and a gain of 0.25 on the deviation.
  687                          */
  688                         if (rep->r_flags & R_TIMING) {
  689                                 /*
  690                                  * Since the timer resolution of
   691                                  * NFS_HZ is so coarse, it can often
  692                                  * result in r_rtt == 0. Since
  693                                  * r_rtt == N means that the actual
  694                                  * rtt is between N+dt and N+2-dt ticks,
  695                                  * add 1.
  696                                  */
  697                                 t1 = rep->r_rtt + 1;
  698                                 t1 -= (NFS_SRTT(rep) >> 3);
  699                                 NFS_SRTT(rep) += t1;
  700                                 if (t1 < 0)
  701                                         t1 = -t1;
  702                                 t1 -= (NFS_SDRTT(rep) >> 2);
  703                                 NFS_SDRTT(rep) += t1;
  704                         }
  705                         nmp->nm_timeouts = 0;
  706                         break;
  707                 }
  708         }
  709         /*
  710          * If not matched to a request, drop it.
  711          * If it's mine, wake up requestor.
  712          */
  713         if (rep == 0) {
  714                 nfsstats.rpcunexpected++;
  715                 m_freem(mrep);
  716         } else
  717                 wakeup_nfsreq(rep);
  718         mtx_unlock(&nfs_reqq_mtx);
  719 }
  720 
  721 /* 
  722  * The wakeup of the requestor should be done under the mutex
  723  * to avoid potential missed wakeups.
  724  */
  725 static void 
  726 wakeup_nfsreq(struct nfsreq *req)
  727 {
  728         mtx_lock(&nfs_reply_mtx);
  729         wakeup((caddr_t)req);
  730         mtx_unlock(&nfs_reply_mtx);     
  731 }
  732 
  733 static void
  734 nfs_mark_for_reconnect(struct nfsmount *nmp)
  735 {
  736         struct nfsreq *rp;
  737 
  738         mtx_lock(&nmp->nm_nfstcpstate.mtx);
  739         nmp->nm_nfstcpstate.flags |= NFS_TCP_FORCE_RECONNECT;
  740         mtx_unlock(&nmp->nm_nfstcpstate.mtx);
  741         /* 
  742          * Wakeup all processes that are waiting for replies 
  743          * on this mount point. One of them does the reconnect.
  744          */
  745         mtx_lock(&nfs_reqq_mtx);
  746         TAILQ_FOREACH(rp, &nfs_reqq, r_chain) {
  747                 if (rp->r_nmp == nmp) {
  748                         rp->r_flags |= R_MUSTRESEND;
  749                         wakeup_nfsreq(rp);
  750                 }
  751         }
  752         mtx_unlock(&nfs_reqq_mtx);
  753 }
  754 
  755 static int
  756 nfstcp_readable(struct socket *so, int bytes)
  757 {
  758         int retval;
  759         
  760         SOCKBUF_LOCK(&so->so_rcv);
  761         retval = (so->so_rcv.sb_cc >= (bytes) ||
  762                   (so->so_rcv.sb_state & SBS_CANTRCVMORE) ||
  763                   so->so_error);
  764         SOCKBUF_UNLOCK(&so->so_rcv);
  765         return (retval);
  766 }
  767 
  768 #define nfstcp_marker_readable(so)      nfstcp_readable(so, sizeof(u_int32_t))
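       /*
        * Editorial note (not in the original file): the 32-bit value read
        * below is the Sun RPC record mark that RFC 1831 defines for
        * stream transports.  The high bit flags the last fragment and the
        * low 31 bits carry the fragment length, hence the
        * "ntohl(len) & ~0x80000000" below.  For example, a marker of
        * 0x800000a4 on the wire announces a final fragment of
        * 0xa4 == 164 bytes.
        */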
  769 
  770 static void
  771 nfs_clnt_tcp_soupcall(struct socket *so, void *arg, int waitflag)
  772 {
  773         struct nfsmount *nmp = (struct nfsmount *)arg;
  774         struct mbuf *mp = NULL;
  775         struct uio auio;
  776         int error;
  777         u_int32_t len;
  778         int rcvflg;
  779 
  780         /*
  781          * Don't pick any more data from the socket if we've marked the 
  782          * mountpoint for reconnect.
  783          */
  784         mtx_lock(&nmp->nm_nfstcpstate.mtx);
  785         if (nmp->nm_nfstcpstate.flags & NFS_TCP_FORCE_RECONNECT) {
  786                 mtx_unlock(&nmp->nm_nfstcpstate.mtx);           
  787                 return;
  788         } else                  
  789                 mtx_unlock(&nmp->nm_nfstcpstate.mtx);
  790         auio.uio_td = curthread;
  791         auio.uio_segflg = UIO_SYSSPACE;
  792         auio.uio_rw = UIO_READ;
  793         for ( ; ; ) {
  794                 if (nmp->nm_nfstcpstate.flags & NFS_TCP_EXPECT_RPCMARKER) {
  795                         if (!nfstcp_marker_readable(so)) {
  796                                 /* Marker is not readable */
  797                                 return;
  798                         }
  799                         auio.uio_resid = sizeof(u_int32_t);
  800                         auio.uio_iov = NULL;
  801                         auio.uio_iovcnt = 0;
  802                         mp = NULL;
  803                         rcvflg = (MSG_DONTWAIT | MSG_SOCALLBCK);
  804                         error =  so->so_proto->pr_usrreqs->pru_soreceive
  805                                 (so, (struct sockaddr **)0,
  806                                  &auio, &mp, (struct mbuf **)0, &rcvflg);
  807                         /*
   808                          * We've already tested that the socket is readable. Two cases
   809                          * here: we either read 0 bytes (the peer closed the connection),
  810                          * or got some other error. In both cases, we tear down the 
  811                          * connection.
  812                          */
  813                         if (error || auio.uio_resid > 0) {
  814                                 if (error && error != ECONNRESET) {
  815                                         log(LOG_ERR, 
  816                                             "nfs/tcp clnt: Error %d reading socket, tearing down TCP connection\n",
  817                                             error);
  818                                 }
  819                                 goto mark_reconnect;
  820                         }
  821                         if (mp == NULL)
  822                                 panic("nfs_clnt_tcp_soupcall: Got empty mbuf chain from sorecv\n");
  823                         bcopy(mtod(mp, u_int32_t *), &len, sizeof(len));
  824                         len = ntohl(len) & ~0x80000000;
  825                         m_freem(mp);
  826                         /*
  827                          * This is SERIOUS! We are out of sync with the sender
  828                          * and forcing a disconnect/reconnect is all I can do.
  829                          */
  830                         if (len > NFS_MAXPACKET || len == 0) {
  831                                 log(LOG_ERR, "%s (%d) from nfs server %s\n",
  832                                     "impossible packet length",
  833                                     len,
  834                                     nmp->nm_mountp->mnt_stat.f_mntfromname);
  835                                 goto mark_reconnect;
  836                         }
  837                         nmp->nm_nfstcpstate.rpcresid = len;
  838                         nmp->nm_nfstcpstate.flags &= ~(NFS_TCP_EXPECT_RPCMARKER);
  839                 }
  840                 /* 
  841                  * Processed RPC marker or no RPC marker to process. 
  842                  * Pull in and process data.
  843                  */
  844                 if (nmp->nm_nfstcpstate.rpcresid > 0) {
  845                         if (!nfstcp_readable(so, nmp->nm_nfstcpstate.rpcresid)) {
  846                                 /* All data not readable */
  847                                 return;
  848                         }
  849                         auio.uio_resid = nmp->nm_nfstcpstate.rpcresid;
  850                         auio.uio_iov = NULL;
  851                         auio.uio_iovcnt = 0;
  852                         mp = NULL;
  853                         rcvflg = (MSG_DONTWAIT | MSG_SOCALLBCK);
  854                         error =  so->so_proto->pr_usrreqs->pru_soreceive
  855                                 (so, (struct sockaddr **)0,
  856                                  &auio, &mp, (struct mbuf **)0, &rcvflg);
  857                         if (error || auio.uio_resid > 0) {
  858                                 if (error && error != ECONNRESET) {
  859                                         log(LOG_ERR, 
  860                                             "nfs/tcp clnt: Error %d reading socket, tearing down TCP connection\n",
  861                                             error);
  862                                 }
  863                                 goto mark_reconnect;                            
  864                         }
  865                         if (mp == NULL)
  866                                 panic("nfs_clnt_tcp_soupcall: Got empty mbuf chain from sorecv\n");
  867                         nmp->nm_nfstcpstate.rpcresid = 0;
  868                         nmp->nm_nfstcpstate.flags |= NFS_TCP_EXPECT_RPCMARKER;
  869                         /* We got the entire RPC reply. Match XIDs and wake up requestor */
  870                         nfs_clnt_match_xid(so, nmp, mp);
  871                 }
  872         }
  873 
  874 mark_reconnect:
  875         nfs_mark_for_reconnect(nmp);
  876 }
  877 
  878 static void
  879 nfs_clnt_udp_soupcall(struct socket *so, void *arg, int waitflag)
  880 {
  881         struct nfsmount *nmp = (struct nfsmount *)arg;
  882         struct uio auio;
  883         struct mbuf *mp = NULL;
  884         struct mbuf *control = NULL;
  885         int error, rcvflag;
  886 
   887         auio.uio_resid = 1000000;       /* XXX dead store; overwritten below */
  888         auio.uio_td = curthread;
  889         rcvflag = MSG_DONTWAIT;
  890         auio.uio_resid = 1000000000;
  891         do {
  892                 mp = control = NULL;
  893                 error = so->so_proto->pr_usrreqs->pru_soreceive(so,
  894                                         NULL, &auio, &mp,
  895                                         &control, &rcvflag);
  896                 if (control)
  897                         m_freem(control);
  898                 if (mp)
  899                         nfs_clnt_match_xid(so, nmp, mp);
  900         } while (mp && !error);
  901 }
  902 
  903 /*
  904  * nfs_request - goes something like this
  905  *      - fill in request struct
  906  *      - links it into list
  907  *      - calls nfs_send() for first transmit
  908  *      - calls nfs_receive() to get reply
  909  *      - break down rpc header and return with nfs reply pointed to
  910  *        by mrep or error
  911  * nb: always frees up mreq mbuf list
  912  */
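       /*
        * Illustrative call sketch (editorial; real callers build "mrest"
        * with the nfsm_* request macros from nfsm_subs.h):
        *
        *   struct mbuf *mrep, *md;
        *   caddr_t dpos;
        *   int error;
        *
        *   error = nfs_request(vp, mrest, NFSPROC_GETATTR, td, cred,
        *       &mrep, &md, &dpos);
        *   if (error == 0) {
        *           ... parse the reply out of md/dpos ...
        *           m_freem(mrep);
        *   }
        *
        * mrest is always consumed, even on error (see nb: above).
        */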
  913 int
  914 nfs_request(struct vnode *vp, struct mbuf *mrest, int procnum,
  915     struct thread *td, struct ucred *cred, struct mbuf **mrp,
  916     struct mbuf **mdp, caddr_t *dposp)
  917 {
  918         struct mbuf *mrep, *m2;
  919         struct nfsreq *rep;
  920         u_int32_t *tl;
  921         int i;
  922         struct nfsmount *nmp;
  923         struct mbuf *m, *md, *mheadend;
  924         time_t waituntil;
  925         caddr_t dpos;
  926         int s, error = 0, mrest_len, auth_len, auth_type;
  927         struct timeval now;
  928         u_int32_t *xidp;
  929 
  930         /* Reject requests while attempting a forced unmount. */
  931         if (vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF) {
  932                 m_freem(mrest);
  933                 return (ESTALE);
  934         }
  935         nmp = VFSTONFS(vp->v_mount);
  936         if ((nmp->nm_flag & NFSMNT_NFSV4) != 0)
  937                 return nfs4_request(vp, mrest, procnum, td, cred, mrp, mdp, dposp);
  938         MALLOC(rep, struct nfsreq *, sizeof(struct nfsreq), M_NFSREQ, M_WAITOK);
  939         rep->r_mrep = rep->r_md = NULL;
  940         rep->r_nmp = nmp;
  941         rep->r_vp = vp;
  942         rep->r_td = td;
  943         rep->r_procnum = procnum;
  944 
  945         getmicrouptime(&now);
  946         rep->r_lastmsg = now.tv_sec -
  947             ((nmp->nm_tprintf_delay) - (nmp->nm_tprintf_initial_delay));
  948         mrest_len = m_length(mrest, NULL);
  949 
  950         /*
  951          * Get the RPC header with authorization.
  952          */
  953         auth_type = RPCAUTH_UNIX;
  954         if (cred->cr_ngroups < 1)
  955                 panic("nfsreq nogrps");
  956         auth_len = ((((cred->cr_ngroups - 1) > nmp->nm_numgrps) ?
  957                 nmp->nm_numgrps : (cred->cr_ngroups - 1)) << 2) +
  958                 5 * NFSX_UNSIGNED;
  959         m = nfsm_rpchead(cred, nmp->nm_flag, procnum, auth_type, auth_len,
  960              mrest, mrest_len, &mheadend, &xidp);
  961 
  962         /*
  963          * For stream protocols, insert a Sun RPC Record Mark.
  964          */
  965         if (nmp->nm_sotype == SOCK_STREAM) {
  966                 M_PREPEND(m, NFSX_UNSIGNED, M_TRYWAIT);
  967                 *mtod(m, u_int32_t *) = htonl(0x80000000 |
  968                          (m->m_pkthdr.len - NFSX_UNSIGNED));
  969         }
  970         rep->r_mreq = m;
  971         rep->r_xid = *xidp;
  972 tryagain:
  973         if (nmp->nm_flag & NFSMNT_SOFT)
  974                 rep->r_retry = nmp->nm_retry;
  975         else
  976                 rep->r_retry = NFS_MAXREXMIT + 1;       /* past clip limit */
  977         rep->r_rtt = rep->r_rexmit = 0;
  978         if (proct[procnum] > 0)
  979                 rep->r_flags = R_TIMING;
  980         else
  981                 rep->r_flags = 0;
  982         rep->r_mrep = NULL;
  983 
  984         /*
  985          * Do the client side RPC.
  986          */
  987         nfsstats.rpcrequests++;
  988         /*
  989          * Chain request into list of outstanding requests. Be sure
  990          * to put it LAST so timer finds oldest requests first.
  991          */
  992         s = splsoftclock();
  993         mtx_lock(&nfs_reqq_mtx);
  994         if (TAILQ_EMPTY(&nfs_reqq))
  995                 callout_reset(&nfs_callout, nfs_ticks, nfs_timer, NULL);
  996         TAILQ_INSERT_TAIL(&nfs_reqq, rep, r_chain);
  997         mtx_unlock(&nfs_reqq_mtx);
  998 
  999         /*
 1000          * If backing off another request or avoiding congestion, don't
 1001          * send this one now but let timer do it. If not timing a request,
 1002          * do it now.
 1003          */
 1004         if (nmp->nm_so && (nmp->nm_sotype != SOCK_DGRAM ||
 1005                 (nmp->nm_flag & NFSMNT_DUMBTIMR) ||
 1006                 nmp->nm_sent < nmp->nm_cwnd)) {
 1007                 splx(s);
 1008                 error = nfs_sndlock(rep);
 1009                 if (!error) {
 1010                         m2 = m_copym(m, 0, M_COPYALL, M_TRYWAIT);
 1011                         error = nfs_send(nmp->nm_so, nmp->nm_nam, m2, rep);
 1012                         nfs_sndunlock(rep);
 1013                 }
 1014                 mtx_lock(&nfs_reqq_mtx);
 1015                 if (!error && (rep->r_flags & R_MUSTRESEND) == 0) {
 1016                         nmp->nm_sent += NFS_CWNDSCALE;
 1017                         rep->r_flags |= R_SENT;
 1018                 }
 1019                 mtx_unlock(&nfs_reqq_mtx);
 1020         } else {
 1021                 splx(s);
 1022                 rep->r_rtt = -1;
 1023         }
 1024 
 1025         /*
 1026          * Wait for the reply from our send or the timer's.
 1027          */
 1028         if (!error || error == EPIPE)
 1029                 error = nfs_reply(rep);
 1030 
 1031         /*
 1032          * RPC done, unlink the request.
 1033          */
 1034         s = splsoftclock();
 1035         mtx_lock(&nfs_reqq_mtx);
 1036         TAILQ_REMOVE(&nfs_reqq, rep, r_chain);
 1037         if (TAILQ_EMPTY(&nfs_reqq))
 1038                 callout_stop(&nfs_callout);
 1039         /*
 1040          * Decrement the outstanding request count.
 1041          */
 1042         if (rep->r_flags & R_SENT) {
 1043                 rep->r_flags &= ~R_SENT;        /* paranoia */
 1044                 nmp->nm_sent -= NFS_CWNDSCALE;
 1045         }
 1046         mtx_unlock(&nfs_reqq_mtx);
 1047         splx(s);
 1048 
 1049         /*
  1050          * If there was a successful reply and a tprintf msg was
  1051          * printed earlier, tprintf a response ("is alive again").
 1052          */
 1053         if (!error) {
 1054                 mtx_lock(&Giant);
 1055                 nfs_up(rep, nmp, rep->r_td, "is alive again", NFSSTA_TIMEO);
 1056                 mtx_unlock(&Giant);
 1057         }
 1058         mrep = rep->r_mrep;
 1059         md = rep->r_md;
 1060         dpos = rep->r_dpos;
 1061         if (error) {
 1062                 /*
 1063                  * If we got interrupted by a signal in nfs_reply(), there's
 1064                  * a very small window where the reply could've come in before
 1065                  * this process got scheduled in. To handle that case, we need 
 1066                  * to free the reply if it was delivered.
 1067                  */
 1068                 if (rep->r_mrep != NULL)
 1069                         m_freem(rep->r_mrep);
 1070                 m_freem(rep->r_mreq);
 1071                 free((caddr_t)rep, M_NFSREQ);
 1072                 return (error);
 1073         }
 1074 
 1075         if (rep->r_mrep == NULL)
 1076                 panic("nfs_request: rep->r_mrep shouldn't be NULL if no error\n");
 1077 
 1078         /*
 1079          * break down the rpc header and check if ok
 1080          */
 1081         tl = nfsm_dissect(u_int32_t *, 3 * NFSX_UNSIGNED);
 1082         if (*tl++ == rpc_msgdenied) {
 1083                 if (*tl == rpc_mismatch)
 1084                         error = EOPNOTSUPP;
 1085                 else
 1086                         error = EACCES;
 1087                 m_freem(mrep);
 1088                 m_freem(rep->r_mreq);
 1089                 free((caddr_t)rep, M_NFSREQ);
 1090                 return (error);
 1091         }
 1092 
 1093         /*
  1094          * Just throw away any verifier (ie: kerberos etc).
 1095          */
 1096         i = fxdr_unsigned(int, *tl++);          /* verf type */
 1097         i = fxdr_unsigned(int32_t, *tl);        /* len */
 1098         if (i > 0)
 1099                 nfsm_adv(nfsm_rndup(i));
 1100         tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
 1101         /* 0 == ok */
 1102         if (*tl == 0) {
 1103                 tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
 1104                 if (*tl != 0) {
 1105                         error = fxdr_unsigned(int, *tl);
 1106                         if ((nmp->nm_flag & NFSMNT_NFSV3) &&
 1107                                 error == NFSERR_TRYLATER) {
 1108                                 m_freem(mrep);
 1109                                 error = 0;
 1110                                 waituntil = time_second + nfs3_jukebox_delay;
 1111                                 while (time_second < waituntil)
 1112                                         (void) tsleep(&lbolt,
 1113                                                 PSOCK, "nqnfstry", 0);
 1114                                 if (++nfs_xid == 0)
 1115                                         nfs_xid++;
 1116                                 rep->r_xid = *xidp = txdr_unsigned(nfs_xid);
 1117                                 goto tryagain;
 1118                         }
 1119 
 1120                         /*
 1121                          * If the File Handle was stale, invalidate the
 1122                          * lookup cache, just in case.
 1123                          */
 1124                         if (error == ESTALE)
 1125                                 cache_purge(vp);
 1126                         if (nmp->nm_flag & NFSMNT_NFSV3) {
 1127                                 *mrp = mrep;
 1128                                 *mdp = md;
 1129                                 *dposp = dpos;
 1130                                 error |= NFSERR_RETERR;
 1131                         } else
 1132                                 m_freem(mrep);
 1133                         m_freem(rep->r_mreq);
 1134                         free((caddr_t)rep, M_NFSREQ);
 1135                         return (error);
 1136                 }
 1137 
 1138                 *mrp = mrep;
 1139                 *mdp = md;
 1140                 *dposp = dpos;
 1141                 m_freem(rep->r_mreq);
 1142                 FREE((caddr_t)rep, M_NFSREQ);
 1143                 return (0);
 1144         }
 1145         m_freem(mrep);
 1146         error = EPROTONOSUPPORT;
 1147 nfsmout:
 1148         m_freem(rep->r_mreq);
 1149         free((caddr_t)rep, M_NFSREQ);
 1150         return (error);
 1151 }
 1152 
 1153 /*
 1154  * Nfs timer routine
  1155  * Scan the nfsreq list and retransmit any requests that have timed out.
 1156  * To avoid retransmission attempts on STREAM sockets (in the future) make
 1157  * sure to set the r_retry field to 0 (implies nm_retry == 0).
 1158  * 
 1159  * XXX - 
 1160  * For now, since we don't register MPSAFE callouts for the NFS client -
 1161  * softclock() acquires Giant before calling us. That prevents req entries
 1162  * from being removed from the list (from nfs_request()). But we still 
 1163  * acquire the nfs reqq mutex to make sure the state of individual req
 1164  * entries is not modified from RPC reply handling (from socket callback)
 1165  * while nfs_timer is walking the list of reqs.
 1166  * The nfs reqq lock cannot be held while we do the pru_send() because of a
 1167  * lock ordering violation. The NFS client socket callback acquires 
 1168  * inp_lock->nfsreq mutex and pru_send acquires inp_lock. So we drop the 
 1169  * reqq mutex (and reacquire it after the pru_send()). This won't work
 1170  * when we move to fine grained locking for NFS. When we get to that point, 
 1171  * a rewrite of nfs_timer() will be needed.
 1172  */
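       /*
        * Editorial sketch (not in the original file): the per-request
        * timeout computed below is the smoothed NFS_RTO() estimate
        * stretched by the exponential backoff table, e.g. an rto of
        * 7 ticks after three consecutive timeouts (nm_timeouts == 3)
        * waits
        *
        *   timeo = 7 * nfs_backoff[2] == 7 * 8 == 56 ticks
        *
        * before the next retransmit, growing up to nfs_backoff[7] == 256.
        */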
 1173 void
 1174 nfs_timer(void *arg)
 1175 {
 1176         struct nfsreq *rep;
 1177         struct mbuf *m;
 1178         struct socket *so;
 1179         struct nfsmount *nmp;
 1180         int timeo;
 1181         int s, error;
 1182         struct timeval now;
 1183 
 1184         getmicrouptime(&now);
 1185         s = splnet();
 1186         mtx_lock(&Giant);       /* nfs_down -> tprintf */
 1187         mtx_lock(&nfs_reqq_mtx);
 1188         TAILQ_FOREACH(rep, &nfs_reqq, r_chain) {
 1189                 nmp = rep->r_nmp;
 1190                 if (rep->r_mrep || (rep->r_flags & R_SOFTTERM))
 1191                         continue;
 1192                 if (nfs_sigintr(nmp, rep, rep->r_td))
 1193                         continue;
 1194                 if (nmp->nm_tprintf_initial_delay != 0 &&
 1195                     (rep->r_rexmit > 2 || (rep->r_flags & R_RESENDERR)) &&
 1196                     rep->r_lastmsg + nmp->nm_tprintf_delay < now.tv_sec) {
 1197                         rep->r_lastmsg = now.tv_sec;
 1198                         nfs_down(rep, nmp, rep->r_td, "not responding",
 1199                             0, NFSSTA_TIMEO);
 1200 #if 0
 1201                         if (!(nmp->nm_state & NFSSTA_MOUNTED)) {
 1202                                 /* we're not yet completely mounted and */
 1203                                 /* we can't complete an RPC, so we fail */
 1204                                 nfsstats.rpctimeouts++;
 1205                                 nfs_softterm(rep);
 1206                                 continue;
 1207                         }
 1208 #endif
 1209                 }
 1210                 if (rep->r_rtt >= 0) {
 1211                         rep->r_rtt++;
 1212                         if (nmp->nm_flag & NFSMNT_DUMBTIMR)
 1213                                 timeo = nmp->nm_timeo;
 1214                         else
 1215                                 timeo = NFS_RTO(nmp, proct[rep->r_procnum]);
 1216                         if (nmp->nm_timeouts > 0)
 1217                                 timeo *= nfs_backoff[nmp->nm_timeouts - 1];
 1218                         if (rep->r_rtt <= timeo)
 1219                                 continue;
 1220                         if (nmp->nm_timeouts < NFS_NBACKOFF)
 1221                                 nmp->nm_timeouts++;
 1222                 }
 1223                 if (rep->r_rexmit >= rep->r_retry) {    /* too many */
 1224                         nfsstats.rpctimeouts++;
 1225                         nfs_softterm(rep);
 1226                         continue;
 1227                 }
 1228                 if (nmp->nm_sotype != SOCK_DGRAM) {
 1229                         if (++rep->r_rexmit > NFS_MAXREXMIT)
 1230                                 rep->r_rexmit = NFS_MAXREXMIT;
 1231                         /*
 1232                          * For NFS/TCP, setting R_MUSTRESEND and waking up 
 1233                          * the requester will cause the request to be   
 1234                          * retransmitted (in nfs_reply()), re-connecting
 1235                          * if necessary.
 1236                          */
 1237                         rep->r_flags |= R_MUSTRESEND;
 1238                         wakeup_nfsreq(rep);
 1239                         rep->r_rtt = 0;
 1240                         continue;
 1241                 }
 1242                 if ((so = nmp->nm_so) == NULL)
 1243                         continue;
 1244                 /*
 1245                  * If there is enough space and the window allows..
 1246                  *      Resend it
 1247                  * Set r_rtt to -1 in case we fail to send it now.
 1248                  */
 1249                 rep->r_rtt = -1;
 1250                 if (sbspace(&so->so_snd) >= rep->r_mreq->m_pkthdr.len &&
 1251                    ((nmp->nm_flag & NFSMNT_DUMBTIMR) ||
 1252                     (rep->r_flags & R_SENT) ||
 1253                     nmp->nm_sent < nmp->nm_cwnd) &&
 1254                    (m = m_copym(rep->r_mreq, 0, M_COPYALL, M_DONTWAIT))){
 1255                         mtx_unlock(&nfs_reqq_mtx);
 1256                         if ((nmp->nm_flag & NFSMNT_NOCONN) == 0)
 1257                             error = (*so->so_proto->pr_usrreqs->pru_send)
 1258                                     (so, 0, m, NULL, NULL, curthread);
 1259                         else
 1260                             error = (*so->so_proto->pr_usrreqs->pru_send)
 1261                                     (so, 0, m, nmp->nm_nam, NULL, curthread);
 1262                         mtx_lock(&nfs_reqq_mtx);
 1263                         if (error) {
 1264                                 if (NFSIGNORE_SOERROR(nmp->nm_soflags, error))
 1265                                         so->so_error = 0;
 1266                                 rep->r_flags |= R_RESENDERR;
 1267                         } else {
 1268                                 /*
 1269                                  * If this is the first send, start timing;
 1270                                  * otherwise turn timing off, back off the
 1271                                  * timer, and halve the congestion window.
 1272                                  */
 1273                                 rep->r_flags &= ~R_RESENDERR;
 1274                                 if (rep->r_flags & R_SENT) {
 1275                                         rep->r_flags &= ~R_TIMING;
 1276                                         if (++rep->r_rexmit > NFS_MAXREXMIT)
 1277                                                 rep->r_rexmit = NFS_MAXREXMIT;
 1278                                         nmp->nm_cwnd >>= 1;
 1279                                         if (nmp->nm_cwnd < NFS_CWNDSCALE)
 1280                                                 nmp->nm_cwnd = NFS_CWNDSCALE;
 1281                                         nfsstats.rpcretries++;
 1282                                 } else {
 1283                                         rep->r_flags |= R_SENT;
 1284                                         nmp->nm_sent += NFS_CWNDSCALE;
 1285                                 }
 1286                                 rep->r_rtt = 0;
 1287                         }
 1288                 }
 1289         }
 1290         mtx_unlock(&nfs_reqq_mtx);
 1291         mtx_unlock(&Giant);     /* nfs_down -> tprintf */
 1292         splx(s);
 1293         callout_reset(&nfs_callout, nfs_ticks, nfs_timer, NULL);
 1294 }
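
/*
 * Illustrative sketch (not part of the original file): the retransmit
 * timeout computed in nfs_timer() above is scaled by nfs_backoff[] as
 * nm_timeouts grows, assuming that table holds a doubling series such
 * as { 2, 4, 8, ... } (the exact table lives earlier in this file).
 * The helper below is a hypothetical stand-in, guarded out so that it
 * is never compiled.
 */
#if 0
static int
example_nfs_rto(int base_timeo, int ntimeouts)
{
        static const int backoff[] = { 2, 4, 8, 16, 32, 64, 128, 256 };
        int timeo = base_timeo;

        /* Same shape as the nfs_timer() code: scale by the backoff table. */
        if (ntimeouts > 0)
                timeo *= backoff[ntimeouts - 1];
        return (timeo);
}
#endif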
 1295 
 1296 /*
 1297  * Mark all of an nfs mount's outstanding requests with R_SOFTTERM and
 1298  * wait for all requests to complete. This is used by forced unmounts
 1299  * to terminate any outstanding RPCs.
 1300  */
 1301 int
 1302 nfs_nmcancelreqs(struct nfsmount *nmp)
 1303 {
 1305         struct nfsreq *req;
 1306         int i, s;
 1307 
 1308         s = splnet();
 1309         mtx_lock(&nfs_reqq_mtx);
 1310         TAILQ_FOREACH(req, &nfs_reqq, r_chain) {
 1311                 if (nmp != req->r_nmp || req->r_mrep != NULL ||
 1312                     (req->r_flags & R_SOFTTERM))
 1313                         continue;
 1314                 nfs_softterm(req);
 1315         }
 1316         mtx_unlock(&nfs_reqq_mtx);
 1317         splx(s);
 1318 
 1319         for (i = 0; i < 30; i++) {
 1320                 s = splnet();
 1321                 mtx_lock(&nfs_reqq_mtx);
 1322                 TAILQ_FOREACH(req, &nfs_reqq, r_chain) {
 1323                         if (nmp == req->r_nmp)
 1324                                 break;
 1325                 }
 1326                 mtx_unlock(&nfs_reqq_mtx);
 1327                 splx(s);
 1328                 if (req == NULL)
 1329                         return (0);
 1330                 tsleep(&lbolt, PSOCK, "nfscancel", 0);
 1331         }
 1332         return (EBUSY);
 1333 }
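
/*
 * Illustrative usage sketch (hypothetical caller fragment, not from this
 * file): a forced unmount would typically cancel the mount's outstanding
 * RPCs with nfs_nmcancelreqs() and give up if they fail to drain within
 * the roughly 30-second window above (30 sleeps on lbolt).
 */
#if 0
        if ((mntflags & MNT_FORCE) != 0) {
                error = nfs_nmcancelreqs(nmp);
                if (error)
                        return (error);         /* EBUSY: requests stuck */
        }
#endif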
 1334 
 1335 /*
 1336  * Flag a request as being about to terminate (due to NFSMNT_INT/NFSMNT_SOFT).
 1337  * The nm_sent count is decremented now to avoid deadlocks when the process in
 1338  * soreceive() hasn't yet managed to send its own request.
 1339  */
 1341 static void
 1342 nfs_softterm(struct nfsreq *rep)
 1343 {
 1344 
 1345         rep->r_flags |= R_SOFTTERM;
 1346         if (rep->r_flags & R_SENT) {
 1347                 rep->r_nmp->nm_sent -= NFS_CWNDSCALE;
 1348                 rep->r_flags &= ~R_SENT;
 1349         }
 1350         /*
 1351          * The request is terminated; wake up the blocked process so
 1352          * that EINTR can be returned to it.
 1353          */
 1354         wakeup_nfsreq(rep);
 1355 }
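
/*
 * Sketch of the congestion-window accounting that nfs_softterm() keeps
 * consistent: each request that was actually sent holds NFS_CWNDSCALE
 * units of nm_sent, so terminating it must return those units or the
 * window would slowly leak shut.  A hypothetical sanity check:
 */
#if 0
        KASSERT(rep->r_nmp->nm_sent >= 0 &&
            (rep->r_nmp->nm_sent % NFS_CWNDSCALE) == 0,
            ("nm_sent out of step with NFS_CWNDSCALE"));
#endif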
 1356 
 1357 /*
 1358  * Any signal that can interrupt an NFS operation in an intr mount
 1359  * should be added to this set. SIGSTOP and SIGKILL cannot be masked.
 1360  */
 1361 int nfs_sig_set[] = {
 1362         SIGINT,
 1363         SIGTERM,
 1364         SIGHUP,
 1365         SIGKILL,
 1366         SIGSTOP,
 1367         SIGQUIT
 1368 };
 1369 
 1370 /*
 1371  * Check to see if one of the signals in our subset is pending on
 1372  * the process (in an intr mount).
 1373  */
 1374 static int
 1375 nfs_sig_pending(sigset_t set)
 1376 {
 1377         int i;
 1378         
 1379         for (i = 0; i < sizeof(nfs_sig_set)/sizeof(int); i++)
 1380                 if (SIGISMEMBER(set, nfs_sig_set[i]))
 1381                         return (1);
 1382         return (0);
 1383 }
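
/*
 * Note (illustrative): sizeof(nfs_sig_set)/sizeof(int) above is the
 * classic array-length idiom; a helper macro states the intent more
 * directly and survives a change of element type.  NELEM is a
 * hypothetical name here, not something defined in this file.
 */
#if 0
#define NELEM(a)        (sizeof(a) / sizeof((a)[0]))

        for (i = 0; i < NELEM(nfs_sig_set); i++)
                if (SIGISMEMBER(set, nfs_sig_set[i]))
                        return (1);
#endif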
 1384  
 1385 /*
 1386  * The set/restore sigmask functions are used to temporarily overwrite the
 1387  * thread's signal mask, e.g. for the duration of an RPC call. They are
 1388  * also used in other places in the NFS client that might tsleep().
 1389  */
 1390 void
 1391 nfs_set_sigmask(struct thread *td, sigset_t *oldset)
 1392 {
 1393         sigset_t newset;
 1394         int i;
 1395         struct proc *p;
 1396         
 1397         SIGFILLSET(newset);
 1398         if (td == NULL)
 1399                 td = curthread; /* XXX */
 1400         p = td->td_proc;
 1401         /* Remove the NFS set of signals from newset */
 1402         PROC_LOCK(p);
 1403         mtx_lock(&p->p_sigacts->ps_mtx);
 1404         for (i = 0; i < sizeof(nfs_sig_set)/sizeof(int); i++) {
 1405                 /*
 1406                  * But make sure we leave the ones already masked
 1407                  * by the thread, i.e., remove a signal from the
 1408                  * temporary signal mask only if it is neither in
 1409                  * td_sigmask nor ignored by the process.
 1410                  */
 1411                 if (!SIGISMEMBER(td->td_sigmask, nfs_sig_set[i]) &&
 1412                     !SIGISMEMBER(p->p_sigacts->ps_sigignore, nfs_sig_set[i]))
 1413                         SIGDELSET(newset, nfs_sig_set[i]);
 1414         }
 1415         mtx_unlock(&p->p_sigacts->ps_mtx);
 1416         PROC_UNLOCK(p);
 1417         kern_sigprocmask(td, SIG_SETMASK, &newset, oldset, 0);
 1418 }
 1419 
 1420 void
 1421 nfs_restore_sigmask(struct thread *td, sigset_t *set)
 1422 {
 1423         if (td == NULL)
 1424                 td = curthread; /* XXX */
 1425         kern_sigprocmask(td, SIG_SETMASK, set, NULL, 0);
 1426 }
 1427 
 1428 /*
 1429  * NFS wrapper for msleep() that installs a new signal mask and restores
 1430  * the old one after msleep() returns.
 1431  */
 1432 int
 1433 nfs_msleep(struct thread *td, void *ident, struct mtx *mtx, int priority, char *wmesg, int timo)
 1434 {
 1435         sigset_t oldset;
 1436         int error;
 1438         
 1439         if ((priority & PCATCH) == 0)
 1440                 return (msleep(ident, mtx, priority, wmesg, timo));
 1441         if (td == NULL)
 1442                 td = curthread; /* XXX */
 1443         nfs_set_sigmask(td, &oldset);
 1444         error = msleep(ident, mtx, priority, wmesg, timo);
 1445         nfs_restore_sigmask(td, &oldset);
 1447         return (error);
 1448 }
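
/*
 * Illustrative call (hypothetical fragment): waiting interruptibly for a
 * reply with the NFS signal set unblocked for the duration of the sleep.
 * With PCATCH set, a pending signal ends the sleep with EINTR/ERESTART.
 */
#if 0
        error = nfs_msleep(curthread, rep, &nfs_reqq_mtx,
            PSOCK | PCATCH, "nfsreq", 0);
        if (error == EINTR || error == ERESTART)
                return (error);
#endif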
 1449 
 1450 /*
 1451  * NFS wrapper for tsleep() that installs a new signal mask and restores
 1452  * the old one after tsleep() returns.
 1453  */
 1454 int
 1455 nfs_tsleep(struct thread *td, void *ident, int priority, char *wmesg, int timo)
 1456 {
 1457         sigset_t oldset;
 1458         int error;
 1460         
 1461         if ((priority & PCATCH) == 0)
 1462                 return (tsleep(ident, priority, wmesg, timo));
 1463         if (td == NULL)
 1464                 td = curthread; /* XXX */
 1465         nfs_set_sigmask(td, &oldset);
 1466         error = tsleep(ident, priority, wmesg, timo);
 1467         nfs_restore_sigmask(td, &oldset);
 1469         return (error);
 1470 }
 1471 
 1472 /*
 1473  * Test for a termination condition pending on the process.
 1474  * This is used for NFSMNT_INT mounts.
 1475  */
 1476 int
 1477 nfs_sigintr(struct nfsmount *nmp, struct nfsreq *rep, struct thread *td)
 1478 {
 1479         struct proc *p;
 1480         sigset_t tmpset;
 1481 
 1482         if ((nmp->nm_flag & NFSMNT_NFSV4) != 0)
 1483                 return nfs4_sigintr(nmp, rep, td);
 1484         if (rep && (rep->r_flags & R_SOFTTERM))
 1485                 return (EIO);
 1486         /* Terminate all requests while attempting a forced unmount. */
 1487         if (nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF)
 1488                 return (EIO);
 1489         if (!(nmp->nm_flag & NFSMNT_INT))
 1490                 return (0);
 1491         if (td == NULL)
 1492                 return (0);
 1493 
 1494         p = td->td_proc;
 1495         PROC_LOCK(p);
 1496         tmpset = p->p_siglist;
 1497         SIGSETNAND(tmpset, td->td_sigmask);
 1498         mtx_lock(&p->p_sigacts->ps_mtx);
 1499         SIGSETNAND(tmpset, p->p_sigacts->ps_sigignore);
 1500         mtx_unlock(&p->p_sigacts->ps_mtx);
 1501         if (SIGNOTEMPTY(p->p_siglist) && nfs_sig_pending(tmpset)) {
 1502                 PROC_UNLOCK(p);
 1503                 return (EINTR);
 1504         }
 1505         PROC_UNLOCK(p);
 1506 
 1507         return (0);
 1508 }
 1509 
 1510 /*
 1511  * Lock a socket against others.
 1512  * Necessary for STREAM sockets to ensure that an entire RPC request or
 1513  * reply is transferred as a unit, and to avoid races between processes
 1514  * with NFS requests in progress when a reconnect is necessary.
 1515  */
 1516 int
 1517 nfs_sndlock(struct nfsreq *rep)
 1518 {
 1519         int *statep = &rep->r_nmp->nm_state;
 1520         struct thread *td;
 1521         int error, slpflag = 0, slptimeo = 0;
 1522 
 1523         td = rep->r_td;
 1524         if (rep->r_nmp->nm_flag & NFSMNT_INT)
 1525                 slpflag = PCATCH;
 1526         while (*statep & NFSSTA_SNDLOCK) {
 1527                 error = nfs_sigintr(rep->r_nmp, rep, td);
 1528                 if (error)
 1529                         return (error);
 1530                 *statep |= NFSSTA_WANTSND;
 1531                 (void) tsleep(statep, slpflag | (PZERO - 1),
 1532                         "nfsndlck", slptimeo);
 1533                 if (slpflag == PCATCH) {
 1534                         slpflag = 0;
 1535                         slptimeo = 2 * hz;
 1536                 }
 1537         }
 1538         *statep |= NFSSTA_SNDLOCK;
 1539         return (0);
 1540 }
 1541 
 1542 /*
 1543  * Unlock the stream socket for others.
 1544  */
 1545 void
 1546 nfs_sndunlock(struct nfsreq *rep)
 1547 {
 1548         int *statep = &rep->r_nmp->nm_state;
 1549 
 1550         if ((*statep & NFSSTA_SNDLOCK) == 0)
 1551                 panic("nfs sndunlock");
 1552         *statep &= ~NFSSTA_SNDLOCK;
 1553         if (*statep & NFSSTA_WANTSND) {
 1554                 *statep &= ~NFSSTA_WANTSND;
 1555                 wakeup(statep);
 1556         }
 1557 }
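
/*
 * Illustrative pairing (hypothetical fragment): the send lock brackets
 * the transmission of one complete record so that concurrent requests
 * cannot interleave their bytes on the stream socket, roughly as the
 * send path elsewhere in this file does.
 */
#if 0
        error = nfs_sndlock(rep);
        if (error)
                return (error);
        error = nfs_send(nmp->nm_so, nmp->nm_nam, m, rep);
        nfs_sndunlock(rep);
#endif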
 1558 
 1559 /*
 1560  *      nfs_realign:
 1561  *
 1562  *      Check for badly aligned mbuf data and realign by copying the unaligned
 1563  *      portion of the data into a new mbuf chain and freeing the portions
 1564  *      of the old chain that were replaced.
 1565  *
 1566  *      We cannot simply realign the data within the existing mbuf chain
 1567  *      because the underlying buffers may contain other rpc commands and
 1568  *      we cannot afford to overwrite them.
 1569  *
 1570  *      We would prefer to avoid this situation entirely.  The situation does
 1571  *      not occur with NFS/UDP and is supposed to occur only occasionally
 1572  *      with TCP.  Use vfs.nfs.realign_count and realign_test to check this.
 1573  *
 1574  */
 1575 static int
 1576 nfs_realign(struct mbuf **pm, int hsiz)
 1577 {
 1578         struct mbuf *m;
 1579         struct mbuf *n = NULL;
 1580         int off = 0;
 1581 
 1582         ++nfs_realign_test;
 1583         while ((m = *pm) != NULL) {
 1584                 if ((m->m_len & 0x3) || (mtod(m, intptr_t) & 0x3)) {
 1585                         MGET(n, M_DONTWAIT, MT_DATA);
 1586                         if (n == NULL)
 1587                                 return (ENOMEM);
 1588                         if (m->m_len >= MINCLSIZE) {
 1589                                 MCLGET(n, M_DONTWAIT);
 1590                                 if (n->m_ext.ext_buf == NULL) {
 1591                                         m_freem(n);
 1592                                         return (ENOMEM);
 1593                                 }
 1594                         }
 1595                         n->m_len = 0;
 1596                         break;
 1597                 }
 1598                 pm = &m->m_next;
 1599         }
 1600         /*
 1601          * If n is non-NULL, loop on m copying data, then replace the
 1602          * portion of the chain that had to be realigned.
 1603          */
 1604         if (n != NULL) {
 1605                 ++nfs_realign_count;
 1606                 while (m) {
 1607                         m_copyback(n, off, m->m_len, mtod(m, caddr_t));
 1608                         off += m->m_len;
 1609                         m = m->m_next;
 1610                 }
 1611                 m_freem(*pm);
 1612                 *pm = n;
 1613         }
 1614         return (0);
 1615 }
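
/*
 * Illustrative caller (hypothetical fragment): the test in nfs_realign()
 * fires when either an mbuf's length or its data pointer is not a
 * multiple of 4, which the XDR decoding macros require.  A receive path
 * would typically realign a reply before parsing it:
 */
#if 0
        error = nfs_realign(&mrep, 2 * NFSX_UNSIGNED);
        if (error) {
                m_freem(mrep);
                return (error);         /* ENOMEM from MGET/MCLGET */
        }
#endif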
 1616 
 1618 static int
 1619 nfs_msg(struct thread *td, const char *server, const char *msg, int error)
 1620 {
 1621         struct proc *p;
 1622 
 1623         GIANT_REQUIRED; /* tprintf */
 1624 
 1625         p = td ? td->td_proc : NULL;
 1626         if (error) {
 1627                 tprintf(p, LOG_INFO, "nfs server %s: %s, error %d\n", server,
 1628                     msg, error);
 1629         } else {
 1630                 tprintf(p, LOG_INFO, "nfs server %s: %s\n", server, msg);
 1631         }
 1632         return (0);
 1633 }
 1634 
 1635 void
 1636 nfs_down(struct nfsreq *rep, struct nfsmount *nmp, struct thread *td,
 1637     const char *msg, int error, int flags)
 1638 {
 1643 
 1644         GIANT_REQUIRED; /* nfs_msg */
 1645 
 1646         if (nmp == NULL)
 1647                 return;
 1648         if ((flags & NFSSTA_TIMEO) && !(nmp->nm_state & NFSSTA_TIMEO)) {
 1649                 vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
 1650                     VQ_NOTRESP, 0);
 1651                 nmp->nm_state |= NFSSTA_TIMEO;
 1652         }
 1653 #ifdef NFSSTA_LOCKTIMEO
 1654         if ((flags & NFSSTA_LOCKTIMEO) && !(nmp->nm_state & NFSSTA_LOCKTIMEO)) {
 1655                 vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
 1656                     VQ_NOTRESPLOCK, 0);
 1657                 nmp->nm_state |= NFSSTA_LOCKTIMEO;
 1658         }
 1659 #endif
 1660         if (rep)
 1661                 rep->r_flags |= R_TPRINTFMSG;
 1662         nfs_msg(td, nmp->nm_mountp->mnt_stat.f_mntfromname, msg, error);
 1663 }
 1664 
 1665 void
 1666 nfs_up(struct nfsreq *rep, struct nfsmount *nmp, struct thread *td,
 1667     const char *msg, int flags)
 1668 {
 1673 
 1674         GIANT_REQUIRED; /* nfs_msg */
 1675 
 1676         if (nmp == NULL)
 1677                 return;
 1678         if ((rep == NULL) || (rep->r_flags & R_TPRINTFMSG) != 0)
 1679                 nfs_msg(td, nmp->nm_mountp->mnt_stat.f_mntfromname, msg, 0);
 1680         if ((flags & NFSSTA_TIMEO) && (nmp->nm_state & NFSSTA_TIMEO)) {
 1681                 nmp->nm_state &= ~NFSSTA_TIMEO;
 1682                 vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
 1683                     VQ_NOTRESP, 1);
 1684         }
 1685 #ifdef NFSSTA_LOCKTIMEO
 1686         if ((flags & NFSSTA_LOCKTIMEO) && (nmp->nm_state & NFSSTA_LOCKTIMEO)) {
 1687                 nmp->nm_state &= ~NFSSTA_LOCKTIMEO;
 1688                 vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
 1689                     VQ_NOTRESPLOCK, 1);
 1690         }
 1691 #endif
 1692 }
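
/*
 * Illustrative pairing (hypothetical fragment): nfs_down() is called when
 * a request first times out and nfs_up() once the server answers again,
 * so the console messages and the VQ_NOTRESP events come in matched
 * pairs.  The message strings here are just examples.
 */
#if 0
        nfs_down(rep, nmp, td, "not responding", 0, NFSSTA_TIMEO);
        /* ... retransmit until a reply finally arrives ... */
        nfs_up(rep, nmp, td, "is alive again", NFSSTA_TIMEO);
#endif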
 1693 
