/*-
 * Copyright (c) 1989, 1991, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_socket.c	8.5 (Berkeley) 3/30/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/6.3/sys/nfsclient/nfs_socket.c 173680 2007-11-16 21:24:54Z ups $");

/*
 * Socket operations for use by nfs
 */

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vnode.h>

#include <netinet/in.h>
#include <netinet/tcp.h>

#include <rpc/rpcclnt.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfsclient/nfs.h>
#include <nfs/xdr_subs.h>
#include <nfsclient/nfsm_subs.h>
#include <nfsclient/nfsmount.h>
#include <nfsclient/nfsnode.h>

#include <nfs4client/nfs4.h>

#define	TRUE	1
#define	FALSE	0

extern u_int32_t nfs_xid;

static int	nfs_realign_test;
static int	nfs_realign_count;
static int	nfs_bufpackets = 4;
static int	nfs_reconnects;
static int	nfs3_jukebox_delay = 10;
static int	nfs_skip_wcc_data_onerr = 1;

SYSCTL_DECL(_vfs_nfs);

SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_test, CTLFLAG_RW, &nfs_realign_test,
    0, "");
SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_count, CTLFLAG_RW, &nfs_realign_count,
    0, "");
SYSCTL_INT(_vfs_nfs, OID_AUTO, bufpackets, CTLFLAG_RW, &nfs_bufpackets, 0, "");
SYSCTL_INT(_vfs_nfs, OID_AUTO, reconnects, CTLFLAG_RD, &nfs_reconnects, 0,
    "number of times the nfs client has had to reconnect");
SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs3_jukebox_delay, CTLFLAG_RW,
    &nfs3_jukebox_delay, 0,
    "number of seconds to delay a retry after receiving EJUKEBOX");
SYSCTL_INT(_vfs_nfs, OID_AUTO, skip_wcc_data_onerr, CTLFLAG_RW,
    &nfs_skip_wcc_data_onerr, 0, "");

/*
 * There is a congestion window for outstanding rpcs maintained per mount
 * point.  The cwnd size is adjusted in roughly the way that:
 * Van Jacobson, Congestion Avoidance and Control, In "Proceedings of
 * SIGCOMM '88".  ACM, August 1988.
 * describes for TCP.  The cwnd size is chopped in half on a retransmit
 * timeout and incremented by 1/cwnd when each rpc reply is received and
 * a full cwnd of rpcs is in progress.
 * (The sent count and cwnd are scaled for integer arith.)
 * Variants of "slow start" were tried and were found to be too much of a
 * performance hit (ave. rtt 3 times larger), I suspect due to the large
 * rtt that nfs rpcs have.
 */
#define	NFS_CWNDSCALE	256
#define	NFS_MAXCWND	(NFS_CWNDSCALE * 32)
#define	NFS_NBACKOFF	8
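/*
 * Retransmit backoff multipliers (powers of two), indexed by
 * nm_timeouts - 1 and applied to the estimated RTO in nfs_timer().
 */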
static int nfs_backoff[NFS_NBACKOFF] = { 2, 4, 8, 16, 32, 64, 128, 256, };
struct callout	nfs_callout;

static int	nfs_msg(struct thread *, const char *, const char *, int);
static int	nfs_realign(struct mbuf **pm, int hsiz);
static int	nfs_reply(struct nfsreq *);
static void	nfs_softterm(struct nfsreq *rep);
static int	nfs_reconnect(struct nfsreq *rep);
static void	nfs_clnt_tcp_soupcall(struct socket *so, void *arg,
		    int waitflag);
static void	nfs_clnt_udp_soupcall(struct socket *so, void *arg,
		    int waitflag);
static void	wakeup_nfsreq(struct nfsreq *req);

extern struct mtx nfs_reqq_mtx;
extern struct mtx nfs_reply_mtx;

/*
 * RTT estimator
 */

static enum nfs_rto_timer_t nfs_proct[NFS_NPROCS] = {
	NFS_DEFAULT_TIMER,	/* NULL */
	NFS_GETATTR_TIMER,	/* GETATTR */
	NFS_DEFAULT_TIMER,	/* SETATTR */
	NFS_LOOKUP_TIMER,	/* LOOKUP */
	NFS_GETATTR_TIMER,	/* ACCESS */
	NFS_READ_TIMER,		/* READLINK */
	NFS_READ_TIMER,		/* READ */
	NFS_WRITE_TIMER,	/* WRITE */
	NFS_DEFAULT_TIMER,	/* CREATE */
	NFS_DEFAULT_TIMER,	/* MKDIR */
	NFS_DEFAULT_TIMER,	/* SYMLINK */
	NFS_DEFAULT_TIMER,	/* MKNOD */
	NFS_DEFAULT_TIMER,	/* REMOVE */
	NFS_DEFAULT_TIMER,	/* RMDIR */
	NFS_DEFAULT_TIMER,	/* RENAME */
	NFS_DEFAULT_TIMER,	/* LINK */
	NFS_READ_TIMER,		/* READDIR */
	NFS_READ_TIMER,		/* READDIRPLUS */
	NFS_DEFAULT_TIMER,	/* FSSTAT */
	NFS_DEFAULT_TIMER,	/* FSINFO */
	NFS_DEFAULT_TIMER,	/* PATHCONF */
	NFS_DEFAULT_TIMER,	/* COMMIT */
	NFS_DEFAULT_TIMER,	/* NOOP */
};

/*
 * Choose the correct RTT timer for this NFS procedure.
 */
static inline enum nfs_rto_timer_t
nfs_rto_timer(u_int32_t procnum)
{
	return nfs_proct[procnum];
}

/*
 * Initialize the RTT estimator state for a new mount point.
 */
static void
nfs_init_rtt(struct nfsmount *nmp)
{
	int i;

	for (i = 0; i < NFS_MAX_TIMER; i++)
		nmp->nm_srtt[i] = NFS_INITRTT;
	for (i = 0; i < NFS_MAX_TIMER; i++)
		nmp->nm_sdrtt[i] = 0;
}

/*
 * Update a mount point's RTT estimator state using data from the
 * passed-in request.
 *
 * Use a gain of 0.125 on the mean and a gain of 0.25 on the deviation.
 *
 * NB: Since the timer resolution of NFS_HZ is so coarse, it can often
 * result in r_rtt == 0.  Since r_rtt == N means that the actual RTT is
 * between N + dt and N + 2 - dt ticks, add 1 before calculating the
 * update values.
 */
static void
nfs_update_rtt(struct nfsreq *rep)
{
	int t1 = rep->r_rtt + 1;
	int index = nfs_rto_timer(rep->r_procnum) - 1;
	int *srtt = &rep->r_nmp->nm_srtt[index];
	int *sdrtt = &rep->r_nmp->nm_sdrtt[index];

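	/*
	 * Fixed-point filter: srtt is kept scaled by 8 and sdrtt by 4,
	 * so adding the full error term below applies the gains of
	 * 1/8 (mean) and 1/4 (deviation) noted above.
	 */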
	t1 -= *srtt >> 3;
	*srtt += t1;
	if (t1 < 0)
		t1 = -t1;
	t1 -= *sdrtt >> 2;
	*sdrtt += t1;
}

/*
 * Estimate RTO for an NFS RPC sent via an unreliable datagram.
 *
 * Use the mean and mean deviation of RTT for the appropriate type
 * of RPC for the frequent RPCs and a default for the others.
 * The justification for doing "other" this way is that these RPCs
 * happen so infrequently that the timer estimate would probably be
 * stale.  Also, since many of these RPCs are non-idempotent, a
 * conservative timeout is desired.
 *
 * getattr, lookup - A+2D
 * read, write     - A+4D
 * other           - nm_timeo
 */
static int
nfs_estimate_rto(struct nfsmount *nmp, u_int32_t procnum)
{
	enum nfs_rto_timer_t timer = nfs_rto_timer(procnum);
	int index = timer - 1;
	int rto;

	switch (timer) {
	case NFS_GETATTR_TIMER:
	case NFS_LOOKUP_TIMER:
		rto = ((nmp->nm_srtt[index] + 3) >> 2) +
		    ((nmp->nm_sdrtt[index] + 1) >> 1);
		break;
	case NFS_READ_TIMER:
	case NFS_WRITE_TIMER:
		rto = ((nmp->nm_srtt[index] + 7) >> 3) +
		    (nmp->nm_sdrtt[index] + 1);
		break;
	default:
		rto = nmp->nm_timeo;
		return (rto);
	}

	if (rto < NFS_MINRTO)
		rto = NFS_MINRTO;
	else if (rto > NFS_MAXRTO)
		rto = NFS_MAXRTO;

	return (rto);
}

/*
 * Initialize sockets and congestion for a new NFS connection.
 * We do not free the sockaddr on error.
 */
int
nfs_connect(struct nfsmount *nmp, struct nfsreq *rep)
{
	struct socket *so;
	int error, rcvreserve, sndreserve;
	int pktscale;
	struct sockaddr *saddr;
	struct thread *td = &thread0; /* only used for socreate and sobind */

	NET_ASSERT_GIANT();

	if (nmp->nm_sotype == SOCK_STREAM) {
		mtx_lock(&nmp->nm_nfstcpstate.mtx);
		nmp->nm_nfstcpstate.flags |= NFS_TCP_EXPECT_RPCMARKER;
		nmp->nm_nfstcpstate.rpcresid = 0;
		mtx_unlock(&nmp->nm_nfstcpstate.mtx);
	}
	nmp->nm_so = NULL;
	saddr = nmp->nm_nam;
	error = socreate(saddr->sa_family, &nmp->nm_so, nmp->nm_sotype,
	    nmp->nm_soproto, nmp->nm_mountp->mnt_cred, td);
	if (error)
		goto bad;
	so = nmp->nm_so;
	nmp->nm_soflags = so->so_proto->pr_flags;

	/*
	 * Some servers require that the client port be a reserved port
	 * number.
	 */
	if (nmp->nm_flag & NFSMNT_RESVPORT) {
		struct sockopt sopt;
		int ip, ip2, len;
		struct sockaddr_in6 ssin;
		struct sockaddr *sa;

		bzero(&sopt, sizeof sopt);
		switch (saddr->sa_family) {
		case AF_INET:
			sopt.sopt_level = IPPROTO_IP;
			sopt.sopt_name = IP_PORTRANGE;
			ip = IP_PORTRANGE_LOW;
			ip2 = IP_PORTRANGE_DEFAULT;
			len = sizeof (struct sockaddr_in);
			break;
#ifdef INET6
		case AF_INET6:
			sopt.sopt_level = IPPROTO_IPV6;
			sopt.sopt_name = IPV6_PORTRANGE;
			ip = IPV6_PORTRANGE_LOW;
			ip2 = IPV6_PORTRANGE_DEFAULT;
			len = sizeof (struct sockaddr_in6);
			break;
#endif
		default:
			goto noresvport;
		}
		sa = (struct sockaddr *)&ssin;
		bzero(sa, len);
		sa->sa_len = len;
		sa->sa_family = saddr->sa_family;
		sopt.sopt_dir = SOPT_SET;
		sopt.sopt_val = (void *)&ip;
		sopt.sopt_valsize = sizeof(ip);
		error = sosetopt(so, &sopt);
		if (error)
			goto bad;
		error = sobind(so, sa, td);
		if (error)
			goto bad;
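		/*
		 * Now that the socket is bound to a reserved port,
		 * restore the default port range so later binds on this
		 * socket do not inherit the "low" setting.
		 */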
		ip = ip2;
		error = sosetopt(so, &sopt);
		if (error)
			goto bad;
noresvport: ;
	}

	/*
	 * Protocols that do not require connections may be optionally left
	 * unconnected for servers that reply from a port other than
	 * NFS_PORT.
	 */
	if (nmp->nm_flag & NFSMNT_NOCONN) {
		if (nmp->nm_soflags & PR_CONNREQUIRED) {
			error = ENOTCONN;
			goto bad;
		}
	} else {
		error = soconnect(so, nmp->nm_nam, td);
		if (error)
			goto bad;

		/*
		 * Wait for the connection to complete.  Cribbed from the
		 * connect system call but with the wait timing out so
		 * that interruptible mounts don't hang here for a long
		 * time.
		 */
		SOCK_LOCK(so);
		while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
			(void)msleep(&so->so_timeo, SOCK_MTX(so),
			    PSOCK, "nfscon", 2 * hz);
			if ((so->so_state & SS_ISCONNECTING) &&
			    so->so_error == 0 && rep &&
			    (error = nfs_sigintr(nmp, rep, rep->r_td)) != 0) {
				so->so_state &= ~SS_ISCONNECTING;
				SOCK_UNLOCK(so);
				goto bad;
			}
		}
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			SOCK_UNLOCK(so);
			goto bad;
		}
		SOCK_UNLOCK(so);
	}
	so->so_rcv.sb_timeo = 12 * hz;
	so->so_snd.sb_timeo = 5 * hz;

	/*
	 * Get buffer reservation size from sysctl, but impose reasonable
	 * limits.
	 */
	pktscale = nfs_bufpackets;
	if (pktscale < 2)
		pktscale = 2;
	if (pktscale > 64)
		pktscale = 64;

	if (nmp->nm_sotype == SOCK_DGRAM) {
		sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * pktscale;
		rcvreserve = (max(nmp->nm_rsize, nmp->nm_readdirsize) +
		    NFS_MAXPKTHDR) * pktscale;
	} else if (nmp->nm_sotype == SOCK_SEQPACKET) {
		sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * pktscale;
		rcvreserve = (max(nmp->nm_rsize, nmp->nm_readdirsize) +
		    NFS_MAXPKTHDR) * pktscale;
	} else {
		if (nmp->nm_sotype != SOCK_STREAM)
			panic("nfscon sotype");
		if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
			struct sockopt sopt;
			int val;

			bzero(&sopt, sizeof sopt);
			sopt.sopt_dir = SOPT_SET;
			sopt.sopt_level = SOL_SOCKET;
			sopt.sopt_name = SO_KEEPALIVE;
			sopt.sopt_val = &val;
			sopt.sopt_valsize = sizeof val;
			val = 1;
			sosetopt(so, &sopt);
		}
		if (so->so_proto->pr_protocol == IPPROTO_TCP) {
			struct sockopt sopt;
			int val;

			bzero(&sopt, sizeof sopt);
			sopt.sopt_dir = SOPT_SET;
			sopt.sopt_level = IPPROTO_TCP;
			sopt.sopt_name = TCP_NODELAY;
			sopt.sopt_val = &val;
			sopt.sopt_valsize = sizeof val;
			val = 1;
			sosetopt(so, &sopt);
		}
		sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR +
		    sizeof (u_int32_t)) * pktscale;
		rcvreserve = (nmp->nm_rsize + NFS_MAXPKTHDR +
		    sizeof (u_int32_t)) * pktscale;
	}
	error = soreserve(so, sndreserve, rcvreserve);
	if (error)
		goto bad;
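	/*
	 * Register a receive upcall on the socket so replies are picked
	 * up directly from the socket callback rather than by a process
	 * sleeping in a receive loop.
	 */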
	SOCKBUF_LOCK(&so->so_rcv);
	so->so_rcv.sb_flags |= SB_NOINTR;
	so->so_upcallarg = (caddr_t)nmp;
	if (so->so_type == SOCK_STREAM)
		so->so_upcall = nfs_clnt_tcp_soupcall;
	else
		so->so_upcall = nfs_clnt_udp_soupcall;
	so->so_rcv.sb_flags |= SB_UPCALL;
	SOCKBUF_UNLOCK(&so->so_rcv);
	SOCKBUF_LOCK(&so->so_snd);
	so->so_snd.sb_flags |= SB_NOINTR;
	SOCKBUF_UNLOCK(&so->so_snd);

	/* Initialize other non-zero congestion variables. */
	nfs_init_rtt(nmp);
	nmp->nm_cwnd = NFS_MAXCWND / 2;		/* Initial send window */
	nmp->nm_sent = 0;
	nmp->nm_timeouts = 0;
	return (0);

bad:
	nfs_disconnect(nmp);
	return (error);
}

/*
 * Reconnect routine:
 * Called when a connection is broken on a reliable protocol.
 * - clean up the old socket
 * - nfs_connect() again
 * - set R_MUSTRESEND for all outstanding requests on mount point
 * If this fails the mount point is DEAD!
 * nb: Must be called with the nfs_sndlock() set on the mount point.
 */
static int
nfs_reconnect(struct nfsreq *rep)
{
	struct nfsreq *rp;
	struct nfsmount *nmp = rep->r_nmp;
	int error;

	nfs_reconnects++;
	nfs_disconnect(nmp);
	while ((error = nfs_connect(nmp, rep)) != 0) {
		if (error == ERESTART)
			error = EINTR;
		if (error == EIO || error == EINTR)
			return (error);
		(void)tsleep(&lbolt, PSOCK, "nfscon", 0);
	}

	/*
	 * Clear the FORCE_RECONNECT flag only after the connect succeeds,
	 * to prevent races between multiple processes waiting on the
	 * mountpoint while the connection is being torn down.  The first
	 * one to acquire the sndlock retries the connection; the others
	 * block on the sndlock until the connection is established
	 * successfully and then re-transmit their requests.
	 */
	mtx_lock(&nmp->nm_nfstcpstate.mtx);
	nmp->nm_nfstcpstate.flags &= ~NFS_TCP_FORCE_RECONNECT;
	mtx_unlock(&nmp->nm_nfstcpstate.mtx);

	/*
	 * Loop through the outstanding request list and fix up all
	 * requests on the old socket.
	 */
	mtx_lock(&nfs_reqq_mtx);
	TAILQ_FOREACH(rp, &nfs_reqq, r_chain) {
		if (rp->r_nmp == nmp)
			rp->r_flags |= R_MUSTRESEND;
	}
	mtx_unlock(&nfs_reqq_mtx);
	return (0);
}

/*
 * NFS disconnect.  Clean up and unlink.
 */
void
nfs_disconnect(struct nfsmount *nmp)
{
	struct socket *so;

	NET_ASSERT_GIANT();

	if (nmp->nm_so) {
		so = nmp->nm_so;
		nmp->nm_so = NULL;
		SOCKBUF_LOCK(&so->so_rcv);
		so->so_upcallarg = NULL;
		so->so_upcall = NULL;
		so->so_rcv.sb_flags &= ~SB_UPCALL;
		SOCKBUF_UNLOCK(&so->so_rcv);
		soshutdown(so, SHUT_WR);
		soclose(so);
	}
}

void
nfs_safedisconnect(struct nfsmount *nmp)
{
	struct nfsreq dummyreq;

	bzero(&dummyreq, sizeof(dummyreq));
	dummyreq.r_nmp = nmp;
	nfs_disconnect(nmp);
}

/*
 * This is the nfs send routine.  For connection based socket types, it
 * must be called with an nfs_sndlock() on the socket.
 * - return EINTR if the RPC is terminated, 0 otherwise
 * - set R_MUSTRESEND if the send fails for any reason
 * - do any cleanup required by recoverable socket errors (?)
 */
int
nfs_send(struct socket *so, struct sockaddr *nam, struct mbuf *top,
    struct nfsreq *rep)
{
	struct sockaddr *sendnam;
	int error, error2, soflags, flags;

	NET_ASSERT_GIANT();

	KASSERT(rep, ("nfs_send: called with rep == NULL"));

	error = nfs_sigintr(rep->r_nmp, rep, rep->r_td);
	if (error) {
		m_freem(top);
		return (error);
	}
	if ((so = rep->r_nmp->nm_so) == NULL) {
		rep->r_flags |= R_MUSTRESEND;
		m_freem(top);
		return (0);
	}
	rep->r_flags &= ~R_MUSTRESEND;
	soflags = rep->r_nmp->nm_soflags;

	if ((soflags & PR_CONNREQUIRED) || (so->so_state & SS_ISCONNECTED))
		sendnam = NULL;
	else
		sendnam = nam;
	if (so->so_type == SOCK_SEQPACKET)
		flags = MSG_EOR;
	else
		flags = 0;

	error = so->so_proto->pr_usrreqs->pru_sosend(so, sendnam, 0, top, 0,
	    flags, curthread /*XXX*/);
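	/*
	 * An ENOBUFS from a datagram send just means the socket buffer
	 * was full; treat it as transient and let the retransmit timer
	 * resend the request.
	 */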
	if (error == ENOBUFS && so->so_type == SOCK_DGRAM) {
		error = 0;
		rep->r_flags |= R_MUSTRESEND;
	}

	if (error) {
		/*
		 * Don't report EPIPE errors on nfs sockets.
		 * These can be due to idle tcp mounts which will be closed
		 * by netapp, solaris, etc. if left idle too long.
		 */
		if (error != EPIPE) {
			log(LOG_INFO, "nfs send error %d for server %s\n",
			    error,
			    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
		}
		/*
		 * Deal with errors for the client side.
		 */
		error2 = NFS_SIGREP(rep);
		if (error2)
			error = error2;
		else
			rep->r_flags |= R_MUSTRESEND;

		/*
		 * Handle any recoverable (soft) socket errors here. (?)
		 * Make EWOULDBLOCK a recoverable error; we'll rexmit from
		 * nfs_timer().
		 */
		if (error != EINTR && error != ERESTART && error != EIO &&
		    error != EPIPE)
			error = 0;
	}
	return (error);
}

int
nfs_reply(struct nfsreq *rep)
{
	struct socket *so;
	struct mbuf *m;
	int error = 0, sotype, slpflag;

	NET_ASSERT_GIANT();

	sotype = rep->r_nmp->nm_sotype;
	/*
	 * For reliable protocols, lock against other senders/receivers
	 * in case a reconnect is necessary.
	 */
	if (sotype != SOCK_DGRAM) {
		error = nfs_sndlock(rep);
		if (error)
			return (error);
tryagain:
		if (rep->r_mrep) {
			nfs_sndunlock(rep);
			return (0);
		}
		if (rep->r_flags & R_SOFTTERM) {
			nfs_sndunlock(rep);
			return (EINTR);
		}
		so = rep->r_nmp->nm_so;
		mtx_lock(&rep->r_nmp->nm_nfstcpstate.mtx);
		if (!so ||
		    (rep->r_nmp->nm_nfstcpstate.flags & NFS_TCP_FORCE_RECONNECT)) {
			mtx_unlock(&rep->r_nmp->nm_nfstcpstate.mtx);
			error = nfs_reconnect(rep);
			if (error) {
				nfs_sndunlock(rep);
				return (error);
			}
			goto tryagain;
		} else
			mtx_unlock(&rep->r_nmp->nm_nfstcpstate.mtx);
		while (rep->r_flags & R_MUSTRESEND) {
			m = m_copym(rep->r_mreq, 0, M_COPYALL, M_WAIT);
			nfsstats.rpcretries++;
			error = nfs_send(so, rep->r_nmp->nm_nam, m, rep);
			if (error) {
				if (error == EINTR || error == ERESTART ||
				    (error = nfs_reconnect(rep)) != 0) {
					nfs_sndunlock(rep);
					return (error);
				}
				goto tryagain;
			}
		}
		nfs_sndunlock(rep);
	}
	slpflag = 0;
	if (rep->r_nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;
	mtx_lock(&nfs_reply_mtx);
	while ((rep->r_mrep == NULL) && (error == 0) &&
	    ((rep->r_flags & R_SOFTTERM) == 0) &&
	    ((sotype == SOCK_DGRAM) || ((rep->r_flags & R_MUSTRESEND) == 0)))
		error = msleep((caddr_t)rep, &nfs_reply_mtx,
		    slpflag | (PZERO - 1), "nfsreq", 0);
	mtx_unlock(&nfs_reply_mtx);
	if (error == EINTR || error == ERESTART)
		/* NFS operations aren't restartable.  Map ERESTART to EINTR. */
		return (EINTR);
	if (rep->r_flags & R_SOFTTERM)
		/* Request terminated: we exceeded the retries (soft mount). */
		return (ETIMEDOUT);
	if (sotype == SOCK_STREAM) {
		mtx_lock(&rep->r_nmp->nm_nfstcpstate.mtx);
		if (((rep->r_nmp->nm_nfstcpstate.flags & NFS_TCP_FORCE_RECONNECT) ||
		    (rep->r_flags & R_MUSTRESEND))) {
			mtx_unlock(&rep->r_nmp->nm_nfstcpstate.mtx);
			error = nfs_sndlock(rep);
			if (error)
				return (error);
			goto tryagain;
		} else
			mtx_unlock(&rep->r_nmp->nm_nfstcpstate.mtx);
	}
	return (error);
}

/*
 * XXX TO DO
 * Make nfs_realign() non-blocking.  Also make nfsm_dissect() non-blocking.
 */
static void
nfs_clnt_match_xid(struct socket *so, struct nfsmount *nmp, struct mbuf *mrep)
{
	struct mbuf *md;
	caddr_t dpos;
	u_int32_t rxid, *tl;
	struct nfsreq *rep;
	int error;

	/*
	 * Search for any mbufs that are not a multiple of 4 bytes long
	 * or with m_data not longword aligned.
	 * These could cause pointer alignment problems, so copy them to
	 * well aligned mbufs.
	 */
	if (nfs_realign(&mrep, 5 * NFSX_UNSIGNED) == ENOMEM) {
		m_freem(mrep);
		nfsstats.rpcinvalid++;
		return;
	}

	/*
	 * Get the xid and check that it is an rpc reply.
	 */
	md = mrep;
	dpos = mtod(md, caddr_t);
	tl = nfsm_dissect_nonblock(u_int32_t *, 2 * NFSX_UNSIGNED);
	rxid = *tl++;
	if (*tl != rpc_reply) {
		m_freem(mrep);
nfsmout:
		nfsstats.rpcinvalid++;
		return;
	}

	mtx_lock(&nfs_reqq_mtx);
	/*
	 * Loop through the request list to match up the reply.
	 * Iff no match, just drop the datagram.
	 */
	TAILQ_FOREACH(rep, &nfs_reqq, r_chain) {
		if (rep->r_mrep == NULL && rxid == rep->r_xid) {
			/* Found it... */
			rep->r_mrep = mrep;
			rep->r_md = md;
			rep->r_dpos = dpos;
			/*
			 * Update congestion window.
			 * Do the additive increase of one rpc/rtt.
			 */
			if (nmp->nm_cwnd <= nmp->nm_sent) {
				nmp->nm_cwnd +=
				    (NFS_CWNDSCALE * NFS_CWNDSCALE +
				    (nmp->nm_cwnd >> 1)) / nmp->nm_cwnd;
				if (nmp->nm_cwnd > NFS_MAXCWND)
					nmp->nm_cwnd = NFS_MAXCWND;
			}
			if (rep->r_flags & R_SENT) {
				rep->r_flags &= ~R_SENT;
				nmp->nm_sent -= NFS_CWNDSCALE;
			}
			if (rep->r_flags & R_TIMING)
				nfs_update_rtt(rep);
			nmp->nm_timeouts = 0;
			break;
		}
	}
	/*
	 * If not matched to a request, drop it.
	 * If it's mine, wake up the requestor.
	 */
	if (rep == NULL) {
		nfsstats.rpcunexpected++;
		m_freem(mrep);
	} else
		wakeup_nfsreq(rep);
	mtx_unlock(&nfs_reqq_mtx);
}

/*
 * The wakeup of the requestor should be done under the mutex
 * to avoid potential missed wakeups.
 */
static void
wakeup_nfsreq(struct nfsreq *req)
{
	mtx_lock(&nfs_reply_mtx);
	wakeup((caddr_t)req);
	mtx_unlock(&nfs_reply_mtx);
}

static void
nfs_mark_for_reconnect(struct nfsmount *nmp)
{
	struct nfsreq *rp;

	mtx_lock(&nmp->nm_nfstcpstate.mtx);
	nmp->nm_nfstcpstate.flags |= NFS_TCP_FORCE_RECONNECT;
	mtx_unlock(&nmp->nm_nfstcpstate.mtx);
	/*
	 * Wakeup all processes that are waiting for replies
	 * on this mount point.  One of them does the reconnect.
	 */
	mtx_lock(&nfs_reqq_mtx);
	TAILQ_FOREACH(rp, &nfs_reqq, r_chain) {
		if (rp->r_nmp == nmp) {
			rp->r_flags |= R_MUSTRESEND;
			wakeup_nfsreq(rp);
		}
	}
	mtx_unlock(&nfs_reqq_mtx);
}

static int
nfstcp_readable(struct socket *so, int bytes)
{
	int retval;

	SOCKBUF_LOCK(&so->so_rcv);
	retval = (so->so_rcv.sb_cc >= (bytes) ||
	    (so->so_rcv.sb_state & SBS_CANTRCVMORE) ||
	    so->so_error);
	SOCKBUF_UNLOCK(&so->so_rcv);
	return (retval);
}

#define nfstcp_marker_readable(so) nfstcp_readable(so, sizeof(u_int32_t))

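/*
 * Copy up to len bytes from the mbuf chain into buf, returning the
 * number of bytes that could not be copied (0 on complete success).
 */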
static int
nfs_copy_len(struct mbuf *mp, char *buf, int len)
{
	while (len > 0 && mp != NULL) {
		int copylen = min(len, mp->m_len);

		bcopy(mp->m_data, buf, copylen);
		buf += copylen;
		len -= copylen;
		mp = mp->m_next;
	}
	return (len);
}

static void
nfs_clnt_tcp_soupcall(struct socket *so, void *arg, int waitflag)
{
	struct nfsmount *nmp = (struct nfsmount *)arg;
	struct mbuf *mp = NULL;
	struct uio auio;
	int error;
	u_int32_t len;
	int rcvflg;

	/*
	 * Don't pick any more data from the socket if we've marked the
	 * mountpoint for reconnect.
	 */
	mtx_lock(&nmp->nm_nfstcpstate.mtx);
	if (nmp->nm_nfstcpstate.flags & NFS_TCP_FORCE_RECONNECT) {
		mtx_unlock(&nmp->nm_nfstcpstate.mtx);
		return;
	} else
		mtx_unlock(&nmp->nm_nfstcpstate.mtx);
	auio.uio_td = curthread;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_READ;
	for ( ; ; ) {
		if (nmp->nm_nfstcpstate.flags & NFS_TCP_EXPECT_RPCMARKER) {
			int resid;

			if (!nfstcp_marker_readable(so)) {
				/* Marker is not readable. */
				return;
			}
			auio.uio_resid = sizeof(u_int32_t);
			auio.uio_iov = NULL;
			auio.uio_iovcnt = 0;
			mp = NULL;
			rcvflg = (MSG_DONTWAIT | MSG_SOCALLBCK);
			error = so->so_proto->pr_usrreqs->pru_soreceive
			    (so, (struct sockaddr **)0,
			    &auio, &mp, (struct mbuf **)0, &rcvflg);
			/*
			 * We've already tested that the socket is readable.
			 * Two cases here: we either read 0 bytes (the peer
			 * closed the connection) or got some other error.
			 * In both cases, we tear down the connection.
			 */
			if (error || auio.uio_resid > 0) {
				if (error && error != ECONNRESET) {
					log(LOG_ERR,
			"nfs/tcp clnt: Error %d reading socket, tearing down TCP connection\n",
					    error);
				}
				goto mark_reconnect;
			}
			if (mp == NULL)
				panic("nfs_clnt_tcp_soupcall: Got empty mbuf chain from sorecv");
			/*
			 * Sigh.  We can't do the obvious thing here (which
			 * would be to have soreceive copy the length from
			 * the mbufs for us).  Calling uiomove() from the
			 * context of a socket callback (even for
			 * kernel-kernel copies) leads to LORs (since we
			 * hold network locks at this point).
			 */
			if ((resid = nfs_copy_len(mp, (char *)&len,
			    sizeof(u_int32_t)))) {
				log(LOG_ERR, "%s (%d) from nfs server %s\n",
				    "Bad RPC HDR length",
				    (int)(sizeof(u_int32_t) - resid),
				    nmp->nm_mountp->mnt_stat.f_mntfromname);
				goto mark_reconnect;
			}
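			/*
			 * Clear the RPC record mark's last-fragment bit;
			 * the remaining 31 bits are the fragment length.
			 */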
			len = ntohl(len) & ~0x80000000;
			m_freem(mp);
			/*
			 * This is SERIOUS! We are out of sync with the
			 * sender and forcing a disconnect/reconnect is all
			 * I can do.
			 */
			if (len > NFS_MAXPACKET || len == 0) {
				log(LOG_ERR, "%s (%d) from nfs server %s\n",
				    "impossible packet length",
				    len,
				    nmp->nm_mountp->mnt_stat.f_mntfromname);
				goto mark_reconnect;
			}
			nmp->nm_nfstcpstate.rpcresid = len;
			nmp->nm_nfstcpstate.flags &= ~(NFS_TCP_EXPECT_RPCMARKER);
		}
		/*
		 * Processed RPC marker or no RPC marker to process.
		 * Pull in and process data.
		 */
		if (nmp->nm_nfstcpstate.rpcresid > 0) {
			if (!nfstcp_readable(so, nmp->nm_nfstcpstate.rpcresid)) {
				/* All data not readable. */
				return;
			}
			auio.uio_resid = nmp->nm_nfstcpstate.rpcresid;
			auio.uio_iov = NULL;
			auio.uio_iovcnt = 0;
			mp = NULL;
			rcvflg = (MSG_DONTWAIT | MSG_SOCALLBCK);
			error = so->so_proto->pr_usrreqs->pru_soreceive
			    (so, (struct sockaddr **)0,
			    &auio, &mp, (struct mbuf **)0, &rcvflg);
			if (error || auio.uio_resid > 0) {
				if (error && error != ECONNRESET) {
					log(LOG_ERR,
			"nfs/tcp clnt: Error %d reading socket, tearing down TCP connection\n",
					    error);
				}
				goto mark_reconnect;
			}
			if (mp == NULL)
				panic("nfs_clnt_tcp_soupcall: Got empty mbuf chain from sorecv");
			nmp->nm_nfstcpstate.rpcresid = 0;
			nmp->nm_nfstcpstate.flags |= NFS_TCP_EXPECT_RPCMARKER;
			/*
			 * We got an entire RPC reply.  Match XIDs and wake
			 * up the requestor.
			 */
			nfs_clnt_match_xid(so, nmp, mp);
		}
	}

mark_reconnect:
	nfs_mark_for_reconnect(nmp);
}

static void
nfs_clnt_udp_soupcall(struct socket *so, void *arg, int waitflag)
{
	struct nfsmount *nmp = (struct nfsmount *)arg;
	struct uio auio;
	struct mbuf *mp = NULL;
	struct mbuf *control = NULL;
	int error, rcvflag;

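	/*
	 * uio_resid only bounds how much a single soreceive() may
	 * return, so just make it large enough for any datagram.
	 */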
	auio.uio_td = curthread;
	rcvflag = MSG_DONTWAIT;
	auio.uio_resid = 1000000000;
	do {
		mp = control = NULL;
		error = so->so_proto->pr_usrreqs->pru_soreceive(so,
		    NULL, &auio, &mp, &control, &rcvflag);
		if (control)
			m_freem(control);
		if (mp)
			nfs_clnt_match_xid(so, nmp, mp);
	} while (mp && !error);
}

/*
 * nfs_request - goes something like this
 *	- fill in request struct
 *	- links it into list
 *	- calls nfs_send() for first transmit
 *	- calls nfs_receive() to get reply
 *	- break down rpc header and return with nfs reply pointed to
 *	  by mrep or error
 * nb: always frees up mreq mbuf list
 */
int
nfs_request(struct vnode *vp, struct mbuf *mrest, int procnum,
    struct thread *td, struct ucred *cred, struct mbuf **mrp,
    struct mbuf **mdp, caddr_t *dposp)
{
	struct mbuf *mrep, *m2;
	struct nfsreq *rep;
	u_int32_t *tl;
	int i;
	struct nfsmount *nmp;
	struct mbuf *m, *md, *mheadend;
	time_t waituntil;
	caddr_t dpos;
	int s, error = 0, mrest_len, auth_len, auth_type;
	struct timeval now;
	u_int32_t *xidp;

	/* Reject requests while attempting a forced unmount. */
	if (vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF) {
		m_freem(mrest);
		return (ESTALE);
	}
	nmp = VFSTONFS(vp->v_mount);
	if ((nmp->nm_flag & NFSMNT_NFSV4) != 0)
		return nfs4_request(vp, mrest, procnum, td, cred, mrp, mdp,
		    dposp);
	MALLOC(rep, struct nfsreq *, sizeof(struct nfsreq), M_NFSREQ,
	    M_WAITOK);
	rep->r_mrep = rep->r_md = NULL;
	rep->r_nmp = nmp;
	rep->r_vp = vp;
	rep->r_td = td;
	rep->r_procnum = procnum;

	getmicrouptime(&now);
	rep->r_lastmsg = now.tv_sec -
	    ((nmp->nm_tprintf_delay) - (nmp->nm_tprintf_initial_delay));
	mrest_len = m_length(mrest, NULL);

	/*
	 * Get the RPC header with authorization.
	 */
	auth_type = RPCAUTH_UNIX;
	if (cred->cr_ngroups < 1)
		panic("nfsreq nogrps");
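	/*
	 * Size of the AUTH_UNIX credential: five fixed 32-bit words plus
	 * one word for each supplemental group, with the group count
	 * capped at nm_numgrps.
	 */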
	auth_len = ((((cred->cr_ngroups - 1) > nmp->nm_numgrps) ?
	    nmp->nm_numgrps : (cred->cr_ngroups - 1)) << 2) +
	    5 * NFSX_UNSIGNED;
	m = nfsm_rpchead(cred, nmp->nm_flag, procnum, auth_type, auth_len,
	    mrest, mrest_len, &mheadend, &xidp);

	/*
	 * For stream protocols, insert a Sun RPC Record Mark.
	 */
	if (nmp->nm_sotype == SOCK_STREAM) {
		M_PREPEND(m, NFSX_UNSIGNED, M_TRYWAIT);
		*mtod(m, u_int32_t *) = htonl(0x80000000 |
		    (m->m_pkthdr.len - NFSX_UNSIGNED));
	}
	rep->r_mreq = m;
	rep->r_xid = *xidp;
tryagain:
	if (nmp->nm_flag & NFSMNT_SOFT)
		rep->r_retry = nmp->nm_retry;
	else
		rep->r_retry = NFS_MAXREXMIT + 1;	/* past clip limit */
	rep->r_rtt = rep->r_rexmit = 0;
	if (nfs_rto_timer(procnum) != NFS_DEFAULT_TIMER)
		rep->r_flags = R_TIMING;
	else
		rep->r_flags = 0;
	rep->r_mrep = NULL;

	/*
	 * Do the client side RPC.
	 */
	nfsstats.rpcrequests++;
	/*
	 * Chain the request into the list of outstanding requests.  Be
	 * sure to put it LAST so the timer finds the oldest requests
	 * first.
	 */
	s = splsoftclock();
	mtx_lock(&nfs_reqq_mtx);
	if (TAILQ_EMPTY(&nfs_reqq))
		callout_reset(&nfs_callout, nfs_ticks, nfs_timer, NULL);
	TAILQ_INSERT_TAIL(&nfs_reqq, rep, r_chain);
	mtx_unlock(&nfs_reqq_mtx);

	/*
	 * If backing off another request or avoiding congestion, don't
	 * send this one now but let the timer do it.  If not timing a
	 * request, do it now.
	 */
	if (nmp->nm_so && (nmp->nm_sotype != SOCK_DGRAM ||
	    (nmp->nm_flag & NFSMNT_DUMBTIMR) ||
	    nmp->nm_sent < nmp->nm_cwnd)) {
		splx(s);
		error = nfs_sndlock(rep);
		if (!error) {
			m2 = m_copym(m, 0, M_COPYALL, M_TRYWAIT);
			error = nfs_send(nmp->nm_so, nmp->nm_nam, m2, rep);
			nfs_sndunlock(rep);
		}
		mtx_lock(&nfs_reqq_mtx);
		/*
		 * nfs_timer() could've re-transmitted the request if we
		 * ended up blocking on nfs_send() too long, so check for
		 * R_SENT here.
		 */
		if (!error && (rep->r_flags & (R_SENT | R_MUSTRESEND)) == 0) {
			nmp->nm_sent += NFS_CWNDSCALE;
			rep->r_flags |= R_SENT;
		}
		mtx_unlock(&nfs_reqq_mtx);
	} else {
		splx(s);
		rep->r_rtt = -1;
	}

	/*
	 * Wait for the reply from our send or the timer's.
	 */
	if (!error || error == EPIPE)
		error = nfs_reply(rep);

	/*
	 * RPC done, unlink the request.
	 */
	s = splsoftclock();
	mtx_lock(&nfs_reqq_mtx);
	TAILQ_REMOVE(&nfs_reqq, rep, r_chain);
	if (TAILQ_EMPTY(&nfs_reqq))
		callout_stop(&nfs_callout);
	/*
	 * Decrement the outstanding request count.
	 */
	if (rep->r_flags & R_SENT) {
		rep->r_flags &= ~R_SENT;	/* paranoia */
		nmp->nm_sent -= NFS_CWNDSCALE;
	}
	mtx_unlock(&nfs_reqq_mtx);
	splx(s);

	/*
	 * If there was a successful reply and a tprintf msg has been
	 * printed, tprintf a response.
	 */
	if (!error) {
		mtx_lock(&Giant);
		nfs_up(rep, nmp, rep->r_td, "is alive again", NFSSTA_TIMEO);
		mtx_unlock(&Giant);
	}
	mrep = rep->r_mrep;
	md = rep->r_md;
	dpos = rep->r_dpos;
	if (error) {
		/*
		 * If we got interrupted by a signal in nfs_reply(), there's
		 * a very small window where the reply could've come in
		 * before this process got scheduled in.  To handle that
		 * case, we need to free the reply if it was delivered.
		 */
		if (rep->r_mrep != NULL)
			m_freem(rep->r_mrep);
		m_freem(rep->r_mreq);
		free((caddr_t)rep, M_NFSREQ);
		return (error);
	}

	if (rep->r_mrep == NULL)
		panic("nfs_request: rep->r_mrep shouldn't be NULL if no error");

	/*
	 * Break down the rpc header and check if it is ok.
	 */
	tl = nfsm_dissect(u_int32_t *, 3 * NFSX_UNSIGNED);
	if (*tl++ == rpc_msgdenied) {
		if (*tl == rpc_mismatch)
			error = EOPNOTSUPP;
		else
			error = EACCES;
		m_freem(mrep);
		m_freem(rep->r_mreq);
		free((caddr_t)rep, M_NFSREQ);
		return (error);
	}

	/*
	 * Just throw away any verifier (i.e., kerberos etc).
	 */
	i = fxdr_unsigned(int, *tl++);		/* verf type */
	i = fxdr_unsigned(int32_t, *tl);	/* len */
	if (i > 0)
		nfsm_adv(nfsm_rndup(i));
	tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
	/* 0 == ok */
	if (*tl == 0) {
		tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
		if (*tl != 0) {
			error = fxdr_unsigned(int, *tl);
			if ((nmp->nm_flag & NFSMNT_NFSV3) &&
			    error == NFSERR_TRYLATER) {
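				/*
				 * The server is temporarily unavailable
				 * (NFS3ERR_JUKEBOX); back off for
				 * nfs3_jukebox_delay seconds and retry
				 * with a fresh xid.
				 */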
				m_freem(mrep);
				error = 0;
				waituntil = time_second + nfs3_jukebox_delay;
				while (time_second < waituntil)
					(void)tsleep(&lbolt,
					    PSOCK, "nqnfstry", 0);
				if (++nfs_xid == 0)
					nfs_xid++;
				rep->r_xid = *xidp = txdr_unsigned(nfs_xid);
				goto tryagain;
			}

			/*
			 * If the File Handle was stale, invalidate the
			 * lookup cache, just in case.
			 */
			if (error == ESTALE)
				cache_purge(vp);
			/*
			 * Skip wcc data on NFS errors for now.  NetApp
			 * filers return corrupt postop attrs in the wcc
			 * data for NFS err EROFS.  Not sure if they could
			 * return corrupt postop attrs for other errors.
			 */
			if ((nmp->nm_flag & NFSMNT_NFSV3) &&
			    !nfs_skip_wcc_data_onerr) {
				*mrp = mrep;
				*mdp = md;
				*dposp = dpos;
				error |= NFSERR_RETERR;
			} else
				m_freem(mrep);
			m_freem(rep->r_mreq);
			free((caddr_t)rep, M_NFSREQ);
			return (error);
		}

		*mrp = mrep;
		*mdp = md;
		*dposp = dpos;
		m_freem(rep->r_mreq);
		FREE((caddr_t)rep, M_NFSREQ);
		return (0);
	}
	m_freem(mrep);
	error = EPROTONOSUPPORT;
nfsmout:
	m_freem(rep->r_mreq);
	free((caddr_t)rep, M_NFSREQ);
	return (error);
}

/*
 * Nfs timer routine:
 * Scan the nfsreq list and retransmit any requests that have timed out.
 * To avoid retransmission attempts on STREAM sockets (in the future) make
 * sure to set the r_retry field to 0 (implies nm_retry == 0).
 *
 * XXX -
 * For now, since we don't register MPSAFE callouts for the NFS client -
 * softclock() acquires Giant before calling us.  That prevents req entries
 * from being removed from the list (from nfs_request()).  But we still
 * acquire the nfs reqq mutex to make sure the state of individual req
 * entries is not modified from RPC reply handling (from the socket
 * callback) while nfs_timer is walking the list of reqs.
 * The nfs reqq lock cannot be held while we do the pru_send() because of a
 * lock ordering violation: the NFS client socket callback acquires
 * inp_lock->nfsreq mutex and pru_send acquires inp_lock.  So we drop the
 * reqq mutex (and reacquire it after the pru_send()).  This won't work
 * when we move to fine grained locking for NFS.  When we get to that
 * point, a rewrite of nfs_timer() will be needed.
 */
void
nfs_timer(void *arg)
{
	struct nfsreq *rep;
	struct mbuf *m;
	struct socket *so;
	struct nfsmount *nmp;
	int timeo;
	int s, error;
	struct timeval now;

	getmicrouptime(&now);
	s = splnet();
	mtx_lock(&Giant);	/* nfs_down -> tprintf */
	mtx_lock(&nfs_reqq_mtx);
	TAILQ_FOREACH(rep, &nfs_reqq, r_chain) {
		nmp = rep->r_nmp;
		if (rep->r_mrep || (rep->r_flags & R_SOFTTERM))
			continue;
		if (nfs_sigintr(nmp, rep, rep->r_td))
			continue;
		if (nmp->nm_tprintf_initial_delay != 0 &&
		    (rep->r_rexmit > 2 || (rep->r_flags & R_RESENDERR)) &&
		    rep->r_lastmsg + nmp->nm_tprintf_delay < now.tv_sec) {
			rep->r_lastmsg = now.tv_sec;
			nfs_down(rep, nmp, rep->r_td, "not responding",
			    0, NFSSTA_TIMEO);
#if 0
			if (!(nmp->nm_state & NFSSTA_MOUNTED)) {
				/* we're not yet completely mounted and */
				/* we can't complete an RPC, so we fail */
				nfsstats.rpctimeouts++;
				nfs_softterm(rep);
				continue;
			}
#endif
		}
		if (rep->r_rtt >= 0) {
			rep->r_rtt++;
			if (nmp->nm_flag & NFSMNT_DUMBTIMR)
				timeo = nmp->nm_timeo;
			else
				timeo = nfs_estimate_rto(nmp, rep->r_procnum);
			if (nmp->nm_timeouts > 0)
				timeo *= nfs_backoff[nmp->nm_timeouts - 1];
			if (rep->r_rtt <= timeo)
				continue;
			if (nmp->nm_timeouts < NFS_NBACKOFF)
				nmp->nm_timeouts++;
		}
		if (rep->r_rexmit >= rep->r_retry) {	/* too many */
			nfsstats.rpctimeouts++;
			nfs_softterm(rep);
			continue;
		}
		if (nmp->nm_sotype != SOCK_DGRAM) {
			if (++rep->r_rexmit > NFS_MAXREXMIT)
				rep->r_rexmit = NFS_MAXREXMIT;
			/*
			 * For NFS/TCP, setting R_MUSTRESEND and waking up
			 * the requester will cause the request to be
			 * retransmitted (in nfs_reply()), re-connecting
			 * if necessary.
			 */
			rep->r_flags |= R_MUSTRESEND;
			wakeup_nfsreq(rep);
			rep->r_rtt = 0;
			continue;
		}
		if ((so = nmp->nm_so) == NULL)
			continue;
		/*
		 * If there is enough space and the window allows...
		 *	Resend it.
		 * Set r_rtt to -1 in case we fail to send it now.
		 */
		rep->r_rtt = -1;
		if (sbspace(&so->so_snd) >= rep->r_mreq->m_pkthdr.len &&
		    ((nmp->nm_flag & NFSMNT_DUMBTIMR) ||
		    (rep->r_flags & R_SENT) ||
		    nmp->nm_sent < nmp->nm_cwnd) &&
		    (m = m_copym(rep->r_mreq, 0, M_COPYALL, M_DONTWAIT))) {
			mtx_unlock(&nfs_reqq_mtx);
			if ((nmp->nm_flag & NFSMNT_NOCONN) == 0)
				error = (*so->so_proto->pr_usrreqs->pru_send)
				    (so, 0, m, NULL, NULL, curthread);
			else
				error = (*so->so_proto->pr_usrreqs->pru_send)
				    (so, 0, m, nmp->nm_nam, NULL, curthread);
			mtx_lock(&nfs_reqq_mtx);
			if (error) {
				if (NFSIGNORE_SOERROR(nmp->nm_soflags, error))
					so->so_error = 0;
				rep->r_flags |= R_RESENDERR;
			} else {
				/*
				 * Iff first send, start timing;
				 * else turn timing off, backoff timer
				 * and divide congestion window by 2.
				 */
				rep->r_flags &= ~R_RESENDERR;
				if (rep->r_flags & R_SENT) {
					rep->r_flags &= ~R_TIMING;
					if (++rep->r_rexmit > NFS_MAXREXMIT)
						rep->r_rexmit = NFS_MAXREXMIT;
					nmp->nm_cwnd >>= 1;
					if (nmp->nm_cwnd < NFS_CWNDSCALE)
						nmp->nm_cwnd = NFS_CWNDSCALE;
					nfsstats.rpcretries++;
				} else {
					rep->r_flags |= R_SENT;
					nmp->nm_sent += NFS_CWNDSCALE;
				}
				rep->r_rtt = 0;
			}
		}
	}
	mtx_unlock(&nfs_reqq_mtx);
	mtx_unlock(&Giant);	/* nfs_down -> tprintf */
	splx(s);
	callout_reset(&nfs_callout, nfs_ticks, nfs_timer, NULL);
}

/*
 * Mark all of an nfs mount's outstanding requests with R_SOFTTERM and
 * wait for all requests to complete.  This is used by forced unmounts
 * to terminate any outstanding RPCs.
 */
int
nfs_nmcancelreqs(struct nfsmount *nmp)
{
	struct nfsreq *req;
	int i, s;

	s = splnet();
	mtx_lock(&nfs_reqq_mtx);
	TAILQ_FOREACH(req, &nfs_reqq, r_chain) {
		if (nmp != req->r_nmp || req->r_mrep != NULL ||
		    (req->r_flags & R_SOFTTERM))
			continue;
		nfs_softterm(req);
	}
	mtx_unlock(&nfs_reqq_mtx);
	splx(s);

	for (i = 0; i < 30; i++) {
		s = splnet();
		mtx_lock(&nfs_reqq_mtx);
		TAILQ_FOREACH(req, &nfs_reqq, r_chain) {
			if (nmp == req->r_nmp)
				break;
		}
		mtx_unlock(&nfs_reqq_mtx);
		splx(s);
		if (req == NULL)
			return (0);
		tsleep(&lbolt, PSOCK, "nfscancel", 0);
	}
	return (EBUSY);
}

/*
 * Flag a request as being about to terminate (due to NFSMNT_INT/NFSMNT_SOFT).
 * The nm_sent count is decremented now to avoid deadlocks when the process
 * in soreceive() hasn't yet managed to send its own request.
 */
static void
nfs_softterm(struct nfsreq *rep)
{

	rep->r_flags |= R_SOFTTERM;
	if (rep->r_flags & R_SENT) {
		rep->r_nmp->nm_sent -= NFS_CWNDSCALE;
		rep->r_flags &= ~R_SENT;
	}
	/*
	 * Request terminated; wake up the blocked process so that we
	 * can return EINTR back.
	 */
	wakeup_nfsreq(rep);
}

/*
 * Any signal that can interrupt an NFS operation in an intr mount
 * should be added to this set.  SIGSTOP and SIGKILL cannot be masked.
 */
int nfs_sig_set[] = {
	SIGINT,
	SIGTERM,
	SIGHUP,
	SIGKILL,
	SIGSTOP,
	SIGQUIT
};

/*
 * Check to see if one of the signals in our subset is pending on
 * the process (in an intr mount).
 */
static int
nfs_sig_pending(sigset_t set)
{
	int i;

	for (i = 0; i < sizeof(nfs_sig_set)/sizeof(int); i++)
		if (SIGISMEMBER(set, nfs_sig_set[i]))
			return (1);
	return (0);
}

/*
 * The set/restore sigmask functions are used to (temporarily) overwrite
 * the thread's signal mask during an RPC call (for example).  These are
 * also used in other places in the NFS client that might tsleep().
 */
void
nfs_set_sigmask(struct thread *td, sigset_t *oldset)
{
	sigset_t newset;
	int i;
	struct proc *p;

	SIGFILLSET(newset);
	if (td == NULL)
		td = curthread;		/* XXX */
	p = td->td_proc;
	/* Remove the NFS set of signals from newset. */
	PROC_LOCK(p);
	mtx_lock(&p->p_sigacts->ps_mtx);
	for (i = 0; i < sizeof(nfs_sig_set)/sizeof(int); i++) {
		/*
		 * But make sure we leave the ones already masked
		 * by the thread, i.e. remove the signal from the
		 * temporary signal mask only if it wasn't already
		 * in td_sigmask and isn't being ignored.
		 */
		if (!SIGISMEMBER(td->td_sigmask, nfs_sig_set[i]) &&
		    !SIGISMEMBER(p->p_sigacts->ps_sigignore, nfs_sig_set[i]))
			SIGDELSET(newset, nfs_sig_set[i]);
	}
	mtx_unlock(&p->p_sigacts->ps_mtx);
	PROC_UNLOCK(p);
	kern_sigprocmask(td, SIG_SETMASK, &newset, oldset, 0);
}

void
nfs_restore_sigmask(struct thread *td, sigset_t *set)
{
	if (td == NULL)
		td = curthread;		/* XXX */
	kern_sigprocmask(td, SIG_SETMASK, set, NULL, 0);
}

/*
 * NFS wrapper to msleep(), that shoves a new p_sigmask and restores the
 * old one after msleep() returns.
 */
int
nfs_msleep(struct thread *td, void *ident, struct mtx *mtx, int priority,
    char *wmesg, int timo)
{
	sigset_t oldset;
	int error;

	if ((priority & PCATCH) == 0)
		return msleep(ident, mtx, priority, wmesg, timo);
	if (td == NULL)
		td = curthread;		/* XXX */
	nfs_set_sigmask(td, &oldset);
	error = msleep(ident, mtx, priority, wmesg, timo);
	nfs_restore_sigmask(td, &oldset);
	return (error);
}

/*
 * NFS wrapper to tsleep(), that shoves a new p_sigmask and restores the
 * old one after tsleep() returns.
 */
int
nfs_tsleep(struct thread *td, void *ident, int priority, char *wmesg,
    int timo)
{
	sigset_t oldset;
	int error;

	if ((priority & PCATCH) == 0)
		return tsleep(ident, priority, wmesg, timo);
	if (td == NULL)
		td = curthread;		/* XXX */
	nfs_set_sigmask(td, &oldset);
	error = tsleep(ident, priority, wmesg, timo);
	nfs_restore_sigmask(td, &oldset);
	return (error);
}

/*
 * Test for a termination condition pending on the process.
 * This is used for NFSMNT_INT mounts.
 */
int
nfs_sigintr(struct nfsmount *nmp, struct nfsreq *rep, struct thread *td)
{
	struct proc *p;
	sigset_t tmpset;

	if ((nmp->nm_flag & NFSMNT_NFSV4) != 0)
		return nfs4_sigintr(nmp, rep, td);
	if (rep && (rep->r_flags & R_SOFTTERM))
		return (EIO);
	/* Terminate all requests while attempting a forced unmount. */
	if (nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF)
		return (EIO);
	if (!(nmp->nm_flag & NFSMNT_INT))
		return (0);
	if (td == NULL)
		return (0);

	p = td->td_proc;
	PROC_LOCK(p);
	tmpset = p->p_siglist;
	SIGSETOR(tmpset, td->td_siglist);
	SIGSETNAND(tmpset, td->td_sigmask);
	mtx_lock(&p->p_sigacts->ps_mtx);
	SIGSETNAND(tmpset, p->p_sigacts->ps_sigignore);
	mtx_unlock(&p->p_sigacts->ps_mtx);
	if ((SIGNOTEMPTY(p->p_siglist) || SIGNOTEMPTY(td->td_siglist)) &&
	    nfs_sig_pending(tmpset)) {
		PROC_UNLOCK(p);
		return (EINTR);
	}
	PROC_UNLOCK(p);

	return (0);
}

/*
 * Lock a socket against others.
 * Necessary for STREAM sockets to ensure you get an entire rpc request/reply
 * and also to avoid race conditions between the processes with nfs requests
 * in progress when a reconnect is necessary.
 */
int
nfs_sndlock(struct nfsreq *rep)
{
	int *statep = &rep->r_nmp->nm_state;
	struct thread *td;
	int error, slpflag = 0, slptimeo = 0;

	td = rep->r_td;
	if (rep->r_nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;
	while (*statep & NFSSTA_SNDLOCK) {
		error = nfs_sigintr(rep->r_nmp, rep, td);
		if (error)
			return (error);
		*statep |= NFSSTA_WANTSND;
		(void)tsleep(statep, slpflag | (PZERO - 1),
		    "nfsndlck", slptimeo);
		if (slpflag == PCATCH) {
			slpflag = 0;
			slptimeo = 2 * hz;
		}
	}
	*statep |= NFSSTA_SNDLOCK;
	return (0);
}

/*
 * Unlock the stream socket for others.
 */
void
nfs_sndunlock(struct nfsreq *rep)
{
	int *statep = &rep->r_nmp->nm_state;

	if ((*statep & NFSSTA_SNDLOCK) == 0)
		panic("nfs sndunlock");
	*statep &= ~NFSSTA_SNDLOCK;
	if (*statep & NFSSTA_WANTSND) {
		*statep &= ~NFSSTA_WANTSND;
		wakeup(statep);
	}
}

/*
 * nfs_realign:
 *
 * Check for badly aligned mbuf data and realign by copying the unaligned
 * portion of the data into a new mbuf chain and freeing the portions
 * of the old chain that were replaced.
 *
 * We cannot simply realign the data within the existing mbuf chain
 * because the underlying buffers may contain other rpc commands and
 * we cannot afford to overwrite them.
 *
 * We would prefer to avoid this situation entirely.  The situation does
 * not occur with NFS/UDP and is supposed to only occasionally occur
 * with TCP.  Use vfs.nfs.realign_count and realign_test to check this.
 */
static int
nfs_realign(struct mbuf **pm, int hsiz)
{
	struct mbuf *m;
	struct mbuf *n = NULL;
	int off = 0;

	++nfs_realign_test;
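	/*
	 * Scan for the first mbuf whose length or data pointer is not
	 * longword aligned; everything from there on gets copied.
	 */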
	while ((m = *pm) != NULL) {
		if ((m->m_len & 0x3) || (mtod(m, intptr_t) & 0x3)) {
			MGET(n, M_DONTWAIT, MT_DATA);
			if (n == NULL)
				return (ENOMEM);
			if (m->m_len >= MINCLSIZE) {
				MCLGET(n, M_DONTWAIT);
				if (n->m_ext.ext_buf == NULL) {
					m_freem(n);
					return (ENOMEM);
				}
			}
			n->m_len = 0;
			break;
		}
		pm = &m->m_next;
	}
	/*
	 * If n is non-NULL, loop on m copying data, then replace the
	 * portion of the chain that had to be realigned.
	 */
	if (n != NULL) {
		++nfs_realign_count;
		while (m) {
			m_copyback(n, off, m->m_len, mtod(m, caddr_t));
			off += m->m_len;
			m = m->m_next;
		}
		m_freem(*pm);
		*pm = n;
	}
	return (0);
}

static int
nfs_msg(struct thread *td, const char *server, const char *msg, int error)
{
	struct proc *p;

	GIANT_REQUIRED;		/* tprintf */

	p = td ? td->td_proc : NULL;
	if (error) {
		tprintf(p, LOG_INFO, "nfs server %s: %s, error %d\n",
		    server, msg, error);
	} else {
		tprintf(p, LOG_INFO, "nfs server %s: %s\n", server, msg);
	}
	return (0);
}

void
nfs_down(struct nfsreq *rep, struct nfsmount *nmp, struct thread *td,
    const char *msg, int error, int flags)
{

	GIANT_REQUIRED;		/* nfs_msg */

	if (nmp == NULL)
		return;
	if ((flags & NFSSTA_TIMEO) && !(nmp->nm_state & NFSSTA_TIMEO)) {
		vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
		    VQ_NOTRESP, 0);
		nmp->nm_state |= NFSSTA_TIMEO;
	}
#ifdef NFSSTA_LOCKTIMEO
	if ((flags & NFSSTA_LOCKTIMEO) && !(nmp->nm_state & NFSSTA_LOCKTIMEO)) {
		vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
		    VQ_NOTRESPLOCK, 0);
		nmp->nm_state |= NFSSTA_LOCKTIMEO;
	}
#endif
	if (rep)
		rep->r_flags |= R_TPRINTFMSG;
	nfs_msg(td, nmp->nm_mountp->mnt_stat.f_mntfromname, msg, error);
}

void
nfs_up(struct nfsreq *rep, struct nfsmount *nmp, struct thread *td,
    const char *msg, int flags)
{

	GIANT_REQUIRED;		/* nfs_msg */

	if (nmp == NULL)
		return;
	if ((rep == NULL) || (rep->r_flags & R_TPRINTFMSG) != 0)
		nfs_msg(td, nmp->nm_mountp->mnt_stat.f_mntfromname, msg, 0);
	if ((flags & NFSSTA_TIMEO) && (nmp->nm_state & NFSSTA_TIMEO)) {
		nmp->nm_state &= ~NFSSTA_TIMEO;
		vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
		    VQ_NOTRESP, 1);
	}
#ifdef NFSSTA_LOCKTIMEO
	if ((flags & NFSSTA_LOCKTIMEO) && (nmp->nm_state & NFSSTA_LOCKTIMEO)) {
		nmp->nm_state &= ~NFSSTA_LOCKTIMEO;
		vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
		    VQ_NOTRESPLOCK, 1);
	}
#endif
}