FreeBSD/Linux Kernel Cross Reference
sys/nfs/nfs_socket.c
1 /*
2 * Copyright (c) 1989, 1991, 1993, 1995
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * Rick Macklem at The University of Guelph.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)nfs_socket.c 8.5 (Berkeley) 3/30/95
37 * $FreeBSD$
38 */
39
40 /*
41 * Socket operations for use by nfs
42 */
43
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/proc.h>
47 #include <sys/malloc.h>
48 #include <sys/mount.h>
49 #include <sys/kernel.h>
50 #include <sys/mbuf.h>
51 #include <sys/vnode.h>
52 #include <sys/protosw.h>
53 #include <sys/socket.h>
54 #include <sys/socketvar.h>
55 #include <sys/syslog.h>
56 #include <sys/tprintf.h>
57 #include <sys/sysctl.h>
58 #include <sys/signalvar.h>
59
60 #include <netinet/in.h>
61 #include <netinet/tcp.h>
62
63 #include <nfs/rpcv2.h>
64 #include <nfs/nfsproto.h>
65 #include <nfs/nfs.h>
66 #include <nfs/xdr_subs.h>
67 #include <nfs/nfsm_subs.h>
68 #include <nfs/nfsmount.h>
69 #include <nfs/nfsnode.h>
70 #include <nfs/nfsrtt.h>
71 #include <nfs/nqnfs.h>
72
73 #define TRUE 1
74 #define FALSE 0
75
76 /*
 77  * Estimate rto for an nfs rpc sent via an unreliable datagram.
78 * Use the mean and mean deviation of rtt for the appropriate type of rpc
79 * for the frequent rpcs and a default for the others.
80 * The justification for doing "other" this way is that these rpcs
 81  * happen so infrequently that the timer estimate would probably be stale.
82 * Also, since many of these rpcs are
83 * non-idempotent, a conservative timeout is desired.
84 * getattr, lookup - A+2D
85 * read, write - A+4D
86 * other - nm_timeo
87 */
88 #define NFS_RTO(n, t) \
89 ((t) == 0 ? (n)->nm_timeo : \
90 ((t) < 3 ? \
91 (((((n)->nm_srtt[t-1] + 3) >> 2) + (n)->nm_sdrtt[t-1] + 1) >> 1) : \
92 ((((n)->nm_srtt[t-1] + 7) >> 3) + (n)->nm_sdrtt[t-1] + 1)))
93 #define NFS_SRTT(r) (r)->r_nmp->nm_srtt[proct[(r)->r_procnum] - 1]
94 #define NFS_SDRTT(r) (r)->r_nmp->nm_sdrtt[proct[(r)->r_procnum] - 1]
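/*
 * Worked example (illustrative values, not from the source): with
 * nm_srtt[0] == 48 and nm_sdrtt[0] == 8, i.e. a smoothed rtt of about
 * 6 ticks (srtt is kept scaled << 3) and a deviation of about 2 ticks
 * (sdrtt is kept scaled << 2):
 *	NFS_RTO(nmp, 1) = ((((48 + 3) >> 2) + 8 + 1) >> 1) = 10   (~A+2D)
 *	NFS_RTO(nmp, 3) = (((48 + 7) >> 3) + 8 + 1)       = 15   (~A+4D)
 * matching the A+2D / A+4D rules given in the comment above.
 */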
95 /*
96 * External data, mostly RPC constants in XDR form
97 */
98 extern u_int32_t rpc_reply, rpc_msgdenied, rpc_mismatch, rpc_vers,
99 rpc_auth_unix, rpc_msgaccepted, rpc_call, rpc_autherr,
100 rpc_auth_kerb;
101 extern u_int32_t nfs_prog, nqnfs_prog;
102 extern time_t nqnfsstarttime;
103 extern struct nfsstats nfsstats;
104 extern int nfsv3_procid[NFS_NPROCS];
105 extern int nfs_ticks;
106
107 /*
108 * Defines which timer to use for the procnum.
109 * 0 - default
110 * 1 - getattr
111 * 2 - lookup
112 * 3 - read
113 * 4 - write
114 */
115 static int proct[NFS_NPROCS] = {
116 0, 1, 0, 2, 1, 3, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 0, 0, 0, 0, 0,
117 0, 0, 0,
118 };
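/*
 * E.g. proct[NFSPROC_READ] == 3 and proct[NFSPROC_WRITE] == 4, so reads
 * and writes get their own rtt timers, while infrequent rpcs such as
 * setattr fall through to entry 0 and use the default nm_timeo.
 */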
119
120 static int nfs_realign_test;
121 static int nfs_realign_count;
122 static int nfs_bufpackets = 4;
123
124 SYSCTL_DECL(_vfs_nfs);
125
126 SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_test, CTLFLAG_RW, &nfs_realign_test, 0, "");
127 SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_count, CTLFLAG_RW, &nfs_realign_count, 0, "");
128 SYSCTL_INT(_vfs_nfs, OID_AUTO, bufpackets, CTLFLAG_RW, &nfs_bufpackets, 0, "");
129
130
131 /*
132 * There is a congestion window for outstanding rpcs maintained per mount
133 * point. The cwnd size is adjusted in roughly the way that:
134  * Van Jacobson, Congestion Avoidance and Control, in "Proceedings of
135 * SIGCOMM '88". ACM, August 1988.
136 * describes for TCP. The cwnd size is chopped in half on a retransmit timeout
137 * and incremented by 1/cwnd when each rpc reply is received and a full cwnd
138 * of rpcs is in progress.
139 * (The sent count and cwnd are scaled for integer arith.)
140 * Variants of "slow start" were tried and were found to be too much of a
141  * performance hit (average rtt 3 times larger); I suspect this is
142  * due to the large rtt that nfs rpcs have.
143 */
144 #define NFS_CWNDSCALE 256
145 #define NFS_MAXCWND (NFS_CWNDSCALE * 32)
146 static int nfs_backoff[8] = { 2, 4, 8, 16, 32, 64, 128, 256, };
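/*
 * Example of the scaling: nm_cwnd == 4 * NFS_CWNDSCALE permits 4
 * outstanding rpcs, and NFS_MAXCWND caps the window at 32 (the initial
 * window set in nfs_connect() is half that). nfs_backoff gives the
 * multiplier applied to the rto after successive timeouts, so an
 * eighth consecutive timeout stretches the timeout by 256x.
 */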
147 int nfsrtton = 0;
148 struct nfsrtt nfsrtt;
149 struct callout_handle nfs_timer_handle;
150
151 static int nfs_msg __P((struct proc *,char *,char *));
152 static int nfs_rcvlock __P((struct nfsreq *));
153 static void nfs_rcvunlock __P((struct nfsreq *));
154 static void nfs_realign __P((struct mbuf **pm, int hsiz));
155 static int nfs_receive __P((struct nfsreq *rep, struct sockaddr **aname,
156 struct mbuf **mp));
157 static void nfs_softterm __P((struct nfsreq *rep));
158 static int nfs_reconnect __P((struct nfsreq *rep));
159 #ifndef NFS_NOSERVER
160 static int nfsrv_getstream __P((struct nfssvc_sock *,int));
161
162 int (*nfsrv3_procs[NFS_NPROCS]) __P((struct nfsrv_descript *nd,
163 struct nfssvc_sock *slp,
164 struct proc *procp,
165 struct mbuf **mreqp)) = {
166 nfsrv_null,
167 nfsrv_getattr,
168 nfsrv_setattr,
169 nfsrv_lookup,
170 nfsrv3_access,
171 nfsrv_readlink,
172 nfsrv_read,
173 nfsrv_write,
174 nfsrv_create,
175 nfsrv_mkdir,
176 nfsrv_symlink,
177 nfsrv_mknod,
178 nfsrv_remove,
179 nfsrv_rmdir,
180 nfsrv_rename,
181 nfsrv_link,
182 nfsrv_readdir,
183 nfsrv_readdirplus,
184 nfsrv_statfs,
185 nfsrv_fsinfo,
186 nfsrv_pathconf,
187 nfsrv_commit,
188 nqnfsrv_getlease,
189 nqnfsrv_vacated,
190 nfsrv_noop,
191 nfsrv_noop
192 };
193 #endif /* NFS_NOSERVER */
194
195 /*
196 * Initialize sockets and congestion for a new NFS connection.
197  * We do not free the sockaddr on error.
198 */
199 int
200 nfs_connect(nmp, rep)
201 register struct nfsmount *nmp;
202 struct nfsreq *rep;
203 {
204 register struct socket *so;
205 int s, error, rcvreserve, sndreserve;
206 int pktscale;
207 struct sockaddr *saddr;
208 struct sockaddr_in *sin;
209 struct proc *p = &proc0; /* only used for socreate and sobind */
210
211 nmp->nm_so = (struct socket *)0;
212 saddr = nmp->nm_nam;
213 error = socreate(saddr->sa_family, &nmp->nm_so, nmp->nm_sotype,
214 nmp->nm_soproto, p);
215 if (error)
216 goto bad;
217 so = nmp->nm_so;
218 nmp->nm_soflags = so->so_proto->pr_flags;
219
220 /*
221 * Some servers require that the client port be a reserved port number.
222 */
223 if (saddr->sa_family == AF_INET && (nmp->nm_flag & NFSMNT_RESVPORT)) {
224 struct sockopt sopt;
225 int ip;
226 struct sockaddr_in ssin;
227
228 bzero(&sopt, sizeof sopt);
229 ip = IP_PORTRANGE_LOW;
230 sopt.sopt_dir = SOPT_SET;
231 sopt.sopt_level = IPPROTO_IP;
232 sopt.sopt_name = IP_PORTRANGE;
233 sopt.sopt_val = (void *)&ip;
234 sopt.sopt_valsize = sizeof(ip);
235 sopt.sopt_p = NULL;
236 error = sosetopt(so, &sopt);
237 if (error)
238 goto bad;
239 bzero(&ssin, sizeof ssin);
240 sin = &ssin;
241 sin->sin_len = sizeof (struct sockaddr_in);
242 sin->sin_family = AF_INET;
243 sin->sin_addr.s_addr = INADDR_ANY;
244 sin->sin_port = htons(0);
245 error = sobind(so, (struct sockaddr *)sin, p);
246 if (error)
247 goto bad;
248 bzero(&sopt, sizeof sopt);
249 ip = IP_PORTRANGE_DEFAULT;
250 sopt.sopt_dir = SOPT_SET;
251 sopt.sopt_level = IPPROTO_IP;
252 sopt.sopt_name = IP_PORTRANGE;
253 sopt.sopt_val = (void *)&ip;
254 sopt.sopt_valsize = sizeof(ip);
255 sopt.sopt_p = NULL;
256 error = sosetopt(so, &sopt);
257 if (error)
258 goto bad;
259 }
260
261 /*
262 * Protocols that do not require connections may be optionally left
263 * unconnected for servers that reply from a port other than NFS_PORT.
264 */
265 if (nmp->nm_flag & NFSMNT_NOCONN) {
266 if (nmp->nm_soflags & PR_CONNREQUIRED) {
267 error = ENOTCONN;
268 goto bad;
269 }
270 } else {
271 error = soconnect(so, nmp->nm_nam, p);
272 if (error)
273 goto bad;
274
275 /*
276 * Wait for the connection to complete. Cribbed from the
277 * connect system call but with the wait timing out so
278 * that interruptible mounts don't hang here for a long time.
279 */
280 s = splnet();
281 while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
282 (void) tsleep((caddr_t)&so->so_timeo, PSOCK,
283 "nfscon", 2 * hz);
284 if ((so->so_state & SS_ISCONNECTING) &&
285 so->so_error == 0 && rep &&
286 (error = nfs_sigintr(nmp, rep, rep->r_procp)) != 0){
287 so->so_state &= ~SS_ISCONNECTING;
288 splx(s);
289 goto bad;
290 }
291 }
292 if (so->so_error) {
293 error = so->so_error;
294 so->so_error = 0;
295 splx(s);
296 goto bad;
297 }
298 splx(s);
299 }
300 so->so_rcv.sb_timeo = (5 * hz);
301 so->so_snd.sb_timeo = (5 * hz);
302
303 /*
304 * Get buffer reservation size from sysctl, but impose reasonable
305 * limits.
306 */
307 pktscale = nfs_bufpackets;
308 if (pktscale < 2)
309 pktscale = 2;
310 if (pktscale > 64)
311 pktscale = 64;
312
313 if (nmp->nm_sotype == SOCK_DGRAM) {
314 sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * pktscale;
315 rcvreserve = (max(nmp->nm_rsize, nmp->nm_readdirsize) +
316 NFS_MAXPKTHDR) * pktscale;
317 } else if (nmp->nm_sotype == SOCK_SEQPACKET) {
318 sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * pktscale;
319 rcvreserve = (max(nmp->nm_rsize, nmp->nm_readdirsize) +
320 NFS_MAXPKTHDR) * pktscale;
321 } else {
322 if (nmp->nm_sotype != SOCK_STREAM)
323 panic("nfscon sotype");
324 if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
325 struct sockopt sopt;
326 int val;
327
328 bzero(&sopt, sizeof sopt);
329 sopt.sopt_level = SOL_SOCKET;
330 sopt.sopt_name = SO_KEEPALIVE;
331 sopt.sopt_val = &val;
332 sopt.sopt_valsize = sizeof val;
333 val = 1;
334 sosetopt(so, &sopt);
335 }
336 if (so->so_proto->pr_protocol == IPPROTO_TCP) {
337 struct sockopt sopt;
338 int val;
339
340 bzero(&sopt, sizeof sopt);
341 sopt.sopt_level = IPPROTO_TCP;
342 sopt.sopt_name = TCP_NODELAY;
343 sopt.sopt_val = &val;
344 sopt.sopt_valsize = sizeof val;
345 val = 1;
346 sosetopt(so, &sopt);
347 }
348 sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR +
349 sizeof (u_int32_t)) * pktscale;
350 rcvreserve = (nmp->nm_rsize + NFS_MAXPKTHDR +
351 sizeof (u_int32_t)) * pktscale;
352 }
353 error = soreserve(so, sndreserve, rcvreserve);
354 if (error)
355 goto bad;
356 so->so_rcv.sb_flags |= SB_NOINTR;
357 so->so_snd.sb_flags |= SB_NOINTR;
358
359 /* Initialize other non-zero congestion variables */
360 nmp->nm_srtt[0] = nmp->nm_srtt[1] = nmp->nm_srtt[2] =
361 nmp->nm_srtt[3] = (NFS_TIMEO << 3);
362 nmp->nm_sdrtt[0] = nmp->nm_sdrtt[1] = nmp->nm_sdrtt[2] =
363 nmp->nm_sdrtt[3] = 0;
364 nmp->nm_cwnd = NFS_MAXCWND / 2; /* Initial send window */
365 nmp->nm_sent = 0;
366 nmp->nm_timeouts = 0;
367 return (0);
368
369 bad:
370 nfs_disconnect(nmp);
371 return (error);
372 }
373
374 /*
375 * Reconnect routine:
376 * Called when a connection is broken on a reliable protocol.
377 * - clean up the old socket
378 * - nfs_connect() again
379 * - set R_MUSTRESEND for all outstanding requests on mount point
380 * If this fails the mount point is DEAD!
381 * nb: Must be called with the nfs_sndlock() set on the mount point.
382 */
383 static int
384 nfs_reconnect(rep)
385 register struct nfsreq *rep;
386 {
387 register struct nfsreq *rp;
388 register struct nfsmount *nmp = rep->r_nmp;
389 int error;
390
391 nfs_disconnect(nmp);
392 while ((error = nfs_connect(nmp, rep)) != 0) {
393 if (error == EINTR || error == ERESTART)
394 return (EINTR);
395 (void) tsleep((caddr_t)&lbolt, PSOCK, "nfscon", 0);
396 }
397
398 /*
399 * Loop through outstanding request list and fix up all requests
400 * on old socket.
401 */
402 for (rp = nfs_reqq.tqh_first; rp != 0; rp = rp->r_chain.tqe_next) {
403 if (rp->r_nmp == nmp)
404 rp->r_flags |= R_MUSTRESEND;
405 }
406 return (0);
407 }
408
409 /*
410 * NFS disconnect. Clean up and unlink.
411 */
412 void
413 nfs_disconnect(nmp)
414 register struct nfsmount *nmp;
415 {
416 register struct socket *so;
417
418 if (nmp->nm_so) {
419 so = nmp->nm_so;
420 nmp->nm_so = (struct socket *)0;
421 soshutdown(so, 2);
422 soclose(so);
423 }
424 }
425
426 void
427 nfs_safedisconnect(nmp)
428 struct nfsmount *nmp;
429 {
430 struct nfsreq dummyreq;
431
432 bzero(&dummyreq, sizeof(dummyreq));
433 dummyreq.r_nmp = nmp;
434 nfs_rcvlock(&dummyreq);
435 nfs_disconnect(nmp);
436 nfs_rcvunlock(&dummyreq);
437 }
438
439 /*
440 * This is the nfs send routine. For connection based socket types, it
441 * must be called with an nfs_sndlock() on the socket.
442 * "rep == NULL" indicates that it has been called from a server.
443 * For the client side:
444 * - return EINTR if the RPC is terminated, 0 otherwise
445 * - set R_MUSTRESEND if the send fails for any reason
446 * - do any cleanup required by recoverable socket errors (?)
447 * For the server side:
448 * - return EINTR or ERESTART if interrupted by a signal
449 * - return EPIPE if a connection is lost for connection based sockets (TCP...)
450 * - do any cleanup required by recoverable socket errors (?)
451 */
452 int
453 nfs_send(so, nam, top, rep)
454 register struct socket *so;
455 struct sockaddr *nam;
456 register struct mbuf *top;
457 struct nfsreq *rep;
458 {
459 struct sockaddr *sendnam;
460 int error, soflags, flags;
461
462 if (rep) {
463 if (rep->r_flags & R_SOFTTERM) {
464 m_freem(top);
465 return (EINTR);
466 }
467 if ((so = rep->r_nmp->nm_so) == NULL) {
468 rep->r_flags |= R_MUSTRESEND;
469 m_freem(top);
470 return (0);
471 }
472 rep->r_flags &= ~R_MUSTRESEND;
473 soflags = rep->r_nmp->nm_soflags;
474 } else
475 soflags = so->so_proto->pr_flags;
476 if ((soflags & PR_CONNREQUIRED) || (so->so_state & SS_ISCONNECTED))
477 sendnam = (struct sockaddr *)0;
478 else
479 sendnam = nam;
480 if (so->so_type == SOCK_SEQPACKET)
481 flags = MSG_EOR;
482 else
483 flags = 0;
484
485 error = so->so_proto->pr_usrreqs->pru_sosend(so, sendnam, 0, top, 0,
486 flags, curproc /*XXX*/);
487 /*
488 * ENOBUFS for dgram sockets is transient and non fatal.
489 * No need to log, and no need to break a soft mount.
490 */
491 if (error == ENOBUFS && so->so_type == SOCK_DGRAM) {
492 error = 0;
493 if (rep) /* do backoff retransmit on client */
494 rep->r_flags |= R_MUSTRESEND;
495 }
496
497 if (error) {
498 if (rep) {
499 log(LOG_INFO, "nfs send error %d for server %s\n",error,
500 rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
501 /*
502 * Deal with errors for the client side.
503 */
504 if (rep->r_flags & R_SOFTTERM)
505 error = EINTR;
506 else
507 rep->r_flags |= R_MUSTRESEND;
508 } else
509 log(LOG_INFO, "nfsd send error %d\n", error);
510
511 /*
512 * Handle any recoverable (soft) socket errors here. (?)
513 */
514 if (error != EINTR && error != ERESTART &&
515 error != EWOULDBLOCK && error != EPIPE)
516 error = 0;
517 }
518 return (error);
519 }
520
521 /*
522 * Receive a Sun RPC Request/Reply. For SOCK_DGRAM, the work is all
523 * done by soreceive(), but for SOCK_STREAM we must deal with the Record
524 * Mark and consolidate the data into a new mbuf list.
525 * nb: Sometimes TCP passes the data up to soreceive() in long lists of
526 * small mbufs.
527 * For SOCK_STREAM we must be very careful to read an entire record once
528 * we have read any of it, even if the system call has been interrupted.
529 */
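/*
 * The Record Mark is the 4 byte header that Sun RPC record marking
 * (RFC 1831) prescribes for RPC over TCP: the high bit flags the last
 * fragment and the low 31 bits give the fragment length. This code
 * sends a single fragment per record, so e.g. a 1460 byte reply
 * arrives as the word 0x800005b4 followed by 1460 bytes of RPC data;
 * the reader below masks off the high bit and then reads exactly that
 * many bytes with MSG_WAITALL.
 */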
530 static int
531 nfs_receive(rep, aname, mp)
532 register struct nfsreq *rep;
533 struct sockaddr **aname;
534 struct mbuf **mp;
535 {
536 register struct socket *so;
537 struct uio auio;
538 struct iovec aio;
539 register struct mbuf *m;
540 struct mbuf *control;
541 u_int32_t len;
542 struct sockaddr **getnam;
543 int error, sotype, rcvflg;
544 struct proc *p = curproc; /* XXX */
545
546 /*
547 * Set up arguments for soreceive()
548 */
549 *mp = (struct mbuf *)0;
550 *aname = (struct sockaddr *)0;
551 sotype = rep->r_nmp->nm_sotype;
552
553 /*
554 * For reliable protocols, lock against other senders/receivers
555 * in case a reconnect is necessary.
556 * For SOCK_STREAM, first get the Record Mark to find out how much
557 * more there is to get.
558 * We must lock the socket against other receivers
559 * until we have an entire rpc request/reply.
560 */
561 if (sotype != SOCK_DGRAM) {
562 error = nfs_sndlock(rep);
563 if (error)
564 return (error);
565 tryagain:
566 /*
567 * Check for fatal errors and resending request.
568 */
569 /*
570 * Ugh: If a reconnect attempt just happened, nm_so
571 * would have changed. NULL indicates a failed
572 * attempt that has essentially shut down this
573 * mount point.
574 */
575 if (rep->r_mrep || (rep->r_flags & R_SOFTTERM)) {
576 nfs_sndunlock(rep);
577 return (EINTR);
578 }
579 so = rep->r_nmp->nm_so;
580 if (!so) {
581 error = nfs_reconnect(rep);
582 if (error) {
583 nfs_sndunlock(rep);
584 return (error);
585 }
586 goto tryagain;
587 }
588 while (rep->r_flags & R_MUSTRESEND) {
589 m = m_copym(rep->r_mreq, 0, M_COPYALL, M_WAIT);
590 nfsstats.rpcretries++;
591 error = nfs_send(so, rep->r_nmp->nm_nam, m, rep);
592 if (error) {
593 if (error == EINTR || error == ERESTART ||
594 (error = nfs_reconnect(rep)) != 0) {
595 nfs_sndunlock(rep);
596 return (error);
597 }
598 goto tryagain;
599 }
600 }
601 nfs_sndunlock(rep);
602 if (sotype == SOCK_STREAM) {
603 aio.iov_base = (caddr_t) &len;
604 aio.iov_len = sizeof(u_int32_t);
605 auio.uio_iov = &aio;
606 auio.uio_iovcnt = 1;
607 auio.uio_segflg = UIO_SYSSPACE;
608 auio.uio_rw = UIO_READ;
609 auio.uio_offset = 0;
610 auio.uio_resid = sizeof(u_int32_t);
611 auio.uio_procp = p;
612 do {
613 rcvflg = MSG_WAITALL;
614 error = so->so_proto->pr_usrreqs->pru_soreceive
615 (so, (struct sockaddr **)0, &auio,
616 (struct mbuf **)0, (struct mbuf **)0,
617 &rcvflg);
618 if (error == EWOULDBLOCK && rep) {
619 if (rep->r_flags & R_SOFTTERM)
620 return (EINTR);
621 }
622 } while (error == EWOULDBLOCK);
623 if (!error && auio.uio_resid > 0) {
624 /*
625 * Don't log a 0 byte receive; it means
626 * that the socket has been closed, and
627 * can happen during normal operation
628 * (forcible unmount or Solaris server).
629 */
630 if (auio.uio_resid != sizeof (u_int32_t))
631 log(LOG_INFO,
632 "short receive (%d/%d) from nfs server %s\n",
633 (int)(sizeof(u_int32_t) - auio.uio_resid),
634 (int)sizeof(u_int32_t),
635 rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
636 error = EPIPE;
637 }
638 if (error)
639 goto errout;
640 len = ntohl(len) & ~0x80000000;
641 /*
642 * This is SERIOUS! We are out of sync with the sender
643 * and forcing a disconnect/reconnect is all I can do.
644 */
645 if (len > NFS_MAXPACKET) {
646 log(LOG_ERR, "%s (%d) from nfs server %s\n",
647 "impossible packet length",
648 len,
649 rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
650 error = EFBIG;
651 goto errout;
652 }
653 auio.uio_resid = len;
654 do {
655 rcvflg = MSG_WAITALL;
656 error = so->so_proto->pr_usrreqs->pru_soreceive
657 (so, (struct sockaddr **)0,
658 &auio, mp, (struct mbuf **)0, &rcvflg);
659 } while (error == EWOULDBLOCK || error == EINTR ||
660 error == ERESTART);
661 if (!error && auio.uio_resid > 0) {
662 if (len != auio.uio_resid)
663 log(LOG_INFO,
664 "short receive (%d/%d) from nfs server %s\n",
665 len - auio.uio_resid, len,
666 rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
667 error = EPIPE;
668 }
669 } else {
670 /*
671 * NB: Since uio_resid is big, MSG_WAITALL is ignored
672 * and soreceive() will return when it has either a
673 * control msg or a data msg.
674  * We have no use for control messages, but must grab them
675 * and then throw them away so we know what is going
676 * on.
677 */
678 auio.uio_resid = len = 100000000; /* Anything Big */
679 auio.uio_procp = p;
680 do {
681 rcvflg = 0;
682 error = so->so_proto->pr_usrreqs->pru_soreceive
683 (so, (struct sockaddr **)0,
684 &auio, mp, &control, &rcvflg);
685 if (control)
686 m_freem(control);
687 if (error == EWOULDBLOCK && rep) {
688 if (rep->r_flags & R_SOFTTERM)
689 return (EINTR);
690 }
691 } while (error == EWOULDBLOCK ||
692 (!error && *mp == NULL && control));
693 if ((rcvflg & MSG_EOR) == 0)
694 printf("Egad!!\n");
695 if (!error && *mp == NULL)
696 error = EPIPE;
697 len -= auio.uio_resid;
698 }
699 errout:
700 if (error && error != EINTR && error != ERESTART) {
701 m_freem(*mp);
702 *mp = (struct mbuf *)0;
703 if (error != EPIPE)
704 log(LOG_INFO,
705 "receive error %d from nfs server %s\n",
706 error,
707 rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
708 error = nfs_sndlock(rep);
709 if (!error) {
710 error = nfs_reconnect(rep);
711 if (!error)
712 goto tryagain;
713 else
714 nfs_sndunlock(rep);
715 }
716 }
717 } else {
718 if ((so = rep->r_nmp->nm_so) == NULL)
719 return (EACCES);
720 if (so->so_state & SS_ISCONNECTED)
721 getnam = (struct sockaddr **)0;
722 else
723 getnam = aname;
724 auio.uio_resid = len = 1000000;
725 auio.uio_procp = p;
726 do {
727 rcvflg = 0;
728 error = so->so_proto->pr_usrreqs->pru_soreceive
729 (so, getnam, &auio, mp,
730 (struct mbuf **)0, &rcvflg);
731 if (error == EWOULDBLOCK &&
732 (rep->r_flags & R_SOFTTERM))
733 return (EINTR);
734 } while (error == EWOULDBLOCK);
735 len -= auio.uio_resid;
736 }
737 if (error) {
738 m_freem(*mp);
739 *mp = (struct mbuf *)0;
740 }
741 /*
742 * Search for any mbufs that are not a multiple of 4 bytes long
743 * or with m_data not longword aligned.
744 * These could cause pointer alignment problems, so copy them to
745 * well aligned mbufs.
746 */
747 nfs_realign(mp, 5 * NFSX_UNSIGNED);
748 return (error);
749 }
750
751 /*
752 * Implement receipt of reply on a socket.
753 * We must search through the list of received datagrams matching them
754 * with outstanding requests using the xid, until ours is found.
755 */
756 /* ARGSUSED */
757 int
758 nfs_reply(myrep)
759 struct nfsreq *myrep;
760 {
761 register struct nfsreq *rep;
762 register struct nfsmount *nmp = myrep->r_nmp;
763 register int32_t t1;
764 struct mbuf *mrep, *md;
765 struct sockaddr *nam;
766 u_int32_t rxid, *tl;
767 caddr_t dpos, cp2;
768 int error;
769
770 /*
771 * Loop around until we get our own reply
772 */
773 for (;;) {
774 /*
775 * Lock against other receivers so that I don't get stuck in
776 * sbwait() after someone else has received my reply for me.
777 * Also necessary for connection based protocols to avoid
778 * race conditions during a reconnect.
779 * If nfs_rcvlock() returns EALREADY, that means that
780  * the reply has already been received by another
781 * process and we can return immediately. In this
782 * case, the lock is not taken to avoid races with
783 * other processes.
784 */
785 error = nfs_rcvlock(myrep);
786 if (error == EALREADY)
787 return (0);
788 if (error)
789 return (error);
790 /*
791 * Get the next Rpc reply off the socket
792 */
793 error = nfs_receive(myrep, &nam, &mrep);
794 nfs_rcvunlock(myrep);
795 if (error) {
796
797 /*
798 * Ignore routing errors on connectionless protocols??
799 */
800 if (NFSIGNORE_SOERROR(nmp->nm_soflags, error)) {
801 nmp->nm_so->so_error = 0;
802 if (myrep->r_flags & R_GETONEREP)
803 return (0);
804 continue;
805 }
806 return (error);
807 }
808 if (nam)
809 FREE(nam, M_SONAME);
810
811 /*
812 * Get the xid and check that it is an rpc reply
813 */
814 md = mrep;
815 dpos = mtod(md, caddr_t);
816 nfsm_dissect(tl, u_int32_t *, 2*NFSX_UNSIGNED);
817 rxid = *tl++;
818 if (*tl != rpc_reply) {
819 #ifndef NFS_NOSERVER
820 if (nmp->nm_flag & NFSMNT_NQNFS) {
821 if (nqnfs_callback(nmp, mrep, md, dpos))
822 nfsstats.rpcinvalid++;
823 } else {
824 nfsstats.rpcinvalid++;
825 m_freem(mrep);
826 }
827 #else
828 nfsstats.rpcinvalid++;
829 m_freem(mrep);
830 #endif
831 nfsmout:
832 if (myrep->r_flags & R_GETONEREP)
833 return (0);
834 continue;
835 }
836
837 /*
838 * Loop through the request list to match up the reply
839  * If no match, just drop the datagram
840 */
841 for (rep = nfs_reqq.tqh_first; rep != 0;
842 rep = rep->r_chain.tqe_next) {
843 if (rep->r_mrep == NULL && rxid == rep->r_xid) {
844 /* Found it.. */
845 rep->r_mrep = mrep;
846 rep->r_md = md;
847 rep->r_dpos = dpos;
848 if (nfsrtton) {
849 struct rttl *rt;
850
851 rt = &nfsrtt.rttl[nfsrtt.pos];
852 rt->proc = rep->r_procnum;
853 rt->rto = NFS_RTO(nmp, proct[rep->r_procnum]);
854 rt->sent = nmp->nm_sent;
855 rt->cwnd = nmp->nm_cwnd;
856 rt->srtt = nmp->nm_srtt[proct[rep->r_procnum] - 1];
857 rt->sdrtt = nmp->nm_sdrtt[proct[rep->r_procnum] - 1];
858 rt->fsid = nmp->nm_mountp->mnt_stat.f_fsid;
859 getmicrotime(&rt->tstamp);
860 if (rep->r_flags & R_TIMING)
861 rt->rtt = rep->r_rtt;
862 else
863 rt->rtt = 1000000;
864 nfsrtt.pos = (nfsrtt.pos + 1) % NFSRTTLOGSIZ;
865 }
866 /*
867 * Update congestion window.
868 * Do the additive increase of
869 * one rpc/rtt.
870 */
871 if (nmp->nm_cwnd <= nmp->nm_sent) {
872 nmp->nm_cwnd +=
873 (NFS_CWNDSCALE * NFS_CWNDSCALE +
874 (nmp->nm_cwnd >> 1)) / nmp->nm_cwnd;
875 if (nmp->nm_cwnd > NFS_MAXCWND)
876 nmp->nm_cwnd = NFS_MAXCWND;
877 }
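				/*
				 * E.g. with nm_cwnd == 16 * NFS_CWNDSCALE
				 * (4096), each reply adds
				 * (65536 + 2048) / 4096 == 16, so a full
				 * window of 16 replies grows nm_cwnd by
				 * ~NFS_CWNDSCALE: one rpc per rtt.
				 */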
878 if (rep->r_flags & R_SENT) {
879 rep->r_flags &= ~R_SENT;
880 nmp->nm_sent -= NFS_CWNDSCALE;
881 }
882 /*
883 * Update rtt using a gain of 0.125 on the mean
884 * and a gain of 0.25 on the deviation.
885 */
886 if (rep->r_flags & R_TIMING) {
887 /*
888 * Since the timer resolution of
889  * NFS_HZ is so coarse, it can often
890 * result in r_rtt == 0. Since
891 * r_rtt == N means that the actual
892 * rtt is between N+dt and N+2-dt ticks,
893 * add 1.
894 */
895 t1 = rep->r_rtt + 1;
896 t1 -= (NFS_SRTT(rep) >> 3);
897 NFS_SRTT(rep) += t1;
898 if (t1 < 0)
899 t1 = -t1;
900 t1 -= (NFS_SDRTT(rep) >> 2);
901 NFS_SDRTT(rep) += t1;
902 }
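				/*
				 * E.g. srtt == 48 (mean ~6 ticks) and a
				 * sample of r_rtt + 1 == 10 gives
				 * t1 = 10 - 6 = 4, so srtt becomes 52
				 * (mean 6.5): the mean moved by 1/8 of the
				 * 4 tick error. The deviation then absorbs
				 * 1/4 of |t1| in the same fixed-point style.
				 */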
903 nmp->nm_timeouts = 0;
904 break;
905 }
906 }
907 /*
908 * If not matched to a request, drop it.
909 * If it's mine, get out.
910 */
911 if (rep == 0) {
912 nfsstats.rpcunexpected++;
913 m_freem(mrep);
914 } else if (rep == myrep) {
915 if (rep->r_mrep == NULL)
916 panic("nfsreply nil");
917 return (0);
918 }
919 if (myrep->r_flags & R_GETONEREP)
920 return (0);
921 }
922 }
923
924 /*
925 * nfs_request - goes something like this
926 * - fill in request struct
927 * - links it into list
928 * - calls nfs_send() for first transmit
929 * - calls nfs_receive() to get reply
930 * - break down rpc header and return with nfs reply pointed to
931 * by mrep or error
932 * nb: always frees up mreq mbuf list
933 */
934 int
935 nfs_request(vp, mrest, procnum, procp, cred, mrp, mdp, dposp)
936 struct vnode *vp;
937 struct mbuf *mrest;
938 int procnum;
939 struct proc *procp;
940 struct ucred *cred;
941 struct mbuf **mrp;
942 struct mbuf **mdp;
943 caddr_t *dposp;
944 {
945 register struct mbuf *mrep, *m2;
946 register struct nfsreq *rep;
947 register u_int32_t *tl;
948 register int i;
949 struct nfsmount *nmp;
950 struct mbuf *m, *md, *mheadend;
951 struct nfsnode *np;
952 char nickv[RPCX_NICKVERF];
953 time_t reqtime, waituntil;
954 caddr_t dpos, cp2;
955 int t1, nqlflag, cachable, s, error = 0, mrest_len, auth_len, auth_type;
956 int trylater_delay = NQ_TRYLATERDEL, trylater_cnt = 0, failed_auth = 0;
957 int verf_len, verf_type;
958 u_int32_t *xidp;
959 u_quad_t frev;
960 char *auth_str, *verf_str;
961 NFSKERBKEY_T key; /* save session key */
962
963 /* Reject requests while attempting a forced unmount. */
964 if (vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF) {
965 m_freem(mrest);
966 return (ESTALE);
967 }
968 nmp = VFSTONFS(vp->v_mount);
969 MALLOC(rep, struct nfsreq *, sizeof(struct nfsreq), M_NFSREQ, M_WAITOK);
970 rep->r_nmp = nmp;
971 rep->r_vp = vp;
972 rep->r_procp = procp;
973 rep->r_procnum = procnum;
974 i = 0;
975 m = mrest;
976 while (m) {
977 i += m->m_len;
978 m = m->m_next;
979 }
980 mrest_len = i;
981
982 /*
983 * Get the RPC header with authorization.
984 */
985 kerbauth:
986 verf_str = auth_str = (char *)0;
987 if (nmp->nm_flag & NFSMNT_KERB) {
988 verf_str = nickv;
989 verf_len = sizeof (nickv);
990 auth_type = RPCAUTH_KERB4;
991 bzero((caddr_t)key, sizeof (key));
992 if (failed_auth || nfs_getnickauth(nmp, cred, &auth_str,
993 &auth_len, verf_str, verf_len)) {
994 error = nfs_getauth(nmp, rep, cred, &auth_str,
995 &auth_len, verf_str, &verf_len, key);
996 if (error) {
997 free((caddr_t)rep, M_NFSREQ);
998 m_freem(mrest);
999 return (error);
1000 }
1001 }
1002 } else {
1003 auth_type = RPCAUTH_UNIX;
1004 if (cred->cr_ngroups < 1)
1005 panic("nfsreq nogrps");
1006 auth_len = ((((cred->cr_ngroups - 1) > nmp->nm_numgrps) ?
1007 nmp->nm_numgrps : (cred->cr_ngroups - 1)) << 2) +
1008 5 * NFSX_UNSIGNED;
1009 }
1010 m = nfsm_rpchead(cred, nmp->nm_flag, procnum, auth_type, auth_len,
1011 auth_str, verf_len, verf_str, mrest, mrest_len, &mheadend, &xidp);
1012 if (auth_str)
1013 free(auth_str, M_TEMP);
1014
1015 /*
1016 * For stream protocols, insert a Sun RPC Record Mark.
1017 */
1018 if (nmp->nm_sotype == SOCK_STREAM) {
1019 M_PREPEND(m, NFSX_UNSIGNED, M_WAIT);
1020 *mtod(m, u_int32_t *) = htonl(0x80000000 |
1021 (m->m_pkthdr.len - NFSX_UNSIGNED));
1022 }
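	/*
	 * E.g. a 128 byte RPC becomes a 132 byte chain after the M_PREPEND
	 * above, and the record mark written is htonl(0x80000000 | 128),
	 * i.e. 0x80000080: the last-fragment bit plus the payload length.
	 */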
1023 rep->r_mreq = m;
1024 rep->r_xid = *xidp;
1025 tryagain:
1026 if (nmp->nm_flag & NFSMNT_SOFT)
1027 rep->r_retry = nmp->nm_retry;
1028 else
1029 rep->r_retry = NFS_MAXREXMIT + 1; /* past clip limit */
1030 rep->r_rtt = rep->r_rexmit = 0;
1031 if (proct[procnum] > 0)
1032 rep->r_flags = R_TIMING;
1033 else
1034 rep->r_flags = 0;
1035 rep->r_mrep = NULL;
1036
1037 /*
1038 * Do the client side RPC.
1039 */
1040 nfsstats.rpcrequests++;
1041 /*
1042 * Chain request into list of outstanding requests. Be sure
1043 * to put it LAST so timer finds oldest requests first.
1044 */
1045 s = splsoftclock();
1046 TAILQ_INSERT_TAIL(&nfs_reqq, rep, r_chain);
1047
1048 /* Get send time for nqnfs */
1049 reqtime = time_second;
1050
1051 /*
1052 * If backing off another request or avoiding congestion, don't
1053 * send this one now but let timer do it. If not timing a request,
1054 * do it now.
1055 */
1056 if (nmp->nm_so && (nmp->nm_sotype != SOCK_DGRAM ||
1057 (nmp->nm_flag & NFSMNT_DUMBTIMR) ||
1058 nmp->nm_sent < nmp->nm_cwnd)) {
1059 splx(s);
1060 if (nmp->nm_soflags & PR_CONNREQUIRED)
1061 error = nfs_sndlock(rep);
1062 if (!error) {
1063 m2 = m_copym(m, 0, M_COPYALL, M_WAIT);
1064 error = nfs_send(nmp->nm_so, nmp->nm_nam, m2, rep);
1065 if (nmp->nm_soflags & PR_CONNREQUIRED)
1066 nfs_sndunlock(rep);
1067 }
1068 if (!error && (rep->r_flags & R_MUSTRESEND) == 0) {
1069 nmp->nm_sent += NFS_CWNDSCALE;
1070 rep->r_flags |= R_SENT;
1071 }
1072 } else {
1073 splx(s);
1074 rep->r_rtt = -1;
1075 }
1076
1077 /*
1078 * Wait for the reply from our send or the timer's.
1079 */
1080 if (!error || error == EPIPE)
1081 error = nfs_reply(rep);
1082
1083 /*
1084 * RPC done, unlink the request.
1085 */
1086 s = splsoftclock();
1087 TAILQ_REMOVE(&nfs_reqq, rep, r_chain);
1088 splx(s);
1089
1090 /*
1091 * Decrement the outstanding request count.
1092 */
1093 if (rep->r_flags & R_SENT) {
1094 rep->r_flags &= ~R_SENT; /* paranoia */
1095 nmp->nm_sent -= NFS_CWNDSCALE;
1096 }
1097
1098 /*
1099	 * If there was a successful reply and a tprintf msg has already
1100	 * been logged, tprintf a response noting the server is alive again.
1101 */
1102 if (!error && (rep->r_flags & R_TPRINTFMSG))
1103 nfs_msg(rep->r_procp, nmp->nm_mountp->mnt_stat.f_mntfromname,
1104 "is alive again");
1105 mrep = rep->r_mrep;
1106 md = rep->r_md;
1107 dpos = rep->r_dpos;
1108 if (error) {
1109 m_freem(rep->r_mreq);
1110 free((caddr_t)rep, M_NFSREQ);
1111 return (error);
1112 }
1113
1114 /*
1115 * break down the rpc header and check if ok
1116 */
1117 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
1118 if (*tl++ == rpc_msgdenied) {
1119 if (*tl == rpc_mismatch)
1120 error = EOPNOTSUPP;
1121 else if ((nmp->nm_flag & NFSMNT_KERB) && *tl++ == rpc_autherr) {
1122 if (!failed_auth) {
1123 failed_auth++;
1124 mheadend->m_next = (struct mbuf *)0;
1125 m_freem(mrep);
1126 m_freem(rep->r_mreq);
1127 goto kerbauth;
1128 } else
1129 error = EAUTH;
1130 } else
1131 error = EACCES;
1132 m_freem(mrep);
1133 m_freem(rep->r_mreq);
1134 free((caddr_t)rep, M_NFSREQ);
1135 return (error);
1136 }
1137
1138 /*
1139 * Grab any Kerberos verifier, otherwise just throw it away.
1140 */
1141 verf_type = fxdr_unsigned(int, *tl++);
1142 i = fxdr_unsigned(int32_t, *tl);
1143 if ((nmp->nm_flag & NFSMNT_KERB) && verf_type == RPCAUTH_KERB4) {
1144 error = nfs_savenickauth(nmp, cred, i, key, &md, &dpos, mrep);
1145 if (error)
1146 goto nfsmout;
1147 } else if (i > 0)
1148 nfsm_adv(nfsm_rndup(i));
1149 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
1150 /* 0 == ok */
1151 if (*tl == 0) {
1152 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
1153 if (*tl != 0) {
1154 error = fxdr_unsigned(int, *tl);
1155 if ((nmp->nm_flag & NFSMNT_NFSV3) &&
1156 error == NFSERR_TRYLATER) {
1157 m_freem(mrep);
1158 error = 0;
1159 waituntil = time_second + trylater_delay;
1160 while (time_second < waituntil)
1161 (void) tsleep((caddr_t)&lbolt,
1162 PSOCK, "nqnfstry", 0);
1163 trylater_delay *= nfs_backoff[trylater_cnt];
1164 if (trylater_cnt < 7)
1165 trylater_cnt++;
1166 /*
1167 * Generate a new RPC XID before retrying the request
1168 * on an NFSv3 JUKEBOX error. If we don't do this, the
1169 * duplicate request cache on the server will simply
1170 * replay the cached reply from the dupreq cache, and
1171 * the client request hangs in this loop forever.
1172 */
1173 rep->r_xid = *xidp = txdr_unsigned(nfs_xid_gen());
1174 goto tryagain;
1175 }
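			/*
			 * Note that trylater_delay compounds quickly: with
			 * d = NQ_TRYLATERDEL the successive waits are d, 2d,
			 * 8d, 64d, ... as the delay is multiplied by
			 * nfs_backoff[0], [1], ...; once trylater_cnt
			 * reaches 7, each further JUKEBOX reply multiplies
			 * it by 256.
			 */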
1176
1177 /*
1178 * If the File Handle was stale, invalidate the
1179 * lookup cache, just in case.
1180 */
1181 if (error == ESTALE)
1182 cache_purge(vp);
1183 if (nmp->nm_flag & NFSMNT_NFSV3) {
1184 *mrp = mrep;
1185 *mdp = md;
1186 *dposp = dpos;
1187 error |= NFSERR_RETERR;
1188 } else
1189 m_freem(mrep);
1190 m_freem(rep->r_mreq);
1191 free((caddr_t)rep, M_NFSREQ);
1192 return (error);
1193 }
1194
1195 /*
1196 * For nqnfs, get any lease in reply
1197 */
1198 if (nmp->nm_flag & NFSMNT_NQNFS) {
1199 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
1200 if (*tl) {
1201 np = VTONFS(vp);
1202 nqlflag = fxdr_unsigned(int, *tl);
1203 nfsm_dissect(tl, u_int32_t *, 4*NFSX_UNSIGNED);
1204 cachable = fxdr_unsigned(int, *tl++);
1205 reqtime += fxdr_unsigned(int, *tl++);
1206 if (reqtime > time_second) {
1207 frev = fxdr_hyper(tl);
1208 nqnfs_clientlease(nmp, np, nqlflag,
1209 cachable, reqtime, frev);
1210 }
1211 }
1212 }
1213 *mrp = mrep;
1214 *mdp = md;
1215 *dposp = dpos;
1216 m_freem(rep->r_mreq);
1217 FREE((caddr_t)rep, M_NFSREQ);
1218 return (0);
1219 }
1220 m_freem(mrep);
1221 error = EPROTONOSUPPORT;
1222 nfsmout:
1223 m_freem(rep->r_mreq);
1224 free((caddr_t)rep, M_NFSREQ);
1225 return (error);
1226 }
1227
1228 #ifndef NFS_NOSERVER
1229 /*
1230 * Generate the rpc reply header
1231 * siz arg. is used to decide if adding a cluster is worthwhile
1232 */
1233 int
1234 nfs_rephead(siz, nd, slp, err, cache, frev, mrq, mbp, bposp)
1235 int siz;
1236 struct nfsrv_descript *nd;
1237 struct nfssvc_sock *slp;
1238 int err;
1239 int cache;
1240 u_quad_t *frev;
1241 struct mbuf **mrq;
1242 struct mbuf **mbp;
1243 caddr_t *bposp;
1244 {
1245 register u_int32_t *tl;
1246 register struct mbuf *mreq;
1247 caddr_t bpos;
1248 struct mbuf *mb, *mb2;
1249
1250 MGETHDR(mreq, M_WAIT, MT_DATA);
1251 mb = mreq;
1252 /*
1253	 * If this is a big reply, use a cluster; otherwise
1254	 * try to leave leading space for the lower level headers.
1255 */
1256 siz += RPC_REPLYSIZ;
1257 if ((max_hdr + siz) >= MINCLSIZE) {
1258 MCLGET(mreq, M_WAIT);
1259 } else
1260 mreq->m_data += max_hdr;
1261 tl = mtod(mreq, u_int32_t *);
1262 mreq->m_len = 6 * NFSX_UNSIGNED;
1263 bpos = ((caddr_t)tl) + mreq->m_len;
1264 *tl++ = txdr_unsigned(nd->nd_retxid);
1265 *tl++ = rpc_reply;
1266 if (err == ERPCMISMATCH || (err & NFSERR_AUTHERR)) {
1267 *tl++ = rpc_msgdenied;
1268 if (err & NFSERR_AUTHERR) {
1269 *tl++ = rpc_autherr;
1270 *tl = txdr_unsigned(err & ~NFSERR_AUTHERR);
1271 mreq->m_len -= NFSX_UNSIGNED;
1272 bpos -= NFSX_UNSIGNED;
1273 } else {
1274 *tl++ = rpc_mismatch;
1275 *tl++ = txdr_unsigned(RPC_VER2);
1276 *tl = txdr_unsigned(RPC_VER2);
1277 }
1278 } else {
1279 *tl++ = rpc_msgaccepted;
1280
1281 /*
1282 * For Kerberos authentication, we must send the nickname
1283 * verifier back, otherwise just RPCAUTH_NULL.
1284 */
1285 if (nd->nd_flag & ND_KERBFULL) {
1286 register struct nfsuid *nuidp;
1287 struct timeval ktvin, ktvout;
1288
1289 for (nuidp = NUIDHASH(slp, nd->nd_cr.cr_uid)->lh_first;
1290 nuidp != 0; nuidp = nuidp->nu_hash.le_next) {
1291 if (nuidp->nu_cr.cr_uid == nd->nd_cr.cr_uid &&
1292 (!nd->nd_nam2 || netaddr_match(NU_NETFAM(nuidp),
1293 &nuidp->nu_haddr, nd->nd_nam2)))
1294 break;
1295 }
1296 if (nuidp) {
1297 ktvin.tv_sec =
1298 txdr_unsigned(nuidp->nu_timestamp.tv_sec - 1);
1299 ktvin.tv_usec =
1300 txdr_unsigned(nuidp->nu_timestamp.tv_usec);
1301
1302 /*
1303 * Encrypt the timestamp in ecb mode using the
1304 * session key.
1305 */
1306 #ifdef NFSKERB
1307 XXX
1308 #endif
1309
1310 *tl++ = rpc_auth_kerb;
1311 *tl++ = txdr_unsigned(3 * NFSX_UNSIGNED);
1312 *tl = ktvout.tv_sec;
1313 nfsm_build(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
1314 *tl++ = ktvout.tv_usec;
1315 *tl++ = txdr_unsigned(nuidp->nu_cr.cr_uid);
1316 } else {
1317 *tl++ = 0;
1318 *tl++ = 0;
1319 }
1320 } else {
1321 *tl++ = 0;
1322 *tl++ = 0;
1323 }
1324 switch (err) {
1325 case EPROGUNAVAIL:
1326 *tl = txdr_unsigned(RPC_PROGUNAVAIL);
1327 break;
1328 case EPROGMISMATCH:
1329 *tl = txdr_unsigned(RPC_PROGMISMATCH);
1330 nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1331 if (nd->nd_flag & ND_NQNFS) {
1332 *tl++ = txdr_unsigned(3);
1333 *tl = txdr_unsigned(3);
1334 } else {
1335 *tl++ = txdr_unsigned(2);
1336 *tl = txdr_unsigned(3);
1337 }
1338 break;
1339 case EPROCUNAVAIL:
1340 *tl = txdr_unsigned(RPC_PROCUNAVAIL);
1341 break;
1342 case EBADRPC:
1343 *tl = txdr_unsigned(RPC_GARBAGE);
1344 break;
1345 default:
1346 *tl = 0;
1347 if (err != NFSERR_RETVOID) {
1348 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
1349 if (err)
1350 *tl = txdr_unsigned(nfsrv_errmap(nd, err));
1351 else
1352 *tl = 0;
1353 }
1354 break;
1355 };
1356 }
1357
1358 /*
1359 * For nqnfs, piggyback lease as requested.
1360 */
1361 if ((nd->nd_flag & ND_NQNFS) && err == 0) {
1362 if (nd->nd_flag & ND_LEASE) {
1363 nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
1364 *tl++ = txdr_unsigned(nd->nd_flag & ND_LEASE);
1365 *tl++ = txdr_unsigned(cache);
1366 *tl++ = txdr_unsigned(nd->nd_duration);
1367 txdr_hyper(*frev, tl);
1368 } else {
1369 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
1370 *tl = 0;
1371 }
1372 }
1373 if (mrq != NULL)
1374 *mrq = mreq;
1375 *mbp = mb;
1376 *bposp = bpos;
1377 if (err != 0 && err != NFSERR_RETVOID)
1378 nfsstats.srvrpc_errs++;
1379 return (0);
1380 }
1381
1382
1383 #endif /* NFS_NOSERVER */
1384 /*
1385 * Nfs timer routine
1386  * Scan the nfsreq list and retransmit any requests that have timed out
1387 * To avoid retransmission attempts on STREAM sockets (in the future) make
1388 * sure to set the r_retry field to 0 (implies nm_retry == 0).
1389 */
1390 void
1391 nfs_timer(arg)
1392 void *arg; /* never used */
1393 {
1394 register struct nfsreq *rep;
1395 register struct mbuf *m;
1396 register struct socket *so;
1397 register struct nfsmount *nmp;
1398 register int timeo;
1399 int s, error;
1400 #ifndef NFS_NOSERVER
1401 static long lasttime = 0;
1402 register struct nfssvc_sock *slp;
1403 u_quad_t cur_usec;
1404 #endif /* NFS_NOSERVER */
1405 struct proc *p = &proc0; /* XXX for credentials, will break if sleep */
1406
1407 s = splnet();
1408 for (rep = nfs_reqq.tqh_first; rep != 0; rep = rep->r_chain.tqe_next) {
1409 nmp = rep->r_nmp;
1410 if (rep->r_mrep || (rep->r_flags & R_SOFTTERM))
1411 continue;
1412 if (nfs_sigintr(nmp, rep, rep->r_procp)) {
1413 nfs_softterm(rep);
1414 continue;
1415 }
1416 if (rep->r_rtt >= 0) {
1417 rep->r_rtt++;
1418 if (nmp->nm_flag & NFSMNT_DUMBTIMR)
1419 timeo = nmp->nm_timeo;
1420 else
1421 timeo = NFS_RTO(nmp, proct[rep->r_procnum]);
1422 if (nmp->nm_timeouts > 0)
1423 timeo *= nfs_backoff[nmp->nm_timeouts - 1];
1424 if (rep->r_rtt <= timeo)
1425 continue;
1426 if (nmp->nm_timeouts < 8)
1427 nmp->nm_timeouts++;
1428 }
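		/*
		 * E.g. an rto of 10 ticks with nm_timeouts == 3 yields
		 * timeo = 10 * nfs_backoff[2] = 80 ticks before the next
		 * retransmit attempt is considered.
		 */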
1429 /*
1430 * Check for server not responding
1431 */
1432 if ((rep->r_flags & R_TPRINTFMSG) == 0 &&
1433 rep->r_rexmit > nmp->nm_deadthresh) {
1434 nfs_msg(rep->r_procp,
1435 nmp->nm_mountp->mnt_stat.f_mntfromname,
1436 "not responding");
1437 rep->r_flags |= R_TPRINTFMSG;
1438 }
1439 if (rep->r_rexmit >= rep->r_retry) { /* too many */
1440 nfsstats.rpctimeouts++;
1441 nfs_softterm(rep);
1442 continue;
1443 }
1444 if (nmp->nm_sotype != SOCK_DGRAM) {
1445 if (++rep->r_rexmit > NFS_MAXREXMIT)
1446 rep->r_rexmit = NFS_MAXREXMIT;
1447 continue;
1448 }
1449 if ((so = nmp->nm_so) == NULL)
1450 continue;
1451
1452 /*
1453		 * If there is enough space and the window allows,
1454		 * resend it.
1455		 * Set r_rtt to -1 in case we fail to send it now.
1456 */
1457 rep->r_rtt = -1;
1458 if (sbspace(&so->so_snd) >= rep->r_mreq->m_pkthdr.len &&
1459 ((nmp->nm_flag & NFSMNT_DUMBTIMR) ||
1460 (rep->r_flags & R_SENT) ||
1461 nmp->nm_sent < nmp->nm_cwnd) &&
1462 (m = m_copym(rep->r_mreq, 0, M_COPYALL, M_DONTWAIT))){
1463 if ((nmp->nm_flag & NFSMNT_NOCONN) == 0)
1464 error = (*so->so_proto->pr_usrreqs->pru_send)
1465 (so, 0, m, (struct sockaddr *)0,
1466 (struct mbuf *)0, p);
1467 else
1468 error = (*so->so_proto->pr_usrreqs->pru_send)
1469 (so, 0, m, nmp->nm_nam, (struct mbuf *)0,
1470 p);
1471 if (error) {
1472 if (NFSIGNORE_SOERROR(nmp->nm_soflags, error))
1473 so->so_error = 0;
1474 } else {
1475 /*
1476				 * If this is the first send, start timing;
1477				 * otherwise turn timing off, back off the
1478				 * timer and divide the congestion window by 2.
1479 */
1480 if (rep->r_flags & R_SENT) {
1481 rep->r_flags &= ~R_TIMING;
1482 if (++rep->r_rexmit > NFS_MAXREXMIT)
1483 rep->r_rexmit = NFS_MAXREXMIT;
1484 nmp->nm_cwnd >>= 1;
1485 if (nmp->nm_cwnd < NFS_CWNDSCALE)
1486 nmp->nm_cwnd = NFS_CWNDSCALE;
1487 nfsstats.rpcretries++;
1488 } else {
1489 rep->r_flags |= R_SENT;
1490 nmp->nm_sent += NFS_CWNDSCALE;
1491 }
1492 rep->r_rtt = 0;
1493 }
1494 }
1495 }
1496 #ifndef NFS_NOSERVER
1497 /*
1498 * Call the nqnfs server timer once a second to handle leases.
1499 */
1500 if (lasttime != time_second) {
1501 lasttime = time_second;
1502 nqnfs_serverd();
1503 }
1504
1505 /*
1506 * Scan the write gathering queues for writes that need to be
1507 * completed now.
1508 */
1509 cur_usec = nfs_curusec();
1510 for (slp = nfssvc_sockhead.tqh_first; slp != 0;
1511 slp = slp->ns_chain.tqe_next) {
1512 if (slp->ns_tq.lh_first && slp->ns_tq.lh_first->nd_time<=cur_usec)
1513 nfsrv_wakenfsd(slp);
1514 }
1515 #endif /* NFS_NOSERVER */
1516 splx(s);
1517 nfs_timer_handle = timeout(nfs_timer, (void *)0, nfs_ticks);
1518 }
1519
1520 /*
1521 * Mark all of an nfs mount's outstanding requests with R_SOFTTERM and
1522 * wait for all requests to complete. This is used by forced unmounts
1523 * to terminate any outstanding RPCs.
1524 */
1525 int
1526 nfs_nmcancelreqs(nmp)
1527 struct nfsmount *nmp;
1528 {
1529 struct nfsreq *req;
1530 int i, s;
1531
1532 s = splnet();
1533 TAILQ_FOREACH(req, &nfs_reqq, r_chain) {
1534 if (nmp != req->r_nmp || req->r_mrep != NULL ||
1535 (req->r_flags & R_SOFTTERM))
1536 continue;
1537 nfs_softterm(req);
1538 }
1539 splx(s);
1540
1541 for (i = 0; i < 30; i++) {
1542 s = splnet();
1543 TAILQ_FOREACH(req, &nfs_reqq, r_chain) {
1544 if (nmp == req->r_nmp)
1545 break;
1546 }
1547 splx(s);
1548 if (req == NULL)
1549 return (0);
1550 tsleep(&lbolt, PSOCK, "nfscancel", 0);
1551 }
1552 return (EBUSY);
1553 }
1554
1555 /*
1556 * Flag a request as being about to terminate (due to NFSMNT_INT/NFSMNT_SOFT).
1557  * The nm_sent count is decremented now to avoid deadlocks when the process in
1558 * soreceive() hasn't yet managed to send its own request.
1559 */
1560
1561 static void
1562 nfs_softterm(rep)
1563 struct nfsreq *rep;
1564 {
1565 rep->r_flags |= R_SOFTTERM;
1566
1567 if (rep->r_flags & R_SENT) {
1568 rep->r_nmp->nm_sent -= NFS_CWNDSCALE;
1569 rep->r_flags &= ~R_SENT;
1570 }
1571 }
1572
1573 /*
1574 * Test for a termination condition pending on the process.
1575 * This is used for NFSMNT_INT mounts.
1576 */
1577 int
1578 nfs_sigintr(nmp, rep, p)
1579 struct nfsmount *nmp;
1580 struct nfsreq *rep;
1581 register struct proc *p;
1582 {
1583 sigset_t tmpset;
1584
1585 if (rep && (rep->r_flags & R_SOFTTERM))
1586 return (EINTR);
1587 /* Terminate all requests while attempting a forced unmount. */
1588 if (nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF)
1589 return (EINTR);
1590 if (!(nmp->nm_flag & NFSMNT_INT))
1591 return (0);
1592 if (p == NULL)
1593 return (0);
1594
1595 tmpset = p->p_siglist;
1596 SIGSETNAND(tmpset, p->p_sigmask);
1597 SIGSETNAND(tmpset, p->p_sigignore);
1598 if (SIGNOTEMPTY(p->p_siglist) && NFSINT_SIGMASK(tmpset))
1599 return (EINTR);
1600
1601 return (0);
1602 }
1603
1604 /*
1605 * Lock a socket against others.
1606 * Necessary for STREAM sockets to ensure you get an entire rpc request/reply
1607 * and also to avoid race conditions between the processes with nfs requests
1608 * in progress when a reconnect is necessary.
1609 */
1610 int
1611 nfs_sndlock(rep)
1612 struct nfsreq *rep;
1613 {
1614 register int *statep = &rep->r_nmp->nm_state;
1615 struct proc *p;
1616 int slpflag = 0, slptimeo = 0;
1617
1618 p = rep->r_procp;
1619 if (rep->r_nmp->nm_flag & NFSMNT_INT)
1620 slpflag = PCATCH;
1621 while (*statep & NFSSTA_SNDLOCK) {
1622 if (nfs_sigintr(rep->r_nmp, rep, p))
1623 return (EINTR);
1624 *statep |= NFSSTA_WANTSND;
1625 (void) tsleep((caddr_t)statep, slpflag | (PZERO - 1),
1626 "nfsndlck", slptimeo);
1627 if (slpflag == PCATCH) {
1628 slpflag = 0;
1629 slptimeo = 2 * hz;
1630 }
1631 }
1632 /* Always fail if our request has been cancelled. */
1633 if ((rep->r_flags & R_SOFTTERM))
1634 return (EINTR);
1635 *statep |= NFSSTA_SNDLOCK;
1636 return (0);
1637 }
1638
1639 /*
1640 * Unlock the stream socket for others.
1641 */
1642 void
1643 nfs_sndunlock(rep)
1644 struct nfsreq *rep;
1645 {
1646 register int *statep = &rep->r_nmp->nm_state;
1647
1648 if ((*statep & NFSSTA_SNDLOCK) == 0)
1649 panic("nfs sndunlock");
1650 *statep &= ~NFSSTA_SNDLOCK;
1651 if (*statep & NFSSTA_WANTSND) {
1652 *statep &= ~NFSSTA_WANTSND;
1653 wakeup((caddr_t)statep);
1654 }
1655 }
1656
1657 static int
1658 nfs_rcvlock(rep)
1659 register struct nfsreq *rep;
1660 {
1661 register int *statep = &rep->r_nmp->nm_state;
1662 int slpflag, slptimeo = 0;
1663
1664 if (rep->r_nmp->nm_flag & NFSMNT_INT)
1665 slpflag = PCATCH;
1666 else
1667 slpflag = 0;
1668 while (*statep & NFSSTA_RCVLOCK) {
1669 if (nfs_sigintr(rep->r_nmp, rep, rep->r_procp))
1670 return (EINTR);
1671 *statep |= NFSSTA_WANTRCV;
1672 (void) tsleep((caddr_t)statep, slpflag | (PZERO - 1), "nfsrcvlk",
1673 slptimeo);
1674 /*
1675		 * If our reply was received while we were sleeping,
1676		 * then just return without taking the lock to avoid a
1677		 * situation where a single iod could 'capture' the
1678		 * receive lock.
1679 */
1680 if (rep->r_mrep != NULL)
1681 return (EALREADY);
1682 if (slpflag == PCATCH) {
1683 slpflag = 0;
1684 slptimeo = 2 * hz;
1685 }
1686 }
1687 *statep |= NFSSTA_RCVLOCK;
1688 return (0);
1689 }
1690
1691 /*
1692  * Unlock the receive side of the socket for others.
1693 */
1694 static void
1695 nfs_rcvunlock(rep)
1696 register struct nfsreq *rep;
1697 {
1698 register int *statep = &rep->r_nmp->nm_state;
1699
1700 if ((*statep & NFSSTA_RCVLOCK) == 0)
1701 panic("nfs rcvunlock");
1702 *statep &= ~NFSSTA_RCVLOCK;
1703 if (*statep & NFSSTA_WANTRCV) {
1704 *statep &= ~NFSSTA_WANTRCV;
1705 wakeup((caddr_t)statep);
1706 }
1707 }
1708
1709 /*
1710 * nfs_realign:
1711 *
1712 * Check for badly aligned mbuf data and realign by copying the unaligned
1713 * portion of the data into a new mbuf chain and freeing the portions
1714 * of the old chain that were replaced.
1715 *
1716 * We cannot simply realign the data within the existing mbuf chain
1717 * because the underlying buffers may contain other rpc commands and
1718 * we cannot afford to overwrite them.
1719 *
1720 * We would prefer to avoid this situation entirely. The situation does
1721  * not occur with NFS/UDP and is supposed to only occasionally occur
1722 * with TCP. Use vfs.nfs.realign_count and realign_test to check this.
1723 */
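/*
 * Example of a trigger: an mbuf with m_len == 6, or one whose m_data
 * pointer is not a multiple of 4, fails the checks below and causes the
 * rest of the chain to be copied into fresh, aligned mbufs so the XDR
 * code can safely dereference u_int32_t pointers into the data. A
 * standalone restatement of the trigger test (illustrative only):
 */
#ifdef notdef
static int
nfs_badalign_example(struct mbuf *m)
{
	return ((m->m_len & 0x3) || (mtod(m, intptr_t) & 0x3));
}
#endif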
1724 static void
1725 nfs_realign(pm, hsiz)
1726 register struct mbuf **pm;
1727 int hsiz;
1728 {
1729 struct mbuf *m;
1730 struct mbuf *n = NULL;
1731 int off = 0;
1732
1733 ++nfs_realign_test;
1734
1735 while ((m = *pm) != NULL) {
1736 if ((m->m_len & 0x3) || (mtod(m, intptr_t) & 0x3)) {
1737 MGET(n, M_WAIT, MT_DATA);
1738 if (m->m_len >= MINCLSIZE) {
1739 MCLGET(n, M_WAIT);
1740 }
1741 n->m_len = 0;
1742 break;
1743 }
1744 pm = &m->m_next;
1745 }
1746
1747 /*
1748 * If n is non-NULL, loop on m copying data, then replace the
1749 * portion of the chain that had to be realigned.
1750 */
1751 if (n != NULL) {
1752 ++nfs_realign_count;
1753 while (m) {
1754 m_copyback(n, off, m->m_len, mtod(m, caddr_t));
1755 off += m->m_len;
1756 m = m->m_next;
1757 }
1758 m_freem(*pm);
1759 *pm = n;
1760 }
1761 }
1762
1763 #ifndef NFS_NOSERVER
1764
1765 /*
1766 * Parse an RPC request
1767 * - verify it
1768 * - fill in the cred struct.
1769 */
1770 int
1771 nfs_getreq(nd, nfsd, has_header)
1772 register struct nfsrv_descript *nd;
1773 struct nfsd *nfsd;
1774 int has_header;
1775 {
1776 register int len, i;
1777 register u_int32_t *tl;
1778 register int32_t t1;
1779 struct uio uio;
1780 struct iovec iov;
1781 caddr_t dpos, cp2, cp;
1782 u_int32_t nfsvers, auth_type;
1783 uid_t nickuid;
1784 int error = 0, nqnfs = 0, ticklen;
1785 struct mbuf *mrep, *md;
1786 register struct nfsuid *nuidp;
1787 struct timeval tvin, tvout;
1788 #if 0 /* until encrypted keys are implemented */
1789 NFSKERBKEYSCHED_T keys; /* stores key schedule */
1790 #endif
1791
1792 mrep = nd->nd_mrep;
1793 md = nd->nd_md;
1794 dpos = nd->nd_dpos;
1795 if (has_header) {
1796 nfsm_dissect(tl, u_int32_t *, 10 * NFSX_UNSIGNED);
1797 nd->nd_retxid = fxdr_unsigned(u_int32_t, *tl++);
1798 if (*tl++ != rpc_call) {
1799 m_freem(mrep);
1800 return (EBADRPC);
1801 }
1802 } else
1803 nfsm_dissect(tl, u_int32_t *, 8 * NFSX_UNSIGNED);
1804 nd->nd_repstat = 0;
1805 nd->nd_flag = 0;
1806 if (*tl++ != rpc_vers) {
1807 nd->nd_repstat = ERPCMISMATCH;
1808 nd->nd_procnum = NFSPROC_NOOP;
1809 return (0);
1810 }
1811 if (*tl != nfs_prog) {
1812 if (*tl == nqnfs_prog)
1813 nqnfs++;
1814 else {
1815 nd->nd_repstat = EPROGUNAVAIL;
1816 nd->nd_procnum = NFSPROC_NOOP;
1817 return (0);
1818 }
1819 }
1820 tl++;
1821 nfsvers = fxdr_unsigned(u_int32_t, *tl++);
1822 if (((nfsvers < NFS_VER2 || nfsvers > NFS_VER3) && !nqnfs) ||
1823 (nfsvers != NQNFS_VER3 && nqnfs)) {
1824 nd->nd_repstat = EPROGMISMATCH;
1825 nd->nd_procnum = NFSPROC_NOOP;
1826 return (0);
1827 }
1828 if (nqnfs)
1829 nd->nd_flag = (ND_NFSV3 | ND_NQNFS);
1830 else if (nfsvers == NFS_VER3)
1831 nd->nd_flag = ND_NFSV3;
1832 nd->nd_procnum = fxdr_unsigned(u_int32_t, *tl++);
1833 if (nd->nd_procnum == NFSPROC_NULL)
1834 return (0);
1835 if (nd->nd_procnum >= NFS_NPROCS ||
1836 (!nqnfs && nd->nd_procnum >= NQNFSPROC_GETLEASE) ||
1837 (!nd->nd_flag && nd->nd_procnum > NFSV2PROC_STATFS)) {
1838 nd->nd_repstat = EPROCUNAVAIL;
1839 nd->nd_procnum = NFSPROC_NOOP;
1840 return (0);
1841 }
1842 if ((nd->nd_flag & ND_NFSV3) == 0)
1843 nd->nd_procnum = nfsv3_procid[nd->nd_procnum];
1844 auth_type = *tl++;
1845 len = fxdr_unsigned(int, *tl++);
1846 if (len < 0 || len > RPCAUTH_MAXSIZ) {
1847 m_freem(mrep);
1848 return (EBADRPC);
1849 }
1850
1851 nd->nd_flag &= ~ND_KERBAUTH;
1852 /*
1853 * Handle auth_unix or auth_kerb.
1854 */
1855 if (auth_type == rpc_auth_unix) {
1856 len = fxdr_unsigned(int, *++tl);
1857 if (len < 0 || len > NFS_MAXNAMLEN) {
1858 m_freem(mrep);
1859 return (EBADRPC);
1860 }
1861 nfsm_adv(nfsm_rndup(len));
1862 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
1863 bzero((caddr_t)&nd->nd_cr, sizeof (struct ucred));
1864 nd->nd_cr.cr_ref = 1;
1865 nd->nd_cr.cr_uid = fxdr_unsigned(uid_t, *tl++);
1866 nd->nd_cr.cr_gid = fxdr_unsigned(gid_t, *tl++);
1867 len = fxdr_unsigned(int, *tl);
1868 if (len < 0 || len > RPCAUTH_UNIXGIDS) {
1869 m_freem(mrep);
1870 return (EBADRPC);
1871 }
1872 nfsm_dissect(tl, u_int32_t *, (len + 2) * NFSX_UNSIGNED);
1873 for (i = 1; i <= len; i++)
1874 if (i < NGROUPS)
1875 nd->nd_cr.cr_groups[i] = fxdr_unsigned(gid_t, *tl++);
1876 else
1877 tl++;
1878 nd->nd_cr.cr_ngroups = (len >= NGROUPS) ? NGROUPS : (len + 1);
1879 if (nd->nd_cr.cr_ngroups > 1)
1880 nfsrvw_sort(nd->nd_cr.cr_groups, nd->nd_cr.cr_ngroups);
1881 len = fxdr_unsigned(int, *++tl);
1882 if (len < 0 || len > RPCAUTH_MAXSIZ) {
1883 m_freem(mrep);
1884 return (EBADRPC);
1885 }
1886 if (len > 0)
1887 nfsm_adv(nfsm_rndup(len));
1888 } else if (auth_type == rpc_auth_kerb) {
1889 switch (fxdr_unsigned(int, *tl++)) {
1890 case RPCAKN_FULLNAME:
1891 ticklen = fxdr_unsigned(int, *tl);
1892 *((u_int32_t *)nfsd->nfsd_authstr) = *tl;
1893 uio.uio_resid = nfsm_rndup(ticklen) + NFSX_UNSIGNED;
1894 nfsd->nfsd_authlen = uio.uio_resid + NFSX_UNSIGNED;
1895 if (uio.uio_resid > (len - 2 * NFSX_UNSIGNED)) {
1896 m_freem(mrep);
1897 return (EBADRPC);
1898 }
1899 uio.uio_offset = 0;
1900 uio.uio_iov = &iov;
1901 uio.uio_iovcnt = 1;
1902 uio.uio_segflg = UIO_SYSSPACE;
1903 iov.iov_base = (caddr_t)&nfsd->nfsd_authstr[4];
1904 iov.iov_len = RPCAUTH_MAXSIZ - 4;
1905 nfsm_mtouio(&uio, uio.uio_resid);
1906 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1907 if (*tl++ != rpc_auth_kerb ||
1908 fxdr_unsigned(int, *tl) != 4 * NFSX_UNSIGNED) {
1909 printf("Bad kerb verifier\n");
1910 nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
1911 nd->nd_procnum = NFSPROC_NOOP;
1912 return (0);
1913 }
1914 nfsm_dissect(cp, caddr_t, 4 * NFSX_UNSIGNED);
1915 tl = (u_int32_t *)cp;
1916 if (fxdr_unsigned(int, *tl) != RPCAKN_FULLNAME) {
1917 printf("Not fullname kerb verifier\n");
1918 nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
1919 nd->nd_procnum = NFSPROC_NOOP;
1920 return (0);
1921 }
1922 cp += NFSX_UNSIGNED;
1923 bcopy(cp, nfsd->nfsd_verfstr, 3 * NFSX_UNSIGNED);
1924 nfsd->nfsd_verflen = 3 * NFSX_UNSIGNED;
1925 nd->nd_flag |= ND_KERBFULL;
1926 nfsd->nfsd_flag |= NFSD_NEEDAUTH;
1927 break;
1928 case RPCAKN_NICKNAME:
1929 if (len != 2 * NFSX_UNSIGNED) {
1930 printf("Kerb nickname short\n");
1931 nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADCRED);
1932 nd->nd_procnum = NFSPROC_NOOP;
1933 return (0);
1934 }
1935 nickuid = fxdr_unsigned(uid_t, *tl);
1936 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1937 if (*tl++ != rpc_auth_kerb ||
1938 fxdr_unsigned(int, *tl) != 3 * NFSX_UNSIGNED) {
1939 printf("Kerb nick verifier bad\n");
1940 nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
1941 nd->nd_procnum = NFSPROC_NOOP;
1942 return (0);
1943 }
1944 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
1945 tvin.tv_sec = *tl++;
1946 tvin.tv_usec = *tl;
1947
1948 for (nuidp = NUIDHASH(nfsd->nfsd_slp,nickuid)->lh_first;
1949 nuidp != 0; nuidp = nuidp->nu_hash.le_next) {
1950 if (nuidp->nu_cr.cr_uid == nickuid &&
1951 (!nd->nd_nam2 ||
1952 netaddr_match(NU_NETFAM(nuidp),
1953 &nuidp->nu_haddr, nd->nd_nam2)))
1954 break;
1955 }
1956 if (!nuidp) {
1957 nd->nd_repstat =
1958 (NFSERR_AUTHERR|AUTH_REJECTCRED);
1959 nd->nd_procnum = NFSPROC_NOOP;
1960 return (0);
1961 }
1962
1963 /*
1964 * Now, decrypt the timestamp using the session key
1965 * and validate it.
1966 */
1967 #ifdef NFSKERB
1968 XXX
1969 #endif
1970
1971 tvout.tv_sec = fxdr_unsigned(long, tvout.tv_sec);
1972 tvout.tv_usec = fxdr_unsigned(long, tvout.tv_usec);
1973 if (nuidp->nu_expire < time_second ||
1974 nuidp->nu_timestamp.tv_sec > tvout.tv_sec ||
1975 (nuidp->nu_timestamp.tv_sec == tvout.tv_sec &&
1976 nuidp->nu_timestamp.tv_usec > tvout.tv_usec)) {
1977 nuidp->nu_expire = 0;
1978 nd->nd_repstat =
1979 (NFSERR_AUTHERR|AUTH_REJECTVERF);
1980 nd->nd_procnum = NFSPROC_NOOP;
1981 return (0);
1982 }
1983 nfsrv_setcred(&nuidp->nu_cr, &nd->nd_cr);
1984 nd->nd_flag |= ND_KERBNICK;
1985 		}
1986 } else {
1987 nd->nd_repstat = (NFSERR_AUTHERR | AUTH_REJECTCRED);
1988 nd->nd_procnum = NFSPROC_NOOP;
1989 return (0);
1990 }
1991
1992 /*
1993 * For nqnfs, get piggybacked lease request.
1994 */
1995 if (nqnfs && nd->nd_procnum != NQNFSPROC_EVICTED) {
1996 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
1997 nd->nd_flag |= fxdr_unsigned(int, *tl);
1998 if (nd->nd_flag & ND_LEASE) {
1999 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2000 nd->nd_duration = fxdr_unsigned(int32_t, *tl);
2001 } else
2002 nd->nd_duration = NQ_MINLEASE;
2003 } else
2004 nd->nd_duration = NQ_MINLEASE;
2005 nd->nd_md = md;
2006 nd->nd_dpos = dpos;
2007 return (0);
2008 nfsmout:
2009 return (error);
2010 }
2011
2012 #endif
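/*
 * Editor's sketch (hedged, not part of the original source): the
 * AUTH_UNIX credential body that nfs_getreq() walks through above,
 * as laid out by RFC 1057.  Every field is an XDR 32-bit word except
 * the machine name, which is an opaque string rounded up to a word
 * boundary:
 *
 *	stamp | namelen | machinename... | uid | gid | ngids | gids...
 *
 * The helper below is illustrative only ("example_" names are the
 * editor's, not kernel symbols).
 */
#if 0
static int
example_authunix_len(int namelen, int ngids)
{
	/*
	 * stamp + namelen word + uid + gid + ngids = 5 words, plus the
	 * rounded-up name bytes and one word per supplemental gid.
	 */
	return (5 * NFSX_UNSIGNED + nfsm_rndup(namelen) +
	    ngids * NFSX_UNSIGNED);
}
#endif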
2013
2014 static int
2015 nfs_msg(p, server, msg)
2016 struct proc *p;
2017 char *server, *msg;
2018 {
2019 tpr_t tpr;
2020
2021 if (p)
2022 tpr = tprintf_open(p);
2023 else
2024 tpr = NULL;
2025 tprintf(tpr, "nfs server %s: %s\n", server, msg);
2026 tprintf_close(tpr);
2027 return (0);
2028 }
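/*
 * Editor's sketch (hedged): a typical nfs_msg() call site.  The
 * client retransmit path reports an unresponsive server roughly like
 * this; "rep" and "nmp" stand for the usual request and mount
 * structures and are assumptions here, not part of nfs_msg() itself.
 */
#if 0
nfs_msg(rep->r_procp, nmp->nm_mountp->mnt_stat.f_mntfromname,
    "not responding");
#endif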
2029
2030 #ifndef NFS_NOSERVER
2031 /*
2032 * Socket upcall routine for the nfsd sockets.
2033  * The "arg" is a pointer to the "struct nfssvc_sock".
2034  * Do as much as possible without blocking; otherwise punt, and the
2035  * routine will be called again with M_WAIT from an nfsd.
2036 */
2037 void
2038 nfsrv_rcv(so, arg, waitflag)
2039 struct socket *so;
2040 void *arg;
2041 int waitflag;
2042 {
2043 register struct nfssvc_sock *slp = (struct nfssvc_sock *)arg;
2044 register struct mbuf *m;
2045 struct mbuf *mp;
2046 struct sockaddr *nam;
2047 struct uio auio;
2048 int flags, error;
2049
2050 if ((slp->ns_flag & SLP_VALID) == 0)
2051 return;
2052 #ifdef notdef
2053 /*
2054 	 * Define this to test how the nfsds handle this case under heavy load.
2055 */
2056 if (waitflag == M_DONTWAIT) {
2057 slp->ns_flag |= SLP_NEEDQ; goto dorecs;
2058 }
2059 #endif
2060 auio.uio_procp = NULL;
2061 if (so->so_type == SOCK_STREAM) {
2062 /*
2063 * If there are already records on the queue, defer soreceive()
2064 * to an nfsd so that there is feedback to the TCP layer that
2065 * the nfs servers are heavily loaded.
2066 */
2067 if (STAILQ_FIRST(&slp->ns_rec) && waitflag == M_DONTWAIT) {
2068 slp->ns_flag |= SLP_NEEDQ;
2069 goto dorecs;
2070 }
2071
2072 /*
2073 * Do soreceive().
2074 */
2075 auio.uio_resid = 1000000000;
2076 flags = MSG_DONTWAIT;
2077 error = so->so_proto->pr_usrreqs->pru_soreceive
2078 (so, &nam, &auio, &mp, (struct mbuf **)0, &flags);
2079 if (error || mp == (struct mbuf *)0) {
2080 if (error == EWOULDBLOCK)
2081 slp->ns_flag |= SLP_NEEDQ;
2082 else
2083 slp->ns_flag |= SLP_DISCONN;
2084 goto dorecs;
2085 }
2086 m = mp;
2087 if (slp->ns_rawend) {
2088 slp->ns_rawend->m_next = m;
2089 slp->ns_cc += 1000000000 - auio.uio_resid;
2090 } else {
2091 slp->ns_raw = m;
2092 slp->ns_cc = 1000000000 - auio.uio_resid;
2093 }
2094 while (m->m_next)
2095 m = m->m_next;
2096 slp->ns_rawend = m;
2097
2098 /*
2099 		 * Now try to parse record(s) out of the raw stream data.
2100 */
2101 error = nfsrv_getstream(slp, waitflag);
2102 if (error) {
2103 if (error == EPERM)
2104 slp->ns_flag |= SLP_DISCONN;
2105 else
2106 slp->ns_flag |= SLP_NEEDQ;
2107 }
2108 } else {
2109 do {
2110 auio.uio_resid = 1000000000;
2111 flags = MSG_DONTWAIT;
2112 error = so->so_proto->pr_usrreqs->pru_soreceive
2113 (so, &nam, &auio, &mp,
2114 (struct mbuf **)0, &flags);
2115 if (mp) {
2116 struct nfsrv_rec *rec;
2117 rec = malloc(sizeof(struct nfsrv_rec),
2118 M_NFSRVDESC, waitflag);
2119 if (!rec) {
2120 if (nam)
2121 FREE(nam, M_SONAME);
2122 m_freem(mp);
2123 continue;
2124 }
2125 nfs_realign(&mp, 10 * NFSX_UNSIGNED);
2126 rec->nr_address = nam;
2127 rec->nr_packet = mp;
2128 STAILQ_INSERT_TAIL(&slp->ns_rec, rec, nr_link);
2129 }
2130 if (error) {
2131 if ((so->so_proto->pr_flags & PR_CONNREQUIRED)
2132 && error != EWOULDBLOCK) {
2133 slp->ns_flag |= SLP_DISCONN;
2134 goto dorecs;
2135 }
2136 }
2137 } while (mp);
2138 }
2139
2140 /*
2141 	 * Now try to process the request records without blocking.
2142 */
2143 dorecs:
2144 if (waitflag == M_DONTWAIT &&
2145 (STAILQ_FIRST(&slp->ns_rec)
2146 || (slp->ns_flag & (SLP_NEEDQ | SLP_DISCONN))))
2147 nfsrv_wakenfsd(slp);
2148 }
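/*
 * Editor's sketch (hedged): how nfsrv_rcv() is attached as the socket
 * receive upcall when a socket is handed to the server.  The real
 * hookup lives in the nfssvc(2) add-socket path, not in this file;
 * the three assignments below are the essential idea.
 */
#if 0
so->so_upcallarg = (caddr_t)slp;	/* passed back as "arg" above */
so->so_upcall = nfsrv_rcv;
so->so_rcv.sb_flags |= SB_UPCALL;	/* call us on every receive */
#endif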
2149
2150 /*
2151  * Try to extract an RPC request from the mbuf data list received on a
2152 * stream socket. The "waitflag" argument indicates whether or not it
2153 * can sleep.
2154 */
2155 static int
2156 nfsrv_getstream(slp, waitflag)
2157 register struct nfssvc_sock *slp;
2158 int waitflag;
2159 {
2160 register struct mbuf *m, **mpp;
2161 register char *cp1, *cp2;
2162 register int len;
2163 struct mbuf *om, *m2, *recm;
2164 u_int32_t recmark;
2165
2166 if (slp->ns_flag & SLP_GETSTREAM)
2167 panic("nfs getstream");
2168 slp->ns_flag |= SLP_GETSTREAM;
2169 for (;;) {
2170 if (slp->ns_reclen == 0) {
2171 if (slp->ns_cc < NFSX_UNSIGNED) {
2172 slp->ns_flag &= ~SLP_GETSTREAM;
2173 return (0);
2174 }
2175 m = slp->ns_raw;
2176 if (m->m_len >= NFSX_UNSIGNED) {
2177 bcopy(mtod(m, caddr_t), (caddr_t)&recmark, NFSX_UNSIGNED);
2178 m->m_data += NFSX_UNSIGNED;
2179 m->m_len -= NFSX_UNSIGNED;
2180 } else {
2181 cp1 = (caddr_t)&recmark;
2182 cp2 = mtod(m, caddr_t);
2183 while (cp1 < ((caddr_t)&recmark) + NFSX_UNSIGNED) {
2184 while (m->m_len == 0) {
2185 m = m->m_next;
2186 cp2 = mtod(m, caddr_t);
2187 }
2188 *cp1++ = *cp2++;
2189 m->m_data++;
2190 m->m_len--;
2191 }
2192 }
2193 slp->ns_cc -= NFSX_UNSIGNED;
2194 recmark = ntohl(recmark);
2195 slp->ns_reclen = recmark & ~0x80000000;
2196 if (recmark & 0x80000000)
2197 slp->ns_flag |= SLP_LASTFRAG;
2198 else
2199 slp->ns_flag &= ~SLP_LASTFRAG;
2200 if (slp->ns_reclen > NFS_MAXPACKET || slp->ns_reclen <= 0) {
2201 slp->ns_flag &= ~SLP_GETSTREAM;
2202 return (EPERM);
2203 }
2204 }
2205
2206 /*
2207 * Now get the record part.
2208 *
2209 		 * Note that slp->ns_reclen may be 0; Linux sometimes
2210 		 * generates 0-length RPC records.
2211 */
2212 recm = NULL;
2213 if (slp->ns_cc == slp->ns_reclen) {
2214 recm = slp->ns_raw;
2215 slp->ns_raw = slp->ns_rawend = (struct mbuf *)0;
2216 slp->ns_cc = slp->ns_reclen = 0;
2217 } else if (slp->ns_cc > slp->ns_reclen) {
2218 len = 0;
2219 m = slp->ns_raw;
2220 om = (struct mbuf *)0;
2221
2222 while (len < slp->ns_reclen) {
2223 if ((len + m->m_len) > slp->ns_reclen) {
2224 m2 = m_copym(m, 0, slp->ns_reclen - len,
2225 waitflag);
2226 if (m2) {
2227 if (om) {
2228 om->m_next = m2;
2229 recm = slp->ns_raw;
2230 } else
2231 recm = m2;
2232 m->m_data += slp->ns_reclen - len;
2233 m->m_len -= slp->ns_reclen - len;
2234 len = slp->ns_reclen;
2235 } else {
2236 slp->ns_flag &= ~SLP_GETSTREAM;
2237 return (EWOULDBLOCK);
2238 }
2239 } else if ((len + m->m_len) == slp->ns_reclen) {
2240 om = m;
2241 len += m->m_len;
2242 m = m->m_next;
2243 recm = slp->ns_raw;
2244 om->m_next = (struct mbuf *)0;
2245 } else {
2246 om = m;
2247 len += m->m_len;
2248 m = m->m_next;
2249 }
2250 }
2251 slp->ns_raw = m;
2252 slp->ns_cc -= len;
2253 slp->ns_reclen = 0;
2254 } else {
2255 slp->ns_flag &= ~SLP_GETSTREAM;
2256 return (0);
2257 }
2258
2259 /*
2260 * Accumulate the fragments into a record.
2261 */
2262 mpp = &slp->ns_frag;
2263 while (*mpp)
2264 mpp = &((*mpp)->m_next);
2265 *mpp = recm;
2266 if (slp->ns_flag & SLP_LASTFRAG) {
2267 struct nfsrv_rec *rec;
2268 rec = malloc(sizeof(struct nfsrv_rec), M_NFSRVDESC, waitflag);
2269 if (!rec) {
2270 m_freem(slp->ns_frag);
2271 } else {
2272 nfs_realign(&slp->ns_frag, 10 * NFSX_UNSIGNED);
2273 rec->nr_address = (struct sockaddr *)0;
2274 rec->nr_packet = slp->ns_frag;
2275 STAILQ_INSERT_TAIL(&slp->ns_rec, rec, nr_link);
2276 }
2277 slp->ns_frag = (struct mbuf *)0;
2278 }
2279 }
2280 }
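/*
 * Editor's sketch (hedged): the sender's side of the 4-byte record
 * mark that nfsrv_getstream() decodes above (RFC 1057 record marking).
 * The high bit flags the last fragment; the low 31 bits carry the
 * fragment length, in network byte order.  "example_recmark" is the
 * editor's name, not a kernel symbol.
 */
#if 0
static u_int32_t
example_recmark(u_int32_t fraglen, int lastfrag)
{
	return (htonl(fraglen | (lastfrag ? 0x80000000 : 0)));
}
#endif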
2281
2282 /*
2283  * Dequeue the next request record and parse its RPC header.
2284 */
2285 int
2286 nfsrv_dorec(slp, nfsd, ndp)
2287 register struct nfssvc_sock *slp;
2288 struct nfsd *nfsd;
2289 struct nfsrv_descript **ndp;
2290 {
2291 struct nfsrv_rec *rec;
2292 register struct mbuf *m;
2293 struct sockaddr *nam;
2294 register struct nfsrv_descript *nd;
2295 int error;
2296
2297 *ndp = NULL;
2298 if ((slp->ns_flag & SLP_VALID) == 0 || !STAILQ_FIRST(&slp->ns_rec))
2299 return (ENOBUFS);
2300 rec = STAILQ_FIRST(&slp->ns_rec);
2301 STAILQ_REMOVE_HEAD(&slp->ns_rec, nr_link);
2302 nam = rec->nr_address;
2303 m = rec->nr_packet;
2304 free(rec, M_NFSRVDESC);
2305 MALLOC(nd, struct nfsrv_descript *, sizeof (struct nfsrv_descript),
2306 M_NFSRVDESC, M_WAITOK);
2307 nd->nd_md = nd->nd_mrep = m;
2308 nd->nd_nam2 = nam;
2309 nd->nd_dpos = mtod(m, caddr_t);
2310 error = nfs_getreq(nd, nfsd, TRUE);
2311 if (error) {
2312 if (nam) {
2313 FREE(nam, M_SONAME);
2314 }
2315 free((caddr_t)nd, M_NFSRVDESC);
2316 return (error);
2317 }
2318 *ndp = nd;
2319 nfsd->nfsd_nd = nd;
2320 return (0);
2321 }
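/*
 * Editor's sketch (hedged): the consumer of nfsrv_dorec().  An nfsd
 * thread drains one parsed request at a time until the routine
 * returns ENOBUFS (nothing queued).  The real loop, with locking and
 * reply generation, is in the nfssvc(2) server path.
 */
#if 0
struct nfsrv_descript *nd;

while (nfsrv_dorec(slp, nfsd, &nd) == 0) {
	/* dispatch nd->nd_procnum, build and send the reply ... */
}
#endif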
2322
2323 /*
2324 * Search for a sleeping nfsd and wake it up.
2325  * SIDE EFFECT: If none found, set the NFSD_CHECKSLP flag, so that one of the
2326 * running nfsds will go look for the work in the nfssvc_sock list.
2327 */
2328 void
2329 nfsrv_wakenfsd(slp)
2330 struct nfssvc_sock *slp;
2331 {
2332 register struct nfsd *nd;
2333
2334 if ((slp->ns_flag & SLP_VALID) == 0)
2335 return;
2336 for (nd = nfsd_head.tqh_first; nd != 0; nd = nd->nfsd_chain.tqe_next) {
2337 if (nd->nfsd_flag & NFSD_WAITING) {
2338 nd->nfsd_flag &= ~NFSD_WAITING;
2339 if (nd->nfsd_slp)
2340 panic("nfsd wakeup");
2341 slp->ns_sref++;
2342 nd->nfsd_slp = slp;
2343 wakeup((caddr_t)nd);
2344 return;
2345 }
2346 }
2347 slp->ns_flag |= SLP_DOREC;
2348 nfsd_head_flag |= NFSD_CHECKSLP;
2349 }
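/*
 * Editor's sketch (hedged): the consumer of the NFSD_CHECKSLP hint
 * set above.  A running nfsd that notices the flag rescans the
 * nfssvc_sock list for a socket marked SLP_DOREC instead of going
 * back to sleep; the real scan lives in the nfssvc(2) server loop.
 */
#if 0
if (nfsd_head_flag & NFSD_CHECKSLP) {
	/* walk the socket list for (ns_flag & (SLP_VALID|SLP_DOREC)) */
}
#endif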
2350 #endif /* NFS_NOSERVER */