FreeBSD/Linux Kernel Cross Reference
sys/rpc/rpcclnt.c
1 /* $FreeBSD: releng/5.4/sys/rpc/rpcclnt.c 141090 2005-01-31 23:27:04Z imp $ */
2 /* $Id: rpcclnt.c,v 1.9 2003/11/05 14:59:03 rees Exp $ */
3
4 /*-
5 * copyright (c) 2003
6 * the regents of the university of michigan
7 * all rights reserved
8 *
9 * permission is granted to use, copy, create derivative works and redistribute
10 * this software and such derivative works for any purpose, so long as the name
11 * of the university of michigan is not used in any advertising or publicity
12 * pertaining to the use or distribution of this software without specific,
13 * written prior authorization. if the above copyright notice or any other
14 * identification of the university of michigan is included in any copy of any
15 * portion of this software, then the disclaimer below must also be included.
16 *
17 * this software is provided as is, without representation from the university
18 * of michigan as to its fitness for any purpose, and without warranty by the
19 * university of michigan of any kind, either express or implied, including
20 * without limitation the implied warranties of merchantability and fitness for
21 * a particular purpose. the regents of the university of michigan shall not be
22 * liable for any damages, including special, indirect, incidental, or
23 * consequential damages, with respect to any claim arising out of or in
24 * connection with the use of the software, even if it has been or is hereafter
25 * advised of the possibility of such damages.
26 */
27
28 /*-
29 * Copyright (c) 1989, 1991, 1993, 1995 The Regents of the University of
30 * California. All rights reserved.
31 *
32 * This code is derived from software contributed to Berkeley by Rick Macklem at
33 * The University of Guelph.
34 *
35 * Redistribution and use in source and binary forms, with or without
36 * modification, are permitted provided that the following conditions are
37 * met: 1. Redistributions of source code must retain the above copyright
38 * notice, this list of conditions and the following disclaimer. 2.
39 * Redistributions in binary form must reproduce the above copyright notice,
40 * this list of conditions and the following disclaimer in the documentation
41 * and/or other materials provided with the distribution. 3. All advertising
42 * materials mentioning features or use of this software must display the
43 * following acknowledgement: This product includes software developed by the
44 * University of California, Berkeley and its contributors. 4. Neither the
45 * name of the University nor the names of its contributors may be used to
46 * endorse or promote products derived from this software without specific
47 * prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
50 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
51 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
52 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
53 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
55 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
56 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 *
61 * @(#)nfs_socket.c 8.5 (Berkeley) 3/30/95
62 */
63
64 /* XXX: kill ugly debug strings */
65 /* XXX: get rid of proct, as it is not even being used... (or keep it so v{2,3}
66 * can run, but clean it up! */
67
68 #include <sys/param.h>
69 #include <sys/systm.h>
70 #include <sys/proc.h>
71 #include <sys/mount.h>
72 #include <sys/kernel.h>
73 #include <sys/mbuf.h>
74 #include <sys/syslog.h>
75 #include <sys/malloc.h>
76 #include <sys/uio.h>
77 #include <sys/lock.h>
78 #include <sys/signalvar.h>
79 #include <sys/sysent.h>
80 #include <sys/syscall.h>
81 #include <sys/sysctl.h>
82
83 #include <sys/domain.h>
84 #include <sys/protosw.h>
85 #include <sys/socket.h>
86 #include <sys/socketvar.h>
87 #include <sys/mutex.h>
88
89 #include <netinet/in.h>
90 #include <netinet/tcp.h>
91
92 #include <nfs/rpcv2.h>
93
94 #include <rpc/rpcm_subs.h>
95 #include <rpc/rpcclnt.h>
96
97 /* memory management */
98 #ifdef __OpenBSD__
99 struct pool rpctask_pool;
100 struct pool rpcclnt_pool;
101 #define RPCTASKPOOL_LWM 10
102 #define RPCTASKPOOL_HWM 40
103 #else
104 static MALLOC_DEFINE(M_RPC, "rpcclnt", "rpc state");
105 #endif
106
/* Trace-and-return helper: logs the value through RPCDEBUG before returning it. */
107 #define RPC_RETURN(X) do { RPCDEBUG("returning %d", X); return X; }while(0)
108
109 /*
110 * Estimate rto for an nfs rpc sent via. an unreliable datagram. Use the mean
111 * and mean deviation of rtt for the appropriate type of rpc for the frequent
112 * rpcs and a default for the others. The justification for doing "other"
113 * this way is that these rpcs happen so infrequently that timer est. would
114 * probably be stale. Also, since many of these rpcs are non-idempotent, a
115 * conservative timeout is desired. getattr, lookup - A+2D read, write -
116 * A+4D other - nm_timeo
117 */
118 #define RPC_RTO(n, t) \
119 ((t) == 0 ? (n)->rc_timeo : \
120 ((t) < 3 ? \
121 (((((n)->rc_srtt[t-1] + 3) >> 2) + (n)->rc_sdrtt[t-1] + 1) >> 1) : \
122 ((((n)->rc_srtt[t-1] + 7) >> 3) + (n)->rc_sdrtt[t-1] + 1)))
123
/*
 * Accessors for the smoothed rtt / deviation slot belonging to a task's
 * procedure class (see rpcclnt_proct(); class 0 means "untimed", hence -1).
 */
124 #define RPC_SRTT(s,r) (r)->r_rpcclnt->rc_srtt[rpcclnt_proct((s),\
125 (r)->r_procnum) - 1]
126
127 #define RPC_SDRTT(s,r) (r)->r_rpcclnt->rc_sdrtt[rpcclnt_proct((s),\
128 (r)->r_procnum) - 1]
129
130
131 /*
132 * There is a congestion window for outstanding rpcs maintained per mount
133 * point. The cwnd size is adjusted in roughly the way that: Van Jacobson,
134 * Congestion avoidance and Control, In "Proceedings of SIGCOMM '88". ACM,
135 * August 1988. describes for TCP. The cwnd size is chopped in half on a
136 * retransmit timeout and incremented by 1/cwnd when each rpc reply is
137 * received and a full cwnd of rpcs is in progress. (The sent count and cwnd
138 * are scaled for integer arith.) Variants of "slow start" were tried and
139 * were found to be too much of a performance hit (ave. rtt 3 times larger),
140 * I suspect due to the large rtt that nfs rpcs have.
141 */
142 #define RPC_CWNDSCALE 256
143 #define RPC_MAXCWND (RPC_CWNDSCALE * 32)
/* Retransmit backoff multipliers; presumably indexed by the task's timeout
 * count in rpcclnt_timer(), which is outside this chunk -- TODO confirm. */
144 static const int rpcclnt_backoff[8] = {2, 4, 8, 16, 32, 64, 128, 256,};
145
146 /* XXX ugly debug strings */
/* Human-readable strings for the RPC "accepted" reply status codes. */
147 #define RPC_ERRSTR_ACCEPTED_SIZE 6
148 char *rpc_errstr_accepted[RPC_ERRSTR_ACCEPTED_SIZE] = {
149 "", /* no good message... */
150 "remote server hasn't exported program.",
151 "remote server can't support version number.",
152 "program can't support procedure.",
153 "procedure can't decode params.",
154 "remote error. remote side memory allocation failure?"
155 };
156
157 char *rpc_errstr_denied[2] = {
158 "remote server doesnt support rpc version 2!",
159 "remote server authentication error."
160 };
161
162 #define RPC_ERRSTR_AUTH_SIZE 6
163 char *rpc_errstr_auth[RPC_ERRSTR_AUTH_SIZE] = {
164 "",
165 "auth error: bad credential (seal broken).",
166 "auth error: client must begin new session.",
167 "auth error: bad verifier (seal broken).",
168 "auth error: verifier expired or replayed.",
169 "auth error: rejected for security reasons.",
170 };
171
172 /*
173 * Static data, mostly RPC constants in XDR form
174 */
175 static u_int32_t rpc_reply, rpc_call, rpc_vers;
176
177 /*
178 * rpc_msgdenied, rpc_mismatch, rpc_auth_unix, rpc_msgaccepted,
179 * rpc_autherr, rpc_auth_kerb;
180 */
181
/* Global XID seed shared by all clients; rpcclnt_xid_touched presumably marks
 * first use (the consumer is not visible in this chunk -- TODO confirm). */
182 static u_int32_t rpcclnt_xid = 0;
183 static u_int32_t rpcclnt_xid_touched = 0;
184 struct rpcstats rpcstats;
185 int rpcclnt_ticks;
186
/* kern.rpc sysctl tree exposing the counters above, read-only. */
187 SYSCTL_NODE(_kern, OID_AUTO, rpc, CTLFLAG_RD, 0, "RPC Subsystem");
188
189 SYSCTL_UINT(_kern_rpc, OID_AUTO, retries, CTLFLAG_RD, &rpcstats.rpcretries, 0, "retries");
190 SYSCTL_UINT(_kern_rpc, OID_AUTO, request, CTLFLAG_RD, &rpcstats.rpcrequests, 0, "request");
191 SYSCTL_UINT(_kern_rpc, OID_AUTO, timeouts, CTLFLAG_RD, &rpcstats.rpctimeouts, 0, "timeouts");
192 SYSCTL_UINT(_kern_rpc, OID_AUTO, unexpected, CTLFLAG_RD, &rpcstats.rpcunexpected, 0, "unexpected");
193 SYSCTL_UINT(_kern_rpc, OID_AUTO, invalid, CTLFLAG_RD, &rpcstats.rpcinvalid, 0, "invalid");
194
195
196 #ifdef RPCCLNT_DEBUG
197 int rpcdebugon = 0;
198 SYSCTL_UINT(_kern_rpc, OID_AUTO, debug_on, CTLFLAG_RW, &rpcdebugon, 0, "RPC Debug messages");
199 #endif
200
201 /*
202 * Queue head for rpctask's
203 */
204 static
205 TAILQ_HEAD(, rpctask) rpctask_q;
206 struct callout rpcclnt_callout;
207
/* Forward declarations; the send/receive address argument differs by platform
 * (mbuf-based on OpenBSD, sockaddr-based on FreeBSD). */
208 #ifdef __OpenBSD__
209 static int rpcclnt_send(struct socket *, struct mbuf *, struct mbuf *, struct rpctask *);
210 static int rpcclnt_receive(struct rpctask *, struct mbuf **, struct mbuf **, RPC_EXEC_CTX);
211 #else
212 static int rpcclnt_send(struct socket *, struct sockaddr *, struct mbuf *, struct rpctask *);
213 static int rpcclnt_receive(struct rpctask *, struct sockaddr **, struct mbuf **, RPC_EXEC_CTX);
214 #endif
215
216 static int rpcclnt_msg(RPC_EXEC_CTX, const char *, char *);
217
218 static int rpcclnt_reply(struct rpctask *, RPC_EXEC_CTX);
219 static void rpcclnt_timer(void *);
220 static int rpcclnt_sndlock(int *, struct rpctask *);
221 static void rpcclnt_sndunlock(int *);
222 static int rpcclnt_rcvlock(struct rpctask *);
223 static void rpcclnt_rcvunlock(int *);
224 #if 0
225 void rpcclnt_realign(struct mbuf *, int);
226 #else
227 static void rpcclnt_realign(struct mbuf **, int);
228 #endif
229
230 static struct mbuf *rpcclnt_buildheader(struct rpcclnt *, int, struct mbuf *, u_int32_t, int *, struct mbuf **, struct ucred *);
231 static int rpcm_disct(struct mbuf **, caddr_t *, int, int, caddr_t *);
232 static u_int32_t rpcclnt_proct(struct rpcclnt *, u_int32_t);
233 static int rpc_adv(struct mbuf **, caddr_t *, int, int);
234 static void rpcclnt_softterm(struct rpctask * task);
235
236 static int rpcauth_buildheader(struct rpc_auth * auth, struct ucred *, struct mbuf **, caddr_t *);
237
/*
 * One-time initialization of the RPC client subsystem: derive the
 * retransmit-timer tick length from hz, zero the statistics counters,
 * pre-encode the XDR constants used on every call, initialize the global
 * task queue, and set up the retransmit timer (pool allocators and an
 * immediately-armed timeout on OpenBSD; a callout on FreeBSD).
 */
238 void
239 rpcclnt_init(void)
240 {
241 #ifdef __OpenBSD__
242 static struct timeout rpcclnt_timer_to;
243 #endif
244
/* Convert RPC_TICKINTVL (milliseconds) to hz ticks, rounding to nearest;
 * clamp to at least one tick. */
245 rpcclnt_ticks = (hz * RPC_TICKINTVL + 500) / 1000;
246 if (rpcclnt_ticks < 1)
247 rpcclnt_ticks = 1;
248 rpcstats.rpcretries = 0;
249 rpcstats.rpcrequests = 0;
250 rpcstats.rpctimeouts = 0;
251 rpcstats.rpcunexpected = 0;
252 rpcstats.rpcinvalid = 0;
253
254 /*
255 * rpc constants how about actually using more than one of these!
256 */
257
/* Pre-convert the protocol constants to network (XDR) byte order once. */
258 rpc_reply = txdr_unsigned(RPC_REPLY);
259 rpc_vers = txdr_unsigned(RPC_VER2);
260 rpc_call = txdr_unsigned(RPC_CALL);
261 #if 0
262 rpc_msgdenied = txdr_unsigned(RPC_MSGDENIED);
263 rpc_msgaccepted = txdr_unsigned(RPC_MSGACCEPTED);
264 rpc_mismatch = txdr_unsigned(RPC_MISMATCH);
265 rpc_autherr = txdr_unsigned(RPC_AUTHERR);
266 rpc_auth_unix = txdr_unsigned(RPCAUTH_UNIX);
267 rpc_auth_kerb = txdr_unsigned(RPCAUTH_KERB4);
268 #endif
269
270 /* initialize rpctask queue */
271 TAILQ_INIT(&rpctask_q);
272
273 #ifdef __OpenBSD__
274 /* initialize pools */
275 pool_init(&rpctask_pool, sizeof(struct rpctask), 0, 0, RPCTASKPOOL_LWM,
276 "rpctask_p", NULL);
277 pool_setlowat(&rpctask_pool, RPCTASKPOOL_LWM);
278 pool_sethiwat(&rpctask_pool, RPCTASKPOOL_HWM);
279
280 pool_init(&rpcclnt_pool, sizeof(struct rpcclnt), 0, 0, 1, "rpcclnt_p", NULL);
281
282 /* initialize timers */
/* The timeout passes itself as the argument so rpcclnt_timer() can re-arm it. */
283 timeout_set(&rpcclnt_timer_to, rpcclnt_timer, &rpcclnt_timer_to);
284 rpcclnt_timer(&rpcclnt_timer_to);
285 #else /* !__OpenBSD__ */
/* FreeBSD: the callout is armed elsewhere; only prepared here. */
286 callout_init(&rpcclnt_callout, 0);
287 #endif /* !__OpenBSD__ */
288
289 RPCDEBUG("rpc initialed");
290
291 return;
292 }
293
/*
 * Tear down the RPC client subsystem: stop the global retransmit callout.
 * NOTE(review): the sysctl nodes registered above are not unregistered here
 * (acknowledged by the XXX below).
 */
294 void
295 rpcclnt_uninit(void)
296 {
297 RPCDEBUG("uninit");
298 /* XXX delete sysctl variables? */
299 callout_stop(&rpcclnt_callout);
300 }
301
302 int
303 rpcclnt_setup(clnt, program, addr, sotype, soproto, auth, max_read_size, max_write_size, flags)
304 struct rpcclnt * clnt;
305 struct rpc_program * program;
306 struct sockaddr * addr;
307 int sotype;
308 int soproto;
309 struct rpc_auth * auth;
310 int max_read_size;
311 int max_write_size;
312 int flags;
313 {
314 if (clnt == NULL || program == NULL || addr == NULL || auth == NULL)
315 RPC_RETURN (EFAULT);
316
317 if (program->prog_name == NULL)
318 RPC_RETURN (EFAULT);
319 clnt->rc_prog = program;
320
321 clnt->rc_name = addr;
322 clnt->rc_sotype = sotype;
323 clnt->rc_soproto = soproto;
324 clnt->rc_auth = auth;
325 clnt->rc_rsize = max_read_size;
326 clnt->rc_wsize = max_write_size;
327 clnt->rc_flag = flags;
328
329 clnt->rc_proctlen = 0;
330 clnt->rc_proct = NULL;
331
332 RPC_RETURN (0);
333 }
334
335 /*
336 * Initialize sockets and congestion for a new RPC connection. We do not free
337 * the sockaddr if error.
338 */
/*
 * Create, bind and (optionally) connect the client socket for 'rpc', then
 * size its buffers and seed the congestion/RTT state.  On any failure the
 * partially set up socket is torn down via the common 'bad:' exit; the
 * caller's sockaddr (rpc->rc_name) is never freed here.
 */
339 int
340 rpcclnt_connect(rpc, td)
341 struct rpcclnt *rpc;
342 RPC_EXEC_CTX td;
343 {
344 struct socket *so;
345 int s, error, rcvreserve, sndreserve;
346 struct sockaddr *saddr;
347
348 #ifdef __OpenBSD__
349 struct sockaddr_in *sin;
350 struct mbuf *m;
351 #else
352 struct sockaddr_in sin;
353
354 int soarg;
355 struct sockopt opt;
356 #endif
357
358 if (rpc == NULL) {
359 RPCDEBUG("no rpcclnt struct!\n");
360 RPC_RETURN(EFAULT);
361 }
362
363 GIANT_REQUIRED; /* XXX until socket locking done */
364
365 /* create the socket */
366 rpc->rc_so = NULL;
367
368 saddr = rpc->rc_name;
369
370 error = socreate(saddr->sa_family, &rpc->rc_so, rpc->rc_sotype,
371 rpc->rc_soproto, td->td_ucred, td);
372
373 if (error) {
374 RPCDEBUG("error %d in socreate()", error);
375 RPC_RETURN(error);
376 }
377 so = rpc->rc_so;
378 rpc->rc_soflags = so->so_proto->pr_flags;
379
380 /*
381 * Some servers require that the client port be a reserved port
382 * number. We always allocate a reserved port, as this prevents
383 * filehandle disclosure through UDP port capture.
384 */
385 if (saddr->sa_family == AF_INET) {
386 #ifdef __OpenBSD__
387 struct mbuf *mopt;
388 int *ip;
389 #endif
390
/* Step 1: switch the port range to "low" (privileged) before binding. */
391 #ifdef __OpenBSD__
392 MGET(mopt, M_TRYWAIT, MT_SOOPTS);
393 mopt->m_len = sizeof(int);
394 ip = mtod(mopt, int *);
395 *ip = IP_PORTRANGE_LOW;
396
397 error = sosetopt(so, IPPROTO_IP, IP_PORTRANGE, mopt);
398 #else
399 soarg = IP_PORTRANGE_LOW;
400 bzero(&opt, sizeof(struct sockopt));
401 opt.sopt_dir = SOPT_SET;
402 opt.sopt_level = IPPROTO_IP;
403 opt.sopt_name = IP_PORTRANGE;
404 opt.sopt_val = &soarg;
405 opt.sopt_valsize = sizeof(soarg);
406
407 error = sosetopt(so, &opt);
408 #endif
409 if (error)
410 goto bad;
411
/* Step 2: bind to a wildcard address/port; the kernel picks a reserved port.
 * NOTE(review): this and the block below test '#if __OpenBSD__' while the
 * rest of the file uses '#ifdef __OpenBSD__' -- inconsistent, and it breaks
 * if the macro is defined empty; worth confirming against upstream. */
412 #if __OpenBSD__
413 MGET(m, M_TRYWAIT, MT_SONAME);
414 sin = mtod(m, struct sockaddr_in *);
415 sin->sin_len = m->m_len = sizeof(struct sockaddr_in);
416 sin->sin_family = AF_INET;
417 sin->sin_addr.s_addr = INADDR_ANY;
418 sin->sin_port = htons(0);
419 error = sobind(so, m);
420 m_freem(m);
421 #else
422 sin.sin_len = sizeof(struct sockaddr_in);
423 sin.sin_family = AF_INET;
424 sin.sin_addr.s_addr = INADDR_ANY;
425 sin.sin_port = htons(0);
426 error = sobind(so, (struct sockaddr *) & sin, td);
427 #endif
428 if (error)
429 goto bad;
430
/* Step 3: restore the default port range so later sockets are unaffected. */
431 #if __OpenBSD__
432 MGET(mopt, M_TRYWAIT, MT_SOOPTS);
433 mopt->m_len = sizeof(int);
434 ip = mtod(mopt, int *);
435 *ip = IP_PORTRANGE_DEFAULT;
436 error = sosetopt(so, IPPROTO_IP, IP_PORTRANGE, mopt);
437 #else
438 soarg = IP_PORTRANGE_DEFAULT;
439 bzero(&opt, sizeof(struct sockopt));
440 opt.sopt_dir = SOPT_SET;
441 opt.sopt_level = IPPROTO_IP;
442 opt.sopt_name = IP_PORTRANGE;
443 opt.sopt_val = &soarg;
444 opt.sopt_valsize = sizeof(soarg);
445 error = sosetopt(so, &opt);
446 #endif
447 if (error)
448 goto bad;
449 }
450 /*
451 * Protocols that do not require connections may be optionally left
452 * unconnected for servers that reply from a port other than
453 * NFS_PORT.
454 */
455 if (rpc->rc_flag & RPCCLNT_NOCONN) {
456 if (rpc->rc_soflags & PR_CONNREQUIRED) {
457 error = ENOTCONN;
458 goto bad;
459 }
460 } else {
461 error = soconnect(so, saddr, td);
462 if (error)
463 goto bad;
464
465 /*
466 * Wait for the connection to complete. Cribbed from the
467 * connect system call but with the wait timing out so that
468 * interruptible mounts don't hang here for a long time.
469 */
470 #ifdef __OpenBSD__
471 s = splsoftnet();
472 #else
473 s = splnet();
474 #endif
/* Poll with a 2-second sleep until the connect completes or errors. */
475 while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
476 (void)tsleep((caddr_t) & so->so_timeo, PSOCK,
477 "rpc", 2 * hz);
478
479 /*
480 * XXX needs to catch interrupt signals. something
481 * like this: if ((so->so_state & SS_ISCONNECTING) &&
482 * so->so_error == 0 && rep && (error =
483 * nfs_sigintr(nmp, rep, rep->r_td)) != 0) {
484 * so->so_state &= ~SS_ISCONNECTING; splx(s); goto
485 * bad; }
486 */
487 }
488 if (so->so_error) {
489 error = so->so_error;
490 so->so_error = 0;
491 splx(s);
492 goto bad;
493 }
494 splx(s);
495 }
/* Soft/interruptible mounts get 5-second socket buffer timeouts; hard
 * mounts block indefinitely. */
496 if (rpc->rc_flag & (RPCCLNT_SOFT | RPCCLNT_INT)) {
497 so->so_rcv.sb_timeo = (5 * hz);
498 so->so_snd.sb_timeo = (5 * hz);
499 } else {
500 so->so_rcv.sb_timeo = 0;
501 so->so_snd.sb_timeo = 0;
502 }
503
504
/* Reserve socket buffer space sized to the transfer limits; stream sockets
 * also need room for the 4-byte record mark, seqpacket/stream are doubled. */
505 if (rpc->rc_sotype == SOCK_DGRAM) {
506 sndreserve = rpc->rc_wsize + RPC_MAXPKTHDR;
507 rcvreserve = rpc->rc_rsize + RPC_MAXPKTHDR;
508 } else if (rpc->rc_sotype == SOCK_SEQPACKET) {
509 sndreserve = (rpc->rc_wsize + RPC_MAXPKTHDR) * 2;
510 rcvreserve = (rpc->rc_rsize + RPC_MAXPKTHDR) * 2;
511 } else {
512 if (rpc->rc_sotype != SOCK_STREAM)
513 panic("rpcclnt_connect() bad sotype");
514 if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
515 #ifdef __OpenBSD__
516 MGET(m, M_TRYWAIT, MT_SOOPTS);
517 *mtod(m, int32_t *) = 1;
518 m->m_len = sizeof(int32_t);
519 sosetopt(so, SOL_SOCKET, SO_KEEPALIVE, m);
520 #else
521 soarg = 1;
522
523 bzero(&opt, sizeof(struct sockopt));
524 opt.sopt_dir = SOPT_SET;
525 opt.sopt_level = SOL_SOCKET;
526 opt.sopt_name = SO_KEEPALIVE;
527 opt.sopt_val = &soarg;
528 opt.sopt_valsize = sizeof(soarg);
529 sosetopt(so, &opt);
530 #endif
531 }
532 if (so->so_proto->pr_protocol == IPPROTO_TCP) {
533 #ifdef __OpenBSD__
534 MGET(m, M_TRYWAIT, MT_SOOPTS);
535 *mtod(m, int32_t *) = 1;
536 m->m_len = sizeof(int32_t);
537 sosetopt(so, IPPROTO_TCP, TCP_NODELAY, m);
538 #else
539 soarg = 1;
540
541 bzero(&opt, sizeof(struct sockopt));
542 opt.sopt_dir = SOPT_SET;
543 opt.sopt_level = IPPROTO_TCP;
544 opt.sopt_name = TCP_NODELAY;
545 opt.sopt_val = &soarg;
546 opt.sopt_valsize = sizeof(soarg);
547 sosetopt(so, &opt);
548 #endif
549 }
550 sndreserve = (rpc->rc_wsize + RPC_MAXPKTHDR +
551 sizeof(u_int32_t)) * 2;
552 rcvreserve = (rpc->rc_rsize + RPC_MAXPKTHDR +
553 sizeof(u_int32_t)) * 2;
554 }
555 error = soreserve(so, sndreserve, rcvreserve);
556 if (error)
557 goto bad;
558 so->so_rcv.sb_flags |= SB_NOINTR;
559 so->so_snd.sb_flags |= SB_NOINTR;
560
561 /* Initialize other non-zero congestion variables */
562 rpc->rc_srtt[0] = rpc->rc_srtt[1] = rpc->rc_srtt[2] =
563 rpc->rc_srtt[3] = (RPC_TIMEO << 3);
564 rpc->rc_sdrtt[0] = rpc->rc_sdrtt[1] = rpc->rc_sdrtt[2] =
565 rpc->rc_sdrtt[3] = 0;
566 rpc->rc_cwnd = RPC_MAXCWND / 2; /* Initial send window */
567 rpc->rc_sent = 0;
568 rpc->rc_timeouts = 0;
569 RPC_RETURN(0);
570
/* Common error exit: tear down the partially constructed socket. */
571 bad:
572 rpcclnt_disconnect(rpc);
573 RPC_RETURN(error);
574 }
575
576
577 /*
578 * Reconnect routine: Called when a connection is broken on a reliable
579 * protocol. - clean up the old socket - rpcclnt_connect() again - set
580 * R_MUSTRESEND for all outstanding requests on mount point If this fails the
581 * mount point is DEAD! nb: Must be called with the rpcclnt_sndlock() set on the
582 * mount point.
583 */
584 int
585 rpcclnt_reconnect(rep, td)
586 struct rpctask *rep;
587 RPC_EXEC_CTX td;
588 {
589 struct rpctask *rp;
590 struct rpcclnt *rpc = rep->r_rpcclnt;
591 int error;
592
593 rpcclnt_disconnect(rpc);
594 while ((error = rpcclnt_connect(rpc, td)) != 0) {
595 if (error == EINTR || error == ERESTART)
596 RPC_RETURN(EINTR);
597 tsleep(&lbolt, PSOCK, "rpccon", 0);
598 }
599
600 /*
601 * Loop through outstanding request list and fix up all requests on
602 * old socket.
603 */
604 for (rp = TAILQ_FIRST(&rpctask_q); rp != NULL;
605 rp = TAILQ_NEXT(rp, r_chain)) {
606 if (rp->r_rpcclnt == rpc)
607 rp->r_flags |= R_MUSTRESEND;
608 }
609 RPC_RETURN(0);
610 }
611
612 /*
613 * RPC disconnect. Clean up and unlink.
614 */
615 void
616 rpcclnt_disconnect(rpc)
617 struct rpcclnt *rpc;
618 {
619 struct socket *so;
620
621 GIANT_REQUIRED; /* XXX until socket locking done */
622
623 if (rpc->rc_so) {
624 so = rpc->rc_so;
625 rpc->rc_so = NULL;
626 soshutdown(so, 2);
627 soclose(so);
628 }
629 }
630
631 void
632 rpcclnt_safedisconnect(struct rpcclnt * rpc)
633 {
634 struct rpctask dummytask;
635
636 bzero(&dummytask, sizeof(dummytask));
637 dummytask.r_rpcclnt = rpc;
638 rpcclnt_rcvlock(&dummytask);
639 rpcclnt_disconnect(rpc);
640 rpcclnt_rcvunlock(&rpc->rc_flag);
641 }
642
643 /*
644 * This is the rpc send routine. For connection based socket types, it must
645 * be called with an rpcclnt_sndlock() on the socket. "rep == NULL" indicates
646 * that it has been called from a server. For the client side: - return EINTR
647 * if the RPC is terminated, 0 otherwise - set R_MUSTRESEND if the send fails
648 * for any reason - do any cleanup required by recoverable socket errors
649 * (???) For the server side: - return EINTR or ERESTART if interrupted by a
650 * signal - return EPIPE if a connection is lost for connection based sockets
651 * (TCP...) - do any cleanup required by recoverable socket errors (???)
652 */
653 static int
654 rpcclnt_send(so, nam, top, rep)
655 struct socket *so;
656 #ifdef __OpenBSD__
657 struct mbuf *nam;
658 #else
659 struct sockaddr *nam;
660 #endif
661 struct mbuf *top;
662 struct rpctask *rep;
663 {
664 #ifdef __OpenBSD__
665 struct mbuf *sendnam;
666 #else
667 struct sockaddr *sendnam;
668 struct thread *td = curthread;
669 #endif
670 int error, soflags, flags;
671
672 GIANT_REQUIRED; /* XXX until socket locking done */
673
/* Client side: bail out if the task was soft-terminated; if the socket is
 * gone (mid-reconnect), queue the request for retransmission instead of
 * sending.  In both cases 'top' is consumed. */
674 if (rep) {
675 if (rep->r_flags & R_SOFTTERM) {
676 m_freem(top);
677 RPC_RETURN(EINTR);
678 }
679 if ((so = rep->r_rpcclnt->rc_so) == NULL) {
680 rep->r_flags |= R_MUSTRESEND;
681 m_freem(top);
682 RPC_RETURN(0);
683 }
684 rep->r_flags &= ~R_MUSTRESEND;
685 soflags = rep->r_rpcclnt->rc_soflags;
686 } else
687 soflags = so->so_proto->pr_flags;
688
/* Only pass a destination address for unconnected, connectionless sockets. */
689 if ((soflags & PR_CONNREQUIRED) || (so->so_state & SS_ISCONNECTED))
690 sendnam = NULL;
691 else
692 sendnam = nam;
693
694 if (so->so_type == SOCK_SEQPACKET)
695 flags = MSG_EOR;
696 else
697 flags = 0;
698
699 error = sosend(so, sendnam, NULL, top, NULL, flags, td);
700
701 if (error) {
702 if (rep) {
703 log(LOG_INFO, "rpc send error %d for service %s\n", error,
704 rep->r_rpcclnt->rc_prog->prog_name);
705 /*
706 * Deal with errors for the client side.
707 */
708 if (rep->r_flags & R_SOFTTERM)
709 error = EINTR;
710 else
711 rep->r_flags |= R_MUSTRESEND;
712 } else
713 log(LOG_INFO, "rpc service send error %d\n", error);
714
715 /*
716 * Handle any recoverable (soft) socket errors here.
717 */
/* Anything other than EINTR/ERESTART/EWOULDBLOCK/EPIPE is treated as
 * recoverable and suppressed (the retransmit path will retry). */
718 if (error != EINTR && error != ERESTART &&
719 error != EWOULDBLOCK && error != EPIPE)
720 error = 0;
721 }
722 RPC_RETURN(error);
723 }
724
725 /*
726 * Receive a Sun RPC Request/Reply. For SOCK_DGRAM, the work is all done by
727 * soreceive(), but for SOCK_STREAM we must deal with the Record Mark and
728 * consolidate the data into a new mbuf list. nb: Sometimes TCP passes the
729 * data up to soreceive() in long lists of small mbufs. For SOCK_STREAM we
730 * must be very careful to read an entire record once we have read any of it,
731 * even if the system call has been interrupted.
732 */
733 static int
734 rpcclnt_receive(rep, aname, mp, td)
735 struct rpctask *rep;
736 #ifdef __OpenBSD__
737 struct mbuf **aname;
738 #else
739 struct sockaddr **aname;
740 #endif
741 struct mbuf **mp;
742 RPC_EXEC_CTX td;
743 {
744 struct socket *so;
745 struct uio auio;
746 struct iovec aio;
747 struct mbuf *m;
748 struct mbuf *control;
749 u_int32_t len;
750 #ifdef __OpenBSD__
751 struct mbuf **getnam;
752 #else
753 struct sockaddr **getnam;
754 #endif
755 int error, sotype, rcvflg;
756
757 GIANT_REQUIRED; /* XXX until socket locking done */
758
759 /*
760 * Set up arguments for soreceive()
761 */
762 *mp = NULL;
763 *aname = NULL;
764 sotype = rep->r_rpcclnt->rc_sotype;
765
766 /*
767 * For reliable protocols, lock against other senders/receivers in
768 * case a reconnect is necessary. For SOCK_STREAM, first get the
769 * Record Mark to find out how much more there is to get. We must
770 * lock the socket against other receivers until we have an entire
771 * rpc request/reply.
772 */
773 if (sotype != SOCK_DGRAM) {
774 error = rpcclnt_sndlock(&rep->r_rpcclnt->rc_flag, rep);
775 if (error)
776 RPC_RETURN(error);
777 tryagain:
778 /*
779 * Check for fatal errors and resending request.
780 */
781 /*
782 * Ugh: If a reconnect attempt just happened, rc_so would
783 * have changed. NULL indicates a failed attempt that has
784 * essentially shut down this mount point.
785 */
786 if (rep->r_mrep || (rep->r_flags & R_SOFTTERM)) {
787 rpcclnt_sndunlock(&rep->r_rpcclnt->rc_flag);
788 RPC_RETURN(EINTR);
789 }
790 so = rep->r_rpcclnt->rc_so;
791 if (!so) {
792 error = rpcclnt_reconnect(rep, td);
793 if (error) {
794 rpcclnt_sndunlock(&rep->r_rpcclnt->rc_flag);
795 RPC_RETURN(error);
796 }
797 goto tryagain;
798 }
/* Retransmit any request flagged R_MUSTRESEND (e.g. after a reconnect);
 * a fresh copy of the cached request mbuf chain is sent each time. */
799 while (rep->r_flags & R_MUSTRESEND) {
800 m = m_copym(rep->r_mreq, 0, M_COPYALL, M_TRYWAIT);
801 rpcstats.rpcretries++;
802 error = rpcclnt_send(so, rep->r_rpcclnt->rc_name, m, rep);
803 if (error) {
804 if (error == EINTR || error == ERESTART ||
805 (error = rpcclnt_reconnect(rep, td)) != 0) {
806 rpcclnt_sndunlock(&rep->r_rpcclnt->rc_flag);
807 RPC_RETURN(error);
808 }
809 goto tryagain;
810 }
811 }
812 rpcclnt_sndunlock(&rep->r_rpcclnt->rc_flag);
/* SOCK_STREAM: first read the 4-byte RPC record mark, then exactly that
 * many bytes of record data. */
813 if (sotype == SOCK_STREAM) {
814 aio.iov_base = (caddr_t) & len;
815 aio.iov_len = sizeof(u_int32_t);
816 auio.uio_iov = &aio;
817 auio.uio_iovcnt = 1;
818 auio.uio_segflg = UIO_SYSSPACE;
819 auio.uio_rw = UIO_READ;
820 auio.uio_offset = 0;
821 auio.uio_resid = sizeof(u_int32_t);
822 #ifdef __OpenBSD__
823 auio.uio_procp = td;
824 #else
825 auio.uio_td = td;
826 #endif
827 do {
828 rcvflg = MSG_WAITALL;
829 error = soreceive(so, NULL, &auio, NULL, NULL, &rcvflg);
830 if (error == EWOULDBLOCK && rep) {
831 if (rep->r_flags & R_SOFTTERM)
832 RPC_RETURN(EINTR);
833 }
834 } while (error == EWOULDBLOCK);
835 if (!error && auio.uio_resid > 0) {
836 log(LOG_INFO,
837 "short receive (%zu/%zu) from rpc server %s\n",
838 sizeof(u_int32_t) - auio.uio_resid,
839 sizeof(u_int32_t),
840 rep->r_rpcclnt->rc_prog->prog_name);
841 error = EPIPE;
842 }
843 if (error)
844 goto errout;
/* Strip the last-fragment bit; the remainder is the fragment length. */
845 len = ntohl(len) & ~0x80000000;
846 /*
847 * This is SERIOUS! We are out of sync with the
848 * sender and forcing a disconnect/reconnect is all I
849 * can do.
850 */
851 if (len > RPC_MAXPACKET) {
852 log(LOG_ERR, "%s (%d) from rpc server %s\n",
853 "impossible packet length",
854 len,
855 rep->r_rpcclnt->rc_prog->prog_name);
856 error = EFBIG;
857 goto errout;
858 }
859 auio.uio_resid = len;
860 do {
861 rcvflg = MSG_WAITALL;
862 error = soreceive(so, NULL, &auio, mp, NULL, &rcvflg);
863 } while (error == EWOULDBLOCK || error == EINTR ||
864 error == ERESTART);
865 if (!error && auio.uio_resid > 0) {
866 log(LOG_INFO,
867 "short receive (%d/%d) from rpc server %s\n",
868 len - auio.uio_resid, len,
869 rep->r_rpcclnt->rc_prog->prog_name);
870 error = EPIPE;
871 }
872 } else {
873 /*
874 * NB: Since uio_resid is big, MSG_WAITALL is ignored
875 * and soreceive() will return when it has either a
876 * control msg or a data msg. We have no use for
877 * control msg., but must grab them and then throw
878 * them away so we know what is going on.
879 */
880 auio.uio_resid = len = 100000000; /* Anything Big */
881 #ifdef __OpenBSD__
882 auio.uio_procp = td;
883 #else
884 auio.uio_td = td;
885 #endif
886 do {
887 rcvflg = 0;
888 error = soreceive(so, NULL, &auio, mp, &control, &rcvflg);
889 if (control)
890 m_freem(control);
891 if (error == EWOULDBLOCK && rep) {
892 if (rep->r_flags & R_SOFTTERM)
893 RPC_RETURN(EINTR);
894 }
895 } while (error == EWOULDBLOCK ||
896 (!error && *mp == NULL && control));
897 if ((rcvflg & MSG_EOR) == 0)
898 printf("Egad!!\n");
899 if (!error && *mp == NULL)
900 error = EPIPE;
901 len -= auio.uio_resid;
902 }
/* Shared error path for the reliable-protocol branch: drop any partial
 * data, then try to re-lock, reconnect and start over. */
903 errout:
904 if (error && error != EINTR && error != ERESTART) {
905 m_freem(*mp);
906 *mp = (struct mbuf *) 0;
907 if (error != EPIPE)
908 log(LOG_INFO,
909 "receive error %d from rpc server %s\n",
910 error,
911 rep->r_rpcclnt->rc_prog->prog_name);
912 error = rpcclnt_sndlock(&rep->r_rpcclnt->rc_flag, rep);
913 if (!error)
914 error = rpcclnt_reconnect(rep, td);
915 if (!error)
916 goto tryagain;
917 }
918 } else {
/* SOCK_DGRAM: a single soreceive() grabs a whole datagram; only ask for
 * the peer address when the socket is not connected. */
919 if ((so = rep->r_rpcclnt->rc_so) == NULL)
920 RPC_RETURN(EACCES);
921 if (so->so_state & SS_ISCONNECTED)
922 getnam = NULL;
923 else
924 getnam = aname;
925 auio.uio_resid = len = 1000000;
926 #ifdef __OpenBSD__
927 auio.uio_procp = td;
928 #else
929 auio.uio_td = td;
930 #endif
931
932 do {
933 rcvflg = 0;
/* NOTE(review): debug string below misspells "soreceive"; left untouched
 * here since doc-only changes must not alter runtime strings. */
934 error = soreceive(so, getnam, &auio, mp, NULL, &rcvflg);
935 RPCDEBUG("soreceivce returns %d", error);
936 if (error == EWOULDBLOCK && (rep->r_flags & R_SOFTTERM)) {
937 RPCDEBUG("wouldblock && softerm -> EINTR");
938 RPC_RETURN(EINTR);
939 }
940 } while (error == EWOULDBLOCK);
941 len -= auio.uio_resid;
942 }
943 if (error) {
944 m_freem(*mp);
945 *mp = NULL;
946 }
947
948 /*
949 * Search for any mbufs that are not a multiple of 4 bytes
950 * long or with m_data not longword aligned. These could
951 * cause pointer alignment problems, so copy them to well
952 * aligned mbufs.
953 */
954 rpcclnt_realign(mp, 5 * RPCX_UNSIGNED);
955 RPC_RETURN(error);
956 }
957
958
959 /*
960 * Implement receipt of reply on a socket. We must search through the list of
961 * received datagrams matching them with outstanding requests using the xid,
962 * until ours is found.
963 */
964 /* ARGSUSED */
/*
 * Receive replies off the socket until the one for 'myrep' arrives (its
 * r_mrep becomes non-NULL), matching each reply to its task by XID and
 * updating the owning client's congestion window and RTT estimators.
 * Returns 0 once our reply is in hand (or after one reply if R_GETONEREP
 * is set), otherwise the receive error.
 */
965 static int
966 rpcclnt_reply(myrep, td)
967 struct rpctask *myrep;
968 RPC_EXEC_CTX td;
969 {
970 struct rpctask *rep;
971 struct rpcclnt *rpc = myrep->r_rpcclnt;
972 int32_t t1;
973 struct mbuf *mrep, *md;
974 #ifdef __OpenBSD__
975 struct mbuf *nam;
976 #else
977 struct sockaddr *nam;
978 #endif
979 u_int32_t rxid, *tl;
/* NOTE(review): cp2 appears unused directly; presumably consumed by the
 * rpcm_dissect() macro from rpcm_subs.h -- TODO confirm. */
980 caddr_t dpos, cp2;
981 int error;
982
983 /*
984 * Loop around until we get our own reply
985 */
986 for (;;) {
987 /*
988 * Lock against other receivers so that I don't get stuck in
989 * sbwait() after someone else has received my reply for me.
990 * Also necessary for connection based protocols to avoid
991 * race conditions during a reconnect.
992 */
993 error = rpcclnt_rcvlock(myrep);
994 if (error)
995 RPC_RETURN(error);
996 /* Already received, bye bye */
997 if (myrep->r_mrep != NULL) {
998 rpcclnt_rcvunlock(&rpc->rc_flag);
999 RPC_RETURN(0);
1000 }
1001 /*
1002 * Get the next Rpc reply off the socket
1003 */
1004 error = rpcclnt_receive(myrep, &nam, &mrep, td);
1005
1006 rpcclnt_rcvunlock(&rpc->rc_flag);
1007
1008 if (error) {
1009 /*
1010 * Ignore routing errors on connectionless
1011 * protocols??
1012 */
1013 if (RPCIGNORE_SOERROR(rpc->rc_soflags, error)) {
1014 rpc->rc_so->so_error = 0;
1015 if (myrep->r_flags & R_GETONEREP)
1016 RPC_RETURN(0);
1017 RPCDEBUG("ingoring routing error on connectionless protocol.");
1018 continue;
1019 }
1020 RPC_RETURN(error);
1021 }
/* The sender's address is not needed; free it in the platform's format. */
1022 #ifdef __OpenBSD__
1023 if (nam)
1024 m_freem(nam);
1025 #else
1026 if (nam)
1027 FREE(nam, M_SONAME);
1028 #endif
1029
1030 /*
1031 * Get the xid and check that it is an rpc reply
1032 */
1033 md = mrep;
1034 dpos = mtod(md, caddr_t);
/* NOTE(review): rpcm_dissect() is a macro; on parse failure it presumably
 * jumps to the rpcmout label below -- TODO confirm against rpcm_subs.h. */
1035 rpcm_dissect(tl, u_int32_t *, 2 * RPCX_UNSIGNED);
1036 rxid = *tl++;
1037 if (*tl != rpc_reply) {
1038 rpcstats.rpcinvalid++;
1039 m_freem(mrep);
1040 rpcmout:
1041 if (myrep->r_flags & R_GETONEREP)
1042 RPC_RETURN(0);
1043 continue;
1044 }
1045 /*
1046 * Loop through the request list to match up the reply Iff no
1047 * match, just drop the datagram
1048 */
1049 TAILQ_FOREACH(rep, &rpctask_q, r_chain) {
1050 if (rep->r_mrep == NULL && rxid == rep->r_xid) {
1051 /* Found it.. */
1052 rep->r_mrep = mrep;
1053 rep->r_md = md;
1054 rep->r_dpos = dpos;
1055
1056 /*
1057 * Update congestion window. Do the additive
1058 * increase of one rpc/rtt.
1059 */
1060 if (rpc->rc_cwnd <= rpc->rc_sent) {
1061 rpc->rc_cwnd +=
1062 (RPC_CWNDSCALE * RPC_CWNDSCALE +
1063 (rpc->rc_cwnd >> 1)) / rpc->rc_cwnd;
1064 if (rpc->rc_cwnd > RPC_MAXCWND)
1065 rpc->rc_cwnd = RPC_MAXCWND;
1066 }
1067 rep->r_flags &= ~R_SENT;
1068 rpc->rc_sent -= RPC_CWNDSCALE;
1069 /*
1070 * Update rtt using a gain of 0.125 on the
1071 * mean and a gain of 0.25 on the deviation.
1072 */
1073 if (rep->r_flags & R_TIMING) {
1074 /*
1075 * Since the timer resolution of
1076 * NFS_HZ is so coarse, it can often
1077 * result in r_rtt == 0. Since r_rtt
1078 * == N means that the actual rtt is
1079 * between N+dt and N+2-dt ticks, add
1080 * 1.
1081 */
1082 t1 = rep->r_rtt + 1;
1083 t1 -= (RPC_SRTT(rpc, rep) >> 3);
1084 RPC_SRTT(rpc, rep) += t1;
1085 if (t1 < 0)
1086 t1 = -t1;
1087 t1 -= (RPC_SDRTT(rpc, rep) >> 2);
1088 RPC_SDRTT(rpc, rep) += t1;
1089 }
1090 rpc->rc_timeouts = 0;
1091 break;
1092 }
1093 }
1094 /*
1095 * If not matched to a request, drop it. If it's mine, get
1096 * out.
1097 */
1098 if (rep == 0) {
1099 rpcstats.rpcunexpected++;
1100 RPCDEBUG("rpc reply not matched\n");
1101 m_freem(mrep);
1102 } else if (rep == myrep) {
1103 if (rep->r_mrep == NULL)
1104 panic("rpcreply nil");
1105 RPC_RETURN(0);
1106 }
1107 if (myrep->r_flags & R_GETONEREP)
1108 RPC_RETURN(0);
1109 }
1110 }
1111
1112 /* XXX: ignores tryagain! */
1113 /*
1114 * code from nfs_request - goes something like this - fill in task struct -
1115 * links task into list - calls nfs_send() for first transmit - calls
1116 * nfs_receive() to get reply - fills in reply (which should be initialized
1117 * prior to calling), which is valid when 0 is returned and is NEVER freed in
1118 * this function
1119 *
1120 * always frees the request header, but NEVER frees 'mrest'
1121 *
1122 */
1123 /*
1124 * ruthtype
1125 * pcclnt_setauth() should be used before calling this. EAUTH is returned if
1126 * authentication fails.
1127 */
1128 /*
1129 * note that reply->result_* are invalid unless reply->type ==
1130 * RPC_MSGACCEPTED and reply->status == RPC_SUCCESS and that reply->verf_*
1131 * are invalid unless reply->type == RPC_MSGACCEPTED
1132 */
int
rpcclnt_request(rpc, mrest, procnum, td, cred, reply)
	struct rpcclnt *rpc;
	struct mbuf *mrest;
	int procnum;
	RPC_EXEC_CTX td;
	struct ucred *cred;
	struct rpc_reply *reply;
{
	struct mbuf *m, *mrep;
	struct rpctask *task;
	u_int32_t *tl;
	struct mbuf *md, *mheadend;
	caddr_t dpos, cp2;
	int t1, s, error = 0, mrest_len;
	u_int32_t xid;

	/* Allocate a zeroed task descriptor; may sleep. */
#ifdef __OpenBSD__
	task = pool_get(&rpctask_pool, PR_WAITOK);
#else
	MALLOC(task, struct rpctask *, sizeof(struct rpctask), M_RPC, (M_WAITOK | M_ZERO));
#endif

	task->r_rpcclnt = rpc;
	task->r_procnum = procnum;
	task->r_td = td;

	mrest_len = m_length(mrest, NULL);

	/*
	 * NOTE(review): rpcclnt_buildheader() can return NULL when
	 * rpcauth_buildheader() fails, but the result is used below
	 * without a check — verify this cannot happen for the auth
	 * types configured here, or add error handling.
	 */
	m = rpcclnt_buildheader(rpc, procnum, mrest, mrest_len, &xid, &mheadend,
	    cred);

	/*
	 * For stream protocols, insert a Sun RPC Record Mark.
	 */
	if (rpc->rc_sotype == SOCK_STREAM) {
		M_PREPEND(m, RPCX_UNSIGNED, M_TRYWAIT);
		*mtod(m, u_int32_t *) = htonl(0x80000000 |
		    (m->m_pkthdr.len - RPCX_UNSIGNED));
	}
	task->r_mreq = m;
	task->r_xid = xid;

	/* Soft clients give up after rc_retry rexmits; hard ones never. */
	if (rpc->rc_flag & RPCCLNT_SOFT)
		task->r_retry = rpc->rc_retry;
	else
		task->r_retry = RPC_MAXREXMIT + 1;	/* past clip limit */
	task->r_rtt = task->r_rexmit = 0;

	/* Only procedures with a nonzero timing class get RTT-timed. */
	if (rpcclnt_proct(rpc, procnum) > 0)
		task->r_flags = R_TIMING;
	else
		task->r_flags = 0;
	task->r_mrep = NULL;

	/*
	 * Do the client side RPC.
	 */
	rpcstats.rpcrequests++;

	/*
	 * Chain request into list of outstanding requests. Be sure to put it
	 * LAST so timer finds oldest requests first.
	 */
	s = splsoftclock();
	if (TAILQ_EMPTY(&rpctask_q))
		callout_reset(&rpcclnt_callout, rpcclnt_ticks, rpcclnt_timer,
		    NULL);
	TAILQ_INSERT_TAIL(&rpctask_q, task, r_chain);

	/*
	 * If backing off another request or avoiding congestion, don't send
	 * this one now but let timer do it. If not timing a request, do it
	 * now.
	 */
	if (rpc->rc_so && (rpc->rc_sotype != SOCK_DGRAM ||
	    (rpc->rc_flag & RPCCLNT_DUMBTIMR) ||
	    rpc->rc_sent < rpc->rc_cwnd)) {
		splx(s);

		if (rpc->rc_soflags & PR_CONNREQUIRED)
			error = rpcclnt_sndlock(&rpc->rc_flag, task);
		if (!error) {
			/* Send a copy; the original stays cached for rexmit. */
			error = rpcclnt_send(rpc->rc_so, rpc->rc_name,
			    m_copym(m, 0, M_COPYALL, M_TRYWAIT),
			    task);
			if (rpc->rc_soflags & PR_CONNREQUIRED)
				rpcclnt_sndunlock(&rpc->rc_flag);
		}
		if (!error && (task->r_flags & R_MUSTRESEND) == 0) {
			rpc->rc_sent += RPC_CWNDSCALE;
			task->r_flags |= R_SENT;
		}
	} else {
		splx(s);
		/* r_rtt == -1 tells the timer to do the first send. */
		task->r_rtt = -1;
	}

	/*
	 * Wait for the reply from our send or the timer's.
	 */
	if (!error || error == EPIPE)
		error = rpcclnt_reply(task, td);

	/*
	 * RPC done, unlink the request.
	 */
	s = splsoftclock();
	TAILQ_REMOVE(&rpctask_q, task, r_chain);
	if (TAILQ_EMPTY(&rpctask_q))
		callout_stop(&rpcclnt_callout);
	splx(s);

	/*
	 * Decrement the outstanding request count.
	 */
	if (task->r_flags & R_SENT) {
		task->r_flags &= ~R_SENT;	/* paranoia */
		rpc->rc_sent -= RPC_CWNDSCALE;
	}
	/*
	 * If there was a successful reply and a tprintf msg. tprintf a
	 * response.
	 */
	if (!error && (task->r_flags & R_TPRINTFMSG))
		rpcclnt_msg(task->r_td, rpc->rc_prog->prog_name,
		    "is alive again");

	/* free request header (leaving mrest) */
	mheadend->m_next = NULL;
	m_freem(task->r_mreq);

	/* initialize reply */
	reply->mrep = task->r_mrep;
	reply->verf_md = NULL;
	reply->result_md = NULL;

	mrep = task->r_mrep;
	md = task->r_md;
	dpos = task->r_dpos;

	/* task structure is no longer needed */
#ifdef __OpenBSD__
	pool_put(&rpctask_pool, task);
#else
	FREE(task, M_RPC);
#endif

	if (error)
		goto rpcmout;

	/*
	 * break down the rpc header and check if ok
	 * (rpcm_dissect jumps to rpcmout with error set on a short reply)
	 */

	rpcm_dissect(tl, u_int32_t *, RPCX_UNSIGNED);
	reply->stat.type = fxdr_unsigned(u_int32_t, *tl);

	if (reply->stat.type == RPC_MSGDENIED) {
		rpcm_dissect(tl, u_int32_t *, RPCX_UNSIGNED);
		reply->stat.status = fxdr_unsigned(u_int32_t, *tl);

		switch (reply->stat.status) {
		case RPC_MISMATCH:
			/* Server supports a different RPC version range. */
			rpcm_dissect(tl, u_int32_t *, 2 * RPCX_UNSIGNED);
			reply->stat.mismatch_info.low = fxdr_unsigned(u_int32_t, *tl++);
			reply->stat.mismatch_info.high = fxdr_unsigned(u_int32_t, *tl);
			error = EOPNOTSUPP;
			break;
		case RPC_AUTHERR:
			rpcm_dissect(tl, u_int32_t *, RPCX_UNSIGNED);
			reply->stat.autherr = fxdr_unsigned(u_int32_t, *tl);
			error = EACCES;
			break;
		default:
			error = EBADRPC;
			break;
		}
		goto rpcmout;
	} else if (reply->stat.type != RPC_MSGACCEPTED) {
		error = EBADRPC;
		goto rpcmout;
	}

	/* Accepted: verifier (flavor + length) comes next. */
	rpcm_dissect(tl, u_int32_t *, 2 * RPCX_UNSIGNED);

	reply->verf_md = md;
	reply->verf_dpos = dpos;

	reply->verf_type = fxdr_unsigned(u_int32_t, *tl++);
	reply->verf_size = fxdr_unsigned(u_int32_t, *tl);

	if (reply->verf_size != 0)
		rpcm_adv(rpcm_rndup(reply->verf_size));

	rpcm_dissect(tl, u_int32_t *, RPCX_UNSIGNED);
	reply->stat.status = fxdr_unsigned(u_int32_t, *tl);

	if (reply->stat.status == RPC_SUCCESS) {
		/* Point result_md/result_dpos at the start of the payload. */
		if ((uint32_t)(dpos - mtod(md, caddr_t)) >= md->m_len) {
			RPCDEBUG("where is the next mbuf?");
			RPCDEBUG("%d -> %d",
			    (int)(dpos - mtod(md, caddr_t)), md->m_len);
			if (md->m_next == NULL) {
				error = EBADRPC;
				goto rpcmout;
			} else {
				reply->result_md = md->m_next;
				reply->result_dpos = mtod(reply->result_md,
				    caddr_t);
			}
		} else {
			reply->result_md = md;
			reply->result_dpos = dpos;
		}
	} else if (reply->stat.status == RPC_PROGMISMATCH) {
		rpcm_dissect(tl, u_int32_t *, 2 * RPCX_UNSIGNED);
		reply->stat.mismatch_info.low = fxdr_unsigned(u_int32_t, *tl++);
		reply->stat.mismatch_info.high = fxdr_unsigned(u_int32_t, *tl);
		error = EOPNOTSUPP;
		goto rpcmout;
	} else {
		error = EPROTONOSUPPORT;
		goto rpcmout;
	}
	error = 0;

rpcmout:
	RPC_RETURN(error);
}
1363
1364
1365 /*
1366 * Nfs timer routine Scan the nfsreq list and retranmit any requests that
1367 * have timed out To avoid retransmission attempts on STREAM sockets (in the
1368 * future) make sure to set the r_retry field to 0 (implies nm_retry == 0).
1369 */
void
rpcclnt_timer(arg)
	void *arg;
{
#ifdef __OpenBSD__
	struct timeout *to = (struct timeout *) arg;
#endif
	struct rpctask *rep;
	struct mbuf *m;
	struct socket *so;
	struct rpcclnt *rpc;
	int timeo;
	int s, error;

#ifndef __OpenBSD__
	struct thread *td = curthread;
#endif

#if __OpenBSD__
	s = splsoftnet();
#else
	s = splnet();
#endif
	TAILQ_FOREACH(rep, &rpctask_q, r_chain) {
		rpc = rep->r_rpcclnt;
		/* Skip tasks that already have a reply or were cancelled. */
		if (rep->r_mrep || (rep->r_flags & R_SOFTTERM))
			continue;
		if (rpcclnt_sigintr(rpc, rep, rep->r_td)) {
			rep->r_flags |= R_SOFTTERM;
			continue;
		}
		if (rep->r_rtt >= 0) {
			rep->r_rtt++;
			/* Fixed timeout, or RTT-estimated per procedure. */
			if (rpc->rc_flag & RPCCLNT_DUMBTIMR)
				timeo = rpc->rc_timeo;
			else
				timeo = RPC_RTO(rpc, rpcclnt_proct(rep->r_rpcclnt,
				    rep->r_procnum));
			/* Back off while consecutive timeouts accumulate. */
			if (rpc->rc_timeouts > 0)
				timeo *= rpcclnt_backoff[rpc->rc_timeouts - 1];
			if (rep->r_rtt <= timeo)
				continue;
			if (rpc->rc_timeouts < 8)
				rpc->rc_timeouts++;
		}
		/*
		 * Check for server not responding
		 */
		if ((rep->r_flags & R_TPRINTFMSG) == 0 &&
		    rep->r_rexmit > rpc->rc_deadthresh) {
			rpcclnt_msg(rep->r_td, rpc->rc_prog->prog_name,
			    "not responding");
			rep->r_flags |= R_TPRINTFMSG;
		}
		if (rep->r_rexmit >= rep->r_retry) {	/* too many */
			rpcstats.rpctimeouts++;
			rep->r_flags |= R_SOFTTERM;
			continue;
		}
		/*
		 * Non-datagram sockets: only count the timeout; the
		 * transport retransmits by itself.
		 */
		if (rpc->rc_sotype != SOCK_DGRAM) {
			if (++rep->r_rexmit > RPC_MAXREXMIT)
				rep->r_rexmit = RPC_MAXREXMIT;
			continue;
		}
		if ((so = rpc->rc_so) == NULL)
			continue;

		/*
		 * If there is enough space and the window allows.. Resend it
		 * Set r_rtt to -1 in case we fail to send it now.
		 */
		rep->r_rtt = -1;
		if (sbspace(&so->so_snd) >= rep->r_mreq->m_pkthdr.len &&
		    ((rpc->rc_flag & RPCCLNT_DUMBTIMR) ||
		    (rep->r_flags & R_SENT) ||
		    rpc->rc_sent < rpc->rc_cwnd) &&
		    (m = m_copym(rep->r_mreq, 0, M_COPYALL, M_DONTWAIT))) {
			/* Connected sockets need no destination address. */
			if ((rpc->rc_flag & RPCCLNT_NOCONN) == 0)
				error = (*so->so_proto->pr_usrreqs->pru_send) (so, 0, m,
				    NULL, NULL, td);
			else
				error = (*so->so_proto->pr_usrreqs->pru_send)(so, 0, m, rpc->rc_name, NULL, td);
			if (error) {
				if (RPCIGNORE_SOERROR(rpc->rc_soflags, error))
					so->so_error = 0;
			} else {
				/*
				 * Iff first send, start timing else turn
				 * timing off, backoff timer and divide
				 * congestion window by 2.
				 */
				if (rep->r_flags & R_SENT) {
					rep->r_flags &= ~R_TIMING;
					if (++rep->r_rexmit > RPC_MAXREXMIT)
						rep->r_rexmit = RPC_MAXREXMIT;
					rpc->rc_cwnd >>= 1;
					if (rpc->rc_cwnd < RPC_CWNDSCALE)
						rpc->rc_cwnd = RPC_CWNDSCALE;
					rpcstats.rpcretries++;
				} else {
					rep->r_flags |= R_SENT;
					rpc->rc_sent += RPC_CWNDSCALE;
				}
				rep->r_rtt = 0;
			}
		}
	}
	splx(s);

	/* Reschedule ourselves for the next tick. */
#ifdef __OpenBSD__
	timeout_add(rpcclnt_timer, to, rpcclnt_ticks);
#else
	callout_reset(&rpcclnt_callout, rpcclnt_ticks, rpcclnt_timer, NULL);
#endif
}
1485
1486 /*
1487 * Test for a termination condition pending on the process. This is used for
1488 * RPCCLNT_INT mounts.
1489 */
int
rpcclnt_sigintr(rpc, task, pr)
	struct rpcclnt *rpc;
	struct rpctask *task;
	RPC_EXEC_CTX pr;
{
	struct proc *p;

	sigset_t tmpset;

	if (rpc == NULL)
		return EFAULT;

	/* XXX deal with forced unmounts */

	/* A soft-terminated task is treated as already interrupted. */
	if (task && (task->r_flags & R_SOFTTERM))
		RPC_RETURN(EINTR);

	/* Non-interruptible client: never break out on signals. */
	if (!(rpc->rc_flag & RPCCLNT_INT))
		RPC_RETURN(0);

	/* No execution context to check pending signals against. */
	if (pr == NULL)
		return (0);

#ifdef __OpenBSD__
	p = pr;
	if (p && p->p_siglist &&
	    (((p->p_siglist & ~p->p_sigmask) & ~p->p_sigignore) &
	    RPCINT_SIGMASK))
		RPC_RETURN(EINTR);
#else
	/* Pending, not blocked by the thread, and not ignored. */
	p = pr->td_proc;
	PROC_LOCK(p);
	tmpset = p->p_siglist;
	SIGSETNAND(tmpset, pr->td_sigmask);
	mtx_lock(&p->p_sigacts->ps_mtx);
	SIGSETNAND(tmpset, p->p_sigacts->ps_sigignore);
	mtx_unlock(&p->p_sigacts->ps_mtx);
	if (SIGNOTEMPTY(p->p_siglist) && RPCCLNTINT_SIGMASK(tmpset)) {
		PROC_UNLOCK(p);
		RPC_RETURN(EINTR);
	}
	PROC_UNLOCK(p);
#endif
	RPC_RETURN(0);
}
1536
1537 /*
1538 * Lock a socket against others. Necessary for STREAM sockets to ensure you
1539 * get an entire rpc request/reply and also to avoid race conditions between
1540 * the processes with nfs requests in progress when a reconnect is necessary.
1541 */
1542 static int
1543 rpcclnt_sndlock(flagp, task)
1544 int *flagp;
1545 struct rpctask *task;
1546 {
1547 RPC_EXEC_CTX p;
1548 int slpflag = 0, slptimeo = 0;
1549
1550 if (task) {
1551 p = task->r_td;
1552 if (task->r_rpcclnt->rc_flag & RPCCLNT_INT)
1553 slpflag = PCATCH;
1554 } else
1555 p = NULL;
1556 while (*flagp & RPCCLNT_SNDLOCK) {
1557 if (rpcclnt_sigintr(task->r_rpcclnt, task, p))
1558 RPC_RETURN(EINTR);
1559 *flagp |= RPCCLNT_WANTSND;
1560 (void)tsleep((caddr_t) flagp, slpflag | (PZERO - 1), "rpcsndlck",
1561 slptimeo);
1562 if (slpflag == PCATCH) {
1563 slpflag = 0;
1564 slptimeo = 2 * hz;
1565 }
1566 }
1567 *flagp |= RPCCLNT_SNDLOCK;
1568 RPC_RETURN(0);
1569 }
1570
1571 /*
1572 * Unlock the stream socket for others.
1573 */
1574 static void
1575 rpcclnt_sndunlock(flagp)
1576 int *flagp;
1577 {
1578
1579 if ((*flagp & RPCCLNT_SNDLOCK) == 0)
1580 panic("rpc sndunlock");
1581 *flagp &= ~RPCCLNT_SNDLOCK;
1582 if (*flagp & RPCCLNT_WANTSND) {
1583 *flagp &= ~RPCCLNT_WANTSND;
1584 wakeup((caddr_t) flagp);
1585 }
1586 }
1587
1588 static int
1589 rpcclnt_rcvlock(task)
1590 struct rpctask *task;
1591 {
1592 int *flagp = &task->r_rpcclnt->rc_flag;
1593 int slpflag, slptimeo = 0;
1594
1595 if (*flagp & RPCCLNT_INT)
1596 slpflag = PCATCH;
1597 else
1598 slpflag = 0;
1599 while (*flagp & RPCCLNT_RCVLOCK) {
1600 if (rpcclnt_sigintr(task->r_rpcclnt, task, task->r_td))
1601 RPC_RETURN(EINTR);
1602 *flagp |= RPCCLNT_WANTRCV;
1603 (void)tsleep((caddr_t) flagp, slpflag | (PZERO - 1), "rpcrcvlk",
1604 slptimeo);
1605 if (slpflag == PCATCH) {
1606 slpflag = 0;
1607 slptimeo = 2 * hz;
1608 }
1609 }
1610 *flagp |= RPCCLNT_RCVLOCK;
1611 RPC_RETURN(0);
1612 }
1613
1614 /*
1615 * Unlock the stream socket for others.
1616 */
1617 static void
1618 rpcclnt_rcvunlock(flagp)
1619 int *flagp;
1620 {
1621
1622 if ((*flagp & RPCCLNT_RCVLOCK) == 0)
1623 panic("nfs rcvunlock");
1624 *flagp &= ~RPCCLNT_RCVLOCK;
1625 if (*flagp & RPCCLNT_WANTRCV) {
1626 *flagp &= ~RPCCLNT_WANTRCV;
1627 wakeup((caddr_t) flagp);
1628 }
1629 }
1630
1631 #if 0
1632 /*
1633 * Check for badly aligned mbuf data areas and realign data in an mbuf list
1634 * by copying the data areas up, as required.
1635 */
1636 void
1637 rpcclnt_realign(m, hsiz)
1638 struct mbuf *m;
1639 int hsiz;
1640 {
1641 struct mbuf *m2;
1642 int siz, mlen, olen;
1643 caddr_t tcp, fcp;
1644 struct mbuf *mnew;
1645
1646 while (m) {
1647 /*
1648 * This never happens for UDP, rarely happens for TCP but
1649 * frequently happens for iso transport.
1650 */
1651 if ((m->m_len & 0x3) || (mtod(m, long)&0x3)) {
1652 olen = m->m_len;
1653 fcp = mtod(m, caddr_t);
1654 if ((long)fcp & 0x3) {
1655 if (m->m_flags & M_PKTHDR)
1656 m_tag_delete_chain(m, NULL);
1657 m->m_flags &= ~M_PKTHDR;
1658 if (m->m_flags & M_EXT)
1659 m->m_data = m->m_ext.ext_buf +
1660 ((m->m_ext.ext_size - olen) & ~0x3);
1661 else
1662 m->m_data = m->m_dat;
1663 }
1664 m->m_len = 0;
1665 tcp = mtod(m, caddr_t);
1666 mnew = m;
1667 m2 = m->m_next;
1668
1669 /*
1670 * If possible, only put the first invariant part of
1671 * the RPC header in the first mbuf.
1672 */
1673 mlen = M_TRAILINGSPACE(m);
1674 if (olen <= hsiz && mlen > hsiz)
1675 mlen = hsiz;
1676
1677 /* Loop through the mbuf list consolidating data. */
1678 while (m) {
1679 while (olen > 0) {
1680 if (mlen == 0) {
1681 if (m2->m_flags & M_PKTHDR)
1682 m_tag_delete_chain(m2, NULL);
1683 m2->m_flags &= ~M_PKTHDR;
1684 if (m2->m_flags & M_EXT)
1685 m2->m_data = m2->m_ext.ext_buf;
1686 else
1687 m2->m_data = m2->m_dat;
1688 m2->m_len = 0;
1689 mlen = M_TRAILINGSPACE(m2);
1690 tcp = mtod(m2, caddr_t);
1691 mnew = m2;
1692 m2 = m2->m_next;
1693 }
1694 siz = min(mlen, olen);
1695 if (tcp != fcp)
1696 bcopy(fcp, tcp, siz);
1697 mnew->m_len += siz;
1698 mlen -= siz;
1699 olen -= siz;
1700 tcp += siz;
1701 fcp += siz;
1702 }
1703 m = m->m_next;
1704 if (m) {
1705 olen = m->m_len;
1706 fcp = mtod(m, caddr_t);
1707 }
1708 }
1709
1710 /*
1711 * Finally, set m_len == 0 for any trailing mbufs
1712 * that have been copied out of.
1713 */
1714 while (m2) {
1715 m2->m_len = 0;
1716 m2 = m2->m_next;
1717 }
1718 return;
1719 }
1720 m = m->m_next;
1721 }
1722 }
1723 #else
static void
rpcclnt_realign(struct mbuf **pm, int hsiz)
{
	struct mbuf *m;
	struct mbuf *n = NULL;
	int off = 0;

	/*
	 * Scan for the first mbuf with a misaligned length or data
	 * pointer; everything from that point on is copied into one
	 * freshly allocated (hence aligned) mbuf which replaces the
	 * tail of the chain.
	 * NOTE(review): "hsiz" is unused in this implementation; it
	 * appears to be kept only for interface compatibility with
	 * the disabled variant above — confirm.
	 */
	RPCDEBUG("in rpcclnt_realign()");

	while ((m = *pm) != NULL) {
		if ((m->m_len & 0x3) || (mtod(m, intptr_t) & 0x3)) {
			MGET(n, M_TRYWAIT, MT_DATA);
			/* Use a cluster if the data won't fit in a plain mbuf. */
			if (m->m_len >= MINCLSIZE) {
				MCLGET(n, M_TRYWAIT);
			}
			n->m_len = 0;
			break;
		}
		pm = &m->m_next;
	}

	/*
	 * If n is non-NULL, loop on m copying data, then replace the
	 * portion of the chain that had to be realigned.
	 */
	if (n != NULL) {
		while (m) {
			m_copyback(n, off, m->m_len, mtod(m, caddr_t));
			off += m->m_len;
			m = m->m_next;
		}
		/* Free the old misaligned tail and splice in the copy. */
		m_freem(*pm);
		*pm = n;
	}

	RPCDEBUG("leave rpcclnt_realign()");
}
1761 #endif
1762
1763 static int
1764 rpcclnt_msg(p, server, msg)
1765 RPC_EXEC_CTX p;
1766 const char *server;
1767 char *msg;
1768 {
1769 #ifdef __OpenBSD__
1770 tpr_t tpr;
1771 struct proc *pr = p;
1772
1773 if (p)
1774 tpr = tprintf_open(p);
1775 else
1776 tpr = NULL;
1777 tprintf(tpr, "rpc server %s: %s\n", server, msg);
1778 tprintf_close(tpr);
1779 RPC_RETURN(0);
1780 #else
1781 tprintf(p ? p->td_proc : NULL, LOG_INFO,
1782 "nfs server %s: %s\n", server, msg);
1783 RPC_RETURN(0);
1784 #endif
1785 }
1786
1787 /*
1788 * Build the RPC header and fill in the authorization info. The authorization
1789 * string argument is only used when the credentials come from outside of the
1790 * kernel (AUTH_KERB). (likewise, the ucred is only used when inside the
1791 * kernel) Returns the head of the mbuf list.
1792 */
1793 static struct mbuf *
1794 rpcclnt_buildheader(rc, procid, mrest, mrest_len, xidp, mheadend, cred)
1795 struct rpcclnt *rc;
1796 int procid;
1797 struct mbuf *mrest;
1798 u_int32_t mrest_len;
1799 int *xidp;
1800 struct mbuf **mheadend;
1801 struct ucred * cred;
1802 {
1803 /* register */ struct mbuf *mb;
1804 register u_int32_t *tl;
1805 /* register */ caddr_t bpos;
1806 struct mbuf *mreq, *mb2;
1807 int error;
1808
1809 MGETHDR(mb, M_TRYWAIT, MT_DATA);
1810 if (6 * RPCX_UNSIGNED >= MINCLSIZE) {
1811 MCLGET(mb, M_TRYWAIT);
1812 } else if (6 * RPCX_UNSIGNED < MHLEN) {
1813 MH_ALIGN(mb, 6 * RPCX_UNSIGNED);
1814 } else {
1815 RPCDEBUG("mbuf too small");
1816 panic("cheap bailout");
1817 }
1818 mb->m_len = 0;
1819 mreq = mb;
1820 bpos = mtod(mb, caddr_t);
1821
1822 /*
1823 * First the RPC header.
1824 */
1825 rpcm_build(tl, u_int32_t *, 6 * RPCX_UNSIGNED);
1826
1827 /* Get a new (non-zero) xid */
1828 if ((rpcclnt_xid == 0) && (rpcclnt_xid_touched == 0)) {
1829 rpcclnt_xid = arc4random();
1830 rpcclnt_xid_touched = 1;
1831 } else {
1832 while ((*xidp = arc4random() % 256) == 0);
1833 rpcclnt_xid += *xidp;
1834 }
1835
1836 /* XXX: funky... */
1837 *tl++ = *xidp = txdr_unsigned(rpcclnt_xid);
1838
1839 *tl++ = rpc_call;
1840 *tl++ = rpc_vers;
1841 *tl++ = txdr_unsigned(rc->rc_prog->prog_id);
1842 *tl++ = txdr_unsigned(rc->rc_prog->prog_version);
1843 *tl++ = txdr_unsigned(procid);
1844
1845 if ((error = rpcauth_buildheader(rc->rc_auth, cred, &mb, &bpos))) {
1846 RPCDEBUG("rpcauth_buildheader failed %d", error);
1847 return NULL;
1848 }
1849
1850 mb->m_next = mrest;
1851 *mheadend = mb;
1852 mreq->m_pkthdr.len = m_length(mreq, NULL);
1853 mreq->m_pkthdr.rcvif = NULL;
1854 return (mreq);
1855 }
1856
1857 /*
1858 * Help break down an mbuf chain by setting the first siz bytes contiguous
1859 * pointed to by returned val. This is used by the macros rpcm_dissect and
1860 * rpcm_dissecton for tough cases. (The macros use the vars. dpos and dpos2)
1861 */
static int
rpcm_disct(mdp, dposp, siz, left, cp2)
	struct mbuf **mdp;
	caddr_t *dposp;
	int siz;
	int left;
	caddr_t *cp2;
{
	struct mbuf *mp, *mp2;
	int siz2, xfer;
	caddr_t p;

	mp = *mdp;
	/* Skip past any exhausted mbufs at the current position. */
	while (left == 0) {
		*mdp = mp = mp->m_next;
		if (mp == NULL)
			RPC_RETURN(EBADRPC);
		left = mp->m_len;
		*dposp = mtod(mp, caddr_t);
	}
	if (left >= siz) {
		/* Already contiguous: hand out the current pointer. */
		*cp2 = *dposp;
		*dposp += siz;
	} else if (mp->m_next == NULL) {
		/* Chain ends before siz bytes are available. */
		RPC_RETURN(EBADRPC);
	} else if (siz > MHLEN) {
		panic("rpc S too big");
	} else {
		/*
		 * Data straddles mbufs: insert a fresh mbuf after the
		 * current one and consolidate siz bytes into it, pulling
		 * bytes forward from the following mbufs.
		 */
		MGET(mp2, M_TRYWAIT, MT_DATA);
		mp2->m_next = mp->m_next;
		mp->m_next = mp2;
		mp->m_len -= left;
		mp = mp2;
		*cp2 = p = mtod(mp, caddr_t);
		bcopy(*dposp, p, left);	/* Copy what was left */
		siz2 = siz - left;
		p += left;
		mp2 = mp->m_next;
		/* Loop around copying up the siz2 bytes */
		while (siz2 > 0) {
			if (mp2 == NULL)
				RPC_RETURN(EBADRPC);
			xfer = (siz2 > mp2->m_len) ? mp2->m_len : siz2;
			if (xfer > 0) {
				bcopy(mtod(mp2, caddr_t), p, xfer);
				RPCMADV(mp2, xfer);
				mp2->m_len -= xfer;
				p += xfer;
				siz2 -= xfer;
			}
			if (siz2 > 0)
				mp2 = mp2->m_next;
		}
		mp->m_len = siz;
		/* Resume scanning at the first partially consumed mbuf. */
		*mdp = mp2;
		*dposp = mtod(mp2, caddr_t);
	}
	RPC_RETURN(0);
}
1921
1922
1923
1924 static u_int32_t
1925 rpcclnt_proct(rpc, procid)
1926 struct rpcclnt *rpc;
1927 u_int32_t procid;
1928 {
1929 if (rpc->rc_proctlen != 0 && rpc->rc_proct != NULL &&
1930 procid < rpc->rc_proctlen) {
1931 return rpc->rc_proct[procid];
1932 }
1933 return (0);
1934 }
1935
1936 static int
1937 rpc_adv(mdp, dposp, offs, left)
1938 struct mbuf **mdp;
1939 caddr_t *dposp;
1940 int offs;
1941 int left;
1942 {
1943 struct mbuf *m;
1944 int s;
1945
1946 m = *mdp;
1947 s = left;
1948 while (s < offs) {
1949 offs -= s;
1950 m = m->m_next;
1951 if (m == NULL)
1952 RPC_RETURN(EBADRPC);
1953 s = m->m_len;
1954 }
1955 *mdp = m;
1956 *dposp = mtod(m, caddr_t) + offs;
1957 RPC_RETURN(0);
1958 }
1959
/*
 * Soft-terminate every outstanding task belonging to "rpc", then wait
 * for them to drain off the global task queue.  Returns 0 once no
 * tasks for this client remain, or EBUSY after ~30 lbolt sleeps.
 */
int
rpcclnt_cancelreqs(rpc)
	struct rpcclnt *rpc;
{
	struct rpctask *task;
	int i, s;

	s = splnet();
	TAILQ_FOREACH(task, &rpctask_q, r_chain) {
		if (rpc != task->r_rpcclnt || task->r_mrep != NULL ||
		    (task->r_flags & R_SOFTTERM))
			continue;
		rpcclnt_softterm(task);
	}
	splx(s);

	/* Poll until no task of this client is left on the queue. */
	for (i = 0; i < 30; i++) {
		s = splnet();
		TAILQ_FOREACH(task, &rpctask_q, r_chain) {
			if (rpc == task->r_rpcclnt)
				break;
		}
		splx(s);
		if (task == NULL)
			return (0);
		tsleep(&lbolt, PSOCK, "nfscancel", 0);
	}
	return (EBUSY);
}
1989
1990 static void
1991 rpcclnt_softterm(struct rpctask * task)
1992 {
1993 task->r_flags |= R_SOFTTERM;
1994 if (task->r_flags & R_SENT) {
1995 task->r_rpcclnt->rc_sent -= RPC_CWNDSCALE;
1996 task->r_flags &= ~R_SENT;
1997 }
1998 }
1999
2000
2001 #ifndef __OpenBSD__
2002 /* called by rpcclnt_get() */
/*
 * Allocate and zero a new rpcclnt structure.  Uses M_WAITOK, so it
 * may sleep and must not be called from interrupt context.
 */
void
rpcclnt_create(struct rpcclnt ** rpc)
{
	MALLOC(*rpc, struct rpcclnt *, sizeof(struct rpcclnt), M_RPC, M_WAITOK | M_ZERO);
}
2008
2009 /* called by rpcclnt_put() */
2010 void
2011 rpcclnt_destroy(struct rpcclnt * rpc)
2012 {
2013 if (rpc != NULL) {
2014 FREE(rpc, M_RPC);
2015 } else {
2016 RPCDEBUG("attempting to free a NULL rpcclnt (not dereferenced)");
2017 }
2018 }
2019 #endif /* !__OpenBSD__ */
2020
2021
2022 /* XXX: add a lock around the auth structure in struct rpcclnt and make this
2023 * call safe for calling durring a connection */
static int
rpcauth_buildheader(struct rpc_auth * auth, struct ucred * cred, struct mbuf ** mhdr, caddr_t * bp)
{
	size_t authsiz, verfsiz;
	uint32_t mlen, grpsiz;
	register struct mbuf *mb, *mb2;
	caddr_t bpos;
	register u_int32_t *tl;
	register int i;

	if (auth == NULL || mhdr == NULL)
		return EFAULT;

	/* Size of the credential body, by auth flavor. */
	switch (auth->auth_type) {
	case RPCAUTH_NULL:
		authsiz = 0;
		verfsiz = 0;
		break;
	case RPCAUTH_UNIX:
		/* stamp, machinename(0), uid, gid, gid count, gids. */
		authsiz = (5 + cred->cr_ngroups) * RPCX_UNSIGNED;
		verfsiz = 0;
		break;
	default:
		return EPROTONOSUPPORT;
		break;
	};

	/* Flavor + length words for both credential and verifier. */
	mlen = rpcm_rndup(authsiz) + rpcm_rndup(verfsiz) + 4 * RPCX_UNSIGNED;

	mb = *mhdr;
	bpos = *bp;

	/* rpcm_build may append an mbuf; uses mb/mb2/bpos locals. */
	rpcm_build(tl, u_int32_t *, mlen);

	/* Report the (possibly new) build position back to the caller. */
	*bp = bpos;
	*mhdr = mb;

	*tl++ = txdr_unsigned(auth->auth_type);
	*tl++ = txdr_unsigned(authsiz);
	switch (auth->auth_type) {
	case RPCAUTH_UNIX:
		/* stamp and machinename length, both zero. */
		*tl++ = 0;
		*tl++ = 0;

		*tl++ = txdr_unsigned(cred->cr_uid);
		*tl++ = txdr_unsigned(cred->cr_groups[0]);
		grpsiz = cred->cr_ngroups;
		*tl++ = txdr_unsigned(grpsiz);
		/* XXX: groups[0] is already sent... */
		for (i = 0 ; i < grpsiz ; i++) {
			*tl++ = txdr_unsigned(cred->cr_groups[i]);
		}

		/* null verification header */
		*tl++ = txdr_unsigned(RPCAUTH_NULL);
		*tl++ = 0;
		break;
	case RPCAUTH_NULL:
		/* just a null verf header */
		*tl++ = txdr_unsigned(RPCAUTH_NULL);
		*tl = 0;
		break;
	default:
		panic("inconsistent rpc auth type");
		break;
	}

	return 0;
}
/* Cache object: 5b79ae23e60cc139c55891e057948af1 */