FreeBSD/Linux Kernel Cross Reference
sys/rpc/rpcclnt.c
1 /* $FreeBSD: releng/6.0/sys/rpc/rpcclnt.c 150721 2005-09-29 18:40:36Z rwatson $ */
2 /* $Id: rpcclnt.c,v 1.9 2003/11/05 14:59:03 rees Exp $ */
3
4 /*-
5 * copyright (c) 2003
6 * the regents of the university of michigan
7 * all rights reserved
8 *
9 * permission is granted to use, copy, create derivative works and redistribute
10 * this software and such derivative works for any purpose, so long as the name
11 * of the university of michigan is not used in any advertising or publicity
12 * pertaining to the use or distribution of this software without specific,
13 * written prior authorization. if the above copyright notice or any other
14 * identification of the university of michigan is included in any copy of any
15 * portion of this software, then the disclaimer below must also be included.
16 *
17 * this software is provided as is, without representation from the university
18 * of michigan as to its fitness for any purpose, and without warranty by the
19 * university of michigan of any kind, either express or implied, including
20 * without limitation the implied warranties of merchantability and fitness for
21 * a particular purpose. the regents of the university of michigan shall not be
22 * liable for any damages, including special, indirect, incidental, or
23 * consequential damages, with respect to any claim arising out of or in
24 * connection with the use of the software, even if it has been or is hereafter
25 * advised of the possibility of such damages.
26 */
27
28 /*-
29 * Copyright (c) 1989, 1991, 1993, 1995 The Regents of the University of
30 * California. All rights reserved.
31 *
32 * This code is derived from software contributed to Berkeley by Rick Macklem at
33 * The University of Guelph.
34 *
35 * Redistribution and use in source and binary forms, with or without
36 * modification, are permitted provided that the following conditions are
37 * met: 1. Redistributions of source code must retain the above copyright
38 * notice, this list of conditions and the following disclaimer. 2.
39 * Redistributions in binary form must reproduce the above copyright notice,
40 * this list of conditions and the following disclaimer in the documentation
41 * and/or other materials provided with the distribution. 3. All advertising
42 * materials mentioning features or use of this software must display the
43 * following acknowledgement: This product includes software developed by the
44 * University of California, Berkeley and its contributors. 4. Neither the
45 * name of the University nor the names of its contributors may be used to
46 * endorse or promote products derived from this software without specific
47 * prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
50 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
51 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
52 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
53 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
55 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
56 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 *
61 * @(#)nfs_socket.c 8.5 (Berkeley) 3/30/95
62 */
63
64 /* XXX: kill ugly debug strings */
65 /* XXX: get rid of proct, as it is not even being used... (or keep it so v{2,3}
66 * can run, but clean it up! */
67
68 #include <sys/param.h>
69 #include <sys/systm.h>
70 #include <sys/proc.h>
71 #include <sys/mount.h>
72 #include <sys/kernel.h>
73 #include <sys/mbuf.h>
74 #include <sys/syslog.h>
75 #include <sys/malloc.h>
76 #include <sys/uio.h>
77 #include <sys/lock.h>
78 #include <sys/signalvar.h>
79 #include <sys/sysent.h>
80 #include <sys/syscall.h>
81 #include <sys/sysctl.h>
82
83 #include <sys/domain.h>
84 #include <sys/protosw.h>
85 #include <sys/socket.h>
86 #include <sys/socketvar.h>
87 #include <sys/mutex.h>
88
89 #include <netinet/in.h>
90 #include <netinet/tcp.h>
91
92 #include <nfs/rpcv2.h>
93
94 #include <rpc/rpcm_subs.h>
95 #include <rpc/rpcclnt.h>
96
/* memory management */
#ifdef __OpenBSD__
/* OpenBSD: dedicated pools for task and client structures. */
struct pool rpctask_pool;
struct pool rpcclnt_pool;
#define RPCTASKPOOL_LWM 10	/* pool low-water mark */
#define RPCTASKPOOL_HWM 40	/* pool high-water mark */
#else
/* FreeBSD: a single malloc(9) type tags all RPC client allocations. */
static MALLOC_DEFINE(M_RPC, "rpcclnt", "rpc state");
#endif

/* Return X from the current function, logging the value when debugging. */
#define RPC_RETURN(X) do { RPCDEBUG("returning %d", X); return X; }while(0)

/*
 * Estimate rto for an nfs rpc sent via. an unreliable datagram. Use the mean
 * and mean deviation of rtt for the appropriate type of rpc for the frequent
 * rpcs and a default for the others. The justification for doing "other"
 * this way is that these rpcs happen so infrequently that timer est. would
 * probably be stale. Also, since many of these rpcs are non-idempotent, a
 * conservative timeout is desired. getattr, lookup - A+2D read, write -
 * A+4D other - nm_timeo
 */
#define RPC_RTO(n, t) \
	((t) == 0 ? (n)->rc_timeo : \
	 ((t) < 3 ? \
	  (((((n)->rc_srtt[t-1] + 3) >> 2) + (n)->rc_sdrtt[t-1] + 1) >> 1) : \
	  ((((n)->rc_srtt[t-1] + 7) >> 3) + (n)->rc_sdrtt[t-1] + 1)))

/* Smoothed rtt slot for request r's timing class (classes are 1-based,
 * hence the - 1; class 0 = "other" never reaches these macros). */
#define RPC_SRTT(s,r)	(r)->r_rpcclnt->rc_srtt[rpcclnt_proct((s),\
			(r)->r_procnum) - 1]

/* Smoothed rtt mean-deviation slot for request r's timing class. */
#define RPC_SDRTT(s,r)	(r)->r_rpcclnt->rc_sdrtt[rpcclnt_proct((s),\
			(r)->r_procnum) - 1]


/*
 * There is a congestion window for outstanding rpcs maintained per mount
 * point. The cwnd size is adjusted in roughly the way that: Van Jacobson,
 * Congestion avoidance and Control, In "Proceedings of SIGCOMM '88". ACM,
 * August 1988. describes for TCP. The cwnd size is chopped in half on a
 * retransmit timeout and incremented by 1/cwnd when each rpc reply is
 * received and a full cwnd of rpcs is in progress. (The sent count and cwnd
 * are scaled for integer arith.) Variants of "slow start" were tried and
 * were found to be too much of a performance hit (ave. rtt 3 times larger),
 * I suspect due to the large rtt that nfs rpcs have.
 */
#define RPC_CWNDSCALE	256
#define RPC_MAXCWND	(RPC_CWNDSCALE * 32)
/* Retransmit backoff multipliers; presumably indexed by the timeout
 * count in the timer code (not in this chunk) -- TODO confirm. */
static const int rpcclnt_backoff[8] = {2, 4, 8, 16, 32, 64, 128, 256,};
145
/* XXX ugly debug strings */
/*
 * Human-readable diagnostics for RPC error replies.  The tables appear
 * to be indexed by the status codes carried in the reply header
 * (accepted / denied / auth cases); the consuming code is outside this
 * chunk -- verify index ranges against it before extending.
 */
#define RPC_ERRSTR_ACCEPTED_SIZE 6
char *rpc_errstr_accepted[RPC_ERRSTR_ACCEPTED_SIZE] = {
	"",			/* no good message... */
	"remote server hasn't exported program.",
	"remote server can't support version number.",
	"program can't support procedure.",
	"procedure can't decode params.",
	"remote error. remote side memory allocation failure?"
};

char *rpc_errstr_denied[2] = {
	"remote server doesnt support rpc version 2!",
	"remote server authentication error."
};

#define RPC_ERRSTR_AUTH_SIZE 6
char *rpc_errstr_auth[RPC_ERRSTR_AUTH_SIZE] = {
	"",
	"auth error: bad credential (seal broken).",
	"auth error: client must begin new session.",
	"auth error: bad verifier (seal broken).",
	"auth error: verifier expired or replayed.",
	"auth error: rejected for security reasons.",
};
171
/*
 * Static data, mostly RPC constants in XDR form
 */
/* Pre-encoded (network byte order) RPC header words, set in rpcclnt_init(). */
static u_int32_t rpc_reply, rpc_call, rpc_vers;

/*
 * rpc_msgdenied, rpc_mismatch, rpc_auth_unix, rpc_msgaccepted,
 * rpc_autherr, rpc_auth_kerb;
 */

/* Global transaction-id generator state (XXX not visibly locked here). */
static u_int32_t rpcclnt_xid = 0;
static u_int32_t rpcclnt_xid_touched = 0;
/* Counters exported below via sysctl; zeroed in rpcclnt_init(). */
struct rpcstats rpcstats;
/* Timer granularity in hz ticks, computed in rpcclnt_init(). */
int rpcclnt_ticks;

SYSCTL_NODE(_kern, OID_AUTO, rpc, CTLFLAG_RD, 0, "RPC Subsystem");

SYSCTL_UINT(_kern_rpc, OID_AUTO, retries, CTLFLAG_RD, &rpcstats.rpcretries, 0, "retries");
SYSCTL_UINT(_kern_rpc, OID_AUTO, request, CTLFLAG_RD, &rpcstats.rpcrequests, 0, "request");
SYSCTL_UINT(_kern_rpc, OID_AUTO, timeouts, CTLFLAG_RD, &rpcstats.rpctimeouts, 0, "timeouts");
SYSCTL_UINT(_kern_rpc, OID_AUTO, unexpected, CTLFLAG_RD, &rpcstats.rpcunexpected, 0, "unexpected");
SYSCTL_UINT(_kern_rpc, OID_AUTO, invalid, CTLFLAG_RD, &rpcstats.rpcinvalid, 0, "invalid");


#ifdef RPCCLNT_DEBUG
/* Runtime switch for RPCDEBUG() output (kern.rpc.debug_on). */
int rpcdebugon = 0;
SYSCTL_UINT(_kern_rpc, OID_AUTO, debug_on, CTLFLAG_RW, &rpcdebugon, 0, "RPC Debug messages");
#endif

/*
 * Queue head for rpctask's
 */
/* Single global list of in-flight requests, scanned by rpcclnt_reply()
 * and rpcclnt_reconnect(). */
static
TAILQ_HEAD(, rpctask) rpctask_q;
/* Retransmit timer callout (FreeBSD side), armed outside this chunk. */
struct callout rpcclnt_callout;
207
/*
 * Forward declarations for the file-local helpers.  The address
 * parameters differ per platform: OpenBSD passes socket names in
 * mbufs, FreeBSD uses struct sockaddr.
 */
#ifdef __OpenBSD__
static int rpcclnt_send(struct socket *, struct mbuf *, struct mbuf *, struct rpctask *);
static int rpcclnt_receive(struct rpctask *, struct mbuf **, struct mbuf **, RPC_EXEC_CTX);
#else
static int rpcclnt_send(struct socket *, struct sockaddr *, struct mbuf *, struct rpctask *);
static int rpcclnt_receive(struct rpctask *, struct sockaddr **, struct mbuf **, RPC_EXEC_CTX);
#endif

static int rpcclnt_msg(RPC_EXEC_CTX, const char *, char *);

static int rpcclnt_reply(struct rpctask *, RPC_EXEC_CTX);
static void rpcclnt_timer(void *);
/* snd/rcv locks serialize senders and receivers across reconnects. */
static int rpcclnt_sndlock(int *, struct rpctask *);
static void rpcclnt_sndunlock(int *);
static int rpcclnt_rcvlock(struct rpctask *);
static void rpcclnt_rcvunlock(int *);
#if 0
void rpcclnt_realign(struct mbuf *, int);
#else
static void rpcclnt_realign(struct mbuf **, int);
#endif

static struct mbuf *rpcclnt_buildheader(struct rpcclnt *, int, struct mbuf *, u_int32_t, int *, struct mbuf **, struct ucred *);
static int rpcm_disct(struct mbuf **, caddr_t *, int, int, caddr_t *);
static u_int32_t rpcclnt_proct(struct rpcclnt *, u_int32_t);
static int rpc_adv(struct mbuf **, caddr_t *, int, int);
static void rpcclnt_softterm(struct rpctask * task);

static int rpcauth_buildheader(struct rpc_auth * auth, struct ucred *, struct mbuf **, caddr_t *);
237
238 void
239 rpcclnt_init(void)
240 {
241 #ifdef __OpenBSD__
242 static struct timeout rpcclnt_timer_to;
243 #endif
244
245 rpcclnt_ticks = (hz * RPC_TICKINTVL + 500) / 1000;
246 if (rpcclnt_ticks < 1)
247 rpcclnt_ticks = 1;
248 rpcstats.rpcretries = 0;
249 rpcstats.rpcrequests = 0;
250 rpcstats.rpctimeouts = 0;
251 rpcstats.rpcunexpected = 0;
252 rpcstats.rpcinvalid = 0;
253
254 /*
255 * rpc constants how about actually using more than one of these!
256 */
257
258 rpc_reply = txdr_unsigned(RPC_REPLY);
259 rpc_vers = txdr_unsigned(RPC_VER2);
260 rpc_call = txdr_unsigned(RPC_CALL);
261 #if 0
262 rpc_msgdenied = txdr_unsigned(RPC_MSGDENIED);
263 rpc_msgaccepted = txdr_unsigned(RPC_MSGACCEPTED);
264 rpc_mismatch = txdr_unsigned(RPC_MISMATCH);
265 rpc_autherr = txdr_unsigned(RPC_AUTHERR);
266 rpc_auth_unix = txdr_unsigned(RPCAUTH_UNIX);
267 rpc_auth_kerb = txdr_unsigned(RPCAUTH_KERB4);
268 #endif
269
270 /* initialize rpctask queue */
271 TAILQ_INIT(&rpctask_q);
272
273 #ifdef __OpenBSD__
274 /* initialize pools */
275 pool_init(&rpctask_pool, sizeof(struct rpctask), 0, 0, RPCTASKPOOL_LWM,
276 "rpctask_p", NULL);
277 pool_setlowat(&rpctask_pool, RPCTASKPOOL_LWM);
278 pool_sethiwat(&rpctask_pool, RPCTASKPOOL_HWM);
279
280 pool_init(&rpcclnt_pool, sizeof(struct rpcclnt), 0, 0, 1, "rpcclnt_p", NULL);
281
282 /* initialize timers */
283 timeout_set(&rpcclnt_timer_to, rpcclnt_timer, &rpcclnt_timer_to);
284 rpcclnt_timer(&rpcclnt_timer_to);
285 #else /* !__OpenBSD__ */
286 callout_init(&rpcclnt_callout, 0);
287 #endif /* !__OpenBSD__ */
288
289 RPCDEBUG("rpc initialed");
290
291 return;
292 }
293
/*
 * Tear down the RPC client subsystem.  Counterpart to rpcclnt_init():
 * stops the retransmit callout so no further timeouts fire.
 */
void
rpcclnt_uninit(void)
{
	RPCDEBUG("uninit");
	/* XXX delete sysctl variables? */
	callout_stop(&rpcclnt_callout);
}
301
302 int
303 rpcclnt_setup(clnt, program, addr, sotype, soproto, auth, max_read_size, max_write_size, flags)
304 struct rpcclnt * clnt;
305 struct rpc_program * program;
306 struct sockaddr * addr;
307 int sotype;
308 int soproto;
309 struct rpc_auth * auth;
310 int max_read_size;
311 int max_write_size;
312 int flags;
313 {
314 if (clnt == NULL || program == NULL || addr == NULL || auth == NULL)
315 RPC_RETURN (EFAULT);
316
317 if (program->prog_name == NULL)
318 RPC_RETURN (EFAULT);
319 clnt->rc_prog = program;
320
321 clnt->rc_name = addr;
322 clnt->rc_sotype = sotype;
323 clnt->rc_soproto = soproto;
324 clnt->rc_auth = auth;
325 clnt->rc_rsize = max_read_size;
326 clnt->rc_wsize = max_write_size;
327 clnt->rc_flag = flags;
328
329 clnt->rc_proctlen = 0;
330 clnt->rc_proct = NULL;
331
332 RPC_RETURN (0);
333 }
334
/*
 * Initialize sockets and congestion for a new RPC connection. We do not free
 * the sockaddr if error.
 *
 * Steps: create the socket; for AF_INET, bind to a reserved port (set
 * IP_PORTRANGE_LOW, bind to port 0, then restore IP_PORTRANGE_DEFAULT);
 * connect unless RPCCLNT_NOCONN; set socket-buffer timeouts; size the
 * send/receive buffers per socket type; seed the rtt/cwnd estimators.
 * On any failure, jumps to 'bad' which disconnects and returns the error.
 */
int
rpcclnt_connect(rpc, td)
	struct rpcclnt *rpc;
	RPC_EXEC_CTX td;
{
	struct socket *so;
	int s, error, rcvreserve, sndreserve;
	struct sockaddr *saddr;

#ifdef __OpenBSD__
	struct sockaddr_in *sin;
	struct mbuf *m;
#else
	struct sockaddr_in sin;

	int soarg;
	struct sockopt opt;
#endif

	if (rpc == NULL) {
		RPCDEBUG("no rpcclnt struct!\n");
		RPC_RETURN(EFAULT);
	}

	GIANT_REQUIRED;		/* XXX until socket locking done */

	/* create the socket */
	rpc->rc_so = NULL;

	saddr = rpc->rc_name;

	error = socreate(saddr->sa_family, &rpc->rc_so, rpc->rc_sotype,
	    rpc->rc_soproto, td->td_ucred, td);

	if (error) {
		RPCDEBUG("error %d in socreate()", error);
		RPC_RETURN(error);
	}
	so = rpc->rc_so;
	/* Cache protocol flags; consulted later without the socket. */
	rpc->rc_soflags = so->so_proto->pr_flags;

	/*
	 * Some servers require that the client port be a reserved port
	 * number. We always allocate a reserved port, as this prevents
	 * filehandle disclosure through UDP port capture.
	 */
	if (saddr->sa_family == AF_INET) {
#ifdef __OpenBSD__
		struct mbuf *mopt;
		int *ip;
#endif

		/* 1) Restrict ephemeral port selection to the low range. */
#ifdef __OpenBSD__
		MGET(mopt, M_TRYWAIT, MT_SOOPTS);
		mopt->m_len = sizeof(int);
		ip = mtod(mopt, int *);
		*ip = IP_PORTRANGE_LOW;

		error = sosetopt(so, IPPROTO_IP, IP_PORTRANGE, mopt);
#else
		soarg = IP_PORTRANGE_LOW;
		bzero(&opt, sizeof(struct sockopt));
		opt.sopt_dir = SOPT_SET;
		opt.sopt_level = IPPROTO_IP;
		opt.sopt_name = IP_PORTRANGE;
		opt.sopt_val = &soarg;
		opt.sopt_valsize = sizeof(soarg);

		error = sosetopt(so, &opt);
#endif
		if (error)
			goto bad;

		/* 2) Bind port 0 so the kernel picks a port in that range. */
#if __OpenBSD__
		MGET(m, M_TRYWAIT, MT_SONAME);
		sin = mtod(m, struct sockaddr_in *);
		sin->sin_len = m->m_len = sizeof(struct sockaddr_in);
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = INADDR_ANY;
		sin->sin_port = htons(0);
		error = sobind(so, m);
		m_freem(m);
#else
		sin.sin_len = sizeof(struct sockaddr_in);
		sin.sin_family = AF_INET;
		sin.sin_addr.s_addr = INADDR_ANY;
		sin.sin_port = htons(0);
		error = sobind(so, (struct sockaddr *) & sin, td);
#endif
		if (error)
			goto bad;

		/* 3) Restore the default port range for later operations. */
#if __OpenBSD__
		MGET(mopt, M_TRYWAIT, MT_SOOPTS);
		mopt->m_len = sizeof(int);
		ip = mtod(mopt, int *);
		*ip = IP_PORTRANGE_DEFAULT;
		error = sosetopt(so, IPPROTO_IP, IP_PORTRANGE, mopt);
#else
		soarg = IP_PORTRANGE_DEFAULT;
		bzero(&opt, sizeof(struct sockopt));
		opt.sopt_dir = SOPT_SET;
		opt.sopt_level = IPPROTO_IP;
		opt.sopt_name = IP_PORTRANGE;
		opt.sopt_val = &soarg;
		opt.sopt_valsize = sizeof(soarg);
		error = sosetopt(so, &opt);
#endif
		if (error)
			goto bad;
	}
	/*
	 * Protocols that do not require connections may be optionally left
	 * unconnected for servers that reply from a port other than
	 * NFS_PORT.
	 */
	if (rpc->rc_flag & RPCCLNT_NOCONN) {
		if (rpc->rc_soflags & PR_CONNREQUIRED) {
			error = ENOTCONN;
			goto bad;
		}
	} else {
		error = soconnect(so, saddr, td);
		if (error)
			goto bad;

		/*
		 * Wait for the connection to complete. Cribbed from the
		 * connect system call but with the wait timing out so that
		 * interruptible mounts don't hang here for a long time.
		 */
#ifdef __OpenBSD__
		s = splsoftnet();
#else
		s = splnet();
#endif
		while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
			/* Poll every 2s; connection completion is signalled
			 * on so_timeo by the protocol layer. */
			(void)tsleep((caddr_t) & so->so_timeo, PSOCK,
			    "rpc", 2 * hz);

			/*
			 * XXX needs to catch interrupt signals. something
			 * like this: if ((so->so_state & SS_ISCONNECTING) &&
			 * so->so_error == 0 && rep && (error =
			 * nfs_sigintr(nmp, rep, rep->r_td)) != 0) {
			 * so->so_state &= ~SS_ISCONNECTING; splx(s); goto
			 * bad; }
			 */
		}
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			splx(s);
			goto bad;
		}
		splx(s);
	}
	/* Soft/interruptible mounts get finite socket-buffer timeouts. */
	if (rpc->rc_flag & (RPCCLNT_SOFT | RPCCLNT_INT)) {
		so->so_rcv.sb_timeo = (5 * hz);
		so->so_snd.sb_timeo = (5 * hz);
	} else {
		so->so_rcv.sb_timeo = 0;
		so->so_snd.sb_timeo = 0;
	}


	/*
	 * Size the socket buffers: one packet for datagrams, two for
	 * seqpacket/stream (stream also reserves the 4-byte record mark).
	 */
	if (rpc->rc_sotype == SOCK_DGRAM) {
		sndreserve = rpc->rc_wsize + RPC_MAXPKTHDR;
		rcvreserve = rpc->rc_rsize + RPC_MAXPKTHDR;
	} else if (rpc->rc_sotype == SOCK_SEQPACKET) {
		sndreserve = (rpc->rc_wsize + RPC_MAXPKTHDR) * 2;
		rcvreserve = (rpc->rc_rsize + RPC_MAXPKTHDR) * 2;
	} else {
		if (rpc->rc_sotype != SOCK_STREAM)
			panic("rpcclnt_connect() bad sotype");
		if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
			/* Keepalive detects a silently dead server. */
#ifdef __OpenBSD__
			MGET(m, M_TRYWAIT, MT_SOOPTS);
			*mtod(m, int32_t *) = 1;
			m->m_len = sizeof(int32_t);
			sosetopt(so, SOL_SOCKET, SO_KEEPALIVE, m);
#else
			soarg = 1;

			bzero(&opt, sizeof(struct sockopt));
			opt.sopt_dir = SOPT_SET;
			opt.sopt_level = SOL_SOCKET;
			opt.sopt_name = SO_KEEPALIVE;
			opt.sopt_val = &soarg;
			opt.sopt_valsize = sizeof(soarg);
			sosetopt(so, &opt);
#endif
		}
		if (so->so_proto->pr_protocol == IPPROTO_TCP) {
			/* Small RPCs should not wait for Nagle coalescing. */
#ifdef __OpenBSD__
			MGET(m, M_TRYWAIT, MT_SOOPTS);
			*mtod(m, int32_t *) = 1;
			m->m_len = sizeof(int32_t);
			sosetopt(so, IPPROTO_TCP, TCP_NODELAY, m);
#else
			soarg = 1;

			bzero(&opt, sizeof(struct sockopt));
			opt.sopt_dir = SOPT_SET;
			opt.sopt_level = IPPROTO_TCP;
			opt.sopt_name = TCP_NODELAY;
			opt.sopt_val = &soarg;
			opt.sopt_valsize = sizeof(soarg);
			sosetopt(so, &opt);
#endif
		}
		sndreserve = (rpc->rc_wsize + RPC_MAXPKTHDR +
		    sizeof(u_int32_t)) * 2;
		rcvreserve = (rpc->rc_rsize + RPC_MAXPKTHDR +
		    sizeof(u_int32_t)) * 2;
	}
	error = soreserve(so, sndreserve, rcvreserve);
	if (error)
		goto bad;
	so->so_rcv.sb_flags |= SB_NOINTR;
	so->so_snd.sb_flags |= SB_NOINTR;

	/* Initialize other non-zero congestion variables */
	rpc->rc_srtt[0] = rpc->rc_srtt[1] = rpc->rc_srtt[2] =
	    rpc->rc_srtt[3] = (RPC_TIMEO << 3);
	rpc->rc_sdrtt[0] = rpc->rc_sdrtt[1] = rpc->rc_sdrtt[2] =
	    rpc->rc_sdrtt[3] = 0;
	rpc->rc_cwnd = RPC_MAXCWND / 2;	/* Initial send window */
	rpc->rc_sent = 0;
	rpc->rc_timeouts = 0;
	RPC_RETURN(0);

bad:
	rpcclnt_disconnect(rpc);
	RPC_RETURN(error);
}
575
576
577 /*
578 * Reconnect routine: Called when a connection is broken on a reliable
579 * protocol. - clean up the old socket - nfs_connect() again - set
580 * R_MUSTRESEND for all outstanding requests on mount point If this fails the
581 * mount point is DEAD! nb: Must be called with the nfs_sndlock() set on the
582 * mount point.
583 */
584 int
585 rpcclnt_reconnect(rep, td)
586 struct rpctask *rep;
587 RPC_EXEC_CTX td;
588 {
589 struct rpctask *rp;
590 struct rpcclnt *rpc = rep->r_rpcclnt;
591 int error;
592
593 rpcclnt_disconnect(rpc);
594 while ((error = rpcclnt_connect(rpc, td)) != 0) {
595 if (error == EINTR || error == ERESTART)
596 RPC_RETURN(EINTR);
597 tsleep(&lbolt, PSOCK, "rpccon", 0);
598 }
599
600 /*
601 * Loop through outstanding request list and fix up all requests on
602 * old socket.
603 */
604 for (rp = TAILQ_FIRST(&rpctask_q); rp != NULL;
605 rp = TAILQ_NEXT(rp, r_chain)) {
606 if (rp->r_rpcclnt == rpc)
607 rp->r_flags |= R_MUSTRESEND;
608 }
609 RPC_RETURN(0);
610 }
611
612 /*
613 * NFS disconnect. Clean up and unlink.
614 */
615 void
616 rpcclnt_disconnect(rpc)
617 struct rpcclnt *rpc;
618 {
619 struct socket *so;
620
621 GIANT_REQUIRED; /* XXX until socket locking done */
622
623 if (rpc->rc_so) {
624 so = rpc->rc_so;
625 rpc->rc_so = NULL;
626 soshutdown(so, 2);
627 soclose(so);
628 }
629 }
630
631 void
632 rpcclnt_safedisconnect(struct rpcclnt * rpc)
633 {
634 struct rpctask dummytask;
635
636 bzero(&dummytask, sizeof(dummytask));
637 dummytask.r_rpcclnt = rpc;
638 rpcclnt_rcvlock(&dummytask);
639 rpcclnt_disconnect(rpc);
640 rpcclnt_rcvunlock(&rpc->rc_flag);
641 }
642
/*
 * This is the nfs send routine. For connection based socket types, it must
 * be called with an nfs_sndlock() on the socket. "rep == NULL" indicates
 * that it has been called from a server. For the client side: - return EINTR
 * if the RPC is terminated, 0 otherwise - set R_MUSTRESEND if the send fails
 * for any reason - do any cleanup required by recoverable socket errors
 * (???) For the server side: - return EINTR or ERESTART if interrupted by a
 * signal - return EPIPE if a connection is lost for connection based sockets
 * (TCP...) - do any cleanup required by recoverable socket errors (???)
 *
 * Consumes 'top' (the request mbuf chain) in all cases: it is either
 * handed to sosend() or freed here on the early-exit paths.
 */
static int
rpcclnt_send(so, nam, top, rep)
	struct socket *so;
#ifdef __OpenBSD__
	struct mbuf *nam;
#else
	struct sockaddr *nam;
#endif
	struct mbuf *top;
	struct rpctask *rep;
{
#ifdef __OpenBSD__
	struct mbuf *sendnam;
#else
	struct sockaddr *sendnam;
	struct thread *td = curthread;
#endif
	int error, soflags, flags;

	GIANT_REQUIRED;		/* XXX until socket locking done */

	if (rep) {
		/* Task already terminated: drop the request silently. */
		if (rep->r_flags & R_SOFTTERM) {
			m_freem(top);
			RPC_RETURN(EINTR);
		}
		/* No socket (mid-reconnect): queue for resend, not an error. */
		if ((so = rep->r_rpcclnt->rc_so) == NULL) {
			rep->r_flags |= R_MUSTRESEND;
			m_freem(top);
			RPC_RETURN(0);
		}
		rep->r_flags &= ~R_MUSTRESEND;
		soflags = rep->r_rpcclnt->rc_soflags;
	} else
		soflags = so->so_proto->pr_flags;

	/* Only pass an address for unconnected, connectionless sockets. */
	if ((soflags & PR_CONNREQUIRED) || (so->so_state & SS_ISCONNECTED))
		sendnam = NULL;
	else
		sendnam = nam;

	/* Seqpacket sockets need an explicit record boundary. */
	if (so->so_type == SOCK_SEQPACKET)
		flags = MSG_EOR;
	else
		flags = 0;

	error = sosend(so, sendnam, NULL, top, NULL, flags, td);

	if (error) {
		if (rep) {
			log(LOG_INFO, "rpc send error %d for service %s\n", error,
			    rep->r_rpcclnt->rc_prog->prog_name);
			/*
			 * Deal with errors for the client side.
			 */
			if (rep->r_flags & R_SOFTTERM)
				error = EINTR;
			else
				rep->r_flags |= R_MUSTRESEND;
		} else
			log(LOG_INFO, "rpc service send error %d\n", error);

		/*
		 * Handle any recoverable (soft) socket errors here.
		 * (Everything except interrupt/blocking/pipe errors is
		 * swallowed; the retransmit path covers the loss.)
		 */
		if (error != EINTR && error != ERESTART &&
		    error != EWOULDBLOCK && error != EPIPE)
			error = 0;
	}
	RPC_RETURN(error);
}
724
/*
 * Receive a Sun RPC Request/Reply. For SOCK_DGRAM, the work is all done by
 * soreceive(), but for SOCK_STREAM we must deal with the Record Mark and
 * consolidate the data into a new mbuf list. nb: Sometimes TCP passes the
 * data up to soreceive() in long lists of small mbufs. For SOCK_STREAM we
 * must be very careful to read an entire record once we have read any of it,
 * even if the system call has been interrupted.
 *
 * On success *mp holds the received (realigned) record and, for
 * unconnected datagram sockets, *aname the sender's address (caller
 * frees both).  On error both are NULL.
 */
static int
rpcclnt_receive(rep, aname, mp, td)
	struct rpctask *rep;
#ifdef __OpenBSD__
	struct mbuf **aname;
#else
	struct sockaddr **aname;
#endif
	struct mbuf **mp;
	RPC_EXEC_CTX td;
{
	struct socket *so;
	struct uio auio;
	struct iovec aio;
	struct mbuf *m;
	struct mbuf *control;
	u_int32_t len;
#ifdef __OpenBSD__
	struct mbuf **getnam;
#else
	struct sockaddr **getnam;
#endif
	int error, sotype, rcvflg;

	GIANT_REQUIRED;		/* XXX until socket locking done */

	/*
	 * Set up arguments for soreceive()
	 */
	*mp = NULL;
	*aname = NULL;
	sotype = rep->r_rpcclnt->rc_sotype;

	/*
	 * For reliable protocols, lock against other senders/receivers in
	 * case a reconnect is necessary. For SOCK_STREAM, first get the
	 * Record Mark to find out how much more there is to get. We must
	 * lock the socket against other receivers until we have an entire
	 * rpc request/reply.
	 */
	if (sotype != SOCK_DGRAM) {
		error = rpcclnt_sndlock(&rep->r_rpcclnt->rc_flag, rep);
		if (error)
			RPC_RETURN(error);
tryagain:
		/*
		 * Check for fatal errors and resending request.
		 */
		/*
		 * Ugh: If a reconnect attempt just happened, rc_so would
		 * have changed. NULL indicates a failed attempt that has
		 * essentially shut down this mount point.
		 */
		if (rep->r_mrep || (rep->r_flags & R_SOFTTERM)) {
			rpcclnt_sndunlock(&rep->r_rpcclnt->rc_flag);
			RPC_RETURN(EINTR);
		}
		so = rep->r_rpcclnt->rc_so;
		if (!so) {
			/* Socket gone: rebuild it and start over. */
			error = rpcclnt_reconnect(rep, td);
			if (error) {
				rpcclnt_sndunlock(&rep->r_rpcclnt->rc_flag);
				RPC_RETURN(error);
			}
			goto tryagain;
		}
		/* Retransmit a copy of the request while it is flagged. */
		while (rep->r_flags & R_MUSTRESEND) {
			m = m_copym(rep->r_mreq, 0, M_COPYALL, M_TRYWAIT);
			rpcstats.rpcretries++;
			error = rpcclnt_send(so, rep->r_rpcclnt->rc_name, m, rep);
			if (error) {
				if (error == EINTR || error == ERESTART ||
				    (error = rpcclnt_reconnect(rep, td)) != 0) {
					rpcclnt_sndunlock(&rep->r_rpcclnt->rc_flag);
					RPC_RETURN(error);
				}
				goto tryagain;
			}
		}
		rpcclnt_sndunlock(&rep->r_rpcclnt->rc_flag);
		if (sotype == SOCK_STREAM) {
			/* First read the 4-byte RPC record mark. */
			aio.iov_base = (caddr_t) & len;
			aio.iov_len = sizeof(u_int32_t);
			auio.uio_iov = &aio;
			auio.uio_iovcnt = 1;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_READ;
			auio.uio_offset = 0;
			auio.uio_resid = sizeof(u_int32_t);
#ifdef __OpenBSD__
			auio.uio_procp = td;
#else
			auio.uio_td = td;
#endif
			do {
				rcvflg = MSG_WAITALL;
				error = soreceive(so, NULL, &auio, NULL, NULL, &rcvflg);
				if (error == EWOULDBLOCK && rep) {
					if (rep->r_flags & R_SOFTTERM)
						RPC_RETURN(EINTR);
				}
			} while (error == EWOULDBLOCK);
			if (!error && auio.uio_resid > 0) {
				log(LOG_INFO,
				    "short receive (%zu/%zu) from rpc server %s\n",
				    sizeof(u_int32_t) - auio.uio_resid,
				    sizeof(u_int32_t),
				    rep->r_rpcclnt->rc_prog->prog_name);
				error = EPIPE;
			}
			if (error)
				goto errout;
			/* Strip the last-fragment bit to get the length. */
			len = ntohl(len) & ~0x80000000;
			/*
			 * This is SERIOUS! We are out of sync with the
			 * sender and forcing a disconnect/reconnect is all I
			 * can do.
			 */
			if (len > RPC_MAXPACKET) {
				log(LOG_ERR, "%s (%d) from rpc server %s\n",
				    "impossible packet length",
				    len,
				    rep->r_rpcclnt->rc_prog->prog_name);
				error = EFBIG;
				goto errout;
			}
			/* Now read exactly one record's worth of data. */
			auio.uio_resid = len;
			do {
				rcvflg = MSG_WAITALL;
				error = soreceive(so, NULL, &auio, mp, NULL, &rcvflg);
			} while (error == EWOULDBLOCK || error == EINTR ||
			    error == ERESTART);
			if (!error && auio.uio_resid > 0) {
				log(LOG_INFO,
				    "short receive (%d/%d) from rpc server %s\n",
				    len - auio.uio_resid, len,
				    rep->r_rpcclnt->rc_prog->prog_name);
				error = EPIPE;
			}
		} else {
			/*
			 * NB: Since uio_resid is big, MSG_WAITALL is ignored
			 * and soreceive() will return when it has either a
			 * control msg or a data msg. We have no use for
			 * control msg., but must grab them and then throw
			 * them away so we know what is going on.
			 */
			auio.uio_resid = len = 100000000;	/* Anything Big */
#ifdef __OpenBSD__
			auio.uio_procp = td;
#else
			auio.uio_td = td;
#endif
			do {
				rcvflg = 0;
				error = soreceive(so, NULL, &auio, mp, &control, &rcvflg);
				if (control)
					m_freem(control);
				if (error == EWOULDBLOCK && rep) {
					if (rep->r_flags & R_SOFTTERM)
						RPC_RETURN(EINTR);
				}
			} while (error == EWOULDBLOCK ||
			    (!error && *mp == NULL && control));
			if ((rcvflg & MSG_EOR) == 0)
				printf("Egad!!\n");
			if (!error && *mp == NULL)
				error = EPIPE;
			len -= auio.uio_resid;
		}
errout:
		if (error && error != EINTR && error != ERESTART) {
			/* Non-fatal stream error: reconnect and retry. */
			m_freem(*mp);
			*mp = (struct mbuf *) 0;
			if (error != EPIPE)
				log(LOG_INFO,
				    "receive error %d from rpc server %s\n",
				    error,
				    rep->r_rpcclnt->rc_prog->prog_name);
			error = rpcclnt_sndlock(&rep->r_rpcclnt->rc_flag, rep);
			if (!error)
				error = rpcclnt_reconnect(rep, td);
			if (!error)
				goto tryagain;
		}
	} else {
		/* Datagram case: a single soreceive() gets the whole reply. */
		if ((so = rep->r_rpcclnt->rc_so) == NULL)
			RPC_RETURN(EACCES);
		if (so->so_state & SS_ISCONNECTED)
			getnam = NULL;
		else
			getnam = aname;
		auio.uio_resid = len = 1000000;
#ifdef __OpenBSD__
		auio.uio_procp = td;
#else
		auio.uio_td = td;
#endif

		do {
			rcvflg = 0;
			error = soreceive(so, getnam, &auio, mp, NULL, &rcvflg);
			RPCDEBUG("soreceivce returns %d", error);
			if (error == EWOULDBLOCK && (rep->r_flags & R_SOFTTERM)) {
				RPCDEBUG("wouldblock && softerm -> EINTR");
				RPC_RETURN(EINTR);
			}
		} while (error == EWOULDBLOCK);
		len -= auio.uio_resid;
	}
	if (error) {
		m_freem(*mp);
		*mp = NULL;
	} else {
		/*
		 * Search for any mbufs that are not a multiple of 4 bytes
		 * long or with m_data not longword aligned. These could
		 * cause pointer alignment problems, so copy them to well
		 * aligned mbufs.
		 */
		rpcclnt_realign(mp, 5 * RPCX_UNSIGNED);
	}
	RPC_RETURN(error);
}
957
958
/*
 * Implement receipt of reply on a socket. We must search through the list of
 * received datagrams matching them with outstanding requests using the xid,
 * until ours is found.
 *
 * While matching, this also updates the congestion window (additive
 * increase) and the srtt/sdrtt estimators for the matched request.
 */
/* ARGSUSED */
static int
rpcclnt_reply(myrep, td)
	struct rpctask *myrep;
	RPC_EXEC_CTX td;
{
	struct rpctask *rep;
	struct rpcclnt *rpc = myrep->r_rpcclnt;
	int32_t t1;
	struct mbuf *mrep, *md;
#ifdef __OpenBSD__
	struct mbuf *nam;
#else
	struct sockaddr *nam;
#endif
	u_int32_t rxid, *tl;
	/* cp2 is referenced by the rpcm_dissect() macro expansion. */
	caddr_t dpos, cp2;
	int error;

	/*
	 * Loop around until we get our own reply
	 */
	for (;;) {
		/*
		 * Lock against other receivers so that I don't get stuck in
		 * sbwait() after someone else has received my reply for me.
		 * Also necessary for connection based protocols to avoid
		 * race conditions during a reconnect.
		 */
		error = rpcclnt_rcvlock(myrep);
		if (error)
			RPC_RETURN(error);
		/* Already received, bye bye */
		if (myrep->r_mrep != NULL) {
			rpcclnt_rcvunlock(&rpc->rc_flag);
			RPC_RETURN(0);
		}
		/*
		 * Get the next Rpc reply off the socket
		 */
		error = rpcclnt_receive(myrep, &nam, &mrep, td);

		rpcclnt_rcvunlock(&rpc->rc_flag);

		if (error) {
			/*
			 * Ignore routing errors on connectionless
			 * protocols??
			 */
			if (RPCIGNORE_SOERROR(rpc->rc_soflags, error)) {
				rpc->rc_so->so_error = 0;
				if (myrep->r_flags & R_GETONEREP)
					RPC_RETURN(0);
				RPCDEBUG("ingoring routing error on connectionless protocol.");
				continue;
			}
			RPC_RETURN(error);
		}
		/* Sender address (datagram case) is not needed; free it. */
#ifdef __OpenBSD__
		if (nam)
			m_freem(nam);
#else
		if (nam)
			FREE(nam, M_SONAME);
#endif

		/*
		 * Get the xid and check that it is an rpc reply
		 */
		md = mrep;
		dpos = mtod(md, caddr_t);
		/* NB: rpcm_dissect() jumps to the rpcmout label below if the
		 * header cannot be pulled out of the mbuf chain. */
		rpcm_dissect(tl, u_int32_t *, 2 * RPCX_UNSIGNED);
		rxid = *tl++;
		if (*tl != rpc_reply) {
			rpcstats.rpcinvalid++;
			m_freem(mrep);
rpcmout:
			if (myrep->r_flags & R_GETONEREP)
				RPC_RETURN(0);
			continue;
		}
		/*
		 * Loop through the request list to match up the reply Iff no
		 * match, just drop the datagram
		 */
		TAILQ_FOREACH(rep, &rpctask_q, r_chain) {
			if (rep->r_mrep == NULL && rxid == rep->r_xid) {
				/* Found it.. */
				rep->r_mrep = mrep;
				rep->r_md = md;
				rep->r_dpos = dpos;

				/*
				 * Update congestion window. Do the additive
				 * increase of one rpc/rtt.
				 */
				if (rpc->rc_cwnd <= rpc->rc_sent) {
					rpc->rc_cwnd +=
					    (RPC_CWNDSCALE * RPC_CWNDSCALE +
					    (rpc->rc_cwnd >> 1)) / rpc->rc_cwnd;
					if (rpc->rc_cwnd > RPC_MAXCWND)
						rpc->rc_cwnd = RPC_MAXCWND;
				}
				rep->r_flags &= ~R_SENT;
				rpc->rc_sent -= RPC_CWNDSCALE;
				/*
				 * Update rtt using a gain of 0.125 on the
				 * mean and a gain of 0.25 on the deviation.
				 */
				if (rep->r_flags & R_TIMING) {
					/*
					 * Since the timer resolution of
					 * NFS_HZ is so course, it can often
					 * result in r_rtt == 0. Since r_rtt
					 * == N means that the actual rtt is
					 * between N+dt and N+2-dt ticks, add
					 * 1.
					 */
					t1 = rep->r_rtt + 1;
					t1 -= (RPC_SRTT(rpc, rep) >> 3);
					RPC_SRTT(rpc, rep) += t1;
					if (t1 < 0)
						t1 = -t1;
					t1 -= (RPC_SDRTT(rpc, rep) >> 2);
					RPC_SDRTT(rpc, rep) += t1;
				}
				rpc->rc_timeouts = 0;
				break;
			}
		}
		/*
		 * If not matched to a request, drop it. If it's mine, get
		 * out.
		 */
		if (rep == 0) {
			rpcstats.rpcunexpected++;
			RPCDEBUG("rpc reply not matched\n");
			m_freem(mrep);
		} else if (rep == myrep) {
			if (rep->r_mrep == NULL)
				panic("rpcreply nil");
			RPC_RETURN(0);
		}
		if (myrep->r_flags & R_GETONEREP)
			RPC_RETURN(0);
	}
}
1111
1112 /* XXX: ignores tryagain! */
1113 /*
1114 * code from nfs_request - goes something like this - fill in task struct -
1115 * links task into list - calls nfs_send() for first transmit - calls
1116 * nfs_receive() to get reply - fills in reply (which should be initialized
1117 * prior to calling), which is valid when 0 is returned and is NEVER freed in
1118 * this function
1119 *
1120 * always frees the request header, but NEVER frees 'mrest'
1121 *
1122 */
1123 /*
1124 * ruthtype
1125 * pcclnt_setauth() should be used before calling this. EAUTH is returned if
1126 * authentication fails.
1127 */
1128 /*
1129 * note that reply->result_* are invalid unless reply->type ==
1130 * RPC_MSGACCEPTED and reply->status == RPC_SUCCESS and that reply->verf_*
1131 * are invalid unless reply->type == RPC_MSGACCEPTED
1132 */
/*
 * Issue one client-side RPC: allocate and link an rpctask, transmit the
 * request (or leave it to the timer under congestion), sleep for the reply,
 * then decode the RPC reply header into 'reply'.  Frees the request header
 * it built but never 'mrest'; 'reply' mbufs are owned by the caller on
 * success.
 */
int
rpcclnt_request(rpc, mrest, procnum, td, cred, reply)
	struct rpcclnt *rpc;
	struct mbuf *mrest;
	int procnum;
	RPC_EXEC_CTX td;
	struct ucred *cred;
	struct rpc_reply *reply;
{
	struct mbuf *m, *mrep;
	struct rpctask *task;
	u_int32_t *tl;
	struct mbuf *md, *mheadend;
	/* dpos/cp2/t1/error are implicitly used by the rpcm_* macros below. */
	caddr_t dpos, cp2;
	int t1, s, error = 0, mrest_len;
	u_int32_t xid;

#ifdef __OpenBSD__
	task = pool_get(&rpctask_pool, PR_WAITOK);
#else
	/* M_WAITOK: cannot fail; M_ZERO: task starts fully cleared. */
	MALLOC(task, struct rpctask *, sizeof(struct rpctask), M_RPC, (M_WAITOK | M_ZERO));
#endif

	task->r_rpcclnt = rpc;
	task->r_procnum = procnum;
	task->r_td = td;

	mrest_len = m_length(mrest, NULL);

	/*
	 * NOTE(review): rpcclnt_buildheader takes 'int *xidp' but receives
	 * &xid (u_int32_t) here — type mismatch worth confirming/cleaning.
	 */
	m = rpcclnt_buildheader(rpc, procnum, mrest, mrest_len, &xid, &mheadend,
	    cred);

	/*
	 * For stream protocols, insert a Sun RPC Record Mark.
	 */
	if (rpc->rc_sotype == SOCK_STREAM) {
		M_PREPEND(m, RPCX_UNSIGNED, M_TRYWAIT);
		/* High bit set = last fragment; low 31 bits = record length. */
		*mtod(m, u_int32_t *) = htonl(0x80000000 |
		    (m->m_pkthdr.len - RPCX_UNSIGNED));
	}
	task->r_mreq = m;
	task->r_xid = xid;

	if (rpc->rc_flag & RPCCLNT_SOFT)
		task->r_retry = rpc->rc_retry;
	else
		task->r_retry = RPC_MAXREXMIT + 1;	/* past clip limit */
	task->r_rtt = task->r_rexmit = 0;

	/* Only procedures with a non-zero timing class get RTT measured. */
	if (rpcclnt_proct(rpc, procnum) > 0)
		task->r_flags = R_TIMING;
	else
		task->r_flags = 0;
	task->r_mrep = NULL;

	/*
	 * Do the client side RPC.
	 */
	rpcstats.rpcrequests++;

	/*
	 * Chain request into list of outstanding requests. Be sure to put it
	 * LAST so timer finds oldest requests first.
	 */
	s = splsoftclock();
	/* First outstanding task re-arms the retransmit timer. */
	if (TAILQ_EMPTY(&rpctask_q))
		callout_reset(&rpcclnt_callout, rpcclnt_ticks, rpcclnt_timer,
		    NULL);
	TAILQ_INSERT_TAIL(&rpctask_q, task, r_chain);

	/*
	 * If backing off another request or avoiding congestion, don't send
	 * this one now but let timer do it. If not timing a request, do it
	 * now.
	 */
	if (rpc->rc_so && (rpc->rc_sotype != SOCK_DGRAM ||
	    (rpc->rc_flag & RPCCLNT_DUMBTIMR) ||
	    rpc->rc_sent < rpc->rc_cwnd)) {
		splx(s);

		if (rpc->rc_soflags & PR_CONNREQUIRED)
			error = rpcclnt_sndlock(&rpc->rc_flag, task);
		if (!error) {
			/* Send a copy; the original is kept for retransmit. */
			error = rpcclnt_send(rpc->rc_so, rpc->rc_name,
			    m_copym(m, 0, M_COPYALL, M_TRYWAIT),
			    task);
			if (rpc->rc_soflags & PR_CONNREQUIRED)
				rpcclnt_sndunlock(&rpc->rc_flag);
		}
		if (!error && (task->r_flags & R_MUSTRESEND) == 0) {
			rpc->rc_sent += RPC_CWNDSCALE;
			task->r_flags |= R_SENT;
		}
	} else {
		splx(s);
		/* r_rtt == -1 tells the timer to send this task itself. */
		task->r_rtt = -1;
	}

	/*
	 * Wait for the reply from our send or the timer's.
	 */
	if (!error || error == EPIPE)
		error = rpcclnt_reply(task, td);

	/*
	 * RPC done, unlink the request.
	 */
	s = splsoftclock();
	TAILQ_REMOVE(&rpctask_q, task, r_chain);
	if (TAILQ_EMPTY(&rpctask_q))
		callout_stop(&rpcclnt_callout);
	splx(s);

	/*
	 * Decrement the outstanding request count.
	 */
	if (task->r_flags & R_SENT) {
		task->r_flags &= ~R_SENT;	/* paranoia */
		rpc->rc_sent -= RPC_CWNDSCALE;
	}
	/*
	 * If there was a successful reply and a tprintf msg. tprintf a
	 * response.
	 */
	if (!error && (task->r_flags & R_TPRINTFMSG)) {
		mtx_lock(&Giant);
		rpcclnt_msg(task->r_td, rpc->rc_prog->prog_name,
		    "is alive again");
		mtx_unlock(&Giant);
	}

	/* free request header (leaving mrest) */
	mheadend->m_next = NULL;
	m_freem(task->r_mreq);

	/* initialize reply */
	reply->mrep = task->r_mrep;
	reply->verf_md = NULL;
	reply->result_md = NULL;

	mrep = task->r_mrep;
	md = task->r_md;
	dpos = task->r_dpos;

	/* task structure is no longer needed */
#ifdef __OpenBSD__
	pool_put(&rpctask_pool, task);
#else
	FREE(task, M_RPC);
#endif

	if (error)
		goto rpcmout;

	/*
	 * break down the rpc header and check if ok
	 *
	 * NOTE(review): the rpcm_dissect/rpcm_adv macros appear to update
	 * md/dpos and jump to the local 'rpcmout' label with 'error' set on
	 * a short/garbled reply — macro bodies not visible here; confirm.
	 */

	rpcm_dissect(tl, u_int32_t *, RPCX_UNSIGNED);
	reply->stat.type = fxdr_unsigned(u_int32_t, *tl);

	if (reply->stat.type == RPC_MSGDENIED) {
		rpcm_dissect(tl, u_int32_t *, RPCX_UNSIGNED);
		reply->stat.status = fxdr_unsigned(u_int32_t, *tl);

		switch (reply->stat.status) {
		case RPC_MISMATCH:
			/* Server rejected our RPC version; report its range. */
			rpcm_dissect(tl, u_int32_t *, 2 * RPCX_UNSIGNED);
			reply->stat.mismatch_info.low = fxdr_unsigned(u_int32_t, *tl++);
			reply->stat.mismatch_info.high = fxdr_unsigned(u_int32_t, *tl);
			error = EOPNOTSUPP;
			break;
		case RPC_AUTHERR:
			rpcm_dissect(tl, u_int32_t *, RPCX_UNSIGNED);
			reply->stat.autherr = fxdr_unsigned(u_int32_t, *tl);
			error = EACCES;
			break;
		default:
			error = EBADRPC;
			break;
		}
		goto rpcmout;
	} else if (reply->stat.type != RPC_MSGACCEPTED) {
		error = EBADRPC;
		goto rpcmout;
	}

	/* Verifier: flavor word + opaque body length. */
	rpcm_dissect(tl, u_int32_t *, 2 * RPCX_UNSIGNED);

	reply->verf_md = md;
	reply->verf_dpos = dpos;

	reply->verf_type = fxdr_unsigned(u_int32_t, *tl++);
	reply->verf_size = fxdr_unsigned(u_int32_t, *tl);

	if (reply->verf_size != 0)
		rpcm_adv(rpcm_rndup(reply->verf_size));

	rpcm_dissect(tl, u_int32_t *, RPCX_UNSIGNED);
	reply->stat.status = fxdr_unsigned(u_int32_t, *tl);

	if (reply->stat.status == RPC_SUCCESS) {
		/* If dpos ran off this mbuf, the results start in the next one. */
		if ((uint32_t)(dpos - mtod(md, caddr_t)) >= md->m_len) {
			RPCDEBUG("where is the next mbuf?");
			RPCDEBUG("%d -> %d",
			    (int)(dpos - mtod(md, caddr_t)), md->m_len);
			if (md->m_next == NULL) {
				error = EBADRPC;
				goto rpcmout;
			} else {
				reply->result_md = md->m_next;
				reply->result_dpos = mtod(reply->result_md,
				    caddr_t);
			}
		} else {
			reply->result_md = md;
			reply->result_dpos = dpos;
		}
	} else if (reply->stat.status == RPC_PROGMISMATCH) {
		rpcm_dissect(tl, u_int32_t *, 2 * RPCX_UNSIGNED);
		reply->stat.mismatch_info.low = fxdr_unsigned(u_int32_t, *tl++);
		reply->stat.mismatch_info.high = fxdr_unsigned(u_int32_t, *tl);
		error = EOPNOTSUPP;
		goto rpcmout;
	} else {
		error = EPROTONOSUPPORT;
		goto rpcmout;
	}
	error = 0;

rpcmout:
	RPC_RETURN(error);
}
1366
1367
1368 /*
1369 * Nfs timer routine Scan the nfsreq list and retranmit any requests that
1370 * have timed out To avoid retransmission attempts on STREAM sockets (in the
1371 * future) make sure to set the r_retry field to 0 (implies nm_retry == 0).
1372 */
void
rpcclnt_timer(arg)
	void *arg;
{
#ifdef __OpenBSD__
	struct timeout *to = (struct timeout *) arg;
#endif
	struct rpctask *rep;
	struct mbuf *m;
	struct socket *so;
	struct rpcclnt *rpc;
	int timeo;
	int s, error;

#ifndef __OpenBSD__
	struct thread *td = curthread;
#endif

#if __OpenBSD__
	s = splsoftnet();
#else
	s = splnet();
#endif
	mtx_lock(&Giant);	/* rpc_msg -> tprintf */
	/* Scan every outstanding task, oldest first (insertion order). */
	TAILQ_FOREACH(rep, &rpctask_q, r_chain) {
		rpc = rep->r_rpcclnt;
		/* Already answered or already cancelled: nothing to do. */
		if (rep->r_mrep || (rep->r_flags & R_SOFTTERM))
			continue;
		/* A pending signal cancels an interruptible task. */
		if (rpcclnt_sigintr(rpc, rep, rep->r_td)) {
			rep->r_flags |= R_SOFTTERM;
			continue;
		}
		/* r_rtt >= 0 means a send is in flight and being timed. */
		if (rep->r_rtt >= 0) {
			rep->r_rtt++;
			if (rpc->rc_flag & RPCCLNT_DUMBTIMR)
				timeo = rpc->rc_timeo;
			else
				timeo = RPC_RTO(rpc, rpcclnt_proct(rep->r_rpcclnt,
				    rep->r_procnum));
			/* Exponential-style backoff from the table. */
			if (rpc->rc_timeouts > 0)
				timeo *= rpcclnt_backoff[rpc->rc_timeouts - 1];
			if (rep->r_rtt <= timeo)
				continue;
			if (rpc->rc_timeouts < 8)
				rpc->rc_timeouts++;
		}
		/*
		 * Check for server not responding
		 */
		if ((rep->r_flags & R_TPRINTFMSG) == 0 &&
		    rep->r_rexmit > rpc->rc_deadthresh) {
			rpcclnt_msg(rep->r_td, rpc->rc_prog->prog_name,
			    "not responding");
			rep->r_flags |= R_TPRINTFMSG;
		}
		if (rep->r_rexmit >= rep->r_retry) {	/* too many */
			rpcstats.rpctimeouts++;
			rep->r_flags |= R_SOFTTERM;
			continue;
		}
		/* Stream sockets are never retransmitted here; just back off. */
		if (rpc->rc_sotype != SOCK_DGRAM) {
			if (++rep->r_rexmit > RPC_MAXREXMIT)
				rep->r_rexmit = RPC_MAXREXMIT;
			continue;
		}
		if ((so = rpc->rc_so) == NULL)
			continue;

		/*
		 * If there is enough space and the window allows.. Resend it
		 * Set r_rtt to -1 in case we fail to send it now.
		 */
		rep->r_rtt = -1;
		if (sbspace(&so->so_snd) >= rep->r_mreq->m_pkthdr.len &&
		    ((rpc->rc_flag & RPCCLNT_DUMBTIMR) ||
		     (rep->r_flags & R_SENT) ||
		     rpc->rc_sent < rpc->rc_cwnd) &&
		    (m = m_copym(rep->r_mreq, 0, M_COPYALL, M_DONTWAIT))) {
			/* Connected socket: no dest addr; else pass rc_name. */
			if ((rpc->rc_flag & RPCCLNT_NOCONN) == 0)
				error = (*so->so_proto->pr_usrreqs->pru_send) (so, 0, m,
				    NULL, NULL, td);
			else
				error = (*so->so_proto->pr_usrreqs->pru_send)(so, 0, m, rpc->rc_name, NULL, td);
			if (error) {
				if (RPCIGNORE_SOERROR(rpc->rc_soflags, error))
					so->so_error = 0;
			} else {
				/*
				 * Iff first send, start timing else turn
				 * timing off, backoff timer and divide
				 * congestion window by 2.
				 */
				if (rep->r_flags & R_SENT) {
					rep->r_flags &= ~R_TIMING;
					if (++rep->r_rexmit > RPC_MAXREXMIT)
						rep->r_rexmit = RPC_MAXREXMIT;
					rpc->rc_cwnd >>= 1;
					if (rpc->rc_cwnd < RPC_CWNDSCALE)
						rpc->rc_cwnd = RPC_CWNDSCALE;
					rpcstats.rpcretries++;
				} else {
					rep->r_flags |= R_SENT;
					rpc->rc_sent += RPC_CWNDSCALE;
				}
				rep->r_rtt = 0;
			}
		}
	}
	mtx_unlock(&Giant);	/* rpc_msg -> tprintf */
	splx(s);

	/* Re-arm ourselves; rpcclnt_request stops the callout when idle. */
#ifdef __OpenBSD__
	timeout_add(rpcclnt_timer, to, rpcclnt_ticks);
#else
	callout_reset(&rpcclnt_callout, rpcclnt_ticks, rpcclnt_timer, NULL);
#endif
}
1490
1491 /*
1492 * Test for a termination condition pending on the process. This is used for
1493 * RPCCLNT_INT mounts.
1494 */
int
rpcclnt_sigintr(rpc, task, pr)
	struct rpcclnt *rpc;
	struct rpctask *task;
	RPC_EXEC_CTX pr;
{
	struct proc *p;

	sigset_t tmpset;

	/* NOTE(review): mixes bare 'return' and RPC_RETURN in one function. */
	if (rpc == NULL)
		return EFAULT;

	/* XXX deal with forced unmounts */

	/* A cancelled task is always "interrupted". */
	if (task && (task->r_flags & R_SOFTTERM))
		RPC_RETURN(EINTR);

	/* Signals only matter for interruptible (INT) clients. */
	if (!(rpc->rc_flag & RPCCLNT_INT))
		RPC_RETURN(0);

	if (pr == NULL)
		return (0);

#ifdef __OpenBSD__
	p = pr;
	if (p && p->p_siglist &&
	    (((p->p_siglist & ~p->p_sigmask) & ~p->p_sigignore) &
	     RPCINT_SIGMASK))
		RPC_RETURN(EINTR);
#else
	/* Pending, unmasked, unignored signal in the interrupt set -> EINTR. */
	p = pr->td_proc;
	PROC_LOCK(p);
	tmpset = p->p_siglist;
	SIGSETNAND(tmpset, pr->td_sigmask);
	mtx_lock(&p->p_sigacts->ps_mtx);
	SIGSETNAND(tmpset, p->p_sigacts->ps_sigignore);
	mtx_unlock(&p->p_sigacts->ps_mtx);
	if (SIGNOTEMPTY(p->p_siglist) && RPCCLNTINT_SIGMASK(tmpset)) {
		PROC_UNLOCK(p);
		RPC_RETURN(EINTR);
	}
	PROC_UNLOCK(p);
#endif
	RPC_RETURN(0);
}
1541
1542 /*
1543 * Lock a socket against others. Necessary for STREAM sockets to ensure you
1544 * get an entire rpc request/reply and also to avoid race conditions between
1545 * the processes with nfs requests in progress when a reconnect is necessary.
1546 */
1547 static int
1548 rpcclnt_sndlock(flagp, task)
1549 int *flagp;
1550 struct rpctask *task;
1551 {
1552 RPC_EXEC_CTX p;
1553 int slpflag = 0, slptimeo = 0;
1554
1555 p = task->r_td;
1556 if (task->r_rpcclnt->rc_flag & RPCCLNT_INT)
1557 slpflag = PCATCH;
1558 while (*flagp & RPCCLNT_SNDLOCK) {
1559 if (rpcclnt_sigintr(task->r_rpcclnt, task, p))
1560 RPC_RETURN(EINTR);
1561 *flagp |= RPCCLNT_WANTSND;
1562 (void)tsleep((caddr_t) flagp, slpflag | (PZERO - 1), "rpcsndlck",
1563 slptimeo);
1564 if (slpflag == PCATCH) {
1565 slpflag = 0;
1566 slptimeo = 2 * hz;
1567 }
1568 }
1569 *flagp |= RPCCLNT_SNDLOCK;
1570 RPC_RETURN(0);
1571 }
1572
1573 /*
1574 * Unlock the stream socket for others.
1575 */
1576 static void
1577 rpcclnt_sndunlock(flagp)
1578 int *flagp;
1579 {
1580
1581 if ((*flagp & RPCCLNT_SNDLOCK) == 0)
1582 panic("rpc sndunlock");
1583 *flagp &= ~RPCCLNT_SNDLOCK;
1584 if (*flagp & RPCCLNT_WANTSND) {
1585 *flagp &= ~RPCCLNT_WANTSND;
1586 wakeup((caddr_t) flagp);
1587 }
1588 }
1589
1590 static int
1591 rpcclnt_rcvlock(task)
1592 struct rpctask *task;
1593 {
1594 int *flagp = &task->r_rpcclnt->rc_flag;
1595 int slpflag, slptimeo = 0;
1596
1597 if (*flagp & RPCCLNT_INT)
1598 slpflag = PCATCH;
1599 else
1600 slpflag = 0;
1601 while (*flagp & RPCCLNT_RCVLOCK) {
1602 if (rpcclnt_sigintr(task->r_rpcclnt, task, task->r_td))
1603 RPC_RETURN(EINTR);
1604 *flagp |= RPCCLNT_WANTRCV;
1605 (void)tsleep((caddr_t) flagp, slpflag | (PZERO - 1), "rpcrcvlk",
1606 slptimeo);
1607 if (slpflag == PCATCH) {
1608 slpflag = 0;
1609 slptimeo = 2 * hz;
1610 }
1611 }
1612 *flagp |= RPCCLNT_RCVLOCK;
1613 RPC_RETURN(0);
1614 }
1615
1616 /*
1617 * Unlock the stream socket for others.
1618 */
1619 static void
1620 rpcclnt_rcvunlock(flagp)
1621 int *flagp;
1622 {
1623
1624 if ((*flagp & RPCCLNT_RCVLOCK) == 0)
1625 panic("nfs rcvunlock");
1626 *flagp &= ~RPCCLNT_RCVLOCK;
1627 if (*flagp & RPCCLNT_WANTRCV) {
1628 *flagp &= ~RPCCLNT_WANTRCV;
1629 wakeup((caddr_t) flagp);
1630 }
1631 }
1632
1633 #if 0
1634 /*
1635 * Check for badly aligned mbuf data areas and realign data in an mbuf list
1636 * by copying the data areas up, as required.
1637 */
1638 void
1639 rpcclnt_realign(m, hsiz)
1640 struct mbuf *m;
1641 int hsiz;
1642 {
1643 struct mbuf *m2;
1644 int siz, mlen, olen;
1645 caddr_t tcp, fcp;
1646 struct mbuf *mnew;
1647
1648 while (m) {
1649 /*
1650 * This never happens for UDP, rarely happens for TCP but
1651 * frequently happens for iso transport.
1652 */
1653 if ((m->m_len & 0x3) || (mtod(m, long)&0x3)) {
1654 olen = m->m_len;
1655 fcp = mtod(m, caddr_t);
1656 if ((long)fcp & 0x3) {
1657 if (m->m_flags & M_PKTHDR)
1658 m_tag_delete_chain(m, NULL);
1659 m->m_flags &= ~M_PKTHDR;
1660 if (m->m_flags & M_EXT)
1661 m->m_data = m->m_ext.ext_buf +
1662 ((m->m_ext.ext_size - olen) & ~0x3);
1663 else
1664 m->m_data = m->m_dat;
1665 }
1666 m->m_len = 0;
1667 tcp = mtod(m, caddr_t);
1668 mnew = m;
1669 m2 = m->m_next;
1670
1671 /*
1672 * If possible, only put the first invariant part of
1673 * the RPC header in the first mbuf.
1674 */
1675 mlen = M_TRAILINGSPACE(m);
1676 if (olen <= hsiz && mlen > hsiz)
1677 mlen = hsiz;
1678
1679 /* Loop through the mbuf list consolidating data. */
1680 while (m) {
1681 while (olen > 0) {
1682 if (mlen == 0) {
1683 if (m2->m_flags & M_PKTHDR)
1684 m_tag_delete_chain(m2, NULL);
1685 m2->m_flags &= ~M_PKTHDR;
1686 if (m2->m_flags & M_EXT)
1687 m2->m_data = m2->m_ext.ext_buf;
1688 else
1689 m2->m_data = m2->m_dat;
1690 m2->m_len = 0;
1691 mlen = M_TRAILINGSPACE(m2);
1692 tcp = mtod(m2, caddr_t);
1693 mnew = m2;
1694 m2 = m2->m_next;
1695 }
1696 siz = min(mlen, olen);
1697 if (tcp != fcp)
1698 bcopy(fcp, tcp, siz);
1699 mnew->m_len += siz;
1700 mlen -= siz;
1701 olen -= siz;
1702 tcp += siz;
1703 fcp += siz;
1704 }
1705 m = m->m_next;
1706 if (m) {
1707 olen = m->m_len;
1708 fcp = mtod(m, caddr_t);
1709 }
1710 }
1711
1712 /*
1713 * Finally, set m_len == 0 for any trailing mbufs
1714 * that have been copied out of.
1715 */
1716 while (m2) {
1717 m2->m_len = 0;
1718 m2 = m2->m_next;
1719 }
1720 return;
1721 }
1722 m = m->m_next;
1723 }
1724 }
1725 #else
/*
 * Realign a received mbuf chain so that data is 4-byte aligned for the
 * XDR dissection macros.  The chain is scanned for the first mbuf whose
 * length or data pointer is misaligned; from that point on, all data is
 * copied into a single fresh mbuf (cluster if large) which replaces the
 * misaligned tail of the chain.
 */
static void
rpcclnt_realign(struct mbuf **pm, int hsiz)
{
	struct mbuf *m;
	struct mbuf *n = NULL;
	int off = 0;

	RPCDEBUG("in rpcclnt_realign()");

	/*
	 * Walk the chain; on exit 'pm' points at the link holding the first
	 * misaligned mbuf (or the terminating NULL if none was found).
	 */
	while ((m = *pm) != NULL) {
		if ((m->m_len & 0x3) || (mtod(m, intptr_t) & 0x3)) {
			/* Allocate the replacement buffer for the tail. */
			MGET(n, M_TRYWAIT, MT_DATA);
			if (m->m_len >= MINCLSIZE) {
				MCLGET(n, M_TRYWAIT);
			}
			n->m_len = 0;
			break;
		}
		pm = &m->m_next;
	}

	/*
	 * If n is non-NULL, loop on m copying data, then replace the
	 * portion of the chain that had to be realigned.
	 */
	if (n != NULL) {
		/* m_copyback extends 'n' as needed while copying. */
		while (m) {
			m_copyback(n, off, m->m_len, mtod(m, caddr_t));
			off += m->m_len;
			m = m->m_next;
		}
		/* Free the old misaligned tail and splice in 'n'. */
		m_freem(*pm);
		*pm = n;
	}

	RPCDEBUG("leave rpcclnt_realign()");
}
1763 #endif
1764
1765 static int
1766 rpcclnt_msg(p, server, msg)
1767 RPC_EXEC_CTX p;
1768 const char *server;
1769 char *msg;
1770 {
1771 #ifdef __OpenBSD__
1772 tpr_t tpr;
1773 struct proc *pr = p;
1774
1775 if (p)
1776 tpr = tprintf_open(p);
1777 else
1778 tpr = NULL;
1779 tprintf(tpr, "rpc server %s: %s\n", server, msg);
1780 tprintf_close(tpr);
1781 RPC_RETURN(0);
1782 #else
1783 GIANT_REQUIRED;
1784
1785 tprintf(p ? p->td_proc : NULL, LOG_INFO,
1786 "nfs server %s: %s\n", server, msg);
1787 RPC_RETURN(0);
1788 #endif
1789 }
1790
1791 /*
1792 * Build the RPC header and fill in the authorization info. The authorization
1793 * string argument is only used when the credentials come from outside of the
1794 * kernel (AUTH_KERB). (likewise, the ucred is only used when inside the
1795 * kernel) Returns the head of the mbuf list.
1796 */
static struct mbuf *
rpcclnt_buildheader(rc, procid, mrest, mrest_len, xidp, mheadend, cred)
	struct rpcclnt *rc;
	int procid;
	struct mbuf *mrest;
	u_int32_t mrest_len;
	int *xidp;	/* NOTE(review): callers pass &xid (u_int32_t *) — confirm/unify type */
	struct mbuf **mheadend;
	struct ucred * cred;
{
	/* register */ struct mbuf *mb;
	register u_int32_t *tl;
	/* register */ caddr_t bpos;
	struct mbuf *mreq, *mb2;
	int error;

	MGETHDR(mb, M_TRYWAIT, MT_DATA);
	/* Make room for the 6 fixed header words; cluster if they don't fit. */
	if (6 * RPCX_UNSIGNED >= MINCLSIZE) {
		MCLGET(mb, M_TRYWAIT);
	} else if (6 * RPCX_UNSIGNED < MHLEN) {
		MH_ALIGN(mb, 6 * RPCX_UNSIGNED);
	} else {
		RPCDEBUG("mbuf too small");
		panic("cheap bailout");
	}
	mb->m_len = 0;
	mreq = mb;
	bpos = mtod(mb, caddr_t);

	/*
	 * First the RPC header.
	 */
	rpcm_build(tl, u_int32_t *, 6 * RPCX_UNSIGNED);

	/* Get a new (non-zero) xid */
	if ((rpcclnt_xid == 0) && (rpcclnt_xid_touched == 0)) {
		/* First use ever: seed the global xid counter. */
		rpcclnt_xid = arc4random();
		rpcclnt_xid_touched = 1;
	} else {
		/* Advance by a random non-zero step (1..255). */
		while ((*xidp = arc4random() % 256) == 0);
		rpcclnt_xid += *xidp;
	}

	/* XXX: funky... */
	*tl++ = *xidp = txdr_unsigned(rpcclnt_xid);

	/* Fixed call header: direction, RPC version, program, version, proc. */
	*tl++ = rpc_call;
	*tl++ = rpc_vers;
	*tl++ = txdr_unsigned(rc->rc_prog->prog_id);
	*tl++ = txdr_unsigned(rc->rc_prog->prog_version);
	*tl++ = txdr_unsigned(procid);

	/* Append credential + verifier; on failure the chain is abandoned. */
	if ((error = rpcauth_buildheader(rc->rc_auth, cred, &mb, &bpos))) {
		RPCDEBUG("rpcauth_buildheader failed %d", error);
		return NULL;
	}

	/* Link the caller's body and remember the last header mbuf so
	 * rpcclnt_request can detach the header before freeing it. */
	mb->m_next = mrest;
	*mheadend = mb;
	mreq->m_pkthdr.len = m_length(mreq, NULL);
	mreq->m_pkthdr.rcvif = NULL;
	return (mreq);
}
1860
1861 /*
1862 * Help break down an mbuf chain by setting the first siz bytes contiguous
1863 * pointed to by returned val. This is used by the macros rpcm_dissect and
1864 * rpcm_dissecton for tough cases. (The macros use the vars. dpos and dpos2)
1865 */
static int
rpcm_disct(mdp, dposp, siz, left, cp2)
	struct mbuf **mdp;
	caddr_t *dposp;
	int siz;	/* contiguous bytes required */
	int left;	/* bytes remaining in the current mbuf */
	caddr_t *cp2;	/* out: pointer to siz contiguous bytes */
{
	struct mbuf *mp, *mp2;
	int siz2, xfer;
	caddr_t p;

	mp = *mdp;
	/* Skip any fully-consumed mbufs at the current position. */
	while (left == 0) {
		*mdp = mp = mp->m_next;
		if (mp == NULL)
			RPC_RETURN(EBADRPC);
		left = mp->m_len;
		*dposp = mtod(mp, caddr_t);
	}
	if (left >= siz) {
		/* Already contiguous: just hand back the current position. */
		*cp2 = *dposp;
		*dposp += siz;
	} else if (mp->m_next == NULL) {
		RPC_RETURN(EBADRPC);
	} else if (siz > MHLEN) {
		panic("rpc S too big");
	} else {
		/*
		 * Straddles mbufs: splice a fresh mbuf after 'mp' and copy
		 * the leftover plus the continuation bytes into it.
		 */
		MGET(mp2, M_TRYWAIT, MT_DATA);
		mp2->m_next = mp->m_next;
		mp->m_next = mp2;
		mp->m_len -= left;
		mp = mp2;
		*cp2 = p = mtod(mp, caddr_t);
		bcopy(*dposp, p, left);	/* Copy what was left */
		siz2 = siz - left;
		p += left;
		mp2 = mp->m_next;
		/* Loop around copying up the siz2 bytes */
		while (siz2 > 0) {
			if (mp2 == NULL)
				RPC_RETURN(EBADRPC);
			xfer = (siz2 > mp2->m_len) ? mp2->m_len : siz2;
			if (xfer > 0) {
				bcopy(mtod(mp2, caddr_t), p, xfer);
				/* Advance mp2's data past the copied bytes. */
				RPCMADV(mp2, xfer);
				mp2->m_len -= xfer;
				p += xfer;
				siz2 -= xfer;
			}
			if (siz2 > 0)
				mp2 = mp2->m_next;
		}
		mp->m_len = siz;
		/* Resume parsing in the mbuf after the spliced copy. */
		*mdp = mp2;
		*dposp = mtod(mp2, caddr_t);
	}
	RPC_RETURN(0);
}
1925
1926
1927
1928 static u_int32_t
1929 rpcclnt_proct(rpc, procid)
1930 struct rpcclnt *rpc;
1931 u_int32_t procid;
1932 {
1933 if (rpc->rc_proctlen != 0 && rpc->rc_proct != NULL &&
1934 procid < rpc->rc_proctlen) {
1935 return rpc->rc_proct[procid];
1936 }
1937 return (0);
1938 }
1939
/*
 * Advance the parse position (*mdp, *dposp) by 'offs' bytes, starting with
 * 'left' bytes remaining in the current mbuf.  Returns EBADRPC if the chain
 * ends before the offset is consumed.
 */
static int
rpc_adv(mdp, dposp, offs, left)
	struct mbuf **mdp;
	caddr_t *dposp;
	int offs;
	int left;
{
	struct mbuf *m;
	int s;

	m = *mdp;
	s = left;
	/* Walk forward until the target offset lands within mbuf 'm'. */
	while (s < offs) {
		offs -= s;
		m = m->m_next;
		if (m == NULL)
			RPC_RETURN(EBADRPC);
		s = m->m_len;
	}
	*mdp = m;
	*dposp = mtod(m, caddr_t) + offs;
	RPC_RETURN(0);
}
1963
/*
 * Cancel all outstanding tasks belonging to 'rpc': mark each unanswered
 * task R_SOFTTERM, then poll (up to 30 lbolt sleeps, ~30s) for them to
 * drain from the global task queue.  Returns 0 when drained, EBUSY if
 * tasks remain after the polling window.
 */
int
rpcclnt_cancelreqs(rpc)
	struct rpcclnt *rpc;
{
	struct rpctask *task;
	int i, s;

	s = splnet();
	TAILQ_FOREACH(task, &rpctask_q, r_chain) {
		/* Skip other clients' tasks and already-finished/cancelled ones. */
		if (rpc != task->r_rpcclnt || task->r_mrep != NULL ||
		    (task->r_flags & R_SOFTTERM))
			continue;
		rpcclnt_softterm(task);
	}
	splx(s);

	for (i = 0; i < 30; i++) {
		s = splnet();
		/* Any task still queued for this client? */
		TAILQ_FOREACH(task, &rpctask_q, r_chain) {
			if (rpc == task->r_rpcclnt)
				break;
		}
		splx(s);
		if (task == NULL)
			return (0);
		/* Wait one lbolt tick before rechecking. */
		tsleep(&lbolt, PSOCK, "nfscancel", 0);
	}
	return (EBUSY);
}
1993
1994 static void
1995 rpcclnt_softterm(struct rpctask * task)
1996 {
1997 task->r_flags |= R_SOFTTERM;
1998 if (task->r_flags & R_SENT) {
1999 task->r_rpcclnt->rc_sent -= RPC_CWNDSCALE;
2000 task->r_flags &= ~R_SENT;
2001 }
2002 }
2003
2004
2005 #ifndef __OpenBSD__
2006 /* called by rpcclnt_get() */
2007 void
2008 rpcclnt_create(struct rpcclnt ** rpc)
2009 {
2010 MALLOC(*rpc, struct rpcclnt *, sizeof(struct rpcclnt), M_RPC, M_WAITOK | M_ZERO);
2011 }
2012
2013 /* called by rpcclnt_put() */
2014 void
2015 rpcclnt_destroy(struct rpcclnt * rpc)
2016 {
2017 if (rpc != NULL) {
2018 FREE(rpc, M_RPC);
2019 } else {
2020 RPCDEBUG("attempting to free a NULL rpcclnt (not dereferenced)");
2021 }
2022 }
2023 #endif /* !__OpenBSD__ */
2024
2025
2026 /* XXX: add a lock around the auth structure in struct rpcclnt and make this
2027 * call safe for calling durring a connection */
/*
 * Append the XDR credential and verifier to the request header being built
 * in *mhdr at position *bp.  Supports AUTH_NULL and AUTH_UNIX; the word
 * count written below must exactly match 'mlen' reserved by rpcm_build.
 */
static int
rpcauth_buildheader(struct rpc_auth * auth, struct ucred * cred, struct mbuf ** mhdr, caddr_t * bp)
{
	size_t authsiz, verfsiz;
	uint32_t mlen, grpsiz;
	register struct mbuf *mb, *mb2;
	caddr_t bpos;
	register u_int32_t *tl;
	register int i;

	if (auth == NULL || mhdr == NULL)
		return EFAULT;

	switch (auth->auth_type) {
	case RPCAUTH_NULL:
		authsiz = 0;
		verfsiz = 0;
		break;
	case RPCAUTH_UNIX:
		/* 5 words: stamp, machinename len, uid, gid, group count;
		 * plus one word per group (groups[0] appears twice, below). */
		authsiz = (5 + cred->cr_ngroups) * RPCX_UNSIGNED;
		verfsiz = 0;
		break;
	default:
		return EPROTONOSUPPORT;
		break;
	};

	/* cred body + verf body + 4 words of flavor/length framing. */
	mlen = rpcm_rndup(authsiz) + rpcm_rndup(verfsiz) + 4 * RPCX_UNSIGNED;

	mb = *mhdr;
	bpos = *bp;

	rpcm_build(tl, u_int32_t *, mlen);

	/* Hand the (possibly new) tail mbuf and position back to the caller. */
	*bp = bpos;
	*mhdr = mb;

	*tl++ = txdr_unsigned(auth->auth_type);
	*tl++ = txdr_unsigned(authsiz);
	switch (auth->auth_type) {
	case RPCAUTH_UNIX:
		*tl++ = 0;	/* stamp */
		*tl++ = 0;	/* zero-length machine name */

		*tl++ = txdr_unsigned(cred->cr_uid);
		*tl++ = txdr_unsigned(cred->cr_groups[0]);
		grpsiz = cred->cr_ngroups;
		*tl++ = txdr_unsigned(grpsiz);
		/* XXX: groups[0] is already sent... */
		for (i = 0 ; i < grpsiz ; i++) {
			*tl++ = txdr_unsigned(cred->cr_groups[i]);
		}

		/* null verification header */
		*tl++ = txdr_unsigned(RPCAUTH_NULL);
		*tl++ = 0;
		break;
	case RPCAUTH_NULL:
		/* just a null verf header */
		*tl++ = txdr_unsigned(RPCAUTH_NULL);
		*tl = 0;
		break;
	default:
		panic("inconsistent rpc auth type");
		break;
	}

	return 0;
}
/* Cache object: a6980405552962834ddf90126d4653a7 */