/* FreeBSD/Linux Kernel Cross Reference: sys/rpc/rpcclnt.c */
1 /* $FreeBSD: releng/5.2/sys/rpc/rpcclnt.c 124995 2004-01-26 04:38:17Z scottl $ */
2 /* $Id: rpcclnt.c,v 1.9 2003/11/05 14:59:03 rees Exp $ */
3
4 /*
5 * copyright (c) 2003
6 * the regents of the university of michigan
7 * all rights reserved
8 *
9 * permission is granted to use, copy, create derivative works and redistribute
10 * this software and such derivative works for any purpose, so long as the name
11 * of the university of michigan is not used in any advertising or publicity
12 * pertaining to the use or distribution of this software without specific,
13 * written prior authorization. if the above copyright notice or any other
14 * identification of the university of michigan is included in any copy of any
15 * portion of this software, then the disclaimer below must also be included.
16 *
17 * this software is provided as is, without representation from the university
18 * of michigan as to its fitness for any purpose, and without warranty by the
19 * university of michigan of any kind, either express or implied, including
20 * without limitation the implied warranties of merchantability and fitness for
21 * a particular purpose. the regents of the university of michigan shall not be
22 * liable for any damages, including special, indirect, incidental, or
23 * consequential damages, with respect to any claim arising out of or in
24 * connection with the use of the software, even if it has been or is hereafter
25 * advised of the possibility of such damages.
26 */
27
28 /*
29 * Copyright (c) 1989, 1991, 1993, 1995 The Regents of the University of
30 * California. All rights reserved.
31 *
32 * This code is derived from software contributed to Berkeley by Rick Macklem at
33 * The University of Guelph.
34 *
35 * Redistribution and use in source and binary forms, with or without
36 * modification, are permitted provided that the following conditions are
37 * met: 1. Redistributions of source code must retain the above copyright
38 * notice, this list of conditions and the following disclaimer. 2.
39 * Redistributions in binary form must reproduce the above copyright notice,
40 * this list of conditions and the following disclaimer in the documentation
41 * and/or other materials provided with the distribution. 3. All advertising
42 * materials mentioning features or use of this software must display the
43 * following acknowledgement: This product includes software developed by the
44 * University of California, Berkeley and its contributors. 4. Neither the
45 * name of the University nor the names of its contributors may be used to
46 * endorse or promote products derived from this software without specific
47 * prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
50 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
51 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
52 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
53 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
55 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
56 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 *
61 * @(#)nfs_socket.c 8.5 (Berkeley) 3/30/95
62 */
63
64 /* XXX: kill ugly debug strings */
65 /* XXX: get rid of proct, as it is not even being used... (or keep it so v{2,3}
66 * can run, but clean it up! */
67
68 #include <sys/param.h>
69 #include <sys/systm.h>
70 #include <sys/proc.h>
71 #include <sys/mount.h>
72 #include <sys/kernel.h>
73 #include <sys/mbuf.h>
74 #include <sys/syslog.h>
75 #include <sys/malloc.h>
76 #include <sys/uio.h>
77 #include <sys/lock.h>
78 #include <sys/signalvar.h>
79 #include <sys/sysent.h>
80 #include <sys/syscall.h>
81 #include <sys/sysctl.h>
82
83 #include <sys/domain.h>
84 #include <sys/protosw.h>
85 #include <sys/socket.h>
86 #include <sys/socketvar.h>
87 #include <sys/mutex.h>
88
89 #include <netinet/in.h>
90 #include <netinet/tcp.h>
91
92 #include <nfs/rpcv2.h>
93
94 #include <rpc/rpcm_subs.h>
95 #include <rpc/rpcclnt.h>
96
97 /* memory management */
98 #ifdef __OpenBSD__
99 struct pool rpctask_pool;
100 struct pool rpcclnt_pool;
101 #define RPCTASKPOOL_LWM 10
102 #define RPCTASKPOOL_HWM 40
103 #else
104 static MALLOC_DEFINE(M_RPC, "rpcclnt", "rpc state");
105 #endif
106
107 #define RPC_RETURN(X) do { RPCDEBUG("returning %d", X); return X; }while(0)
108
109 /*
110 * Estimate rto for an nfs rpc sent via. an unreliable datagram. Use the mean
111 * and mean deviation of rtt for the appropriate type of rpc for the frequent
112 * rpcs and a default for the others. The justification for doing "other"
113 * this way is that these rpcs happen so infrequently that timer est. would
114 * probably be stale. Also, since many of these rpcs are non-idempotent, a
115 * conservative timeout is desired. getattr, lookup - A+2D read, write -
116 * A+4D other - nm_timeo
117 */
118 #define RPC_RTO(n, t) \
119 ((t) == 0 ? (n)->rc_timeo : \
120 ((t) < 3 ? \
121 (((((n)->rc_srtt[t-1] + 3) >> 2) + (n)->rc_sdrtt[t-1] + 1) >> 1) : \
122 ((((n)->rc_srtt[t-1] + 7) >> 3) + (n)->rc_sdrtt[t-1] + 1)))
123
124 #define RPC_SRTT(s,r) (r)->r_rpcclnt->rc_srtt[rpcclnt_proct((s),\
125 (r)->r_procnum) - 1]
126
127 #define RPC_SDRTT(s,r) (r)->r_rpcclnt->rc_sdrtt[rpcclnt_proct((s),\
128 (r)->r_procnum) - 1]
129
130
131 /*
132 * There is a congestion window for outstanding rpcs maintained per mount
133 * point. The cwnd size is adjusted in roughly the way that: Van Jacobson,
134 * Congestion avoidance and Control, In "Proceedings of SIGCOMM '88". ACM,
135 * August 1988. describes for TCP. The cwnd size is chopped in half on a
136 * retransmit timeout and incremented by 1/cwnd when each rpc reply is
137 * received and a full cwnd of rpcs is in progress. (The sent count and cwnd
138 * are scaled for integer arith.) Variants of "slow start" were tried and
139 * were found to be too much of a performance hit (ave. rtt 3 times larger),
140 * I suspect due to the large rtt that nfs rpcs have.
141 */
142 #define RPC_CWNDSCALE 256
143 #define RPC_MAXCWND (RPC_CWNDSCALE * 32)
144 static int rpcclnt_backoff[8] = {2, 4, 8, 16, 32, 64, 128, 256,};
145
/* XXX ugly debug strings */
/* Human-readable strings for RPC "accepted" status codes, indexed by the
 * accept_stat value from the reply. */
#define RPC_ERRSTR_ACCEPTED_SIZE 6
char *rpc_errstr_accepted[RPC_ERRSTR_ACCEPTED_SIZE] = {
	"",			/* no good message... */
	"remote server hasn't exported program.",
	"remote server can't support version number.",
	"program can't support procedure.",
	"procedure can't decode params.",
	"remote error. remote side memory allocation failure?"
};

/* Strings for RPC "denied" status codes (RPC_MISMATCH, AUTH_ERROR). */
char *rpc_errstr_denied[2] = {
	"remote server doesnt support rpc version 2!",
	"remote server authentication error."
};

/* Strings for authentication failure sub-codes, indexed by auth_stat. */
#define RPC_ERRSTR_AUTH_SIZE 6
char *rpc_errstr_auth[RPC_ERRSTR_AUTH_SIZE] = {
	"",
	"auth error: bad credential (seal broken).",
	"auth error: client must begin new session.",
	"auth error: bad verifier (seal broken).",
	"auth error: verifier expired or replayed.",
	"auth error: rejected for security reasons.",
};
171
172 /*
173 * Static data, mostly RPC constants in XDR form
174 */
175 static u_int32_t rpc_reply, rpc_call, rpc_vers;
176
177 /*
178 * rpc_msgdenied, rpc_mismatch, rpc_auth_unix, rpc_msgaccepted,
179 * rpc_autherr, rpc_auth_kerb;
180 */
181
182 static u_int32_t rpcclnt_xid = 0;
183 static u_int32_t rpcclnt_xid_touched = 0;
184 struct rpcstats rpcstats;
185 int rpcclnt_ticks;
186
187 SYSCTL_NODE(_kern, OID_AUTO, rpc, CTLFLAG_RD, 0, "RPC Subsystem");
188
189 SYSCTL_UINT(_kern_rpc, OID_AUTO, retries, CTLFLAG_RD, &rpcstats.rpcretries, 0, "retries");
190 SYSCTL_UINT(_kern_rpc, OID_AUTO, request, CTLFLAG_RD, &rpcstats.rpcrequests, 0, "request");
191 SYSCTL_UINT(_kern_rpc, OID_AUTO, timeouts, CTLFLAG_RD, &rpcstats.rpctimeouts, 0, "timeouts");
192 SYSCTL_UINT(_kern_rpc, OID_AUTO, unexpected, CTLFLAG_RD, &rpcstats.rpcunexpected, 0, "unexpected");
193 SYSCTL_UINT(_kern_rpc, OID_AUTO, invalid, CTLFLAG_RD, &rpcstats.rpcinvalid, 0, "invalid");
194
195
196 #ifdef RPCCLNT_DEBUG
197 int rpcdebugon = 0;
198 SYSCTL_UINT(_kern_rpc, OID_AUTO, debug_on, CTLFLAG_RW, &rpcdebugon, 0, "RPC Debug messages");
199 #endif
200
201 /*
202 * Queue head for rpctask's
203 */
204 static
205 TAILQ_HEAD(, rpctask) rpctask_q;
206 struct callout_handle rpcclnt_timer_handle;
207
208 #ifdef __OpenBSD__
209 static int rpcclnt_send(struct socket *, struct mbuf *, struct mbuf *, struct rpctask *);
210 static int rpcclnt_receive(struct rpctask *, struct mbuf **, struct mbuf **, RPC_EXEC_CTX);
211 #else
212 static int rpcclnt_send(struct socket *, struct sockaddr *, struct mbuf *, struct rpctask *);
213 static int rpcclnt_receive(struct rpctask *, struct sockaddr **, struct mbuf **, RPC_EXEC_CTX);
214 #endif
215
216 static int rpcclnt_msg(RPC_EXEC_CTX, const char *, char *);
217
218 static int rpcclnt_reply(struct rpctask *, RPC_EXEC_CTX);
219 static void rpcclnt_timer(void *);
220 static int rpcclnt_sndlock(int *, struct rpctask *);
221 static void rpcclnt_sndunlock(int *);
222 static int rpcclnt_rcvlock(struct rpctask *);
223 static void rpcclnt_rcvunlock(int *);
224 #if 0
225 void rpcclnt_realign(struct mbuf *, int);
226 #else
227 static void rpcclnt_realign(struct mbuf **, int);
228 #endif
229
230 static struct mbuf *rpcclnt_buildheader(struct rpcclnt *, int, struct mbuf *, u_int32_t, int *, struct mbuf **, struct ucred *);
231 static int rpcm_disct(struct mbuf **, caddr_t *, int, int, caddr_t *);
232 static u_int32_t rpcclnt_proct(struct rpcclnt *, u_int32_t);
233 static int rpc_adv(struct mbuf **, caddr_t *, int, int);
234 static void rpcclnt_softterm(struct rpctask * task);
235
236 static int rpcauth_buildheader(struct rpc_auth * auth, struct ucred *, struct mbuf **, caddr_t *);
237
238 void
239 rpcclnt_init(void)
240 {
241 #ifdef __OpenBSD__
242 static struct timeout rpcclnt_timer_to;
243 #endif
244
245 rpcclnt_ticks = (hz * RPC_TICKINTVL + 500) / 1000;
246 if (rpcclnt_ticks < 1)
247 rpcclnt_ticks = 1;
248 rpcstats.rpcretries = 0;
249 rpcstats.rpcrequests = 0;
250 rpcstats.rpctimeouts = 0;
251 rpcstats.rpcunexpected = 0;
252 rpcstats.rpcinvalid = 0;
253
254 /*
255 * rpc constants how about actually using more than one of these!
256 */
257
258 rpc_reply = txdr_unsigned(RPC_REPLY);
259 rpc_vers = txdr_unsigned(RPC_VER2);
260 rpc_call = txdr_unsigned(RPC_CALL);
261 #if 0
262 rpc_msgdenied = txdr_unsigned(RPC_MSGDENIED);
263 rpc_msgaccepted = txdr_unsigned(RPC_MSGACCEPTED);
264 rpc_mismatch = txdr_unsigned(RPC_MISMATCH);
265 rpc_autherr = txdr_unsigned(RPC_AUTHERR);
266 rpc_auth_unix = txdr_unsigned(RPCAUTH_UNIX);
267 rpc_auth_kerb = txdr_unsigned(RPCAUTH_KERB4);
268 #endif
269
270 /* initialize rpctask queue */
271 TAILQ_INIT(&rpctask_q);
272
273 #ifdef __OpenBSD__
274 /* initialize pools */
275 pool_init(&rpctask_pool, sizeof(struct rpctask), 0, 0, RPCTASKPOOL_LWM,
276 "rpctask_p", NULL);
277 pool_setlowat(&rpctask_pool, RPCTASKPOOL_LWM);
278 pool_sethiwat(&rpctask_pool, RPCTASKPOOL_HWM);
279
280 pool_init(&rpcclnt_pool, sizeof(struct rpcclnt), 0, 0, 1, "rpcclnt_p", NULL);
281
282 /* initialize timers */
283 timeout_set(&rpcclnt_timer_to, rpcclnt_timer, &rpcclnt_timer_to);
284 rpcclnt_timer(&rpcclnt_timer_to);
285 #else /* !__OpenBSD__ */
286 rpcclnt_timer(NULL);
287
288 #endif /* !__OpenBSD__ */
289
290 RPCDEBUG("rpc initialed");
291
292 return;
293 }
294
295 void
296 rpcclnt_uninit(void)
297 {
298 RPCDEBUG("uninit");
299 untimeout(rpcclnt_timer, (void *)NULL, rpcclnt_timer_handle);
300
301 /* XXX delete sysctl variables? */
302 }
303
304 int
305 rpcclnt_setup(clnt, program, addr, sotype, soproto, auth, max_read_size, max_write_size, flags)
306 struct rpcclnt * clnt;
307 struct rpc_program * program;
308 struct sockaddr * addr;
309 int sotype;
310 int soproto;
311 struct rpc_auth * auth;
312 int max_read_size;
313 int max_write_size;
314 int flags;
315 {
316 if (clnt == NULL || program == NULL || addr == NULL || auth == NULL)
317 RPC_RETURN (EFAULT);
318
319 if (program->prog_name == NULL)
320 RPC_RETURN (EFAULT);
321 clnt->rc_prog = program;
322
323 clnt->rc_name = addr;
324 clnt->rc_sotype = sotype;
325 clnt->rc_soproto = soproto;
326 clnt->rc_auth = auth;
327 clnt->rc_rsize = max_read_size;
328 clnt->rc_wsize = max_write_size;
329 clnt->rc_flag = flags;
330
331 clnt->rc_proctlen = 0;
332 clnt->rc_proct = NULL;
333
334 RPC_RETURN (0);
335 }
336
337 /*
338 * Initialize sockets and congestion for a new RPC connection. We do not free
339 * the sockaddr if error.
340 */
341 int
342 rpcclnt_connect(rpc, td)
343 struct rpcclnt *rpc;
344 RPC_EXEC_CTX td;
345 {
346 struct socket *so;
347 int s, error, rcvreserve, sndreserve;
348 struct sockaddr *saddr;
349
350 #ifdef __OpenBSD__
351 struct sockaddr_in *sin;
352 struct mbuf *m;
353 #else
354 struct sockaddr_in sin;
355
356 int soarg;
357 struct sockopt opt;
358 #endif
359
360 if (rpc == NULL) {
361 RPCDEBUG("no rpcclnt struct!\n");
362 RPC_RETURN(EFAULT);
363 }
364
365 GIANT_REQUIRED; /* XXX until socket locking done */
366
367 /* create the socket */
368 rpc->rc_so = NULL;
369
370 saddr = rpc->rc_name;
371
372 error = socreate(saddr->sa_family, &rpc->rc_so, rpc->rc_sotype,
373 rpc->rc_soproto, td->td_ucred, td);
374
375 if (error) {
376 RPCDEBUG("error %d in socreate()", error);
377 RPC_RETURN(error);
378 }
379 so = rpc->rc_so;
380 rpc->rc_soflags = so->so_proto->pr_flags;
381
382 /*
383 * Some servers require that the client port be a reserved port
384 * number. We always allocate a reserved port, as this prevents
385 * filehandle disclosure through UDP port capture.
386 */
387 if (saddr->sa_family == AF_INET) {
388 #ifdef __OpenBSD__
389 struct mbuf *mopt;
390 int *ip;
391 #endif
392
393 #ifdef __OpenBSD__
394 MGET(mopt, M_TRYWAIT, MT_SOOPTS);
395 mopt->m_len = sizeof(int);
396 ip = mtod(mopt, int *);
397 *ip = IP_PORTRANGE_LOW;
398
399 error = sosetopt(so, IPPROTO_IP, IP_PORTRANGE, mopt);
400 #else
401 soarg = IP_PORTRANGE_LOW;
402 bzero(&opt, sizeof(struct sockopt));
403 opt.sopt_dir = SOPT_SET;
404 opt.sopt_level = IPPROTO_IP;
405 opt.sopt_name = IP_PORTRANGE;
406 opt.sopt_val = &soarg;
407 opt.sopt_valsize = sizeof(soarg);
408
409 error = sosetopt(so, &opt);
410 #endif
411 if (error)
412 goto bad;
413
414 #if __OpenBSD__
415 MGET(m, M_TRYWAIT, MT_SONAME);
416 sin = mtod(m, struct sockaddr_in *);
417 sin->sin_len = m->m_len = sizeof(struct sockaddr_in);
418 sin->sin_family = AF_INET;
419 sin->sin_addr.s_addr = INADDR_ANY;
420 sin->sin_port = htons(0);
421 error = sobind(so, m);
422 m_freem(m);
423 #else
424 sin.sin_len = sizeof(struct sockaddr_in);
425 sin.sin_family = AF_INET;
426 sin.sin_addr.s_addr = INADDR_ANY;
427 sin.sin_port = htons(0);
428 error = sobind(so, (struct sockaddr *) & sin, td);
429 #endif
430 if (error)
431 goto bad;
432
433 #if __OpenBSD__
434 MGET(mopt, M_TRYWAIT, MT_SOOPTS);
435 mopt->m_len = sizeof(int);
436 ip = mtod(mopt, int *);
437 *ip = IP_PORTRANGE_DEFAULT;
438 error = sosetopt(so, IPPROTO_IP, IP_PORTRANGE, mopt);
439 #else
440 soarg = IP_PORTRANGE_DEFAULT;
441 bzero(&opt, sizeof(struct sockopt));
442 opt.sopt_dir = SOPT_SET;
443 opt.sopt_level = IPPROTO_IP;
444 opt.sopt_name = IP_PORTRANGE;
445 opt.sopt_val = &soarg;
446 opt.sopt_valsize = sizeof(soarg);
447 error = sosetopt(so, &opt);
448 #endif
449 if (error)
450 goto bad;
451 }
452 /*
453 * Protocols that do not require connections may be optionally left
454 * unconnected for servers that reply from a port other than
455 * NFS_PORT.
456 */
457 if (rpc->rc_flag & RPCCLNT_NOCONN) {
458 if (rpc->rc_soflags & PR_CONNREQUIRED) {
459 error = ENOTCONN;
460 goto bad;
461 }
462 } else {
463 error = soconnect(so, saddr, td);
464 if (error)
465 goto bad;
466
467 /*
468 * Wait for the connection to complete. Cribbed from the
469 * connect system call but with the wait timing out so that
470 * interruptible mounts don't hang here for a long time.
471 */
472 #ifdef __OpenBSD__
473 s = splsoftnet();
474 #else
475 s = splnet();
476 #endif
477 while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
478 (void)tsleep((caddr_t) & so->so_timeo, PSOCK,
479 "rpc", 2 * hz);
480
481 /*
482 * XXX needs to catch interrupt signals. something
483 * like this: if ((so->so_state & SS_ISCONNECTING) &&
484 * so->so_error == 0 && rep && (error =
485 * nfs_sigintr(nmp, rep, rep->r_td)) != 0) {
486 * so->so_state &= ~SS_ISCONNECTING; splx(s); goto
487 * bad; }
488 */
489 }
490 if (so->so_error) {
491 error = so->so_error;
492 so->so_error = 0;
493 splx(s);
494 goto bad;
495 }
496 splx(s);
497 }
498 if (rpc->rc_flag & (RPCCLNT_SOFT | RPCCLNT_INT)) {
499 so->so_rcv.sb_timeo = (5 * hz);
500 so->so_snd.sb_timeo = (5 * hz);
501 } else {
502 so->so_rcv.sb_timeo = 0;
503 so->so_snd.sb_timeo = 0;
504 }
505
506
507 if (rpc->rc_sotype == SOCK_DGRAM) {
508 sndreserve = rpc->rc_wsize + RPC_MAXPKTHDR;
509 rcvreserve = rpc->rc_rsize + RPC_MAXPKTHDR;
510 } else if (rpc->rc_sotype == SOCK_SEQPACKET) {
511 sndreserve = (rpc->rc_wsize + RPC_MAXPKTHDR) * 2;
512 rcvreserve = (rpc->rc_rsize + RPC_MAXPKTHDR) * 2;
513 } else {
514 if (rpc->rc_sotype != SOCK_STREAM)
515 panic("rpcclnt_connect() bad sotype");
516 if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
517 #ifdef __OpenBSD__
518 MGET(m, M_TRYWAIT, MT_SOOPTS);
519 *mtod(m, int32_t *) = 1;
520 m->m_len = sizeof(int32_t);
521 sosetopt(so, SOL_SOCKET, SO_KEEPALIVE, m);
522 #else
523 soarg = 1;
524
525 bzero(&opt, sizeof(struct sockopt));
526 opt.sopt_dir = SOPT_SET;
527 opt.sopt_level = SOL_SOCKET;
528 opt.sopt_name = SO_KEEPALIVE;
529 opt.sopt_val = &soarg;
530 opt.sopt_valsize = sizeof(soarg);
531 sosetopt(so, &opt);
532 #endif
533 }
534 if (so->so_proto->pr_protocol == IPPROTO_TCP) {
535 #ifdef __OpenBSD__
536 MGET(m, M_TRYWAIT, MT_SOOPTS);
537 *mtod(m, int32_t *) = 1;
538 m->m_len = sizeof(int32_t);
539 sosetopt(so, IPPROTO_TCP, TCP_NODELAY, m);
540 #else
541 soarg = 1;
542
543 bzero(&opt, sizeof(struct sockopt));
544 opt.sopt_dir = SOPT_SET;
545 opt.sopt_level = IPPROTO_TCP;
546 opt.sopt_name = TCP_NODELAY;
547 opt.sopt_val = &soarg;
548 opt.sopt_valsize = sizeof(soarg);
549 sosetopt(so, &opt);
550 #endif
551 }
552 sndreserve = (rpc->rc_wsize + RPC_MAXPKTHDR +
553 sizeof(u_int32_t)) * 2;
554 rcvreserve = (rpc->rc_rsize + RPC_MAXPKTHDR +
555 sizeof(u_int32_t)) * 2;
556 }
557 error = soreserve(so, sndreserve, rcvreserve);
558 if (error)
559 goto bad;
560 so->so_rcv.sb_flags |= SB_NOINTR;
561 so->so_snd.sb_flags |= SB_NOINTR;
562
563 /* Initialize other non-zero congestion variables */
564 rpc->rc_srtt[0] = rpc->rc_srtt[1] = rpc->rc_srtt[2] = rpc->rc_srtt[3] =
565 rpc->rc_srtt[4] = (RPC_TIMEO << 3);
566 rpc->rc_sdrtt[0] = rpc->rc_sdrtt[1] = rpc->rc_sdrtt[2] =
567 rpc->rc_sdrtt[3] = rpc->rc_sdrtt[4] = 0;
568 rpc->rc_cwnd = RPC_MAXCWND / 2; /* Initial send window */
569 rpc->rc_sent = 0;
570 rpc->rc_timeouts = 0;
571 RPC_RETURN(0);
572
573 bad:
574 rpcclnt_disconnect(rpc);
575 RPC_RETURN(error);
576 }
577
578
579 /*
580 * Reconnect routine: Called when a connection is broken on a reliable
581 * protocol. - clean up the old socket - nfs_connect() again - set
582 * R_MUSTRESEND for all outstanding requests on mount point If this fails the
583 * mount point is DEAD! nb: Must be called with the nfs_sndlock() set on the
584 * mount point.
585 */
586 int
587 rpcclnt_reconnect(rep, td)
588 struct rpctask *rep;
589 RPC_EXEC_CTX td;
590 {
591 struct rpctask *rp;
592 struct rpcclnt *rpc = rep->r_rpcclnt;
593 int error;
594
595 rpcclnt_disconnect(rpc);
596 while ((error = rpcclnt_connect(rpc, td)) != 0) {
597 if (error == EINTR || error == ERESTART)
598 RPC_RETURN(EINTR);
599 tsleep(&lbolt, PSOCK, "rpccon", 0);
600 }
601
602 /*
603 * Loop through outstanding request list and fix up all requests on
604 * old socket.
605 */
606 for (rp = TAILQ_FIRST(&rpctask_q); rp != NULL;
607 rp = TAILQ_NEXT(rp, r_chain)) {
608 if (rp->r_rpcclnt == rpc)
609 rp->r_flags |= R_MUSTRESEND;
610 }
611 RPC_RETURN(0);
612 }
613
614 /*
615 * NFS disconnect. Clean up and unlink.
616 */
617 void
618 rpcclnt_disconnect(rpc)
619 struct rpcclnt *rpc;
620 {
621 struct socket *so;
622
623 GIANT_REQUIRED; /* XXX until socket locking done */
624
625 if (rpc->rc_so) {
626 so = rpc->rc_so;
627 rpc->rc_so = NULL;
628 soshutdown(so, 2);
629 soclose(so);
630 }
631 }
632
633 void
634 rpcclnt_safedisconnect(struct rpcclnt * rpc)
635 {
636 struct rpctask dummytask;
637
638 bzero(&dummytask, sizeof(dummytask));
639 dummytask.r_rpcclnt = rpc;
640 rpcclnt_rcvlock(&dummytask);
641 rpcclnt_disconnect(rpc);
642 rpcclnt_rcvunlock(&rpc->rc_flag);
643 }
644
645 /*
646 * This is the nfs send routine. For connection based socket types, it must
647 * be called with an nfs_sndlock() on the socket. "rep == NULL" indicates
648 * that it has been called from a server. For the client side: - return EINTR
649 * if the RPC is terminated, 0 otherwise - set R_MUSTRESEND if the send fails
650 * for any reason - do any cleanup required by recoverable socket errors
651 * (???) For the server side: - return EINTR or ERESTART if interrupted by a
652 * signal - return EPIPE if a connection is lost for connection based sockets
653 * (TCP...) - do any cleanup required by recoverable socket errors (???)
654 */
655 static int
656 rpcclnt_send(so, nam, top, rep)
657 struct socket *so;
658 #ifdef __OpenBSD__
659 struct mbuf *nam;
660 #else
661 struct sockaddr *nam;
662 #endif
663 struct mbuf *top;
664 struct rpctask *rep;
665 {
666 #ifdef __OpenBSD__
667 struct mbuf *sendnam;
668 #else
669 struct sockaddr *sendnam;
670 struct thread *td = curthread;
671 #endif
672 int error, soflags, flags;
673
674 GIANT_REQUIRED; /* XXX until socket locking done */
675
676 if (rep) {
677 if (rep->r_flags & R_SOFTTERM) {
678 m_freem(top);
679 RPC_RETURN(EINTR);
680 }
681 if ((so = rep->r_rpcclnt->rc_so) == NULL) {
682 rep->r_flags |= R_MUSTRESEND;
683 m_freem(top);
684 RPC_RETURN(0);
685 }
686 rep->r_flags &= ~R_MUSTRESEND;
687 soflags = rep->r_rpcclnt->rc_soflags;
688 } else
689 soflags = so->so_proto->pr_flags;
690
691 if ((soflags & PR_CONNREQUIRED) || (so->so_state & SS_ISCONNECTED))
692 sendnam = NULL;
693 else
694 sendnam = nam;
695
696 if (so->so_type == SOCK_SEQPACKET)
697 flags = MSG_EOR;
698 else
699 flags = 0;
700
701 error = sosend(so, sendnam, NULL, top, NULL, flags, td);
702
703 if (error) {
704 if (rep) {
705 log(LOG_INFO, "rpc send error %d for service %s\n", error,
706 rep->r_rpcclnt->rc_prog->prog_name);
707 /*
708 * Deal with errors for the client side.
709 */
710 if (rep->r_flags & R_SOFTTERM)
711 error = EINTR;
712 else
713 rep->r_flags |= R_MUSTRESEND;
714 } else
715 log(LOG_INFO, "rpc service send error %d\n", error);
716
717 /*
718 * Handle any recoverable (soft) socket errors here.
719 */
720 if (error != EINTR && error != ERESTART &&
721 error != EWOULDBLOCK && error != EPIPE)
722 error = 0;
723 }
724 RPC_RETURN(error);
725 }
726
727 /*
728 * Receive a Sun RPC Request/Reply. For SOCK_DGRAM, the work is all done by
729 * soreceive(), but for SOCK_STREAM we must deal with the Record Mark and
730 * consolidate the data into a new mbuf list. nb: Sometimes TCP passes the
731 * data up to soreceive() in long lists of small mbufs. For SOCK_STREAM we
732 * must be very careful to read an entire record once we have read any of it,
733 * even if the system call has been interrupted.
734 */
735 static int
736 rpcclnt_receive(rep, aname, mp, td)
737 struct rpctask *rep;
738 #ifdef __OpenBSD__
739 struct mbuf **aname;
740 #else
741 struct sockaddr **aname;
742 #endif
743 struct mbuf **mp;
744 RPC_EXEC_CTX td;
745 {
746 struct socket *so;
747 struct uio auio;
748 struct iovec aio;
749 struct mbuf *m;
750 struct mbuf *control;
751 u_int32_t len;
752 #ifdef __OpenBSD__
753 struct mbuf **getnam;
754 #else
755 struct sockaddr **getnam;
756 #endif
757 int error, sotype, rcvflg;
758
759 GIANT_REQUIRED; /* XXX until socket locking done */
760
761 /*
762 * Set up arguments for soreceive()
763 */
764 *mp = NULL;
765 *aname = NULL;
766 sotype = rep->r_rpcclnt->rc_sotype;
767
768 /*
769 * For reliable protocols, lock against other senders/receivers in
770 * case a reconnect is necessary. For SOCK_STREAM, first get the
771 * Record Mark to find out how much more there is to get. We must
772 * lock the socket against other receivers until we have an entire
773 * rpc request/reply.
774 */
775 if (sotype != SOCK_DGRAM) {
776 error = rpcclnt_sndlock(&rep->r_rpcclnt->rc_flag, rep);
777 if (error)
778 RPC_RETURN(error);
779 tryagain:
780 /*
781 * Check for fatal errors and resending request.
782 */
783 /*
784 * Ugh: If a reconnect attempt just happened, rc_so would
785 * have changed. NULL indicates a failed attempt that has
786 * essentially shut down this mount point.
787 */
788 if (rep->r_mrep || (rep->r_flags & R_SOFTTERM)) {
789 rpcclnt_sndunlock(&rep->r_rpcclnt->rc_flag);
790 RPC_RETURN(EINTR);
791 }
792 so = rep->r_rpcclnt->rc_so;
793 if (!so) {
794 error = rpcclnt_reconnect(rep, td);
795 if (error) {
796 rpcclnt_sndunlock(&rep->r_rpcclnt->rc_flag);
797 RPC_RETURN(error);
798 }
799 goto tryagain;
800 }
801 while (rep->r_flags & R_MUSTRESEND) {
802 m = m_copym(rep->r_mreq, 0, M_COPYALL, M_TRYWAIT);
803 rpcstats.rpcretries++;
804 error = rpcclnt_send(so, rep->r_rpcclnt->rc_name, m, rep);
805 if (error) {
806 if (error == EINTR || error == ERESTART ||
807 (error = rpcclnt_reconnect(rep, td)) != 0) {
808 rpcclnt_sndunlock(&rep->r_rpcclnt->rc_flag);
809 RPC_RETURN(error);
810 }
811 goto tryagain;
812 }
813 }
814 rpcclnt_sndunlock(&rep->r_rpcclnt->rc_flag);
815 if (sotype == SOCK_STREAM) {
816 aio.iov_base = (caddr_t) & len;
817 aio.iov_len = sizeof(u_int32_t);
818 auio.uio_iov = &aio;
819 auio.uio_iovcnt = 1;
820 auio.uio_segflg = UIO_SYSSPACE;
821 auio.uio_rw = UIO_READ;
822 auio.uio_offset = 0;
823 auio.uio_resid = sizeof(u_int32_t);
824 #ifdef __OpenBSD__
825 auio.uio_procp = td;
826 #else
827 auio.uio_td = td;
828 #endif
829 do {
830 rcvflg = MSG_WAITALL;
831 error = soreceive(so, NULL, &auio, NULL, NULL, &rcvflg);
832 if (error == EWOULDBLOCK && rep) {
833 if (rep->r_flags & R_SOFTTERM)
834 RPC_RETURN(EINTR);
835 }
836 } while (error == EWOULDBLOCK);
837 if (!error && auio.uio_resid > 0) {
838 log(LOG_INFO,
839 "short receive (%zu/%zu) from rpc server %s\n",
840 sizeof(u_int32_t) - auio.uio_resid,
841 sizeof(u_int32_t),
842 rep->r_rpcclnt->rc_prog->prog_name);
843 error = EPIPE;
844 }
845 if (error)
846 goto errout;
847 len = ntohl(len) & ~0x80000000;
848 /*
849 * This is SERIOUS! We are out of sync with the
850 * sender and forcing a disconnect/reconnect is all I
851 * can do.
852 */
853 if (len > RPC_MAXPACKET) {
854 log(LOG_ERR, "%s (%d) from rpc server %s\n",
855 "impossible packet length",
856 len,
857 rep->r_rpcclnt->rc_prog->prog_name);
858 error = EFBIG;
859 goto errout;
860 }
861 auio.uio_resid = len;
862 do {
863 rcvflg = MSG_WAITALL;
864 error = soreceive(so, NULL, &auio, mp, NULL, &rcvflg);
865 } while (error == EWOULDBLOCK || error == EINTR ||
866 error == ERESTART);
867 if (!error && auio.uio_resid > 0) {
868 log(LOG_INFO,
869 "short receive (%d/%d) from rpc server %s\n",
870 len - auio.uio_resid, len,
871 rep->r_rpcclnt->rc_prog->prog_name);
872 error = EPIPE;
873 }
874 } else {
875 /*
876 * NB: Since uio_resid is big, MSG_WAITALL is ignored
877 * and soreceive() will return when it has either a
878 * control msg or a data msg. We have no use for
879 * control msg., but must grab them and then throw
880 * them away so we know what is going on.
881 */
882 auio.uio_resid = len = 100000000; /* Anything Big */
883 #ifdef __OpenBSD__
884 auio.uio_procp = td;
885 #else
886 auio.uio_td = td;
887 #endif
888 do {
889 rcvflg = 0;
890 error = soreceive(so, NULL, &auio, mp, &control, &rcvflg);
891 if (control)
892 m_freem(control);
893 if (error == EWOULDBLOCK && rep) {
894 if (rep->r_flags & R_SOFTTERM)
895 RPC_RETURN(EINTR);
896 }
897 } while (error == EWOULDBLOCK ||
898 (!error && *mp == NULL && control));
899 if ((rcvflg & MSG_EOR) == 0)
900 printf("Egad!!\n");
901 if (!error && *mp == NULL)
902 error = EPIPE;
903 len -= auio.uio_resid;
904 }
905 errout:
906 if (error && error != EINTR && error != ERESTART) {
907 m_freem(*mp);
908 *mp = (struct mbuf *) 0;
909 if (error != EPIPE)
910 log(LOG_INFO,
911 "receive error %d from rpc server %s\n",
912 error,
913 rep->r_rpcclnt->rc_prog->prog_name);
914 error = rpcclnt_sndlock(&rep->r_rpcclnt->rc_flag, rep);
915 if (!error)
916 error = rpcclnt_reconnect(rep, td);
917 if (!error)
918 goto tryagain;
919 }
920 } else {
921 if ((so = rep->r_rpcclnt->rc_so) == NULL)
922 RPC_RETURN(EACCES);
923 if (so->so_state & SS_ISCONNECTED)
924 getnam = NULL;
925 else
926 getnam = aname;
927 auio.uio_resid = len = 1000000;
928 #ifdef __OpenBSD__
929 auio.uio_procp = td;
930 #else
931 auio.uio_td = td;
932 #endif
933
934 do {
935 rcvflg = 0;
936 error = soreceive(so, getnam, &auio, mp, NULL, &rcvflg);
937 RPCDEBUG("soreceivce returns %d", error);
938 if (error == EWOULDBLOCK && (rep->r_flags & R_SOFTTERM)) {
939 RPCDEBUG("wouldblock && softerm -> EINTR");
940 RPC_RETURN(EINTR);
941 }
942 } while (error == EWOULDBLOCK);
943 len -= auio.uio_resid;
944 }
945 if (error) {
946 m_freem(*mp);
947 *mp = NULL;
948 }
949
950 /*
951 * Search for any mbufs that are not a multiple of 4 bytes
952 * long or with m_data not longword aligned. These could
953 * cause pointer alignment problems, so copy them to well
954 * aligned mbufs.
955 */
956 rpcclnt_realign(mp, 5 * RPCX_UNSIGNED);
957 RPC_RETURN(error);
958 }
959
960
961 /*
962 * Implement receipt of reply on a socket. We must search through the list of
963 * received datagrams matching them with outstanding requests using the xid,
964 * until ours is found.
965 */
/* ARGSUSED */
/*
 * Wait for the reply to `myrep'.
 *
 * Repeatedly takes the receive lock, pulls the next message off the
 * client's socket, decodes its xid and dispatches the reply to whichever
 * outstanding task on rpctask_q it answers.  Returns 0 once myrep itself
 * has been answered (its r_mrep/r_md/r_dpos are then valid), or an error
 * from the lock/receive path.  With R_GETONEREP set, returns after at
 * most one message even if it was not ours.
 */
static int
rpcclnt_reply(myrep, td)
	struct rpctask *myrep;
	RPC_EXEC_CTX td;
{
	struct rpctask *rep;
	struct rpcclnt *rpc = myrep->r_rpcclnt;
	int32_t t1;			/* scratch used by the rpcm_* macros */
	struct mbuf *mrep, *md;
#ifdef __OpenBSD__
	struct mbuf *nam;
#else
	struct sockaddr *nam;
#endif
	u_int32_t rxid, *tl;
	caddr_t dpos, cp2;		/* dpos/cp2 are used by rpcm_dissect */
	int error;

	/*
	 * Loop around until we get our own reply
	 */
	for (;;) {
		/*
		 * Lock against other receivers so that I don't get stuck in
		 * sbwait() after someone else has received my reply for me.
		 * Also necessary for connection based protocols to avoid
		 * race conditions during a reconnect.
		 */
		error = rpcclnt_rcvlock(myrep);
		if (error)
			RPC_RETURN(error);
		/* Already received, bye bye */
		if (myrep->r_mrep != NULL) {
			rpcclnt_rcvunlock(&rpc->rc_flag);
			RPC_RETURN(0);
		}
		/*
		 * Get the next Rpc reply off the socket
		 */
		error = rpcclnt_receive(myrep, &nam, &mrep, td);

		rpcclnt_rcvunlock(&rpc->rc_flag);

		if (error) {
			/*
			 * Ignore routing errors on connectionless
			 * protocols??
			 */
			if (RPCIGNORE_SOERROR(rpc->rc_soflags, error)) {
				rpc->rc_so->so_error = 0;
				if (myrep->r_flags & R_GETONEREP)
					RPC_RETURN(0);
				RPCDEBUG("ingoring routing error on connectionless protocol.");
				continue;
			}
			RPC_RETURN(error);
		}
		/* The sender address is not needed; free it right away. */
#ifdef __OpenBSD__
		if (nam)
			m_freem(nam);
#else
		if (nam)
			FREE(nam, M_SONAME);
#endif

		/*
		 * Get the xid and check that it is an rpc reply
		 */
		md = mrep;
		dpos = mtod(md, caddr_t);
		/* NB: rpcm_dissect jumps to the rpcmout label below when the
		 * mbuf chain is too short to contain the requested bytes. */
		rpcm_dissect(tl, u_int32_t *, 2 * RPCX_UNSIGNED);
		rxid = *tl++;
		if (*tl != rpc_reply) {
			rpcstats.rpcinvalid++;
			m_freem(mrep);
rpcmout:
			if (myrep->r_flags & R_GETONEREP)
				RPC_RETURN(0);
			continue;
		}
		/*
		 * Loop through the request list to match up the reply Iff no
		 * match, just drop the datagram
		 */
		TAILQ_FOREACH(rep, &rpctask_q, r_chain) {
			if (rep->r_mrep == NULL && rxid == rep->r_xid) {
				/* Found it.. */
				rep->r_mrep = mrep;
				rep->r_md = md;
				rep->r_dpos = dpos;

				/*
				 * Update congestion window. Do the additive
				 * increase of one rpc/rtt.
				 */
				if (rpc->rc_cwnd <= rpc->rc_sent) {
					rpc->rc_cwnd +=
					    (RPC_CWNDSCALE * RPC_CWNDSCALE +
					    (rpc->rc_cwnd >> 1)) / rpc->rc_cwnd;
					if (rpc->rc_cwnd > RPC_MAXCWND)
						rpc->rc_cwnd = RPC_MAXCWND;
				}
				rep->r_flags &= ~R_SENT;
				rpc->rc_sent -= RPC_CWNDSCALE;
				/*
				 * Update rtt using a gain of 0.125 on the
				 * mean and a gain of 0.25 on the deviation.
				 */
				if (rep->r_flags & R_TIMING) {
					/*
					 * Since the timer resolution of
					 * NFS_HZ is so course, it can often
					 * result in r_rtt == 0. Since r_rtt
					 * == N means that the actual rtt is
					 * between N+dt and N+2-dt ticks, add
					 * 1.
					 */
					t1 = rep->r_rtt + 1;
					t1 -= (RPC_SRTT(rpc, rep) >> 3);
					RPC_SRTT(rpc, rep) += t1;
					if (t1 < 0)
						t1 = -t1;
					t1 -= (RPC_SDRTT(rpc, rep) >> 2);
					RPC_SDRTT(rpc, rep) += t1;
				}
				rpc->rc_timeouts = 0;
				break;
			}
		}
		/*
		 * If not matched to a request, drop it. If it's mine, get
		 * out.
		 */
		if (rep == 0) {
			rpcstats.rpcunexpected++;
			RPCDEBUG("rpc reply not matched\n");
			m_freem(mrep);
		} else if (rep == myrep) {
			if (rep->r_mrep == NULL)
				panic("rpcreply nil");
			RPC_RETURN(0);
		}
		if (myrep->r_flags & R_GETONEREP)
			RPC_RETURN(0);
	}
}
1113
1114 /* XXX: ignores tryagain! */
1115 /*
1116 * code from nfs_request - goes something like this - fill in task struct -
1117 * links task into list - calls nfs_send() for first transmit - calls
1118 * nfs_receive() to get reply - fills in reply (which should be initialized
1119 * prior to calling), which is valid when 0 is returned and is NEVER freed in
1120 * this function
1121 *
1122 * always frees the request header, but NEVER frees 'mrest'
1123 *
1124 */
1125 /*
1126 * ruthtype
1127 * pcclnt_setauth() should be used before calling this. EAUTH is returned if
1128 * authentication fails.
1129 */
1130 /*
1131 * note that reply->result_* are invalid unless reply->type ==
1132 * RPC_MSGACCEPTED and reply->status == RPC_SUCCESS and that reply->verf_*
1133 * are invalid unless reply->type == RPC_MSGACCEPTED
1134 */
/*
 * Perform one client-side RPC: build the header, transmit (directly or
 * via the retransmit timer), wait for the matching reply, then decode
 * the reply header into `reply'.  Frees the request header it built but
 * never frees `mrest'; the reply mbuf chain (reply->mrep) is handed to
 * the caller when 0 is returned.
 */
int
rpcclnt_request(rpc, mrest, procnum, td, cred, reply)
	struct rpcclnt *rpc;
	struct mbuf *mrest;
	int procnum;
	RPC_EXEC_CTX td;
	struct ucred *cred;
	struct rpc_reply *reply;
{
	struct mbuf *m, *mrep;
	struct rpctask *task;
	u_int32_t *tl;
	struct mbuf *md, *mheadend;
	caddr_t dpos, cp2;		/* cp2/t1 are used by the rpcm_* macros */
	int t1, s, error = 0, mrest_len;
	u_int32_t xid;

	/* Allocate and fill in the task tracking structure. */
#ifdef __OpenBSD__
	task = pool_get(&rpctask_pool, PR_WAITOK);
#else
	MALLOC(task, struct rpctask *, sizeof(struct rpctask), M_RPC, (M_WAITOK | M_ZERO));
#endif

	task->r_rpcclnt = rpc;
	task->r_procnum = procnum;
	task->r_td = td;

	mrest_len = m_length(mrest, NULL);

	/*
	 * NOTE(review): rpcclnt_buildheader() can return NULL when the auth
	 * header cannot be built; that case is not checked here and would
	 * fault in M_PREPEND below -- verify and handle upstream.
	 */
	m = rpcclnt_buildheader(rpc, procnum, mrest, mrest_len, &xid, &mheadend,
	    cred);

	/*
	 * For stream protocols, insert a Sun RPC Record Mark.
	 */
	if (rpc->rc_sotype == SOCK_STREAM) {
		M_PREPEND(m, RPCX_UNSIGNED, M_TRYWAIT);
		*mtod(m, u_int32_t *) = htonl(0x80000000 |
		    (m->m_pkthdr.len - RPCX_UNSIGNED));
	}
	task->r_mreq = m;
	task->r_xid = xid;

	/* Soft mounts give up after rc_retry attempts; hard ones never do. */
	if (rpc->rc_flag & RPCCLNT_SOFT)
		task->r_retry = rpc->rc_retry;
	else
		task->r_retry = RPC_MAXREXMIT + 1;	/* past clip limit */
	task->r_rtt = task->r_rexmit = 0;

	if (rpcclnt_proct(rpc, procnum) > 0)
		task->r_flags = R_TIMING;
	else
		task->r_flags = 0;
	task->r_mrep = NULL;

	/*
	 * Do the client side RPC.
	 */
	rpcstats.rpcrequests++;

	/*
	 * Chain request into list of outstanding requests. Be sure to put it
	 * LAST so timer finds oldest requests first.
	 */
	s = splsoftclock();
	TAILQ_INSERT_TAIL(&rpctask_q, task, r_chain);

	/*
	 * If backing off another request or avoiding congestion, don't send
	 * this one now but let timer do it. If not timing a request, do it
	 * now.
	 */
	if (rpc->rc_so && (rpc->rc_sotype != SOCK_DGRAM ||
	    (rpc->rc_flag & RPCCLNT_DUMBTIMR) ||
	    rpc->rc_sent < rpc->rc_cwnd)) {
		splx(s);

		if (rpc->rc_soflags & PR_CONNREQUIRED)
			error = rpcclnt_sndlock(&rpc->rc_flag, task);
		if (!error) {
			/* Send a copy; r_mreq is kept for retransmission. */
			error = rpcclnt_send(rpc->rc_so, rpc->rc_name,
			    m_copym(m, 0, M_COPYALL, M_TRYWAIT),
			    task);
			if (rpc->rc_soflags & PR_CONNREQUIRED)
				rpcclnt_sndunlock(&rpc->rc_flag);
		}
		if (!error && (task->r_flags & R_MUSTRESEND) == 0) {
			rpc->rc_sent += RPC_CWNDSCALE;
			task->r_flags |= R_SENT;
		}
	} else {
		splx(s);
		task->r_rtt = -1;	/* tell the timer to send it */
	}

	/*
	 * Wait for the reply from our send or the timer's.
	 */
	if (!error || error == EPIPE)
		error = rpcclnt_reply(task, td);

	/*
	 * RPC done, unlink the request.
	 */
	s = splsoftclock();
	TAILQ_REMOVE(&rpctask_q, task, r_chain);
	splx(s);

	/*
	 * Decrement the outstanding request count.
	 */
	if (task->r_flags & R_SENT) {
		task->r_flags &= ~R_SENT;	/* paranoia */
		rpc->rc_sent -= RPC_CWNDSCALE;
	}
	/*
	 * If there was a successful reply and a tprintf msg. tprintf a
	 * response.
	 */
	if (!error && (task->r_flags & R_TPRINTFMSG))
		rpcclnt_msg(task->r_td, rpc->rc_prog->prog_name,
		    "is alive again");

	/* free request header (leaving mrest) */
	mheadend->m_next = NULL;
	m_freem(task->r_mreq);

	/* initialize reply */
	reply->mrep = task->r_mrep;
	reply->verf_md = NULL;
	reply->result_md = NULL;

	mrep = task->r_mrep;
	md = task->r_md;
	dpos = task->r_dpos;

	/* task structure is no longer needed */
#ifdef __OpenBSD__
	pool_put(&rpctask_pool, task);
#else
	FREE(task, M_RPC);
#endif

	if (error)
		goto rpcmout;

	/*
	 * break down the rpc header and check if ok
	 */

	rpcm_dissect(tl, u_int32_t *, RPCX_UNSIGNED);
	reply->stat.type = fxdr_unsigned(u_int32_t, *tl);

	if (reply->stat.type == RPC_MSGDENIED) {
		rpcm_dissect(tl, u_int32_t *, RPCX_UNSIGNED);
		reply->stat.status = fxdr_unsigned(u_int32_t, *tl);

		switch (reply->stat.status) {
		case RPC_MISMATCH:
			/* Server rejected our RPC version; record its range. */
			rpcm_dissect(tl, u_int32_t *, 2 * RPCX_UNSIGNED);
			reply->stat.mismatch_info.low = fxdr_unsigned(u_int32_t, *tl++);
			reply->stat.mismatch_info.high = fxdr_unsigned(u_int32_t, *tl);
			error = EOPNOTSUPP;
			break;
		case RPC_AUTHERR:
			rpcm_dissect(tl, u_int32_t *, RPCX_UNSIGNED);
			reply->stat.autherr = fxdr_unsigned(u_int32_t, *tl);
			error = EACCES;
			break;
		default:
			error = EBADRPC;
			break;
		}
		goto rpcmout;
	} else if (reply->stat.type != RPC_MSGACCEPTED) {
		error = EBADRPC;
		goto rpcmout;
	}

	/* Accepted: pick apart the verifier, then the accept status. */
	rpcm_dissect(tl, u_int32_t *, 2 * RPCX_UNSIGNED);

	reply->verf_md = md;
	reply->verf_dpos = dpos;

	reply->verf_type = fxdr_unsigned(u_int32_t, *tl++);
	reply->verf_size = fxdr_unsigned(u_int32_t, *tl);

	if (reply->verf_size != 0)
		rpcm_adv(rpcm_rndup(reply->verf_size));

	rpcm_dissect(tl, u_int32_t *, RPCX_UNSIGNED);
	reply->stat.status = fxdr_unsigned(u_int32_t, *tl);

	if (reply->stat.status == RPC_SUCCESS) {
		/* Point result_md/result_dpos at the start of the results,
		 * stepping to the next mbuf if this one is exhausted. */
		if ((uint32_t)(dpos - mtod(md, caddr_t)) >= md->m_len) {
			RPCDEBUG("where is the next mbuf?");
			RPCDEBUG("%d -> %d",
			    (int)(dpos - mtod(md, caddr_t)), md->m_len);
			if (md->m_next == NULL) {
				error = EBADRPC;
				goto rpcmout;
			} else {
				reply->result_md = md->m_next;
				reply->result_dpos = mtod(reply->result_md,
				    caddr_t);
			}
		} else {
			reply->result_md = md;
			reply->result_dpos = dpos;
		}
	} else if (reply->stat.status == RPC_PROGMISMATCH) {
		rpcm_dissect(tl, u_int32_t *, 2 * RPCX_UNSIGNED);
		reply->stat.mismatch_info.low = fxdr_unsigned(u_int32_t, *tl++);
		reply->stat.mismatch_info.high = fxdr_unsigned(u_int32_t, *tl);
		error = EOPNOTSUPP;
		goto rpcmout;
	} else {
		error = EPROTONOSUPPORT;
		goto rpcmout;
	}
	error = 0;

rpcmout:
	RPC_RETURN(error);
}
1360
1361
1362 /*
1363 * Nfs timer routine Scan the nfsreq list and retranmit any requests that
1364 * have timed out To avoid retransmission attempts on STREAM sockets (in the
1365 * future) make sure to set the r_retry field to 0 (implies nm_retry == 0).
1366 */
void
rpcclnt_timer(arg)
	void *arg;
{
#ifdef __OpenBSD__
	struct timeout *to = (struct timeout *) arg;
#endif
	struct rpctask *rep;
	struct mbuf *m;
	struct socket *so;
	struct rpcclnt *rpc;
	int timeo;
	int s, error;

#ifndef __OpenBSD__
	struct thread *td = curthread;
#endif

#if __OpenBSD__
	s = splsoftnet();
#else
	s = splnet();
#endif
	TAILQ_FOREACH(rep, &rpctask_q, r_chain) {
		rpc = rep->r_rpcclnt;
		/* Skip tasks that are answered or already terminated. */
		if (rep->r_mrep || (rep->r_flags & R_SOFTTERM))
			continue;
		if (rpcclnt_sigintr(rpc, rep, rep->r_td)) {
			rep->r_flags |= R_SOFTTERM;
			continue;
		}
		if (rep->r_rtt >= 0) {
			/*
			 * Timing in progress: bump r_rtt, compute the
			 * (possibly backed-off) timeout and leave the task
			 * alone if it has not expired yet.
			 */
			rep->r_rtt++;
			if (rpc->rc_flag & RPCCLNT_DUMBTIMR)
				timeo = rpc->rc_timeo;
			else
				timeo = RPC_RTO(rpc, rpcclnt_proct(rep->r_rpcclnt,
				    rep->r_procnum));
			if (rpc->rc_timeouts > 0)
				timeo *= rpcclnt_backoff[rpc->rc_timeouts - 1];
			if (rep->r_rtt <= timeo)
				continue;
			if (rpc->rc_timeouts < 8)
				rpc->rc_timeouts++;
		}
		/*
		 * Check for server not responding
		 */
		if ((rep->r_flags & R_TPRINTFMSG) == 0 &&
		    rep->r_rexmit > rpc->rc_deadthresh) {
			rpcclnt_msg(rep->r_td, rpc->rc_prog->prog_name,
			    "not responding");
			rep->r_flags |= R_TPRINTFMSG;
		}
		if (rep->r_rexmit >= rep->r_retry) {	/* too many */
			rpcstats.rpctimeouts++;
			rep->r_flags |= R_SOFTTERM;
			continue;
		}
		/* Stream sockets never retransmit here; just clip counter. */
		if (rpc->rc_sotype != SOCK_DGRAM) {
			if (++rep->r_rexmit > RPC_MAXREXMIT)
				rep->r_rexmit = RPC_MAXREXMIT;
			continue;
		}
		if ((so = rpc->rc_so) == NULL)
			continue;

		/*
		 * If there is enough space and the window allows.. Resend it
		 * Set r_rtt to -1 in case we fail to send it now.
		 */
		rep->r_rtt = -1;
		if (sbspace(&so->so_snd) >= rep->r_mreq->m_pkthdr.len &&
		    ((rpc->rc_flag & RPCCLNT_DUMBTIMR) ||
		     (rep->r_flags & R_SENT) ||
		     rpc->rc_sent < rpc->rc_cwnd) &&
		    (m = m_copym(rep->r_mreq, 0, M_COPYALL, M_DONTWAIT))) {
			if ((rpc->rc_flag & RPCCLNT_NOCONN) == 0)
				error = (*so->so_proto->pr_usrreqs->pru_send) (so, 0, m,
				    NULL, NULL, td);
			else
				error = (*so->so_proto->pr_usrreqs->pru_send)(so, 0, m, rpc->rc_name, NULL, td);
			if (error) {
				if (RPCIGNORE_SOERROR(rpc->rc_soflags, error))
					so->so_error = 0;
			} else {
				/*
				 * Iff first send, start timing else turn
				 * timing off, backoff timer and divide
				 * congestion window by 2.
				 */
				if (rep->r_flags & R_SENT) {
					rep->r_flags &= ~R_TIMING;
					if (++rep->r_rexmit > RPC_MAXREXMIT)
						rep->r_rexmit = RPC_MAXREXMIT;
					rpc->rc_cwnd >>= 1;
					if (rpc->rc_cwnd < RPC_CWNDSCALE)
						rpc->rc_cwnd = RPC_CWNDSCALE;
					rpcstats.rpcretries++;
				} else {
					rep->r_flags |= R_SENT;
					rpc->rc_sent += RPC_CWNDSCALE;
				}
				rep->r_rtt = 0;
			}
		}
	}
	splx(s);

	/* Reschedule ourselves. */
#ifdef __OpenBSD__
	/*
	 * NOTE(review): OpenBSD's timeout_add() takes (struct timeout *,
	 * int); the argument order/count here looks wrong -- confirm
	 * against timeout(9).
	 */
	timeout_add(rpcclnt_timer, to, rpcclnt_ticks);
#else
	rpcclnt_timer_handle = timeout(rpcclnt_timer, NULL, rpcclnt_ticks);
#endif
}
1482
1483 /*
1484 * Test for a termination condition pending on the process. This is used for
1485 * RPCCLNT_INT mounts.
1486 */
int
rpcclnt_sigintr(rpc, task, pr)
	struct rpcclnt *rpc;
	struct rpctask *task;
	RPC_EXEC_CTX pr;
{
	struct proc *p;

	sigset_t tmpset;

	if (rpc == NULL)
		return EFAULT;

	/* XXX deal with forced unmounts */

	/* A task already marked for soft termination counts as interrupted. */
	if (task && (task->r_flags & R_SOFTTERM))
		RPC_RETURN(EINTR);

	/* Signals only interrupt clients mounted interruptible. */
	if (!(rpc->rc_flag & RPCCLNT_INT))
		RPC_RETURN(0);

	if (pr == NULL)
		return (0);

#ifdef __OpenBSD__
	/* Pending signal that is neither masked nor ignored, and is in
	 * the interruptible set? */
	p = pr;
	if (p && p->p_siglist &&
	    (((p->p_siglist & ~p->p_sigmask) & ~p->p_sigignore) &
	     RPCINT_SIGMASK))
		RPC_RETURN(EINTR);
#else
	/* Same test using the FreeBSD sigset API, under the proc lock. */
	p = pr->td_proc;
	PROC_LOCK(p);
	tmpset = p->p_siglist;
	SIGSETNAND(tmpset, pr->td_sigmask);
	mtx_lock(&p->p_sigacts->ps_mtx);
	SIGSETNAND(tmpset, p->p_sigacts->ps_sigignore);
	mtx_unlock(&p->p_sigacts->ps_mtx);
	if (SIGNOTEMPTY(p->p_siglist) && RPCCLNTINT_SIGMASK(tmpset)) {
		PROC_UNLOCK(p);
		RPC_RETURN(EINTR);
	}
	PROC_UNLOCK(p);
#endif
	RPC_RETURN(0);
}
1533
1534 /*
1535 * Lock a socket against others. Necessary for STREAM sockets to ensure you
1536 * get an entire rpc request/reply and also to avoid race conditions between
1537 * the processes with nfs requests in progress when a reconnect is necessary.
1538 */
1539 static int
1540 rpcclnt_sndlock(flagp, task)
1541 int *flagp;
1542 struct rpctask *task;
1543 {
1544 RPC_EXEC_CTX p;
1545 int slpflag = 0, slptimeo = 0;
1546
1547 if (task) {
1548 p = task->r_td;
1549 if (task->r_rpcclnt->rc_flag & RPCCLNT_INT)
1550 slpflag = PCATCH;
1551 } else
1552 p = NULL;
1553 while (*flagp & RPCCLNT_SNDLOCK) {
1554 if (rpcclnt_sigintr(task->r_rpcclnt, task, p))
1555 RPC_RETURN(EINTR);
1556 *flagp |= RPCCLNT_WANTSND;
1557 (void)tsleep((caddr_t) flagp, slpflag | (PZERO - 1), "rpcsndlck",
1558 slptimeo);
1559 if (slpflag == PCATCH) {
1560 slpflag = 0;
1561 slptimeo = 2 * hz;
1562 }
1563 }
1564 *flagp |= RPCCLNT_SNDLOCK;
1565 RPC_RETURN(0);
1566 }
1567
1568 /*
1569 * Unlock the stream socket for others.
1570 */
1571 static void
1572 rpcclnt_sndunlock(flagp)
1573 int *flagp;
1574 {
1575
1576 if ((*flagp & RPCCLNT_SNDLOCK) == 0)
1577 panic("rpc sndunlock");
1578 *flagp &= ~RPCCLNT_SNDLOCK;
1579 if (*flagp & RPCCLNT_WANTSND) {
1580 *flagp &= ~RPCCLNT_WANTSND;
1581 wakeup((caddr_t) flagp);
1582 }
1583 }
1584
1585 static int
1586 rpcclnt_rcvlock(task)
1587 struct rpctask *task;
1588 {
1589 int *flagp = &task->r_rpcclnt->rc_flag;
1590 int slpflag, slptimeo = 0;
1591
1592 if (*flagp & RPCCLNT_INT)
1593 slpflag = PCATCH;
1594 else
1595 slpflag = 0;
1596 while (*flagp & RPCCLNT_RCVLOCK) {
1597 if (rpcclnt_sigintr(task->r_rpcclnt, task, task->r_td))
1598 RPC_RETURN(EINTR);
1599 *flagp |= RPCCLNT_WANTRCV;
1600 (void)tsleep((caddr_t) flagp, slpflag | (PZERO - 1), "rpcrcvlk",
1601 slptimeo);
1602 if (slpflag == PCATCH) {
1603 slpflag = 0;
1604 slptimeo = 2 * hz;
1605 }
1606 }
1607 *flagp |= RPCCLNT_RCVLOCK;
1608 RPC_RETURN(0);
1609 }
1610
1611 /*
1612 * Unlock the stream socket for others.
1613 */
1614 static void
1615 rpcclnt_rcvunlock(flagp)
1616 int *flagp;
1617 {
1618
1619 if ((*flagp & RPCCLNT_RCVLOCK) == 0)
1620 panic("nfs rcvunlock");
1621 *flagp &= ~RPCCLNT_RCVLOCK;
1622 if (*flagp & RPCCLNT_WANTRCV) {
1623 *flagp &= ~RPCCLNT_WANTRCV;
1624 wakeup((caddr_t) flagp);
1625 }
1626 }
1627
1628 #if 0
1629 /*
1630 * Check for badly aligned mbuf data areas and realign data in an mbuf list
1631 * by copying the data areas up, as required.
1632 */
1633 void
1634 rpcclnt_realign(m, hsiz)
1635 struct mbuf *m;
1636 int hsiz;
1637 {
1638 struct mbuf *m2;
1639 int siz, mlen, olen;
1640 caddr_t tcp, fcp;
1641 struct mbuf *mnew;
1642
1643 while (m) {
1644 /*
1645 * This never happens for UDP, rarely happens for TCP but
1646 * frequently happens for iso transport.
1647 */
1648 if ((m->m_len & 0x3) || (mtod(m, long)&0x3)) {
1649 olen = m->m_len;
1650 fcp = mtod(m, caddr_t);
1651 if ((long)fcp & 0x3) {
1652 if (m->m_flags & M_PKTHDR)
1653 m_tag_delete_chain(m, NULL);
1654 m->m_flags &= ~M_PKTHDR;
1655 if (m->m_flags & M_EXT)
1656 m->m_data = m->m_ext.ext_buf +
1657 ((m->m_ext.ext_size - olen) & ~0x3);
1658 else
1659 m->m_data = m->m_dat;
1660 }
1661 m->m_len = 0;
1662 tcp = mtod(m, caddr_t);
1663 mnew = m;
1664 m2 = m->m_next;
1665
1666 /*
1667 * If possible, only put the first invariant part of
1668 * the RPC header in the first mbuf.
1669 */
1670 mlen = M_TRAILINGSPACE(m);
1671 if (olen <= hsiz && mlen > hsiz)
1672 mlen = hsiz;
1673
1674 /* Loop through the mbuf list consolidating data. */
1675 while (m) {
1676 while (olen > 0) {
1677 if (mlen == 0) {
1678 if (m2->m_flags & M_PKTHDR)
1679 m_tag_delete_chain(m2, NULL);
1680 m2->m_flags &= ~M_PKTHDR;
1681 if (m2->m_flags & M_EXT)
1682 m2->m_data = m2->m_ext.ext_buf;
1683 else
1684 m2->m_data = m2->m_dat;
1685 m2->m_len = 0;
1686 mlen = M_TRAILINGSPACE(m2);
1687 tcp = mtod(m2, caddr_t);
1688 mnew = m2;
1689 m2 = m2->m_next;
1690 }
1691 siz = min(mlen, olen);
1692 if (tcp != fcp)
1693 bcopy(fcp, tcp, siz);
1694 mnew->m_len += siz;
1695 mlen -= siz;
1696 olen -= siz;
1697 tcp += siz;
1698 fcp += siz;
1699 }
1700 m = m->m_next;
1701 if (m) {
1702 olen = m->m_len;
1703 fcp = mtod(m, caddr_t);
1704 }
1705 }
1706
1707 /*
1708 * Finally, set m_len == 0 for any trailing mbufs
1709 * that have been copied out of.
1710 */
1711 while (m2) {
1712 m2->m_len = 0;
1713 m2 = m2->m_next;
1714 }
1715 return;
1716 }
1717 m = m->m_next;
1718 }
1719 }
1720 #else
/*
 * Realign an mbuf chain so that every mbuf's data is 32-bit aligned and
 * a multiple of 4 bytes long, by consolidating the misaligned tail of
 * the chain into a single freshly allocated mbuf (cluster if large).
 * The hsiz parameter is unused by this implementation.
 */
static void
rpcclnt_realign(struct mbuf **pm, int hsiz)
{
	struct mbuf *m;
	struct mbuf *n = NULL;
	int off = 0;

	RPCDEBUG("in rpcclnt_realign()");

	/* Walk until the first misaligned mbuf; allocate its replacement. */
	while ((m = *pm) != NULL) {
		if ((m->m_len & 0x3) || (mtod(m, intptr_t) & 0x3)) {
			MGET(n, M_TRYWAIT, MT_DATA);
			if (m->m_len >= MINCLSIZE) {
				MCLGET(n, M_TRYWAIT);
			}
			n->m_len = 0;
			break;
		}
		pm = &m->m_next;
	}

	/*
	 * If n is non-NULL, loop on m copying data, then replace the
	 * portion of the chain that had to be realigned.
	 */
	if (n != NULL) {
		while (m) {
			m_copyback(n, off, m->m_len, mtod(m, caddr_t));
			off += m->m_len;
			m = m->m_next;
		}
		/* *pm still points at the old misaligned sub-chain. */
		m_freem(*pm);
		*pm = n;
	}

	RPCDEBUG("leave rpcclnt_realign()");
}
1758 #endif
1759
1760 static int
1761 rpcclnt_msg(p, server, msg)
1762 RPC_EXEC_CTX p;
1763 const char *server;
1764 char *msg;
1765 {
1766 #ifdef __OpenBSD__
1767 tpr_t tpr;
1768 struct proc *pr = p;
1769
1770 if (p)
1771 tpr = tprintf_open(p);
1772 else
1773 tpr = NULL;
1774 tprintf(tpr, "rpc server %s: %s\n", server, msg);
1775 tprintf_close(tpr);
1776 RPC_RETURN(0);
1777 #else
1778 tprintf(p ? p->td_proc : NULL, LOG_INFO,
1779 "nfs server %s: %s\n", server, msg);
1780 RPC_RETURN(0);
1781 #endif
1782 }
1783
1784 /*
1785 * Build the RPC header and fill in the authorization info. The authorization
1786 * string argument is only used when the credentials come from outside of the
1787 * kernel (AUTH_KERB). (likewise, the ucred is only used when inside the
1788 * kernel) Returns the head of the mbuf list.
1789 */
1790 static struct mbuf *
1791 rpcclnt_buildheader(rc, procid, mrest, mrest_len, xidp, mheadend, cred)
1792 struct rpcclnt *rc;
1793 int procid;
1794 struct mbuf *mrest;
1795 u_int32_t mrest_len;
1796 int *xidp;
1797 struct mbuf **mheadend;
1798 struct ucred * cred;
1799 {
1800 /* register */ struct mbuf *mb;
1801 register u_int32_t *tl;
1802 /* register */ caddr_t bpos;
1803 struct mbuf *mreq, *mb2;
1804 int error;
1805
1806 MGETHDR(mb, M_TRYWAIT, MT_DATA);
1807 if (6 * RPCX_UNSIGNED >= MINCLSIZE) {
1808 MCLGET(mb, M_TRYWAIT);
1809 } else if (6 * RPCX_UNSIGNED < MHLEN) {
1810 MH_ALIGN(mb, 6 * RPCX_UNSIGNED);
1811 } else {
1812 RPCDEBUG("mbuf too small");
1813 panic("cheap bailout");
1814 }
1815 mb->m_len = 0;
1816 mreq = mb;
1817 bpos = mtod(mb, caddr_t);
1818
1819 /*
1820 * First the RPC header.
1821 */
1822 rpcm_build(tl, u_int32_t *, 6 * RPCX_UNSIGNED);
1823
1824 /* Get a new (non-zero) xid */
1825 if ((rpcclnt_xid == 0) && (rpcclnt_xid_touched == 0)) {
1826 rpcclnt_xid = arc4random();
1827 rpcclnt_xid_touched = 1;
1828 } else {
1829 while ((*xidp = arc4random() % 256) == 0);
1830 rpcclnt_xid += *xidp;
1831 }
1832
1833 /* XXX: funky... */
1834 *tl++ = *xidp = txdr_unsigned(rpcclnt_xid);
1835
1836 *tl++ = rpc_call;
1837 *tl++ = rpc_vers;
1838 *tl++ = txdr_unsigned(rc->rc_prog->prog_id);
1839 *tl++ = txdr_unsigned(rc->rc_prog->prog_version);
1840 *tl++ = txdr_unsigned(procid);
1841
1842 if ((error = rpcauth_buildheader(rc->rc_auth, cred, &mb, &bpos))) {
1843 RPCDEBUG("rpcauth_buildheader failed %d", error);
1844 return NULL;
1845 }
1846
1847 mb->m_next = mrest;
1848 *mheadend = mb;
1849 mreq->m_pkthdr.len = m_length(mreq, NULL);
1850 mreq->m_pkthdr.rcvif = NULL;
1851 return (mreq);
1852 }
1853
1854 /*
1855 * Help break down an mbuf chain by setting the first siz bytes contiguous
1856 * pointed to by returned val. This is used by the macros rpcm_dissect and
1857 * rpcm_dissecton for tough cases. (The macros use the vars. dpos and dpos2)
1858 */
static int
rpcm_disct(mdp, dposp, siz, left, cp2)
	struct mbuf **mdp;
	caddr_t *dposp;
	int siz;
	int left;
	caddr_t *cp2;
{
	struct mbuf *mp, *mp2;
	int siz2, xfer;
	caddr_t p;

	mp = *mdp;
	/* Skip over any fully consumed mbufs at the current position. */
	while (left == 0) {
		*mdp = mp = mp->m_next;
		if (mp == NULL)
			RPC_RETURN(EBADRPC);
		left = mp->m_len;
		*dposp = mtod(mp, caddr_t);
	}
	if (left >= siz) {
		/* Already contiguous: just hand back the pointer. */
		*cp2 = *dposp;
		*dposp += siz;
	} else if (mp->m_next == NULL) {
		RPC_RETURN(EBADRPC);
	} else if (siz > MHLEN) {
		panic("rpc S too big");
	} else {
		/*
		 * The siz bytes straddle mbufs: splice a fresh mbuf after
		 * the current one and gather the bytes into it.
		 */
		MGET(mp2, M_TRYWAIT, MT_DATA);
		mp2->m_next = mp->m_next;
		mp->m_next = mp2;
		mp->m_len -= left;	/* the leftover bytes move to mp2 */
		mp = mp2;
		*cp2 = p = mtod(mp, caddr_t);
		bcopy(*dposp, p, left);	/* Copy what was left */
		siz2 = siz - left;
		p += left;
		mp2 = mp->m_next;
		/* Loop around copying up the siz2 bytes */
		while (siz2 > 0) {
			if (mp2 == NULL)
				RPC_RETURN(EBADRPC);
			xfer = (siz2 > mp2->m_len) ? mp2->m_len : siz2;
			if (xfer > 0) {
				bcopy(mtod(mp2, caddr_t), p, xfer);
				RPCMADV(mp2, xfer);	/* advance m_data */
				mp2->m_len -= xfer;
				p += xfer;
				siz2 -= xfer;
			}
			if (siz2 > 0)
				mp2 = mp2->m_next;
		}
		mp->m_len = siz;
		/* Resume dissection at the first unconsumed source mbuf. */
		*mdp = mp2;
		*dposp = mtod(mp2, caddr_t);
	}
	RPC_RETURN(0);
}
1918
1919
1920
1921 static u_int32_t
1922 rpcclnt_proct(rpc, procid)
1923 struct rpcclnt *rpc;
1924 u_int32_t procid;
1925 {
1926 if (rpc->rc_proctlen != 0 && rpc->rc_proct != NULL &&
1927 procid < rpc->rc_proctlen) {
1928 return rpc->rc_proct[procid];
1929 }
1930 return (0);
1931 }
1932
1933 static int
1934 rpc_adv(mdp, dposp, offs, left)
1935 struct mbuf **mdp;
1936 caddr_t *dposp;
1937 int offs;
1938 int left;
1939 {
1940 struct mbuf *m;
1941 int s;
1942
1943 m = *mdp;
1944 s = left;
1945 while (s < offs) {
1946 offs -= s;
1947 m = m->m_next;
1948 if (m == NULL)
1949 RPC_RETURN(EBADRPC);
1950 s = m->m_len;
1951 }
1952 *mdp = m;
1953 *dposp = mtod(m, caddr_t) + offs;
1954 RPC_RETURN(0);
1955 }
1956
1957 int
1958 rpcclnt_cancelreqs(rpc)
1959 struct rpcclnt *rpc;
1960 {
1961 struct rpctask *task;
1962 int i, s;
1963
1964 s = splnet();
1965 TAILQ_FOREACH(task, &rpctask_q, r_chain) {
1966 if (rpc != task->r_rpcclnt || task->r_mrep != NULL ||
1967 (task->r_flags & R_SOFTTERM))
1968 continue;
1969 rpcclnt_softterm(task);
1970 }
1971 splx(s);
1972
1973 for (i = 0; i < 30; i++) {
1974 s = splnet();
1975 TAILQ_FOREACH(task, &rpctask_q, r_chain) {
1976 if (rpc == task->r_rpcclnt)
1977 break;
1978 }
1979 splx(s);
1980 if (task == NULL)
1981 return (0);
1982 tsleep(&lbolt, PSOCK, "nfscancel", 0);
1983 }
1984 return (EBUSY);
1985 }
1986
1987 static void
1988 rpcclnt_softterm(struct rpctask * task)
1989 {
1990 task->r_flags |= R_SOFTTERM;
1991 if (task->r_flags & R_SENT) {
1992 task->r_rpcclnt->rc_sent -= RPC_CWNDSCALE;
1993 task->r_flags &= ~R_SENT;
1994 }
1995 }
1996
1997
1998 #ifndef __OpenBSD__
1999 /* called by rpcclnt_get() */
void
rpcclnt_create(struct rpcclnt ** rpc)
{
	/* Zero-filled allocation; M_WAITOK means this cannot return NULL. */
	MALLOC(*rpc, struct rpcclnt *, sizeof(struct rpcclnt), M_RPC, M_WAITOK | M_ZERO);
}
2005
2006 /* called by rpcclnt_put() */
void
rpcclnt_destroy(struct rpcclnt * rpc)
{
	/* NULL is tolerated (logged) rather than passed to FREE(). */
	if (rpc != NULL) {
		FREE(rpc, M_RPC);
	} else {
		RPCDEBUG("attempting to free a NULL rpcclnt (not dereferenced)");
	}
}
2016 #endif /* !__OpenBSD__ */
2017
2018
2019 /* XXX: add a lock around the auth structure in struct rpcclnt and make this
2020 * call safe for calling durring a connection */
/*
 * Append the XDR-encoded credential and (null) verifier for `auth' to
 * the request header chain at *mhdr/*bp, updating both on return.
 * Returns 0, EFAULT on NULL arguments, or EPROTONOSUPPORT for an
 * unknown auth flavor.
 */
static int
rpcauth_buildheader(struct rpc_auth * auth, struct ucred * cred, struct mbuf ** mhdr, caddr_t * bp)
{
	size_t authsiz, verfsiz;
	uint32_t mlen, grpsiz;
	register struct mbuf *mb, *mb2;	/* mb/mb2/bpos/tl used by rpcm_build */
	caddr_t bpos;
	register u_int32_t *tl;
	register int i;

	if (auth == NULL || mhdr == NULL)
		return EFAULT;

	/* Size of the credential body, excluding flavor/length words. */
	switch (auth->auth_type) {
	case RPCAUTH_NULL:
		authsiz = 0;
		verfsiz = 0;
		break;
	case RPCAUTH_UNIX:
		/* stamp + machinename(len 0) + uid + gid + ngroups + groups */
		authsiz = (5 + cred->cr_ngroups) * RPCX_UNSIGNED;
		verfsiz = 0;
		break;
	default:
		return EPROTONOSUPPORT;
		break;
	};

	/* Plus 4 words: cred flavor/length and verf flavor/length. */
	mlen = rpcm_rndup(authsiz) + rpcm_rndup(verfsiz) + 4 * RPCX_UNSIGNED;

	mb = *mhdr;
	bpos = *bp;

	rpcm_build(tl, u_int32_t *, mlen);

	/* Hand the (possibly extended) chain position back to the caller. */
	*bp = bpos;
	*mhdr = mb;

	*tl++ = txdr_unsigned(auth->auth_type);
	*tl++ = txdr_unsigned(authsiz);
	switch (auth->auth_type) {
	case RPCAUTH_UNIX:
		*tl++ = 0;	/* stamp */
		*tl++ = 0;	/* zero-length machine name */

		*tl++ = txdr_unsigned(cred->cr_uid);
		*tl++ = txdr_unsigned(cred->cr_groups[0]);
		grpsiz = cred->cr_ngroups;
		*tl++ = txdr_unsigned(grpsiz);
		/* XXX: groups[0] is already sent... */
		for (i = 0 ; i < grpsiz ; i++) {
			*tl++ = txdr_unsigned(cred->cr_groups[i]);
		}

		/* null verification header */
		*tl++ = txdr_unsigned(RPCAUTH_NULL);
		*tl++ = 0;
		break;
	case RPCAUTH_NULL:
		/* just a null verf header */
		*tl++ = txdr_unsigned(RPCAUTH_NULL);
		*tl = 0;
		break;
	default:
		panic("inconsistent rpc auth type");
		break;
	}

	return 0;
}
Cache object: c7652b7c945b1b4cab4127dfa9389aac
|