FreeBSD/Linux Kernel Cross Reference
sys/rpc/rpcclnt.c
1 /* $FreeBSD: releng/6.4/sys/rpc/rpcclnt.c 154922 2006-01-27 18:22:11Z rees $ */
2 /* $Id: rpcclnt.c,v 1.9 2003/11/05 14:59:03 rees Exp $ */
3
4 /*-
5 * copyright (c) 2003
6 * the regents of the university of michigan
7 * all rights reserved
8 *
9 * permission is granted to use, copy, create derivative works and redistribute
10 * this software and such derivative works for any purpose, so long as the name
11 * of the university of michigan is not used in any advertising or publicity
12 * pertaining to the use or distribution of this software without specific,
13 * written prior authorization. if the above copyright notice or any other
14 * identification of the university of michigan is included in any copy of any
15 * portion of this software, then the disclaimer below must also be included.
16 *
17 * this software is provided as is, without representation from the university
18 * of michigan as to its fitness for any purpose, and without warranty by the
19 * university of michigan of any kind, either express or implied, including
20 * without limitation the implied warranties of merchantability and fitness for
21 * a particular purpose. the regents of the university of michigan shall not be
22 * liable for any damages, including special, indirect, incidental, or
23 * consequential damages, with respect to any claim arising out of or in
24 * connection with the use of the software, even if it has been or is hereafter
25 * advised of the possibility of such damages.
26 */
27
28 /*-
29 * Copyright (c) 1989, 1991, 1993, 1995 The Regents of the University of
30 * California. All rights reserved.
31 *
32 * This code is derived from software contributed to Berkeley by Rick Macklem at
33 * The University of Guelph.
34 *
35 * Redistribution and use in source and binary forms, with or without
36 * modification, are permitted provided that the following conditions are
37 * met: 1. Redistributions of source code must retain the above copyright
38 * notice, this list of conditions and the following disclaimer. 2.
39 * Redistributions in binary form must reproduce the above copyright notice,
40 * this list of conditions and the following disclaimer in the documentation
41 * and/or other materials provided with the distribution. 3. All advertising
42 * materials mentioning features or use of this software must display the
43 * following acknowledgement: This product includes software developed by the
44 * University of California, Berkeley and its contributors. 4. Neither the
45 * name of the University nor the names of its contributors may be used to
46 * endorse or promote products derived from this software without specific
47 * prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
50 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
51 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
52 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
53 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
55 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
56 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 *
61 * @(#)nfs_socket.c 8.5 (Berkeley) 3/30/95
62 */
63
64 /* XXX: kill ugly debug strings */
/* XXX: get rid of proct, as it is not even being used... (or keep it so v{2,3}
 * can run, but clean it up!) */
67
68 #include <sys/param.h>
69 #include <sys/systm.h>
70 #include <sys/proc.h>
71 #include <sys/mount.h>
72 #include <sys/kernel.h>
73 #include <sys/mbuf.h>
74 #include <sys/syslog.h>
75 #include <sys/malloc.h>
76 #include <sys/uio.h>
77 #include <sys/lock.h>
78 #include <sys/signalvar.h>
79 #include <sys/sysent.h>
80 #include <sys/syscall.h>
81 #include <sys/sysctl.h>
82
83 #include <sys/domain.h>
84 #include <sys/protosw.h>
85 #include <sys/socket.h>
86 #include <sys/socketvar.h>
87 #include <sys/mutex.h>
88
89 #include <netinet/in.h>
90 #include <netinet/tcp.h>
91
92 #include <nfs/rpcv2.h>
93
94 #include <rpc/rpcm_subs.h>
95 #include <rpc/rpcclnt.h>
96
97 /* memory management */
98 #ifdef __OpenBSD__
99 struct pool rpctask_pool;
100 struct pool rpcclnt_pool;
101 #define RPCTASKPOOL_LWM 10
102 #define RPCTASKPOOL_HWM 40
103 #else
104 static MALLOC_DEFINE(M_RPC, "rpcclnt", "rpc state");
105 #endif
106
107 #define RPC_RETURN(X) do { RPCDEBUG("returning %d", X); return X; }while(0)
108
/*
 * Estimate rto for an nfs rpc sent via an unreliable datagram. Use the mean
 * and mean deviation of rtt for the appropriate type of rpc for the frequent
 * rpcs and a default for the others. The justification for doing "other"
 * this way is that these rpcs happen so infrequently that timer estimates
 * would probably be stale. Also, since many of these rpcs are
 * non-idempotent, a conservative timeout is desired:
 * getattr, lookup - A+2D; read, write - A+4D; other - nm_timeo
 */
118 #define RPC_RTO(n, t) \
119 ((t) == 0 ? (n)->rc_timeo : \
120 ((t) < 3 ? \
121 (((((n)->rc_srtt[t-1] + 3) >> 2) + (n)->rc_sdrtt[t-1] + 1) >> 1) : \
122 ((((n)->rc_srtt[t-1] + 7) >> 3) + (n)->rc_sdrtt[t-1] + 1)))
123
124 #define RPC_SRTT(s,r) (r)->r_rpcclnt->rc_srtt[rpcclnt_proct((s),\
125 (r)->r_procnum) - 1]
126
127 #define RPC_SDRTT(s,r) (r)->r_rpcclnt->rc_sdrtt[rpcclnt_proct((s),\
128 (r)->r_procnum) - 1]
129
130
131 /*
132 * There is a congestion window for outstanding rpcs maintained per mount
133 * point. The cwnd size is adjusted in roughly the way that: Van Jacobson,
134 * Congestion avoidance and Control, In "Proceedings of SIGCOMM '88". ACM,
135 * August 1988. describes for TCP. The cwnd size is chopped in half on a
136 * retransmit timeout and incremented by 1/cwnd when each rpc reply is
137 * received and a full cwnd of rpcs is in progress. (The sent count and cwnd
138 * are scaled for integer arith.) Variants of "slow start" were tried and
139 * were found to be too much of a performance hit (ave. rtt 3 times larger),
140 * I suspect due to the large rtt that nfs rpcs have.
141 */
142 #define RPC_CWNDSCALE 256
143 #define RPC_MAXCWND (RPC_CWNDSCALE * 32)
144 static const int rpcclnt_backoff[8] = {2, 4, 8, 16, 32, 64, 128, 256,};
145
146 /* XXX ugly debug strings */
147 #define RPC_ERRSTR_ACCEPTED_SIZE 6
148 char *rpc_errstr_accepted[RPC_ERRSTR_ACCEPTED_SIZE] = {
149 "", /* no good message... */
150 "remote server hasn't exported program.",
151 "remote server can't support version number.",
152 "program can't support procedure.",
153 "procedure can't decode params.",
154 "remote error. remote side memory allocation failure?"
155 };
156
157 char *rpc_errstr_denied[2] = {
158 "remote server doesnt support rpc version 2!",
159 "remote server authentication error."
160 };
161
162 #define RPC_ERRSTR_AUTH_SIZE 6
163 char *rpc_errstr_auth[RPC_ERRSTR_AUTH_SIZE] = {
164 "",
165 "auth error: bad credential (seal broken).",
166 "auth error: client must begin new session.",
167 "auth error: bad verifier (seal broken).",
168 "auth error: verifier expired or replayed.",
169 "auth error: rejected for security reasons.",
170 };
171
172 /*
173 * Static data, mostly RPC constants in XDR form
174 */
175 static u_int32_t rpc_reply, rpc_call, rpc_vers;
176
177 /*
178 * rpc_msgdenied, rpc_mismatch, rpc_auth_unix, rpc_msgaccepted,
179 * rpc_autherr, rpc_auth_kerb;
180 */
181
182 static u_int32_t rpcclnt_xid = 0;
183 static u_int32_t rpcclnt_xid_touched = 0;
184 struct rpcstats rpcstats;
185 int rpcclnt_ticks;
186
187 SYSCTL_NODE(_kern, OID_AUTO, rpc, CTLFLAG_RD, 0, "RPC Subsystem");
188
189 SYSCTL_UINT(_kern_rpc, OID_AUTO, retries, CTLFLAG_RD, &rpcstats.rpcretries, 0, "retries");
190 SYSCTL_UINT(_kern_rpc, OID_AUTO, request, CTLFLAG_RD, &rpcstats.rpcrequests, 0, "request");
191 SYSCTL_UINT(_kern_rpc, OID_AUTO, timeouts, CTLFLAG_RD, &rpcstats.rpctimeouts, 0, "timeouts");
192 SYSCTL_UINT(_kern_rpc, OID_AUTO, unexpected, CTLFLAG_RD, &rpcstats.rpcunexpected, 0, "unexpected");
193 SYSCTL_UINT(_kern_rpc, OID_AUTO, invalid, CTLFLAG_RD, &rpcstats.rpcinvalid, 0, "invalid");
194
195
196 #ifdef RPCCLNT_DEBUG
197 int rpcdebugon = 0;
198 SYSCTL_UINT(_kern_rpc, OID_AUTO, debug_on, CTLFLAG_RW, &rpcdebugon, 0, "RPC Debug messages");
199 #endif
200
201 /*
202 * Queue head for rpctask's
203 */
204 static
205 TAILQ_HEAD(, rpctask) rpctask_q;
206 struct callout rpcclnt_callout;
207
208 #ifdef __OpenBSD__
209 static int rpcclnt_send(struct socket *, struct mbuf *, struct mbuf *, struct rpctask *);
210 static int rpcclnt_receive(struct rpctask *, struct mbuf **, struct mbuf **, RPC_EXEC_CTX);
211 #else
212 static int rpcclnt_send(struct socket *, struct sockaddr *, struct mbuf *, struct rpctask *);
213 static int rpcclnt_receive(struct rpctask *, struct sockaddr **, struct mbuf **, RPC_EXEC_CTX);
214 #endif
215
216 static int rpcclnt_msg(RPC_EXEC_CTX, const char *, char *);
217
218 static int rpcclnt_reply(struct rpctask *, RPC_EXEC_CTX);
219 static void rpcclnt_timer(void *);
220 static int rpcclnt_sndlock(int *, struct rpctask *);
221 static void rpcclnt_sndunlock(int *);
222 static int rpcclnt_rcvlock(struct rpctask *);
223 static void rpcclnt_rcvunlock(int *);
224 #if 0
225 void rpcclnt_realign(struct mbuf *, int);
226 #else
227 static void rpcclnt_realign(struct mbuf **, int);
228 #endif
229
230 static struct mbuf *rpcclnt_buildheader(struct rpcclnt *, int, struct mbuf *, u_int32_t, int *, struct mbuf **, struct ucred *);
231 static int rpcm_disct(struct mbuf **, caddr_t *, int, int, caddr_t *);
232 static u_int32_t rpcclnt_proct(struct rpcclnt *, u_int32_t);
233 static int rpc_adv(struct mbuf **, caddr_t *, int, int);
234 static void rpcclnt_softterm(struct rpctask * task);
235
236 static int rpcauth_buildheader(struct rpc_auth * auth, struct ucred *, struct mbuf **, caddr_t *);
237
238 void
239 rpcclnt_init(void)
240 {
241 #ifdef __OpenBSD__
242 static struct timeout rpcclnt_timer_to;
243 #endif
244
245 rpcclnt_ticks = (hz * RPC_TICKINTVL + 500) / 1000;
246 if (rpcclnt_ticks < 1)
247 rpcclnt_ticks = 1;
248 rpcstats.rpcretries = 0;
249 rpcstats.rpcrequests = 0;
250 rpcstats.rpctimeouts = 0;
251 rpcstats.rpcunexpected = 0;
252 rpcstats.rpcinvalid = 0;
253
254 /*
255 * rpc constants how about actually using more than one of these!
256 */
257
258 rpc_reply = txdr_unsigned(RPC_REPLY);
259 rpc_vers = txdr_unsigned(RPC_VER2);
260 rpc_call = txdr_unsigned(RPC_CALL);
261 #if 0
262 rpc_msgdenied = txdr_unsigned(RPC_MSGDENIED);
263 rpc_msgaccepted = txdr_unsigned(RPC_MSGACCEPTED);
264 rpc_mismatch = txdr_unsigned(RPC_MISMATCH);
265 rpc_autherr = txdr_unsigned(RPC_AUTHERR);
266 rpc_auth_unix = txdr_unsigned(RPCAUTH_UNIX);
267 rpc_auth_kerb = txdr_unsigned(RPCAUTH_KERB4);
268 #endif
269
270 /* initialize rpctask queue */
271 TAILQ_INIT(&rpctask_q);
272
273 #ifdef __OpenBSD__
274 /* initialize pools */
275 pool_init(&rpctask_pool, sizeof(struct rpctask), 0, 0, RPCTASKPOOL_LWM,
276 "rpctask_p", NULL);
277 pool_setlowat(&rpctask_pool, RPCTASKPOOL_LWM);
278 pool_sethiwat(&rpctask_pool, RPCTASKPOOL_HWM);
279
280 pool_init(&rpcclnt_pool, sizeof(struct rpcclnt), 0, 0, 1, "rpcclnt_p", NULL);
281
282 /* initialize timers */
283 timeout_set(&rpcclnt_timer_to, rpcclnt_timer, &rpcclnt_timer_to);
284 rpcclnt_timer(&rpcclnt_timer_to);
285 #else /* !__OpenBSD__ */
286 callout_init(&rpcclnt_callout, 0);
287 #endif /* !__OpenBSD__ */
288
289 RPCDEBUG("rpc initialed");
290
291 return;
292 }
293
294 void
295 rpcclnt_uninit(void)
296 {
297 RPCDEBUG("uninit");
298 /* XXX delete sysctl variables? */
299 callout_stop(&rpcclnt_callout);
300 }
301
302 int
303 rpcclnt_setup(clnt, program, addr, sotype, soproto, auth, max_read_size, max_write_size, flags)
304 struct rpcclnt * clnt;
305 struct rpc_program * program;
306 struct sockaddr * addr;
307 int sotype;
308 int soproto;
309 struct rpc_auth * auth;
310 int max_read_size;
311 int max_write_size;
312 int flags;
313 {
314 if (clnt == NULL || program == NULL || addr == NULL || auth == NULL)
315 RPC_RETURN (EFAULT);
316
317 if (program->prog_name == NULL)
318 RPC_RETURN (EFAULT);
319 clnt->rc_prog = program;
320
321 clnt->rc_name = addr;
322 clnt->rc_sotype = sotype;
323 clnt->rc_soproto = soproto;
324 clnt->rc_auth = auth;
325 clnt->rc_rsize = max_read_size;
326 clnt->rc_wsize = max_write_size;
327 clnt->rc_flag = flags;
328
329 clnt->rc_proctlen = 0;
330 clnt->rc_proct = NULL;
331
332 RPC_RETURN (0);
333 }
334
335 /*
336 * Initialize sockets and congestion for a new RPC connection. We do not free
337 * the sockaddr if error.
338 */
int
rpcclnt_connect(rpc, td)
	struct rpcclnt *rpc;
	RPC_EXEC_CTX td;
{
	struct socket *so;
	int s, error, rcvreserve, sndreserve;
	struct sockaddr *saddr;

#ifdef __OpenBSD__
	struct sockaddr_in *sin;
	struct mbuf *m;
#else
	struct sockaddr_in sin;

	int soarg;
	struct sockopt opt;
#endif

	if (rpc == NULL) {
		RPCDEBUG("no rpcclnt struct!\n");
		RPC_RETURN(EFAULT);
	}

	GIANT_REQUIRED;		/* XXX until socket locking done */

	/* create the socket */
	rpc->rc_so = NULL;

	saddr = rpc->rc_name;

	/* Socket type/protocol come from the values set in rpcclnt_setup(). */
	error = socreate(saddr->sa_family, &rpc->rc_so, rpc->rc_sotype,
	    rpc->rc_soproto, td->td_ucred, td);

	if (error) {
		RPCDEBUG("error %d in socreate()", error);
		RPC_RETURN(error);
	}
	so = rpc->rc_so;
	rpc->rc_soflags = so->so_proto->pr_flags;

	/*
	 * Some servers require that the client port be a reserved port
	 * number. We always allocate a reserved port, as this prevents
	 * filehandle disclosure through UDP port capture.
	 */
	if (saddr->sa_family == AF_INET) {
#ifdef __OpenBSD__
		struct mbuf *mopt;
		int *ip;
#endif

		/* Ask the stack for a low (reserved, < 1024) local port. */
#ifdef __OpenBSD__
		MGET(mopt, M_TRYWAIT, MT_SOOPTS);
		mopt->m_len = sizeof(int);
		ip = mtod(mopt, int *);
		*ip = IP_PORTRANGE_LOW;

		error = sosetopt(so, IPPROTO_IP, IP_PORTRANGE, mopt);
#else
		soarg = IP_PORTRANGE_LOW;
		bzero(&opt, sizeof(struct sockopt));
		opt.sopt_dir = SOPT_SET;
		opt.sopt_level = IPPROTO_IP;
		opt.sopt_name = IP_PORTRANGE;
		opt.sopt_val = &soarg;
		opt.sopt_valsize = sizeof(soarg);

		error = sosetopt(so, &opt);
#endif
		if (error)
			goto bad;

		/* Bind to a wildcard address/port within the low range. */
#if __OpenBSD__
		MGET(m, M_TRYWAIT, MT_SONAME);
		sin = mtod(m, struct sockaddr_in *);
		sin->sin_len = m->m_len = sizeof(struct sockaddr_in);
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = INADDR_ANY;
		sin->sin_port = htons(0);
		error = sobind(so, m);
		m_freem(m);
#else
		sin.sin_len = sizeof(struct sockaddr_in);
		sin.sin_family = AF_INET;
		sin.sin_addr.s_addr = INADDR_ANY;
		sin.sin_port = htons(0);
		/*
		 * &thread0 gives us root credentials to ensure sobind
		 * will give us a reserved ephemeral port.
		 */
		error = sobind(so, (struct sockaddr *) & sin, &thread0);
#endif
		if (error)
			goto bad;

		/* Restore the default port range on the socket afterwards. */
#if __OpenBSD__
		MGET(mopt, M_TRYWAIT, MT_SOOPTS);
		mopt->m_len = sizeof(int);
		ip = mtod(mopt, int *);
		*ip = IP_PORTRANGE_DEFAULT;
		error = sosetopt(so, IPPROTO_IP, IP_PORTRANGE, mopt);
#else
		soarg = IP_PORTRANGE_DEFAULT;
		bzero(&opt, sizeof(struct sockopt));
		opt.sopt_dir = SOPT_SET;
		opt.sopt_level = IPPROTO_IP;
		opt.sopt_name = IP_PORTRANGE;
		opt.sopt_val = &soarg;
		opt.sopt_valsize = sizeof(soarg);
		error = sosetopt(so, &opt);
#endif
		if (error)
			goto bad;
	}
	/*
	 * Protocols that do not require connections may be optionally left
	 * unconnected for servers that reply from a port other than
	 * NFS_PORT.
	 */
	if (rpc->rc_flag & RPCCLNT_NOCONN) {
		if (rpc->rc_soflags & PR_CONNREQUIRED) {
			error = ENOTCONN;
			goto bad;
		}
	} else {
		error = soconnect(so, saddr, td);
		if (error)
			goto bad;

		/*
		 * Wait for the connection to complete. Cribbed from the
		 * connect system call but with the wait timing out so that
		 * interruptible mounts don't hang here for a long time.
		 */
#ifdef __OpenBSD__
		s = splsoftnet();
#else
		s = splnet();
#endif
		/* Poll every 2s until the connect finishes or errors out. */
		while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
			(void)tsleep((caddr_t) & so->so_timeo, PSOCK,
			    "rpc", 2 * hz);

			/*
			 * XXX needs to catch interrupt signals. something
			 * like this: if ((so->so_state & SS_ISCONNECTING) &&
			 * so->so_error == 0 && rep && (error =
			 * nfs_sigintr(nmp, rep, rep->r_td)) != 0) {
			 * so->so_state &= ~SS_ISCONNECTING; splx(s); goto
			 * bad; }
			 */
		}
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			splx(s);
			goto bad;
		}
		splx(s);
	}
	/* Soft/interruptible mounts get 5s buffer timeouts; else block. */
	if (rpc->rc_flag & (RPCCLNT_SOFT | RPCCLNT_INT)) {
		so->so_rcv.sb_timeo = (5 * hz);
		so->so_snd.sb_timeo = (5 * hz);
	} else {
		so->so_rcv.sb_timeo = 0;
		so->so_snd.sb_timeo = 0;
	}


	/*
	 * Size the socket buffers: one full request/reply for datagram,
	 * two for seqpacket/stream (stream also needs room for the
	 * 4-byte RPC record mark per message).
	 */
	if (rpc->rc_sotype == SOCK_DGRAM) {
		sndreserve = rpc->rc_wsize + RPC_MAXPKTHDR;
		rcvreserve = rpc->rc_rsize + RPC_MAXPKTHDR;
	} else if (rpc->rc_sotype == SOCK_SEQPACKET) {
		sndreserve = (rpc->rc_wsize + RPC_MAXPKTHDR) * 2;
		rcvreserve = (rpc->rc_rsize + RPC_MAXPKTHDR) * 2;
	} else {
		if (rpc->rc_sotype != SOCK_STREAM)
			panic("rpcclnt_connect() bad sotype");
		if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
			/* Detect dead peers on long-lived connections. */
#ifdef __OpenBSD__
			MGET(m, M_TRYWAIT, MT_SOOPTS);
			*mtod(m, int32_t *) = 1;
			m->m_len = sizeof(int32_t);
			sosetopt(so, SOL_SOCKET, SO_KEEPALIVE, m);
#else
			soarg = 1;

			bzero(&opt, sizeof(struct sockopt));
			opt.sopt_dir = SOPT_SET;
			opt.sopt_level = SOL_SOCKET;
			opt.sopt_name = SO_KEEPALIVE;
			opt.sopt_val = &soarg;
			opt.sopt_valsize = sizeof(soarg);
			sosetopt(so, &opt);
#endif
		}
		if (so->so_proto->pr_protocol == IPPROTO_TCP) {
			/* Don't let Nagle delay small RPC requests. */
#ifdef __OpenBSD__
			MGET(m, M_TRYWAIT, MT_SOOPTS);
			*mtod(m, int32_t *) = 1;
			m->m_len = sizeof(int32_t);
			sosetopt(so, IPPROTO_TCP, TCP_NODELAY, m);
#else
			soarg = 1;

			bzero(&opt, sizeof(struct sockopt));
			opt.sopt_dir = SOPT_SET;
			opt.sopt_level = IPPROTO_TCP;
			opt.sopt_name = TCP_NODELAY;
			opt.sopt_val = &soarg;
			opt.sopt_valsize = sizeof(soarg);
			sosetopt(so, &opt);
#endif
		}
		sndreserve = (rpc->rc_wsize + RPC_MAXPKTHDR +
		    sizeof(u_int32_t)) * 2;
		rcvreserve = (rpc->rc_rsize + RPC_MAXPKTHDR +
		    sizeof(u_int32_t)) * 2;
	}
	error = soreserve(so, sndreserve, rcvreserve);
	if (error)
		goto bad;
	so->so_rcv.sb_flags |= SB_NOINTR;
	so->so_snd.sb_flags |= SB_NOINTR;

	/* Initialize other non-zero congestion variables */
	rpc->rc_srtt[0] = rpc->rc_srtt[1] = rpc->rc_srtt[2] =
	    rpc->rc_srtt[3] = (RPC_TIMEO << 3);
	rpc->rc_sdrtt[0] = rpc->rc_sdrtt[1] = rpc->rc_sdrtt[2] =
	    rpc->rc_sdrtt[3] = 0;
	rpc->rc_cwnd = RPC_MAXCWND / 2;	/* Initial send window */
	rpc->rc_sent = 0;
	rpc->rc_timeouts = 0;
	RPC_RETURN(0);

bad:
	/* Per the header comment, rc_name is NOT freed on error. */
	rpcclnt_disconnect(rpc);
	RPC_RETURN(error);
}
579
580
581 /*
582 * Reconnect routine: Called when a connection is broken on a reliable
583 * protocol. - clean up the old socket - nfs_connect() again - set
584 * R_MUSTRESEND for all outstanding requests on mount point If this fails the
585 * mount point is DEAD! nb: Must be called with the nfs_sndlock() set on the
586 * mount point.
587 */
588 int
589 rpcclnt_reconnect(rep, td)
590 struct rpctask *rep;
591 RPC_EXEC_CTX td;
592 {
593 struct rpctask *rp;
594 struct rpcclnt *rpc = rep->r_rpcclnt;
595 int error;
596
597 rpcclnt_disconnect(rpc);
598 while ((error = rpcclnt_connect(rpc, td)) != 0) {
599 if (error == EINTR || error == ERESTART)
600 RPC_RETURN(EINTR);
601 tsleep(&lbolt, PSOCK, "rpccon", 0);
602 }
603
604 /*
605 * Loop through outstanding request list and fix up all requests on
606 * old socket.
607 */
608 for (rp = TAILQ_FIRST(&rpctask_q); rp != NULL;
609 rp = TAILQ_NEXT(rp, r_chain)) {
610 if (rp->r_rpcclnt == rpc)
611 rp->r_flags |= R_MUSTRESEND;
612 }
613 RPC_RETURN(0);
614 }
615
616 /*
617 * NFS disconnect. Clean up and unlink.
618 */
619 void
620 rpcclnt_disconnect(rpc)
621 struct rpcclnt *rpc;
622 {
623 struct socket *so;
624
625 GIANT_REQUIRED; /* XXX until socket locking done */
626
627 if (rpc->rc_so) {
628 so = rpc->rc_so;
629 rpc->rc_so = NULL;
630 soshutdown(so, 2);
631 soclose(so);
632 }
633 }
634
635 void
636 rpcclnt_safedisconnect(struct rpcclnt * rpc)
637 {
638 struct rpctask dummytask;
639
640 bzero(&dummytask, sizeof(dummytask));
641 dummytask.r_rpcclnt = rpc;
642 rpcclnt_rcvlock(&dummytask);
643 rpcclnt_disconnect(rpc);
644 rpcclnt_rcvunlock(&rpc->rc_flag);
645 }
646
647 /*
648 * This is the nfs send routine. For connection based socket types, it must
649 * be called with an nfs_sndlock() on the socket. "rep == NULL" indicates
650 * that it has been called from a server. For the client side: - return EINTR
651 * if the RPC is terminated, 0 otherwise - set R_MUSTRESEND if the send fails
652 * for any reason - do any cleanup required by recoverable socket errors
653 * (???) For the server side: - return EINTR or ERESTART if interrupted by a
654 * signal - return EPIPE if a connection is lost for connection based sockets
655 * (TCP...) - do any cleanup required by recoverable socket errors (???)
656 */
static int
rpcclnt_send(so, nam, top, rep)
	struct socket *so;
#ifdef __OpenBSD__
	struct mbuf *nam;
#else
	struct sockaddr *nam;
#endif
	struct mbuf *top;
	struct rpctask *rep;
{
#ifdef __OpenBSD__
	struct mbuf *sendnam;
#else
	struct sockaddr *sendnam;
	struct thread *td = curthread;
#endif
	int error, soflags, flags;

	GIANT_REQUIRED;		/* XXX until socket locking done */

	if (rep) {
		/* Task already soft-terminated: drop the mbuf, bail out. */
		if (rep->r_flags & R_SOFTTERM) {
			m_freem(top);
			RPC_RETURN(EINTR);
		}
		/*
		 * No socket right now (reconnect in progress?): flag the
		 * task for retransmission and report success; note that
		 * 'top' is consumed on every path through this function.
		 */
		if ((so = rep->r_rpcclnt->rc_so) == NULL) {
			rep->r_flags |= R_MUSTRESEND;
			m_freem(top);
			RPC_RETURN(0);
		}
		rep->r_flags &= ~R_MUSTRESEND;
		soflags = rep->r_rpcclnt->rc_soflags;
	} else
		soflags = so->so_proto->pr_flags;

	/*
	 * Connection-oriented or already-connected sockets need no
	 * destination address; otherwise send to 'nam'.
	 */
	if ((soflags & PR_CONNREQUIRED) || (so->so_state & SS_ISCONNECTED))
		sendnam = NULL;
	else
		sendnam = nam;

	/* Mark the record boundary on seqpacket sockets. */
	if (so->so_type == SOCK_SEQPACKET)
		flags = MSG_EOR;
	else
		flags = 0;

	error = sosend(so, sendnam, NULL, top, NULL, flags, td);

	if (error) {
		if (rep) {
			log(LOG_INFO, "rpc send error %d for service %s\n", error,
			    rep->r_rpcclnt->rc_prog->prog_name);
			/*
			 * Deal with errors for the client side.
			 */
			if (rep->r_flags & R_SOFTTERM)
				error = EINTR;
			else
				rep->r_flags |= R_MUSTRESEND;
		} else
			log(LOG_INFO, "rpc service send error %d\n", error);

		/*
		 * Handle any recoverable (soft) socket errors here.
		 * Only EINTR/ERESTART/EWOULDBLOCK/EPIPE propagate to the
		 * caller; everything else is swallowed (resend will retry).
		 */
		if (error != EINTR && error != ERESTART &&
		    error != EWOULDBLOCK && error != EPIPE)
			error = 0;
	}
	RPC_RETURN(error);
}
728
729 /*
730 * Receive a Sun RPC Request/Reply. For SOCK_DGRAM, the work is all done by
731 * soreceive(), but for SOCK_STREAM we must deal with the Record Mark and
732 * consolidate the data into a new mbuf list. nb: Sometimes TCP passes the
733 * data up to soreceive() in long lists of small mbufs. For SOCK_STREAM we
734 * must be very careful to read an entire record once we have read any of it,
735 * even if the system call has been interrupted.
736 */
static int
rpcclnt_receive(rep, aname, mp, td)
	struct rpctask *rep;
#ifdef __OpenBSD__
	struct mbuf **aname;
#else
	struct sockaddr **aname;
#endif
	struct mbuf **mp;
	RPC_EXEC_CTX td;
{
	struct socket *so;
	struct uio auio;
	struct iovec aio;
	struct mbuf *m;
	struct mbuf *control;
	u_int32_t len;
#ifdef __OpenBSD__
	struct mbuf **getnam;
#else
	struct sockaddr **getnam;
#endif
	int error, sotype, rcvflg;

	GIANT_REQUIRED;		/* XXX until socket locking done */

	/*
	 * Set up arguments for soreceive()
	 */
	*mp = NULL;
	*aname = NULL;
	sotype = rep->r_rpcclnt->rc_sotype;

	/*
	 * For reliable protocols, lock against other senders/receivers in
	 * case a reconnect is necessary. For SOCK_STREAM, first get the
	 * Record Mark to find out how much more there is to get. We must
	 * lock the socket against other receivers until we have an entire
	 * rpc request/reply.
	 */
	if (sotype != SOCK_DGRAM) {
		error = rpcclnt_sndlock(&rep->r_rpcclnt->rc_flag, rep);
		if (error)
			RPC_RETURN(error);
tryagain:
		/*
		 * Check for fatal errors and resending request.
		 */
		/*
		 * Ugh: If a reconnect attempt just happened, rc_so would
		 * have changed. NULL indicates a failed attempt that has
		 * essentially shut down this mount point.
		 */
		if (rep->r_mrep || (rep->r_flags & R_SOFTTERM)) {
			rpcclnt_sndunlock(&rep->r_rpcclnt->rc_flag);
			RPC_RETURN(EINTR);
		}
		so = rep->r_rpcclnt->rc_so;
		if (!so) {
			error = rpcclnt_reconnect(rep, td);
			if (error) {
				rpcclnt_sndunlock(&rep->r_rpcclnt->rc_flag);
				RPC_RETURN(error);
			}
			goto tryagain;
		}
		/* Retransmit any request flagged by a prior reconnect. */
		while (rep->r_flags & R_MUSTRESEND) {
			m = m_copym(rep->r_mreq, 0, M_COPYALL, M_TRYWAIT);
			rpcstats.rpcretries++;
			error = rpcclnt_send(so, rep->r_rpcclnt->rc_name, m, rep);
			if (error) {
				if (error == EINTR || error == ERESTART ||
				    (error = rpcclnt_reconnect(rep, td)) != 0) {
					rpcclnt_sndunlock(&rep->r_rpcclnt->rc_flag);
					RPC_RETURN(error);
				}
				goto tryagain;
			}
		}
		rpcclnt_sndunlock(&rep->r_rpcclnt->rc_flag);
		if (sotype == SOCK_STREAM) {
			/* Read the 4-byte RPC record mark first. */
			aio.iov_base = (caddr_t) & len;
			aio.iov_len = sizeof(u_int32_t);
			auio.uio_iov = &aio;
			auio.uio_iovcnt = 1;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_READ;
			auio.uio_offset = 0;
			auio.uio_resid = sizeof(u_int32_t);
#ifdef __OpenBSD__
			auio.uio_procp = td;
#else
			auio.uio_td = td;
#endif
			do {
				rcvflg = MSG_WAITALL;
				error = soreceive(so, NULL, &auio, NULL, NULL, &rcvflg);
				if (error == EWOULDBLOCK && rep) {
					if (rep->r_flags & R_SOFTTERM)
						RPC_RETURN(EINTR);
				}
			} while (error == EWOULDBLOCK);
			if (!error && auio.uio_resid > 0) {
				log(LOG_INFO,
				    "short receive (%zu/%zu) from rpc server %s\n",
				    sizeof(u_int32_t) - auio.uio_resid,
				    sizeof(u_int32_t),
				    rep->r_rpcclnt->rc_prog->prog_name);
				error = EPIPE;
			}
			if (error)
				goto errout;
			/* Mask off the last-fragment bit of the record mark. */
			len = ntohl(len) & ~0x80000000;
			/*
			 * This is SERIOUS! We are out of sync with the
			 * sender and forcing a disconnect/reconnect is all I
			 * can do.
			 */
			if (len > RPC_MAXPACKET) {
				log(LOG_ERR, "%s (%d) from rpc server %s\n",
				    "impossible packet length",
				    len,
				    rep->r_rpcclnt->rc_prog->prog_name);
				error = EFBIG;
				goto errout;
			}
			/* Now read the record body, retrying past signals. */
			auio.uio_resid = len;
			do {
				rcvflg = MSG_WAITALL;
				error = soreceive(so, NULL, &auio, mp, NULL, &rcvflg);
			} while (error == EWOULDBLOCK || error == EINTR ||
			    error == ERESTART);
			if (!error && auio.uio_resid > 0) {
				log(LOG_INFO,
				    "short receive (%d/%d) from rpc server %s\n",
				    len - auio.uio_resid, len,
				    rep->r_rpcclnt->rc_prog->prog_name);
				error = EPIPE;
			}
		} else {
			/*
			 * NB: Since uio_resid is big, MSG_WAITALL is ignored
			 * and soreceive() will return when it has either a
			 * control msg or a data msg. We have no use for
			 * control msg., but must grab them and then throw
			 * them away so we know what is going on.
			 */
			auio.uio_resid = len = 100000000;	/* Anything Big */
#ifdef __OpenBSD__
			auio.uio_procp = td;
#else
			auio.uio_td = td;
#endif
			do {
				rcvflg = 0;
				error = soreceive(so, NULL, &auio, mp, &control, &rcvflg);
				if (control)
					m_freem(control);
				if (error == EWOULDBLOCK && rep) {
					if (rep->r_flags & R_SOFTTERM)
						RPC_RETURN(EINTR);
				}
			} while (error == EWOULDBLOCK ||
			    (!error && *mp == NULL && control));
			if ((rcvflg & MSG_EOR) == 0)
				printf("Egad!!\n");
			if (!error && *mp == NULL)
				error = EPIPE;
			len -= auio.uio_resid;
		}
errout:
		/* On hard errors, drop the partial reply and reconnect. */
		if (error && error != EINTR && error != ERESTART) {
			m_freem(*mp);
			*mp = (struct mbuf *) 0;
			if (error != EPIPE)
				log(LOG_INFO,
				    "receive error %d from rpc server %s\n",
				    error,
				    rep->r_rpcclnt->rc_prog->prog_name);
			error = rpcclnt_sndlock(&rep->r_rpcclnt->rc_flag, rep);
			if (!error)
				error = rpcclnt_reconnect(rep, td);
			if (!error)
				goto tryagain;
		}
	} else {
		/* Datagram socket: one soreceive() returns one packet. */
		if ((so = rep->r_rpcclnt->rc_so) == NULL)
			RPC_RETURN(EACCES);
		/* Only collect the peer address on unconnected sockets. */
		if (so->so_state & SS_ISCONNECTED)
			getnam = NULL;
		else
			getnam = aname;
		auio.uio_resid = len = 1000000;
#ifdef __OpenBSD__
		auio.uio_procp = td;
#else
		auio.uio_td = td;
#endif

		do {
			rcvflg = 0;
			error = soreceive(so, getnam, &auio, mp, NULL, &rcvflg);
			RPCDEBUG("soreceivce returns %d", error);
			if (error == EWOULDBLOCK && (rep->r_flags & R_SOFTTERM)) {
				RPCDEBUG("wouldblock && softerm -> EINTR");
				RPC_RETURN(EINTR);
			}
		} while (error == EWOULDBLOCK);
		len -= auio.uio_resid;
	}
	if (error) {
		m_freem(*mp);
		*mp = NULL;
	} else {
		/*
		 * Search for any mbufs that are not a multiple of 4 bytes
		 * long or with m_data not longword aligned. These could
		 * cause pointer alignment problems, so copy them to well
		 * aligned mbufs.
		 */
		rpcclnt_realign(mp, 5 * RPCX_UNSIGNED);
	}
	RPC_RETURN(error);
}
961
962
963 /*
964 * Implement receipt of reply on a socket. We must search through the list of
965 * received datagrams matching them with outstanding requests using the xid,
966 * until ours is found.
967 */
968 /* ARGSUSED */
static int
rpcclnt_reply(myrep, td)
	struct rpctask *myrep;
	RPC_EXEC_CTX td;
{
	struct rpctask *rep;
	struct rpcclnt *rpc = myrep->r_rpcclnt;
	int32_t t1;
	struct mbuf *mrep, *md;
#ifdef __OpenBSD__
	struct mbuf *nam;
#else
	struct sockaddr *nam;
#endif
	u_int32_t rxid, *tl;
	/* cp2/t1 are scratch used by the rpcm_dissect() macro below. */
	caddr_t dpos, cp2;
	int error;

	/*
	 * Loop around until we get our own reply
	 */
	for (;;) {
		/*
		 * Lock against other receivers so that I don't get stuck in
		 * sbwait() after someone else has received my reply for me.
		 * Also necessary for connection based protocols to avoid
		 * race conditions during a reconnect.
		 */
		error = rpcclnt_rcvlock(myrep);
		if (error)
			RPC_RETURN(error);
		/* Already received, bye bye */
		if (myrep->r_mrep != NULL) {
			rpcclnt_rcvunlock(&rpc->rc_flag);
			RPC_RETURN(0);
		}
		/*
		 * Get the next Rpc reply off the socket
		 */
		error = rpcclnt_receive(myrep, &nam, &mrep, td);

		rpcclnt_rcvunlock(&rpc->rc_flag);

		if (error) {
			/*
			 * Ignore routing errors on connectionless
			 * protocols??
			 */
			if (RPCIGNORE_SOERROR(rpc->rc_soflags, error)) {
				rpc->rc_so->so_error = 0;
				if (myrep->r_flags & R_GETONEREP)
					RPC_RETURN(0);
				RPCDEBUG("ingoring routing error on connectionless protocol.");
				continue;
			}
			RPC_RETURN(error);
		}
		/* Discard the sender address collected by rpcclnt_receive(). */
#ifdef __OpenBSD__
		if (nam)
			m_freem(nam);
#else
		if (nam)
			FREE(nam, M_SONAME);
#endif

		/*
		 * Get the xid and check that it is an rpc reply
		 */
		md = mrep;
		dpos = mtod(md, caddr_t);
		/*
		 * NOTE(review): rpcm_dissect() (rpcm_subs.h, not visible
		 * here) appears to advance md/dpos and jump to the rpcmout
		 * label on parse failure -- confirm against the macro.
		 */
		rpcm_dissect(tl, u_int32_t *, 2 * RPCX_UNSIGNED);
		rxid = *tl++;
		if (*tl != rpc_reply) {
			rpcstats.rpcinvalid++;
			m_freem(mrep);
rpcmout:
			if (myrep->r_flags & R_GETONEREP)
				RPC_RETURN(0);
			continue;
		}
		/*
		 * Loop through the request list to match up the reply Iff no
		 * match, just drop the datagram
		 */
		TAILQ_FOREACH(rep, &rpctask_q, r_chain) {
			if (rep->r_mrep == NULL && rxid == rep->r_xid) {
				/* Found it.. */
				rep->r_mrep = mrep;
				rep->r_md = md;
				rep->r_dpos = dpos;

				/*
				 * Update congestion window. Do the additive
				 * increase of one rpc/rtt.
				 */
				if (rpc->rc_cwnd <= rpc->rc_sent) {
					rpc->rc_cwnd +=
					    (RPC_CWNDSCALE * RPC_CWNDSCALE +
					    (rpc->rc_cwnd >> 1)) / rpc->rc_cwnd;
					if (rpc->rc_cwnd > RPC_MAXCWND)
						rpc->rc_cwnd = RPC_MAXCWND;
				}
				rep->r_flags &= ~R_SENT;
				rpc->rc_sent -= RPC_CWNDSCALE;
				/*
				 * Update rtt using a gain of 0.125 on the
				 * mean and a gain of 0.25 on the deviation.
				 */
				if (rep->r_flags & R_TIMING) {
					/*
					 * Since the timer resolution of
					 * NFS_HZ is so course, it can often
					 * result in r_rtt == 0. Since r_rtt
					 * == N means that the actual rtt is
					 * between N+dt and N+2-dt ticks, add
					 * 1.
					 */
					t1 = rep->r_rtt + 1;
					t1 -= (RPC_SRTT(rpc, rep) >> 3);
					RPC_SRTT(rpc, rep) += t1;
					if (t1 < 0)
						t1 = -t1;
					t1 -= (RPC_SDRTT(rpc, rep) >> 2);
					RPC_SDRTT(rpc, rep) += t1;
				}
				rpc->rc_timeouts = 0;
				break;
			}
		}
		/*
		 * If not matched to a request, drop it. If it's mine, get
		 * out.
		 */
		if (rep == 0) {
			rpcstats.rpcunexpected++;
			RPCDEBUG("rpc reply not matched\n");
			m_freem(mrep);
		} else if (rep == myrep) {
			if (rep->r_mrep == NULL)
				panic("rpcreply nil");
			RPC_RETURN(0);
		}
		/* Caller only wanted one reply (any reply): stop here. */
		if (myrep->r_flags & R_GETONEREP)
			RPC_RETURN(0);
	}
}
1115
1116 /* XXX: ignores tryagain! */
1117 /*
1118 * code from nfs_request - goes something like this - fill in task struct -
1119 * links task into list - calls nfs_send() for first transmit - calls
1120 * nfs_receive() to get reply - fills in reply (which should be initialized
1121 * prior to calling), which is valid when 0 is returned and is NEVER freed in
1122 * this function
1123 *
1124 * always frees the request header, but NEVER frees 'mrest'
1125 *
1126 */
/*
 * rpcclnt_setauth() should be used before calling this. EAUTH is returned if
 * authentication fails.
 */
1132 /*
1133 * note that reply->result_* are invalid unless reply->type ==
1134 * RPC_MSGACCEPTED and reply->status == RPC_SUCCESS and that reply->verf_*
1135 * are invalid unless reply->type == RPC_MSGACCEPTED
1136 */
1137 int
1138 rpcclnt_request(rpc, mrest, procnum, td, cred, reply)
1139 struct rpcclnt *rpc;
1140 struct mbuf *mrest;
1141 int procnum;
1142 RPC_EXEC_CTX td;
1143 struct ucred *cred;
1144 struct rpc_reply *reply;
1145 {
1146 struct mbuf *m, *mrep;
1147 struct rpctask *task;
1148 u_int32_t *tl;
1149 struct mbuf *md, *mheadend;
1150 caddr_t dpos, cp2;
1151 int t1, s, error = 0, mrest_len;
1152 u_int32_t xid;
1153
1154 #ifdef __OpenBSD__
1155 task = pool_get(&rpctask_pool, PR_WAITOK);
1156 #else
1157 MALLOC(task, struct rpctask *, sizeof(struct rpctask), M_RPC, (M_WAITOK | M_ZERO));
1158 #endif
1159
1160 task->r_rpcclnt = rpc;
1161 task->r_procnum = procnum;
1162 task->r_td = td;
1163
1164 mrest_len = m_length(mrest, NULL);
1165
1166 m = rpcclnt_buildheader(rpc, procnum, mrest, mrest_len, &xid, &mheadend,
1167 cred);
1168
1169 /*
1170 * For stream protocols, insert a Sun RPC Record Mark.
1171 */
1172 if (rpc->rc_sotype == SOCK_STREAM) {
1173 M_PREPEND(m, RPCX_UNSIGNED, M_TRYWAIT);
1174 *mtod(m, u_int32_t *) = htonl(0x80000000 |
1175 (m->m_pkthdr.len - RPCX_UNSIGNED));
1176 }
1177 task->r_mreq = m;
1178 task->r_xid = xid;
1179
1180 if (rpc->rc_flag & RPCCLNT_SOFT)
1181 task->r_retry = rpc->rc_retry;
1182 else
1183 task->r_retry = RPC_MAXREXMIT + 1; /* past clip limit */
1184 task->r_rtt = task->r_rexmit = 0;
1185
1186 if (rpcclnt_proct(rpc, procnum) > 0)
1187 task->r_flags = R_TIMING;
1188 else
1189 task->r_flags = 0;
1190 task->r_mrep = NULL;
1191
1192 /*
1193 * Do the client side RPC.
1194 */
1195 rpcstats.rpcrequests++;
1196
1197 /*
1198 * Chain request into list of outstanding requests. Be sure to put it
1199 * LAST so timer finds oldest requests first.
1200 */
1201 s = splsoftclock();
1202 if (TAILQ_EMPTY(&rpctask_q))
1203 callout_reset(&rpcclnt_callout, rpcclnt_ticks, rpcclnt_timer,
1204 NULL);
1205 TAILQ_INSERT_TAIL(&rpctask_q, task, r_chain);
1206
1207 /*
1208 * If backing off another request or avoiding congestion, don't send
1209 * this one now but let timer do it. If not timing a request, do it
1210 * now.
1211 */
1212 if (rpc->rc_so && (rpc->rc_sotype != SOCK_DGRAM ||
1213 (rpc->rc_flag & RPCCLNT_DUMBTIMR) ||
1214 rpc->rc_sent < rpc->rc_cwnd)) {
1215 splx(s);
1216
1217 if (rpc->rc_soflags & PR_CONNREQUIRED)
1218 error = rpcclnt_sndlock(&rpc->rc_flag, task);
1219 if (!error) {
1220 error = rpcclnt_send(rpc->rc_so, rpc->rc_name,
1221 m_copym(m, 0, M_COPYALL, M_TRYWAIT),
1222 task);
1223 if (rpc->rc_soflags & PR_CONNREQUIRED)
1224 rpcclnt_sndunlock(&rpc->rc_flag);
1225 }
1226 if (!error && (task->r_flags & R_MUSTRESEND) == 0) {
1227 rpc->rc_sent += RPC_CWNDSCALE;
1228 task->r_flags |= R_SENT;
1229 }
1230 } else {
1231 splx(s);
1232 task->r_rtt = -1;
1233 }
1234
1235 /*
1236 * Wait for the reply from our send or the timer's.
1237 */
1238 if (!error || error == EPIPE)
1239 error = rpcclnt_reply(task, td);
1240
1241 /*
1242 * RPC done, unlink the request.
1243 */
1244 s = splsoftclock();
1245 TAILQ_REMOVE(&rpctask_q, task, r_chain);
1246 if (TAILQ_EMPTY(&rpctask_q))
1247 callout_stop(&rpcclnt_callout);
1248 splx(s);
1249
1250 /*
1251 * Decrement the outstanding request count.
1252 */
1253 if (task->r_flags & R_SENT) {
1254 task->r_flags &= ~R_SENT; /* paranoia */
1255 rpc->rc_sent -= RPC_CWNDSCALE;
1256 }
1257 /*
1258 * If there was a successful reply and a tprintf msg. tprintf a
1259 * response.
1260 */
1261 if (!error && (task->r_flags & R_TPRINTFMSG)) {
1262 mtx_lock(&Giant);
1263 rpcclnt_msg(task->r_td, rpc->rc_prog->prog_name,
1264 "is alive again");
1265 mtx_unlock(&Giant);
1266 }
1267
1268 /* free request header (leaving mrest) */
1269 mheadend->m_next = NULL;
1270 m_freem(task->r_mreq);
1271
1272 /* initialize reply */
1273 reply->mrep = task->r_mrep;
1274 reply->verf_md = NULL;
1275 reply->result_md = NULL;
1276
1277 mrep = task->r_mrep;
1278 md = task->r_md;
1279 dpos = task->r_dpos;
1280
1281 /* task structure is no longer needed */
1282 #ifdef __OpenBSD__
1283 pool_put(&rpctask_pool, task);
1284 #else
1285 FREE(task, M_RPC);
1286 #endif
1287
1288 if (error)
1289 goto rpcmout;
1290
1291 /*
1292 * break down the rpc header and check if ok
1293 */
1294
1295 rpcm_dissect(tl, u_int32_t *, RPCX_UNSIGNED);
1296 reply->stat.type = fxdr_unsigned(u_int32_t, *tl);
1297
1298 if (reply->stat.type == RPC_MSGDENIED) {
1299 rpcm_dissect(tl, u_int32_t *, RPCX_UNSIGNED);
1300 reply->stat.status = fxdr_unsigned(u_int32_t, *tl);
1301
1302 switch (reply->stat.status) {
1303 case RPC_MISMATCH:
1304 rpcm_dissect(tl, u_int32_t *, 2 * RPCX_UNSIGNED);
1305 reply->stat.mismatch_info.low = fxdr_unsigned(u_int32_t, *tl++);
1306 reply->stat.mismatch_info.high = fxdr_unsigned(u_int32_t, *tl);
1307 error = EOPNOTSUPP;
1308 break;
1309 case RPC_AUTHERR:
1310 rpcm_dissect(tl, u_int32_t *, RPCX_UNSIGNED);
1311 reply->stat.autherr = fxdr_unsigned(u_int32_t, *tl);
1312 error = EACCES;
1313 break;
1314 default:
1315 error = EBADRPC;
1316 break;
1317 }
1318 goto rpcmout;
1319 } else if (reply->stat.type != RPC_MSGACCEPTED) {
1320 error = EBADRPC;
1321 goto rpcmout;
1322 }
1323
1324 rpcm_dissect(tl, u_int32_t *, 2 * RPCX_UNSIGNED);
1325
1326 reply->verf_md = md;
1327 reply->verf_dpos = dpos;
1328
1329 reply->verf_type = fxdr_unsigned(u_int32_t, *tl++);
1330 reply->verf_size = fxdr_unsigned(u_int32_t, *tl);
1331
1332 if (reply->verf_size != 0)
1333 rpcm_adv(rpcm_rndup(reply->verf_size));
1334
1335 rpcm_dissect(tl, u_int32_t *, RPCX_UNSIGNED);
1336 reply->stat.status = fxdr_unsigned(u_int32_t, *tl);
1337
1338 if (reply->stat.status == RPC_SUCCESS) {
1339 if ((uint32_t)(dpos - mtod(md, caddr_t)) >= md->m_len) {
1340 RPCDEBUG("where is the next mbuf?");
1341 RPCDEBUG("%d -> %d",
1342 (int)(dpos - mtod(md, caddr_t)), md->m_len);
1343 if (md->m_next == NULL) {
1344 error = EBADRPC;
1345 goto rpcmout;
1346 } else {
1347 reply->result_md = md->m_next;
1348 reply->result_dpos = mtod(reply->result_md,
1349 caddr_t);
1350 }
1351 } else {
1352 reply->result_md = md;
1353 reply->result_dpos = dpos;
1354 }
1355 } else if (reply->stat.status == RPC_PROGMISMATCH) {
1356 rpcm_dissect(tl, u_int32_t *, 2 * RPCX_UNSIGNED);
1357 reply->stat.mismatch_info.low = fxdr_unsigned(u_int32_t, *tl++);
1358 reply->stat.mismatch_info.high = fxdr_unsigned(u_int32_t, *tl);
1359 error = EOPNOTSUPP;
1360 goto rpcmout;
1361 } else {
1362 error = EPROTONOSUPPORT;
1363 goto rpcmout;
1364 }
1365 error = 0;
1366
1367 rpcmout:
1368 RPC_RETURN(error);
1369 }
1370
1371
/*
 * RPC timer routine.  Scan the rpctask list and retransmit any requests that
 * have timed out.  To avoid retransmission attempts on STREAM sockets (in the
 * future) make sure to set the r_retry field to 0 (implies nm_retry == 0).
 */
void
rpcclnt_timer(arg)
	void *arg;
{
#ifdef __OpenBSD__
	struct timeout *to = (struct timeout *) arg;
#endif
	struct rpctask *rep;
	struct mbuf *m;
	struct socket *so;
	struct rpcclnt *rpc;
	int timeo;
	int s, error;

#ifndef __OpenBSD__
	struct thread *td = curthread;
#endif

#if __OpenBSD__
	s = splsoftnet();
#else
	s = splnet();
#endif
	mtx_lock(&Giant);	/* rpc_msg -> tprintf */
	TAILQ_FOREACH(rep, &rpctask_q, r_chain) {
		rpc = rep->r_rpcclnt;
		/* Skip tasks that are already answered or given up on. */
		if (rep->r_mrep || (rep->r_flags & R_SOFTTERM))
			continue;
		/* A pending signal on an "intr" mount kills the task. */
		if (rpcclnt_sigintr(rpc, rep, rep->r_td)) {
			rep->r_flags |= R_SOFTTERM;
			continue;
		}
		if (rep->r_rtt >= 0) {
			/* Still timing: check elapsed time against the RTO. */
			rep->r_rtt++;
			if (rpc->rc_flag & RPCCLNT_DUMBTIMR)
				timeo = rpc->rc_timeo;
			else
				timeo = RPC_RTO(rpc, rpcclnt_proct(rep->r_rpcclnt,
				    rep->r_procnum));
			/* Apply exponential backoff for repeated timeouts. */
			if (rpc->rc_timeouts > 0)
				timeo *= rpcclnt_backoff[rpc->rc_timeouts - 1];
			if (rep->r_rtt <= timeo)
				continue;
			if (rpc->rc_timeouts < 8)
				rpc->rc_timeouts++;
		}
		/*
		 * Check for server not responding
		 */
		if ((rep->r_flags & R_TPRINTFMSG) == 0 &&
		    rep->r_rexmit > rpc->rc_deadthresh) {
			rpcclnt_msg(rep->r_td, rpc->rc_prog->prog_name,
			    "not responding");
			rep->r_flags |= R_TPRINTFMSG;
		}
		if (rep->r_rexmit >= rep->r_retry) {	/* too many */
			rpcstats.rpctimeouts++;
			rep->r_flags |= R_SOFTTERM;
			continue;
		}
		/* Stream sockets are not retransmitted; just clamp the count. */
		if (rpc->rc_sotype != SOCK_DGRAM) {
			if (++rep->r_rexmit > RPC_MAXREXMIT)
				rep->r_rexmit = RPC_MAXREXMIT;
			continue;
		}
		if ((so = rpc->rc_so) == NULL)
			continue;

		/*
		 * If there is enough space and the window allows.. Resend it
		 * Set r_rtt to -1 in case we fail to send it now.
		 */
		rep->r_rtt = -1;
		if (sbspace(&so->so_snd) >= rep->r_mreq->m_pkthdr.len &&
		    ((rpc->rc_flag & RPCCLNT_DUMBTIMR) ||
		     (rep->r_flags & R_SENT) ||
		     rpc->rc_sent < rpc->rc_cwnd) &&
		    (m = m_copym(rep->r_mreq, 0, M_COPYALL, M_DONTWAIT))) {
			/* Connected sockets need no destination address. */
			if ((rpc->rc_flag & RPCCLNT_NOCONN) == 0)
				error = (*so->so_proto->pr_usrreqs->pru_send) (so, 0, m,
				    NULL, NULL, td);
			else
				error = (*so->so_proto->pr_usrreqs->pru_send)(so, 0, m, rpc->rc_name, NULL, td);
			if (error) {
				if (RPCIGNORE_SOERROR(rpc->rc_soflags, error))
					so->so_error = 0;
			} else {
				/*
				 * Iff first send, start timing else turn
				 * timing off, backoff timer and divide
				 * congestion window by 2.
				 */
				if (rep->r_flags & R_SENT) {
					rep->r_flags &= ~R_TIMING;
					if (++rep->r_rexmit > RPC_MAXREXMIT)
						rep->r_rexmit = RPC_MAXREXMIT;
					rpc->rc_cwnd >>= 1;
					if (rpc->rc_cwnd < RPC_CWNDSCALE)
						rpc->rc_cwnd = RPC_CWNDSCALE;
					rpcstats.rpcretries++;
				} else {
					rep->r_flags |= R_SENT;
					rpc->rc_sent += RPC_CWNDSCALE;
				}
				rep->r_rtt = 0;
			}
		}
	}
	mtx_unlock(&Giant);	/* rpc_msg -> tprintf */
	splx(s);

#ifdef __OpenBSD__
	/*
	 * NOTE(review): OpenBSD's timeout_add() takes (struct timeout *,
	 * int ticks); the argument order here looks wrong -- confirm on an
	 * OpenBSD build before relying on this branch.
	 */
	timeout_add(rpcclnt_timer, to, rpcclnt_ticks);
#else
	/* Re-arm ourselves for the next tick. */
	callout_reset(&rpcclnt_callout, rpcclnt_ticks, rpcclnt_timer, NULL);
#endif
}
1494
1495 /*
1496 * Test for a termination condition pending on the process. This is used for
1497 * RPCCLNT_INT mounts.
1498 */
int
rpcclnt_sigintr(rpc, task, pr)
	struct rpcclnt *rpc;
	struct rpctask *task;
	RPC_EXEC_CTX pr;
{
	struct proc *p;

	sigset_t tmpset;

	if (rpc == NULL)
		return EFAULT;

	/* XXX deal with forced unmounts */

	/* A task already marked dead counts as interrupted. */
	if (task && (task->r_flags & R_SOFTTERM))
		RPC_RETURN(EINTR);

	/* Only interruptible ("intr") mounts honor signals at all. */
	if (!(rpc->rc_flag & RPCCLNT_INT))
		RPC_RETURN(0);

	/* No execution context (e.g. timer invocation): nothing to check. */
	if (pr == NULL)
		return (0);

#ifdef __OpenBSD__
	p = pr;
	if (p && p->p_siglist &&
	    (((p->p_siglist & ~p->p_sigmask) & ~p->p_sigignore) &
	     RPCINT_SIGMASK))
		RPC_RETURN(EINTR);
#else
	p = pr->td_proc;
	PROC_LOCK(p);
	/* Pending, not blocked by this thread, and not ignored... */
	tmpset = p->p_siglist;
	SIGSETNAND(tmpset, pr->td_sigmask);
	mtx_lock(&p->p_sigacts->ps_mtx);
	SIGSETNAND(tmpset, p->p_sigacts->ps_sigignore);
	mtx_unlock(&p->p_sigacts->ps_mtx);
	/* ...and one of the signals in the interrupting set. */
	if (SIGNOTEMPTY(p->p_siglist) && RPCCLNTINT_SIGMASK(tmpset)) {
		PROC_UNLOCK(p);
		RPC_RETURN(EINTR);
	}
	PROC_UNLOCK(p);
#endif
	RPC_RETURN(0);
}
1545
1546 /*
1547 * Lock a socket against others. Necessary for STREAM sockets to ensure you
1548 * get an entire rpc request/reply and also to avoid race conditions between
1549 * the processes with nfs requests in progress when a reconnect is necessary.
1550 */
static int
rpcclnt_sndlock(flagp, task)
	int *flagp;
	struct rpctask *task;
{
	RPC_EXEC_CTX p;
	int slpflag = 0, slptimeo = 0;

	p = task->r_td;
	/* Interruptible mounts allow signals to break the wait. */
	if (task->r_rpcclnt->rc_flag & RPCCLNT_INT)
		slpflag = PCATCH;
	while (*flagp & RPCCLNT_SNDLOCK) {
		if (rpcclnt_sigintr(task->r_rpcclnt, task, p))
			RPC_RETURN(EINTR);
		/* Ask the holder to wake us when it unlocks. */
		*flagp |= RPCCLNT_WANTSND;
		(void)tsleep((caddr_t) flagp, slpflag | (PZERO - 1), "rpcsndlck",
		    slptimeo);
		/*
		 * After one catchable sleep, fall back to polling every
		 * 2 seconds instead of sleeping interruptibly forever.
		 */
		if (slpflag == PCATCH) {
			slpflag = 0;
			slptimeo = 2 * hz;
		}
	}
	*flagp |= RPCCLNT_SNDLOCK;
	RPC_RETURN(0);
}
1576
1577 /*
1578 * Unlock the stream socket for others.
1579 */
1580 static void
1581 rpcclnt_sndunlock(flagp)
1582 int *flagp;
1583 {
1584
1585 if ((*flagp & RPCCLNT_SNDLOCK) == 0)
1586 panic("rpc sndunlock");
1587 *flagp &= ~RPCCLNT_SNDLOCK;
1588 if (*flagp & RPCCLNT_WANTSND) {
1589 *flagp &= ~RPCCLNT_WANTSND;
1590 wakeup((caddr_t) flagp);
1591 }
1592 }
1593
static int
rpcclnt_rcvlock(task)
	struct rpctask *task;
{
	int *flagp = &task->r_rpcclnt->rc_flag;
	int slpflag, slptimeo = 0;

	/* Interruptible mounts allow signals to break the wait. */
	if (*flagp & RPCCLNT_INT)
		slpflag = PCATCH;
	else
		slpflag = 0;
	while (*flagp & RPCCLNT_RCVLOCK) {
		if (rpcclnt_sigintr(task->r_rpcclnt, task, task->r_td))
			RPC_RETURN(EINTR);
		/* Ask the holder to wake us when it unlocks. */
		*flagp |= RPCCLNT_WANTRCV;
		(void)tsleep((caddr_t) flagp, slpflag | (PZERO - 1), "rpcrcvlk",
		    slptimeo);
		/*
		 * After one catchable sleep, fall back to polling every
		 * 2 seconds instead of sleeping interruptibly forever.
		 */
		if (slpflag == PCATCH) {
			slpflag = 0;
			slptimeo = 2 * hz;
		}
	}
	*flagp |= RPCCLNT_RCVLOCK;
	RPC_RETURN(0);
}
1619
1620 /*
1621 * Unlock the stream socket for others.
1622 */
1623 static void
1624 rpcclnt_rcvunlock(flagp)
1625 int *flagp;
1626 {
1627
1628 if ((*flagp & RPCCLNT_RCVLOCK) == 0)
1629 panic("nfs rcvunlock");
1630 *flagp &= ~RPCCLNT_RCVLOCK;
1631 if (*flagp & RPCCLNT_WANTRCV) {
1632 *flagp &= ~RPCCLNT_WANTRCV;
1633 wakeup((caddr_t) flagp);
1634 }
1635 }
1636
#if 0
/*
 * Check for badly aligned mbuf data areas and realign data in an mbuf list
 * by copying the data areas up, as required.
 * (Disabled: superseded by the copy-into-fresh-mbuf version below.)
 */
void
rpcclnt_realign(m, hsiz)
	struct mbuf *m;
	int hsiz;
{
	struct mbuf *m2;
	int siz, mlen, olen;
	caddr_t tcp, fcp;
	struct mbuf *mnew;

	while (m) {
		/*
		 * This never happens for UDP, rarely happens for TCP but
		 * frequently happens for iso transport.
		 */
		if ((m->m_len & 0x3) || (mtod(m, long)&0x3)) {
			olen = m->m_len;
			fcp = mtod(m, caddr_t);
			if ((long)fcp & 0x3) {
				if (m->m_flags & M_PKTHDR)
					m_tag_delete_chain(m, NULL);
				m->m_flags &= ~M_PKTHDR;
				if (m->m_flags & M_EXT)
					m->m_data = m->m_ext.ext_buf +
					    ((m->m_ext.ext_size - olen) & ~0x3);
				else
					m->m_data = m->m_dat;
			}
			m->m_len = 0;
			tcp = mtod(m, caddr_t);
			mnew = m;
			m2 = m->m_next;

			/*
			 * If possible, only put the first invariant part of
			 * the RPC header in the first mbuf.
			 */
			mlen = M_TRAILINGSPACE(m);
			if (olen <= hsiz && mlen > hsiz)
				mlen = hsiz;

			/* Loop through the mbuf list consolidating data. */
			while (m) {
				while (olen > 0) {
					if (mlen == 0) {
						if (m2->m_flags & M_PKTHDR)
							m_tag_delete_chain(m2, NULL);
						m2->m_flags &= ~M_PKTHDR;
						if (m2->m_flags & M_EXT)
							m2->m_data = m2->m_ext.ext_buf;
						else
							m2->m_data = m2->m_dat;
						m2->m_len = 0;
						mlen = M_TRAILINGSPACE(m2);
						tcp = mtod(m2, caddr_t);
						mnew = m2;
						m2 = m2->m_next;
					}
					siz = min(mlen, olen);
					if (tcp != fcp)
						bcopy(fcp, tcp, siz);
					mnew->m_len += siz;
					mlen -= siz;
					olen -= siz;
					tcp += siz;
					fcp += siz;
				}
				m = m->m_next;
				if (m) {
					olen = m->m_len;
					fcp = mtod(m, caddr_t);
				}
			}

			/*
			 * Finally, set m_len == 0 for any trailing mbufs
			 * that have been copied out of.
			 */
			while (m2) {
				m2->m_len = 0;
				m2 = m2->m_next;
			}
			return;
		}
		m = m->m_next;
	}
}
#else
/*
 * Check for badly aligned mbuf data areas (length or data pointer not
 * 32-bit aligned) and realign by copying the misaligned tail of the chain
 * into one fresh mbuf (cluster-backed when the first misaligned mbuf is
 * large enough).  '*pm' is updated in place; 'hsiz' is unused here.
 */
static void
rpcclnt_realign(struct mbuf **pm, int hsiz)
{
	struct mbuf *m;
	struct mbuf *n = NULL;
	int off = 0;

	RPCDEBUG("in rpcclnt_realign()");

	/* Walk the chain until the first misaligned mbuf, if any. */
	while ((m = *pm) != NULL) {
		if ((m->m_len & 0x3) || (mtod(m, intptr_t) & 0x3)) {
			MGET(n, M_TRYWAIT, MT_DATA);
			if (m->m_len >= MINCLSIZE) {
				MCLGET(n, M_TRYWAIT);
			}
			n->m_len = 0;
			break;
		}
		pm = &m->m_next;
	}

	/*
	 * If n is non-NULL, loop on m copying data, then replace the
	 * portion of the chain that had to be realigned.
	 */
	if (n != NULL) {
		while (m) {
			m_copyback(n, off, m->m_len, mtod(m, caddr_t));
			off += m->m_len;
			m = m->m_next;
		}
		m_freem(*pm);
		*pm = n;
	}

	RPCDEBUG("leave rpcclnt_realign()");
}
#endif
1768
/*
 * Report a server condition to the user's terminal via tprintf().
 * Returns 0 unconditionally.
 */
static int
rpcclnt_msg(p, server, msg)
	RPC_EXEC_CTX p;
	const char *server;
	char *msg;
{
#ifdef __OpenBSD__
	tpr_t tpr;
	struct proc *pr = p;

	if (p)
		tpr = tprintf_open(p);
	else
		tpr = NULL;
	tprintf(tpr, "rpc server %s: %s\n", server, msg);
	tprintf_close(tpr);
	RPC_RETURN(0);
#else
	GIANT_REQUIRED;

	/*
	 * NOTE(review): this branch prints "nfs server" while the OpenBSD
	 * branch prints "rpc server" -- looks like a leftover from the NFS
	 * code this was derived from.  Left unchanged since log consumers
	 * may depend on the exact text; confirm before altering.
	 */
	tprintf(p ? p->td_proc : NULL, LOG_INFO,
	    "nfs server %s: %s\n", server, msg);
	RPC_RETURN(0);
#endif
}
1794
1795 /*
1796 * Build the RPC header and fill in the authorization info. The authorization
1797 * string argument is only used when the credentials come from outside of the
1798 * kernel (AUTH_KERB). (likewise, the ucred is only used when inside the
1799 * kernel) Returns the head of the mbuf list.
1800 */
/*
 * Build the RPC call header (xid, direction, version, program, procedure)
 * into a fresh packet-header mbuf, append the credential/verifier via
 * rpcauth_buildheader(), and chain 'mrest' (the call arguments) on the end.
 * Returns the head of the new chain, or NULL if the auth header cannot be
 * built.  '*xidp' receives the XDR-encoded transaction id and '*mheadend'
 * the last header mbuf, so the caller can later detach 'mrest' again.
 *
 * NOTE(review): on the NULL-return path the already-allocated header mbufs
 * are not freed -- looks like a leak; confirm.  Also the caller passes a
 * u_int32_t * for 'xidp' although it is declared int * here.
 */
static struct mbuf *
rpcclnt_buildheader(rc, procid, mrest, mrest_len, xidp, mheadend, cred)
	struct rpcclnt *rc;
	int procid;
	struct mbuf *mrest;
	u_int32_t mrest_len;
	int *xidp;
	struct mbuf **mheadend;
	struct ucred * cred;
{
	/* register */ struct mbuf *mb;
	register u_int32_t *tl;
	/* register */ caddr_t bpos;
	struct mbuf *mreq, *mb2;
	int error;

	MGETHDR(mb, M_TRYWAIT, MT_DATA);
	if (6 * RPCX_UNSIGNED >= MINCLSIZE) {
		MCLGET(mb, M_TRYWAIT);
	} else if (6 * RPCX_UNSIGNED < MHLEN) {
		MH_ALIGN(mb, 6 * RPCX_UNSIGNED);
	} else {
		RPCDEBUG("mbuf too small");
		panic("cheap bailout");
	}
	mb->m_len = 0;
	mreq = mb;
	bpos = mtod(mb, caddr_t);

	/*
	 * First the RPC header.
	 */
	rpcm_build(tl, u_int32_t *, 6 * RPCX_UNSIGNED);

	/* Get a new (non-zero) xid */
	if ((rpcclnt_xid == 0) && (rpcclnt_xid_touched == 0)) {
		rpcclnt_xid = arc4random();
		rpcclnt_xid_touched = 1;
	} else {
		/*
		 * Advance the global xid by a small non-zero random step;
		 * *xidp is only scratch storage here and is overwritten
		 * with the real encoded xid below.
		 */
		while ((*xidp = arc4random() % 256) == 0);
		rpcclnt_xid += *xidp;
	}

	/* XXX: funky... */
	*tl++ = *xidp = txdr_unsigned(rpcclnt_xid);

	*tl++ = rpc_call;
	*tl++ = rpc_vers;
	*tl++ = txdr_unsigned(rc->rc_prog->prog_id);
	*tl++ = txdr_unsigned(rc->rc_prog->prog_version);
	*tl++ = txdr_unsigned(procid);

	/* Append credential + verifier; may advance mb/bpos to a new mbuf. */
	if ((error = rpcauth_buildheader(rc->rc_auth, cred, &mb, &bpos))) {
		RPCDEBUG("rpcauth_buildheader failed %d", error);
		return NULL;
	}

	/* Chain the arguments on and finalize the packet-header length. */
	mb->m_next = mrest;
	*mheadend = mb;
	mreq->m_pkthdr.len = m_length(mreq, NULL);
	mreq->m_pkthdr.rcvif = NULL;
	return (mreq);
}
1864
1865 /*
1866 * Help break down an mbuf chain by setting the first siz bytes contiguous
1867 * pointed to by returned val. This is used by the macros rpcm_dissect and
1868 * rpcm_dissecton for tough cases. (The macros use the vars. dpos and dpos2)
1869 */
static int
rpcm_disct(mdp, dposp, siz, left, cp2)
	struct mbuf **mdp;
	caddr_t *dposp;
	int siz;
	int left;
	caddr_t *cp2;
{
	struct mbuf *mp, *mp2;
	int siz2, xfer;
	caddr_t p;

	mp = *mdp;
	/* Skip past any exhausted mbufs at the current position. */
	while (left == 0) {
		*mdp = mp = mp->m_next;
		if (mp == NULL)
			RPC_RETURN(EBADRPC);
		left = mp->m_len;
		*dposp = mtod(mp, caddr_t);
	}
	if (left >= siz) {
		/* Fast path: the requested bytes are already contiguous. */
		*cp2 = *dposp;
		*dposp += siz;
	} else if (mp->m_next == NULL) {
		RPC_RETURN(EBADRPC);
	} else if (siz > MHLEN) {
		panic("rpc S too big");
	} else {
		/*
		 * Slow path: splice a fresh mbuf into the chain and gather
		 * 'siz' bytes into it from the current and following mbufs
		 * so the caller sees them contiguously.
		 */
		MGET(mp2, M_TRYWAIT, MT_DATA);
		mp2->m_next = mp->m_next;
		mp->m_next = mp2;
		mp->m_len -= left;
		mp = mp2;
		*cp2 = p = mtod(mp, caddr_t);
		bcopy(*dposp, p, left);	/* Copy what was left */
		siz2 = siz - left;
		p += left;
		mp2 = mp->m_next;
		/* Loop around copying up the siz2 bytes */
		while (siz2 > 0) {
			if (mp2 == NULL)
				RPC_RETURN(EBADRPC);
			xfer = (siz2 > mp2->m_len) ? mp2->m_len : siz2;
			if (xfer > 0) {
				bcopy(mtod(mp2, caddr_t), p, xfer);
				RPCMADV(mp2, xfer);
				mp2->m_len -= xfer;
				p += xfer;
				siz2 -= xfer;
			}
			if (siz2 > 0)
				mp2 = mp2->m_next;
		}
		mp->m_len = siz;
		/* Resume parsing just past the gathered bytes. */
		*mdp = mp2;
		*dposp = mtod(mp2, caddr_t);
	}
	RPC_RETURN(0);
}
1929
1930
1931
1932 static u_int32_t
1933 rpcclnt_proct(rpc, procid)
1934 struct rpcclnt *rpc;
1935 u_int32_t procid;
1936 {
1937 if (rpc->rc_proctlen != 0 && rpc->rc_proct != NULL &&
1938 procid < rpc->rc_proctlen) {
1939 return rpc->rc_proct[procid];
1940 }
1941 return (0);
1942 }
1943
1944 static int
1945 rpc_adv(mdp, dposp, offs, left)
1946 struct mbuf **mdp;
1947 caddr_t *dposp;
1948 int offs;
1949 int left;
1950 {
1951 struct mbuf *m;
1952 int s;
1953
1954 m = *mdp;
1955 s = left;
1956 while (s < offs) {
1957 offs -= s;
1958 m = m->m_next;
1959 if (m == NULL)
1960 RPC_RETURN(EBADRPC);
1961 s = m->m_len;
1962 }
1963 *mdp = m;
1964 *dposp = mtod(m, caddr_t) + offs;
1965 RPC_RETURN(0);
1966 }
1967
/*
 * Soft-terminate every outstanding task belonging to 'rpc', then wait
 * (polling on lbolt, up to 30 iterations) for them to drain off the
 * global task queue.  Returns 0 when drained, EBUSY on timeout.
 */
int
rpcclnt_cancelreqs(rpc)
	struct rpcclnt *rpc;
{
	struct rpctask *task;
	int i, s;

	/* Mark every live task of this client as terminated. */
	s = splnet();
	TAILQ_FOREACH(task, &rpctask_q, r_chain) {
		if (rpc != task->r_rpcclnt || task->r_mrep != NULL ||
		    (task->r_flags & R_SOFTTERM))
			continue;
		rpcclnt_softterm(task);
	}
	splx(s);

	/* Wait for the marked tasks to disappear from the queue. */
	for (i = 0; i < 30; i++) {
		s = splnet();
		TAILQ_FOREACH(task, &rpctask_q, r_chain) {
			if (rpc == task->r_rpcclnt)
				break;
		}
		splx(s);
		if (task == NULL)
			return (0);
		tsleep(&lbolt, PSOCK, "nfscancel", 0);
	}
	return (EBUSY);
}
1997
1998 static void
1999 rpcclnt_softterm(struct rpctask * task)
2000 {
2001 task->r_flags |= R_SOFTTERM;
2002 if (task->r_flags & R_SENT) {
2003 task->r_rpcclnt->rc_sent -= RPC_CWNDSCALE;
2004 task->r_flags &= ~R_SENT;
2005 }
2006 }
2007
2008
2009 #ifndef __OpenBSD__
2010 /* called by rpcclnt_get() */
/* Allocate a zeroed rpcclnt structure into '*rpc' (may sleep). */
void
rpcclnt_create(struct rpcclnt ** rpc)
{
	/* M_ZERO: hand back a fully zeroed client so all flags start clear. */
	MALLOC(*rpc, struct rpcclnt *, sizeof(struct rpcclnt), M_RPC, M_WAITOK | M_ZERO);
}
2016
2017 /* called by rpcclnt_put() */
2018 void
2019 rpcclnt_destroy(struct rpcclnt * rpc)
2020 {
2021 if (rpc != NULL) {
2022 FREE(rpc, M_RPC);
2023 } else {
2024 RPCDEBUG("attempting to free a NULL rpcclnt (not dereferenced)");
2025 }
2026 }
2027 #endif /* !__OpenBSD__ */
2028
2029
/* XXX: add a lock around the auth structure in struct rpcclnt and make this
 * call safe for calling during a connection */
/*
 * Append the XDR-encoded credential and (always AUTH_NULL) verifier to the
 * call header being built in '*mhdr'/'*bp'.  Supports RPCAUTH_NULL and
 * RPCAUTH_UNIX; returns EPROTONOSUPPORT for anything else and EFAULT for
 * NULL arguments.  '*mhdr' and '*bp' are updated to the new build position.
 */
static int
rpcauth_buildheader(struct rpc_auth * auth, struct ucred * cred, struct mbuf ** mhdr, caddr_t * bp)
{
	size_t authsiz, verfsiz;
	uint32_t mlen, grpsiz;
	register struct mbuf *mb, *mb2;
	caddr_t bpos;
	register u_int32_t *tl;
	register int i;

	if (auth == NULL || mhdr == NULL)
		return EFAULT;

	/* Size the credential body; the verifier body is always empty. */
	switch (auth->auth_type) {
	case RPCAUTH_NULL:
		authsiz = 0;
		verfsiz = 0;
		break;
	case RPCAUTH_UNIX:
		/* stamp + name length + uid + gid + gid count + gids */
		authsiz = (5 + cred->cr_ngroups) * RPCX_UNSIGNED;
		verfsiz = 0;
		break;
	default:
		return EPROTONOSUPPORT;
		break;
	};

	/* Bodies (rounded up) plus (flavor, length) words for cred and verf. */
	mlen = rpcm_rndup(authsiz) + rpcm_rndup(verfsiz) + 4 * RPCX_UNSIGNED;

	mb = *mhdr;
	bpos = *bp;

	rpcm_build(tl, u_int32_t *, mlen);

	/* rpcm_build() may have moved to a new mbuf; report the position back. */
	*bp = bpos;
	*mhdr = mb;

	*tl++ = txdr_unsigned(auth->auth_type);
	*tl++ = txdr_unsigned(authsiz);
	switch (auth->auth_type) {
	case RPCAUTH_UNIX:
		*tl++ = 0;	/* stamp */
		*tl++ = 0;	/* zero-length machine name */

		*tl++ = txdr_unsigned(cred->cr_uid);
		*tl++ = txdr_unsigned(cred->cr_groups[0]);
		grpsiz = cred->cr_ngroups;
		*tl++ = txdr_unsigned(grpsiz);
		/* XXX: groups[0] is already sent... */
		for (i = 0 ; i < grpsiz ; i++) {
			*tl++ = txdr_unsigned(cred->cr_groups[i]);
		}

		/* null verification header */
		*tl++ = txdr_unsigned(RPCAUTH_NULL);
		*tl++ = 0;
		break;
	case RPCAUTH_NULL:
		/* just a null verf header */
		*tl++ = txdr_unsigned(RPCAUTH_NULL);
		*tl = 0;
		break;
	default:
		panic("inconsistent rpc auth type");
		break;
	}

	return 0;
}
Cache object: 589039e15bc99b7ab508530e82dc3bd0
|