FreeBSD/Linux Kernel Cross Reference
sys/rpc/svc.c
1 /* $NetBSD: svc.c,v 1.21 2000/07/06 03:10:35 christos Exp $ */
2
3 /*-
4 * Copyright (c) 2009, Sun Microsystems, Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 * - Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright notice,
12 * this list of conditions and the following disclaimer in the documentation
13 * and/or other materials provided with the distribution.
14 * - Neither the name of Sun Microsystems, Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived
16 * from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
29 */
30
31 #if defined(LIBC_SCCS) && !defined(lint)
32 static char *sccsid2 = "@(#)svc.c 1.44 88/02/08 Copyr 1984 Sun Micro";
33 static char *sccsid = "@(#)svc.c 2.4 88/08/11 4.0 RPCSRC";
34 #endif
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37
38 /*
39 * svc.c, Server-side remote procedure call interface.
40 *
41 * There are two sets of procedures here. The xprt routines are
42 * for handling transport handles. The svc routines handle the
43 * list of service routines.
44 *
45 * Copyright (C) 1984, Sun Microsystems, Inc.
46 */
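/*
 * Typical usage, as a rough sketch: a server creates a pool with
 * svcpool_create(), creates and registers its transports (the transport
 * back-ends, e.g. svc_vc_create(), call xprt_register()), registers its
 * programs with svc_reg(), and then calls svc_run() to service requests
 * until svc_exit() is called.
 */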
47
48 #include <sys/param.h>
49 #include <sys/lock.h>
50 #include <sys/kernel.h>
51 #include <sys/kthread.h>
52 #include <sys/malloc.h>
53 #include <sys/mbuf.h>
54 #include <sys/mutex.h>
55 #include <sys/proc.h>
56 #include <sys/queue.h>
57 #include <sys/socketvar.h>
58 #include <sys/systm.h>
59 #include <sys/smp.h>
60 #include <sys/sx.h>
61 #include <sys/ucred.h>
62
63 #include <rpc/rpc.h>
64 #include <rpc/rpcb_clnt.h>
65 #include <rpc/replay.h>
66
67 #include <rpc/rpc_com.h>
68
69 #define SVC_VERSQUIET 0x0001 /* keep quiet about vers mismatch */
70 #define version_keepquiet(xp) (SVC_EXT(xp)->xp_flags & SVC_VERSQUIET)
71
72 static struct svc_callout *svc_find(SVCPOOL *pool, rpcprog_t, rpcvers_t,
73 char *);
74 static void svc_new_thread(SVCGROUP *grp);
75 static void xprt_unregister_locked(SVCXPRT *xprt);
76 static void svc_change_space_used(SVCPOOL *pool, int delta);
77 static bool_t svc_request_space_available(SVCPOOL *pool);
78
79 /* *************** SVCXPRT related stuff **************** */
80
81 static int svcpool_minthread_sysctl(SYSCTL_HANDLER_ARGS);
82 static int svcpool_maxthread_sysctl(SYSCTL_HANDLER_ARGS);
83 static int svcpool_threads_sysctl(SYSCTL_HANDLER_ARGS);
84
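/*
 * Create a new service thread pool. The pool and its thread groups are
 * initialized, request buffering limits are computed from nmbclusters,
 * and, if sysctl_base is non-NULL, sysctl nodes are attached for
 * monitoring and tuning the pool.
 */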
85 SVCPOOL*
86 svcpool_create(const char *name, struct sysctl_oid_list *sysctl_base)
87 {
88 SVCPOOL *pool;
89 SVCGROUP *grp;
90 int g;
91
92 pool = malloc(sizeof(SVCPOOL), M_RPC, M_WAITOK|M_ZERO);
93
94 mtx_init(&pool->sp_lock, "sp_lock", NULL, MTX_DEF);
95 pool->sp_name = name;
96 pool->sp_state = SVCPOOL_INIT;
97 pool->sp_proc = NULL;
98 TAILQ_INIT(&pool->sp_callouts);
99 TAILQ_INIT(&pool->sp_lcallouts);
100 pool->sp_minthreads = 1;
101 pool->sp_maxthreads = 1;
102 pool->sp_groupcount = 1;
103 for (g = 0; g < SVC_MAXGROUPS; g++) {
104 grp = &pool->sp_groups[g];
105 mtx_init(&grp->sg_lock, "sg_lock", NULL, MTX_DEF);
106 grp->sg_pool = pool;
107 grp->sg_state = SVCPOOL_ACTIVE;
108 TAILQ_INIT(&grp->sg_xlist);
109 TAILQ_INIT(&grp->sg_active);
110 LIST_INIT(&grp->sg_idlethreads);
111 grp->sg_minthreads = 1;
112 grp->sg_maxthreads = 1;
113 }
114
115 /*

116 * Don't use more than a quarter of mbuf clusters, or more than
117 * 45MB, for buffering requests.
118 */
119 pool->sp_space_high = nmbclusters * MCLBYTES / 4;
120 if (pool->sp_space_high > 45 << 20)
121 pool->sp_space_high = 45 << 20;
122 pool->sp_space_low = 2 * pool->sp_space_high / 3;
123
124 sysctl_ctx_init(&pool->sp_sysctl);
125 if (sysctl_base) {
126 SYSCTL_ADD_PROC(&pool->sp_sysctl, sysctl_base, OID_AUTO,
127 "minthreads", CTLTYPE_INT | CTLFLAG_RW,
128 pool, 0, svcpool_minthread_sysctl, "I",
129 "Minimal number of threads");
130 SYSCTL_ADD_PROC(&pool->sp_sysctl, sysctl_base, OID_AUTO,
131 "maxthreads", CTLTYPE_INT | CTLFLAG_RW,
132 pool, 0, svcpool_maxthread_sysctl, "I",
133 "Maximal number of threads");
134 SYSCTL_ADD_PROC(&pool->sp_sysctl, sysctl_base, OID_AUTO,
135 "threads", CTLTYPE_INT | CTLFLAG_RD,
136 pool, 0, svcpool_threads_sysctl, "I",
137 "Current number of threads");
138 SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
139 "groups", CTLFLAG_RD, &pool->sp_groupcount, 0,
140 "Number of thread groups");
141
142 SYSCTL_ADD_UINT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
143 "request_space_used", CTLFLAG_RD,
144 &pool->sp_space_used, 0,
145 "Space in parsed but not handled requests.");
146
147 SYSCTL_ADD_UINT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
148 "request_space_used_highest", CTLFLAG_RD,
149 &pool->sp_space_used_highest, 0,
150 "Highest space used since reboot.");
151
152 SYSCTL_ADD_UINT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
153 "request_space_high", CTLFLAG_RW,
154 &pool->sp_space_high, 0,
155 "Maximum space in parsed but not handled requests.");
156
157 SYSCTL_ADD_UINT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
158 "request_space_low", CTLFLAG_RW,
159 &pool->sp_space_low, 0,
160 "Low water mark for request space.");
161
162 SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
163 "request_space_throttled", CTLFLAG_RD,
164 &pool->sp_space_throttled, 0,
165 "Whether nfs requests are currently throttled");
166
167 SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
168 "request_space_throttle_count", CTLFLAG_RD,
169 &pool->sp_space_throttle_count, 0,
170 "Count of times throttling based on request space has occurred");
171 }
172
173 return pool;
174 }
175
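/*
 * Tear down a pool created by svcpool_create(): unregister and release
 * all transports, remove any remaining callouts, free the replay cache
 * if one was set, and free the pool itself.
 */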
176 void
177 svcpool_destroy(SVCPOOL *pool)
178 {
179 SVCGROUP *grp;
180 SVCXPRT *xprt, *nxprt;
181 struct svc_callout *s;
182 struct svc_loss_callout *sl;
183 struct svcxprt_list cleanup;
184 int g;
185
186 TAILQ_INIT(&cleanup);
187
188 for (g = 0; g < SVC_MAXGROUPS; g++) {
189 grp = &pool->sp_groups[g];
190 mtx_lock(&grp->sg_lock);
191 while ((xprt = TAILQ_FIRST(&grp->sg_xlist)) != NULL) {
192 xprt_unregister_locked(xprt);
193 TAILQ_INSERT_TAIL(&cleanup, xprt, xp_link);
194 }
195 mtx_unlock(&grp->sg_lock);
196 }
197 TAILQ_FOREACH_SAFE(xprt, &cleanup, xp_link, nxprt) {
198 SVC_RELEASE(xprt);
199 }
200
201 mtx_lock(&pool->sp_lock);
202 while ((s = TAILQ_FIRST(&pool->sp_callouts)) != NULL) {
203 mtx_unlock(&pool->sp_lock);
204 svc_unreg(pool, s->sc_prog, s->sc_vers);
205 mtx_lock(&pool->sp_lock);
206 }
207 while ((sl = TAILQ_FIRST(&pool->sp_lcallouts)) != NULL) {
208 mtx_unlock(&pool->sp_lock);
209 svc_loss_unreg(pool, sl->slc_dispatch);
210 mtx_lock(&pool->sp_lock);
211 }
212 mtx_unlock(&pool->sp_lock);
213
214 for (g = 0; g < SVC_MAXGROUPS; g++) {
215 grp = &pool->sp_groups[g];
216 mtx_destroy(&grp->sg_lock);
217 }
218 mtx_destroy(&pool->sp_lock);
219
220 if (pool->sp_rcache)
221 replay_freecache(pool->sp_rcache);
222
223 sysctl_ctx_free(&pool->sp_sysctl);
224 free(pool, M_RPC);
225 }
226
227 /*
228 * Sysctl handler to get the present thread count on a pool
229 */
230 static int
231 svcpool_threads_sysctl(SYSCTL_HANDLER_ARGS)
232 {
233 SVCPOOL *pool;
234 int threads, error, g;
235
236 pool = oidp->oid_arg1;
237 threads = 0;
238 mtx_lock(&pool->sp_lock);
239 for (g = 0; g < pool->sp_groupcount; g++)
240 threads += pool->sp_groups[g].sg_threadcount;
241 mtx_unlock(&pool->sp_lock);
242 error = sysctl_handle_int(oidp, &threads, 0, req);
243 return (error);
244 }
245
246 /*
247 * Sysctl handler to set the minimum thread count on a pool
248 */
249 static int
250 svcpool_minthread_sysctl(SYSCTL_HANDLER_ARGS)
251 {
252 SVCPOOL *pool;
253 int newminthreads, error, g;
254
255 pool = oidp->oid_arg1;
256 newminthreads = pool->sp_minthreads;
257 error = sysctl_handle_int(oidp, &newminthreads, 0, req);
258 if (error == 0 && newminthreads != pool->sp_minthreads) {
259 if (newminthreads > pool->sp_maxthreads)
260 return (EINVAL);
261 mtx_lock(&pool->sp_lock);
262 pool->sp_minthreads = newminthreads;
263 for (g = 0; g < pool->sp_groupcount; g++) {
264 pool->sp_groups[g].sg_minthreads = max(1,
265 pool->sp_minthreads / pool->sp_groupcount);
266 }
267 mtx_unlock(&pool->sp_lock);
268 }
269 return (error);
270 }
271
272 /*
273 * Sysctl handler to set the maximum thread count on a pool
274 */
275 static int
276 svcpool_maxthread_sysctl(SYSCTL_HANDLER_ARGS)
277 {
278 SVCPOOL *pool;
279 int newmaxthreads, error, g;
280
281 pool = oidp->oid_arg1;
282 newmaxthreads = pool->sp_maxthreads;
283 error = sysctl_handle_int(oidp, &newmaxthreads, 0, req);
284 if (error == 0 && newmaxthreads != pool->sp_maxthreads) {
285 if (newmaxthreads < pool->sp_minthreads)
286 return (EINVAL);
287 mtx_lock(&pool->sp_lock);
288 pool->sp_maxthreads = newmaxthreads;
289 for (g = 0; g < pool->sp_groupcount; g++) {
290 pool->sp_groups[g].sg_maxthreads = max(1,
291 pool->sp_maxthreads / pool->sp_groupcount);
292 }
293 mtx_unlock(&pool->sp_lock);
294 }
295 return (error);
296 }
297
298 /*
299 * Activate a transport handle.
300 */
301 void
302 xprt_register(SVCXPRT *xprt)
303 {
304 SVCPOOL *pool = xprt->xp_pool;
305 SVCGROUP *grp;
306 int g;
307
308 SVC_ACQUIRE(xprt);
309 g = atomic_fetchadd_int(&pool->sp_nextgroup, 1) % pool->sp_groupcount;
310 xprt->xp_group = grp = &pool->sp_groups[g];
311 mtx_lock(&grp->sg_lock);
312 xprt->xp_registered = TRUE;
313 xprt->xp_active = FALSE;
314 TAILQ_INSERT_TAIL(&grp->sg_xlist, xprt, xp_link);
315 mtx_unlock(&grp->sg_lock);
316 }
317
318 /*
319 * De-activate a transport handle. Note: the locked version doesn't
320 * release the transport - the caller must do that after dropping the
321 * group lock.
322 */
323 static void
324 xprt_unregister_locked(SVCXPRT *xprt)
325 {
326 SVCGROUP *grp = xprt->xp_group;
327
328 mtx_assert(&grp->sg_lock, MA_OWNED);
329 KASSERT(xprt->xp_registered == TRUE,
330 ("xprt_unregister_locked: not registered"));
331 xprt_inactive_locked(xprt);
332 TAILQ_REMOVE(&grp->sg_xlist, xprt, xp_link);
333 xprt->xp_registered = FALSE;
334 }
335
336 void
337 xprt_unregister(SVCXPRT *xprt)
338 {
339 SVCGROUP *grp = xprt->xp_group;
340
341 mtx_lock(&grp->sg_lock);
342 if (xprt->xp_registered == FALSE) {
343 /* Already unregistered by another thread */
344 mtx_unlock(&grp->sg_lock);
345 return;
346 }
347 xprt_unregister_locked(xprt);
348 mtx_unlock(&grp->sg_lock);
349
350 SVC_RELEASE(xprt);
351 }
352
353 /*
354 * Attempt to assign a service thread to this transport.
355 */
356 static int
357 xprt_assignthread(SVCXPRT *xprt)
358 {
359 SVCGROUP *grp = xprt->xp_group;
360 SVCTHREAD *st;
361
362 mtx_assert(&grp->sg_lock, MA_OWNED);
363 st = LIST_FIRST(&grp->sg_idlethreads);
364 if (st) {
365 LIST_REMOVE(st, st_ilink);
366 SVC_ACQUIRE(xprt);
367 xprt->xp_thread = st;
368 st->st_xprt = xprt;
369 cv_signal(&st->st_cond);
370 return (TRUE);
371 } else {
372 /*
373 * See if we can create a new thread. The
374 * actual thread creation happens in
375 * svc_run_internal because our locking state
376 * is poorly defined (we are typically called
377 * from a socket upcall). Don't create more
378 * than one thread per second.
379 */
380 if (grp->sg_state == SVCPOOL_ACTIVE
381 && grp->sg_lastcreatetime < time_uptime
382 && grp->sg_threadcount < grp->sg_maxthreads) {
383 grp->sg_state = SVCPOOL_THREADWANTED;
384 }
385 }
386 return (FALSE);
387 }
388
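/*
 * Mark a transport as having work to process. If no thread is currently
 * assigned and request space is available, try to hand the transport to
 * an idle thread; otherwise queue it on the group's active list.
 */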
389 void
390 xprt_active(SVCXPRT *xprt)
391 {
392 SVCGROUP *grp = xprt->xp_group;
393
394 mtx_lock(&grp->sg_lock);
395
396 if (!xprt->xp_registered) {
397 /*
398 * Race with xprt_unregister - we lose.
399 */
400 mtx_unlock(&grp->sg_lock);
401 return;
402 }
403
404 if (!xprt->xp_active) {
405 xprt->xp_active = TRUE;
406 if (xprt->xp_thread == NULL) {
407 if (!svc_request_space_available(xprt->xp_pool) ||
408 !xprt_assignthread(xprt))
409 TAILQ_INSERT_TAIL(&grp->sg_active, xprt,
410 xp_alink);
411 }
412 }
413
414 mtx_unlock(&grp->sg_lock);
415 }
416
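/*
 * Mark a transport as no longer having work to process. The _locked
 * variant expects the group lock to be held by the caller.
 */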
417 void
418 xprt_inactive_locked(SVCXPRT *xprt)
419 {
420 SVCGROUP *grp = xprt->xp_group;
421
422 mtx_assert(&grp->sg_lock, MA_OWNED);
423 if (xprt->xp_active) {
424 if (xprt->xp_thread == NULL)
425 TAILQ_REMOVE(&grp->sg_active, xprt, xp_alink);
426 xprt->xp_active = FALSE;
427 }
428 }
429
430 void
431 xprt_inactive(SVCXPRT *xprt)
432 {
433 SVCGROUP *grp = xprt->xp_group;
434
435 mtx_lock(&grp->sg_lock);
436 xprt_inactive_locked(xprt);
437 mtx_unlock(&grp->sg_lock);
438 }
439
440 /*
441 * Variant of xprt_inactive() for use only when we are sure that the port
442 * is assigned to a thread. For example, within receive handlers.
443 */
444 void
445 xprt_inactive_self(SVCXPRT *xprt)
446 {
447
448 KASSERT(xprt->xp_thread != NULL,
449 ("xprt_inactive_self(%p) with NULL xp_thread", xprt));
450 xprt->xp_active = FALSE;
451 }
452
453 /*
454 * Add a service program to the callout list.
455 * The dispatch routine will be called when an RPC request for this
456 * program number comes in.
457 */
458 bool_t
459 svc_reg(SVCXPRT *xprt, const rpcprog_t prog, const rpcvers_t vers,
460 void (*dispatch)(struct svc_req *, SVCXPRT *),
461 const struct netconfig *nconf)
462 {
463 SVCPOOL *pool = xprt->xp_pool;
464 struct svc_callout *s;
465 char *netid = NULL;
466 int flag = 0;
467
468 /* VARIABLES PROTECTED BY svc_lock: s, svc_head */
469
470 if (xprt->xp_netid) {
471 netid = strdup(xprt->xp_netid, M_RPC);
472 flag = 1;
473 } else if (nconf && nconf->nc_netid) {
474 netid = strdup(nconf->nc_netid, M_RPC);
475 flag = 1;
476 } /* must have been created with svc_raw_create */
477 if ((netid == NULL) && (flag == 1)) {
478 return (FALSE);
479 }
480
481 mtx_lock(&pool->sp_lock);
482 if ((s = svc_find(pool, prog, vers, netid)) != NULL) {
483 if (netid)
484 free(netid, M_RPC);
485 if (s->sc_dispatch == dispatch)
486 goto rpcb_it; /* registering another xprt */
487 mtx_unlock(&pool->sp_lock);
488 return (FALSE);
489 }
490 s = malloc(sizeof (struct svc_callout), M_RPC, M_NOWAIT);
491 if (s == NULL) {
492 if (netid)
493 free(netid, M_RPC);
494 mtx_unlock(&pool->sp_lock);
495 return (FALSE);
496 }
497
498 s->sc_prog = prog;
499 s->sc_vers = vers;
500 s->sc_dispatch = dispatch;
501 s->sc_netid = netid;
502 TAILQ_INSERT_TAIL(&pool->sp_callouts, s, sc_link);
503
504 if ((xprt->xp_netid == NULL) && (flag == 1) && netid)
505 ((SVCXPRT *) xprt)->xp_netid = strdup(netid, M_RPC);
506
507 rpcb_it:
508 mtx_unlock(&pool->sp_lock);
509 /* now register the information with the local binder service */
510 if (nconf) {
511 bool_t dummy;
512 struct netconfig tnc;
513 struct netbuf nb;
514 tnc = *nconf;
515 nb.buf = &xprt->xp_ltaddr;
516 nb.len = xprt->xp_ltaddr.ss_len;
517 dummy = rpcb_set(prog, vers, &tnc, &nb);
518 return (dummy);
519 }
520 return (TRUE);
521 }
522
523 /*
524 * Remove a service program from the callout list.
525 */
526 void
527 svc_unreg(SVCPOOL *pool, const rpcprog_t prog, const rpcvers_t vers)
528 {
529 struct svc_callout *s;
530
531 /* unregister the information anyway */
532 (void) rpcb_unset(prog, vers, NULL);
533 mtx_lock(&pool->sp_lock);
534 while ((s = svc_find(pool, prog, vers, NULL)) != NULL) {
535 TAILQ_REMOVE(&pool->sp_callouts, s, sc_link);
536 if (s->sc_netid)
537 mem_free(s->sc_netid, sizeof (s->sc_netid) + 1);
538 mem_free(s, sizeof (struct svc_callout));
539 }
540 mtx_unlock(&pool->sp_lock);
541 }
542
543 /*
544 * Add a service connection loss program to the callout list.
545 * The dispatch routine will be called when some port in this pool dies.
546 */
547 bool_t
548 svc_loss_reg(SVCXPRT *xprt, void (*dispatch)(SVCXPRT *))
549 {
550 SVCPOOL *pool = xprt->xp_pool;
551 struct svc_loss_callout *s;
552
553 mtx_lock(&pool->sp_lock);
554 TAILQ_FOREACH(s, &pool->sp_lcallouts, slc_link) {
555 if (s->slc_dispatch == dispatch)
556 break;
557 }
558 if (s != NULL) {
559 mtx_unlock(&pool->sp_lock);
560 return (TRUE);
561 }
562 s = malloc(sizeof(struct svc_loss_callout), M_RPC, M_NOWAIT);
563 if (s == NULL) {
564 mtx_unlock(&pool->sp_lock);
565 return (FALSE);
566 }
567 s->slc_dispatch = dispatch;
568 TAILQ_INSERT_TAIL(&pool->sp_lcallouts, s, slc_link);
569 mtx_unlock(&pool->sp_lock);
570 return (TRUE);
571 }
572
573 /*
574 * Remove a service connection loss program from the callout list.
575 */
576 void
577 svc_loss_unreg(SVCPOOL *pool, void (*dispatch)(SVCXPRT *))
578 {
579 struct svc_loss_callout *s;
580
581 mtx_lock(&pool->sp_lock);
582 TAILQ_FOREACH(s, &pool->sp_lcallouts, slc_link) {
583 if (s->slc_dispatch == dispatch) {
584 TAILQ_REMOVE(&pool->sp_lcallouts, s, slc_link);
585 free(s, M_RPC);
586 break;
587 }
588 }
589 mtx_unlock(&pool->sp_lock);
590 }
591
592 /* ********************** CALLOUT list related stuff ************* */
593
594 /*
595 * Search the callout list for a program number, return the callout
596 * struct.
597 */
598 static struct svc_callout *
599 svc_find(SVCPOOL *pool, rpcprog_t prog, rpcvers_t vers, char *netid)
600 {
601 struct svc_callout *s;
602
603 mtx_assert(&pool->sp_lock, MA_OWNED);
604 TAILQ_FOREACH(s, &pool->sp_callouts, sc_link) {
605 if (s->sc_prog == prog && s->sc_vers == vers
606 && (netid == NULL || s->sc_netid == NULL ||
607 strcmp(netid, s->sc_netid) == 0))
608 break;
609 }
610
611 return (s);
612 }
613
614 /* ******************* REPLY GENERATION ROUTINES ************ */
615
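/*
 * Common reply path: free any remaining request arguments, record the
 * reply in the replay cache (if the pool has one), wrap the body through
 * the authentication layer and hand it to the transport's reply method.
 */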
616 static bool_t
617 svc_sendreply_common(struct svc_req *rqstp, struct rpc_msg *rply,
618 struct mbuf *body)
619 {
620 SVCXPRT *xprt = rqstp->rq_xprt;
621 bool_t ok;
622
623 if (rqstp->rq_args) {
624 m_freem(rqstp->rq_args);
625 rqstp->rq_args = NULL;
626 }
627
628 if (xprt->xp_pool->sp_rcache)
629 replay_setreply(xprt->xp_pool->sp_rcache,
630 rply, svc_getrpccaller(rqstp), body);
631
632 if (!SVCAUTH_WRAP(&rqstp->rq_auth, &body))
633 return (FALSE);
634
635 ok = SVC_REPLY(xprt, rply, rqstp->rq_addr, body, &rqstp->rq_reply_seq);
636 if (rqstp->rq_addr) {
637 free(rqstp->rq_addr, M_SONAME);
638 rqstp->rq_addr = NULL;
639 }
640
641 return (ok);
642 }
643
644 /*
645 * Send a reply to an rpc request
646 */
647 bool_t
648 svc_sendreply(struct svc_req *rqstp, xdrproc_t xdr_results, void * xdr_location)
649 {
650 struct rpc_msg rply;
651 struct mbuf *m;
652 XDR xdrs;
653 bool_t ok;
654
655 rply.rm_xid = rqstp->rq_xid;
656 rply.rm_direction = REPLY;
657 rply.rm_reply.rp_stat = MSG_ACCEPTED;
658 rply.acpted_rply.ar_verf = rqstp->rq_verf;
659 rply.acpted_rply.ar_stat = SUCCESS;
660 rply.acpted_rply.ar_results.where = NULL;
661 rply.acpted_rply.ar_results.proc = (xdrproc_t) xdr_void;
662
663 MGET(m, M_WAIT, MT_DATA);
664 MCLGET(m, M_WAIT);
665 m->m_len = 0;
666 xdrmbuf_create(&xdrs, m, XDR_ENCODE);
667 ok = xdr_results(&xdrs, xdr_location);
668 XDR_DESTROY(&xdrs);
669
670 if (ok) {
671 return (svc_sendreply_common(rqstp, &rply, m));
672 } else {
673 m_freem(m);
674 return (FALSE);
675 }
676 }
677
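/*
 * Send a reply whose results are already encoded in an mbuf chain.
 */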
678 bool_t
679 svc_sendreply_mbuf(struct svc_req *rqstp, struct mbuf *m)
680 {
681 struct rpc_msg rply;
682
683 rply.rm_xid = rqstp->rq_xid;
684 rply.rm_direction = REPLY;
685 rply.rm_reply.rp_stat = MSG_ACCEPTED;
686 rply.acpted_rply.ar_verf = rqstp->rq_verf;
687 rply.acpted_rply.ar_stat = SUCCESS;
688 rply.acpted_rply.ar_results.where = NULL;
689 rply.acpted_rply.ar_results.proc = (xdrproc_t) xdr_void;
690
691 return (svc_sendreply_common(rqstp, &rply, m));
692 }
693
694 /*
695 * No procedure error reply
696 */
697 void
698 svcerr_noproc(struct svc_req *rqstp)
699 {
700 SVCXPRT *xprt = rqstp->rq_xprt;
701 struct rpc_msg rply;
702
703 rply.rm_xid = rqstp->rq_xid;
704 rply.rm_direction = REPLY;
705 rply.rm_reply.rp_stat = MSG_ACCEPTED;
706 rply.acpted_rply.ar_verf = rqstp->rq_verf;
707 rply.acpted_rply.ar_stat = PROC_UNAVAIL;
708
709 if (xprt->xp_pool->sp_rcache)
710 replay_setreply(xprt->xp_pool->sp_rcache,
711 &rply, svc_getrpccaller(rqstp), NULL);
712
713 svc_sendreply_common(rqstp, &rply, NULL);
714 }
715
716 /*
717 * Can't decode args error reply
718 */
719 void
720 svcerr_decode(struct svc_req *rqstp)
721 {
722 SVCXPRT *xprt = rqstp->rq_xprt;
723 struct rpc_msg rply;
724
725 rply.rm_xid = rqstp->rq_xid;
726 rply.rm_direction = REPLY;
727 rply.rm_reply.rp_stat = MSG_ACCEPTED;
728 rply.acpted_rply.ar_verf = rqstp->rq_verf;
729 rply.acpted_rply.ar_stat = GARBAGE_ARGS;
730
731 if (xprt->xp_pool->sp_rcache)
732 replay_setreply(xprt->xp_pool->sp_rcache,
733 &rply, (struct sockaddr *) &xprt->xp_rtaddr, NULL);
734
735 svc_sendreply_common(rqstp, &rply, NULL);
736 }
737
738 /*
739 * Some system error
740 */
741 void
742 svcerr_systemerr(struct svc_req *rqstp)
743 {
744 SVCXPRT *xprt = rqstp->rq_xprt;
745 struct rpc_msg rply;
746
747 rply.rm_xid = rqstp->rq_xid;
748 rply.rm_direction = REPLY;
749 rply.rm_reply.rp_stat = MSG_ACCEPTED;
750 rply.acpted_rply.ar_verf = rqstp->rq_verf;
751 rply.acpted_rply.ar_stat = SYSTEM_ERR;
752
753 if (xprt->xp_pool->sp_rcache)
754 replay_setreply(xprt->xp_pool->sp_rcache,
755 &rply, svc_getrpccaller(rqstp), NULL);
756
757 svc_sendreply_common(rqstp, &rply, NULL);
758 }
759
760 /*
761 * Authentication error reply
762 */
763 void
764 svcerr_auth(struct svc_req *rqstp, enum auth_stat why)
765 {
766 SVCXPRT *xprt = rqstp->rq_xprt;
767 struct rpc_msg rply;
768
769 rply.rm_xid = rqstp->rq_xid;
770 rply.rm_direction = REPLY;
771 rply.rm_reply.rp_stat = MSG_DENIED;
772 rply.rjcted_rply.rj_stat = AUTH_ERROR;
773 rply.rjcted_rply.rj_why = why;
774
775 if (xprt->xp_pool->sp_rcache)
776 replay_setreply(xprt->xp_pool->sp_rcache,
777 &rply, svc_getrpccaller(rqstp), NULL);
778
779 svc_sendreply_common(rqstp, &rply, NULL);
780 }
781
782 /*
783 * Auth too weak error reply
784 */
785 void
786 svcerr_weakauth(struct svc_req *rqstp)
787 {
788
789 svcerr_auth(rqstp, AUTH_TOOWEAK);
790 }
791
792 /*
793 * Program unavailable error reply
794 */
795 void
796 svcerr_noprog(struct svc_req *rqstp)
797 {
798 SVCXPRT *xprt = rqstp->rq_xprt;
799 struct rpc_msg rply;
800
801 rply.rm_xid = rqstp->rq_xid;
802 rply.rm_direction = REPLY;
803 rply.rm_reply.rp_stat = MSG_ACCEPTED;
804 rply.acpted_rply.ar_verf = rqstp->rq_verf;
805 rply.acpted_rply.ar_stat = PROG_UNAVAIL;
806
807 if (xprt->xp_pool->sp_rcache)
808 replay_setreply(xprt->xp_pool->sp_rcache,
809 &rply, svc_getrpccaller(rqstp), NULL);
810
811 svc_sendreply_common(rqstp, &rply, NULL);
812 }
813
814 /*
815 * Program version mismatch error reply
816 */
817 void
818 svcerr_progvers(struct svc_req *rqstp, rpcvers_t low_vers, rpcvers_t high_vers)
819 {
820 SVCXPRT *xprt = rqstp->rq_xprt;
821 struct rpc_msg rply;
822
823 rply.rm_xid = rqstp->rq_xid;
824 rply.rm_direction = REPLY;
825 rply.rm_reply.rp_stat = MSG_ACCEPTED;
826 rply.acpted_rply.ar_verf = rqstp->rq_verf;
827 rply.acpted_rply.ar_stat = PROG_MISMATCH;
828 rply.acpted_rply.ar_vers.low = (uint32_t)low_vers;
829 rply.acpted_rply.ar_vers.high = (uint32_t)high_vers;
830
831 if (xprt->xp_pool->sp_rcache)
832 replay_setreply(xprt->xp_pool->sp_rcache,
833 &rply, svc_getrpccaller(rqstp), NULL);
834
835 svc_sendreply_common(rqstp, &rply, NULL);
836 }
837
838 /*
839 * Allocate a new server transport structure. All fields are
840 * initialized to zero and xp_p3 is initialized to point at an
841 * extension structure to hold various flags and authentication
842 * parameters.
843 */
844 SVCXPRT *
845 svc_xprt_alloc(void)
846 {
847 SVCXPRT *xprt;
848 SVCXPRT_EXT *ext;
849
850 xprt = mem_alloc(sizeof(SVCXPRT));
851 ext = mem_alloc(sizeof(SVCXPRT_EXT));
852 xprt->xp_p3 = ext;
853 refcount_init(&xprt->xp_refs, 1);
854
855 return (xprt);
856 }
857
858 /*
859 * Free a server transport structure.
860 */
861 void
862 svc_xprt_free(SVCXPRT *xprt)
863 {
864
865 mem_free(xprt->xp_p3, sizeof(SVCXPRT_EXT));
866 mem_free(xprt, sizeof(SVCXPRT));
867 }
868
869 /* ******************* SERVER INPUT STUFF ******************* */
870
871 /*
872 * Read RPC requests from a transport and queue them to be
873 * executed. We handle authentication and replay cache replies here.
874 * Actually dispatching the RPC is deferred till svc_executereq.
875 */
876 static enum xprt_stat
877 svc_getreq(SVCXPRT *xprt, struct svc_req **rqstp_ret)
878 {
879 SVCPOOL *pool = xprt->xp_pool;
880 struct svc_req *r;
881 struct rpc_msg msg;
882 struct mbuf *args;
883 struct svc_loss_callout *s;
884 enum xprt_stat stat;
885
886 /* now receive msgs from xprt (support batch calls) */
887 r = malloc(sizeof(*r), M_RPC, M_WAITOK|M_ZERO);
888
889 msg.rm_call.cb_cred.oa_base = r->rq_credarea;
890 msg.rm_call.cb_verf.oa_base = &r->rq_credarea[MAX_AUTH_BYTES];
891 r->rq_clntcred = &r->rq_credarea[2*MAX_AUTH_BYTES];
892 if (SVC_RECV(xprt, &msg, &r->rq_addr, &args)) {
893 enum auth_stat why;
894
895 /*
896 * Handle replays and authenticate before queuing the
897 * request to be executed.
898 */
899 SVC_ACQUIRE(xprt);
900 r->rq_xprt = xprt;
901 if (pool->sp_rcache) {
902 struct rpc_msg repmsg;
903 struct mbuf *repbody;
904 enum replay_state rs;
905 rs = replay_find(pool->sp_rcache, &msg,
906 svc_getrpccaller(r), &repmsg, &repbody);
907 switch (rs) {
908 case RS_NEW:
909 break;
910 case RS_DONE:
911 SVC_REPLY(xprt, &repmsg, r->rq_addr,
912 repbody, &r->rq_reply_seq);
913 if (r->rq_addr) {
914 free(r->rq_addr, M_SONAME);
915 r->rq_addr = NULL;
916 }
917 m_freem(args);
918 goto call_done;
919
920 default:
921 m_freem(args);
922 goto call_done;
923 }
924 }
925
926 r->rq_xid = msg.rm_xid;
927 r->rq_prog = msg.rm_call.cb_prog;
928 r->rq_vers = msg.rm_call.cb_vers;
929 r->rq_proc = msg.rm_call.cb_proc;
930 r->rq_size = sizeof(*r) + m_length(args, NULL);
931 r->rq_args = args;
932 if ((why = _authenticate(r, &msg)) != AUTH_OK) {
933 /*
934 * RPCSEC_GSS uses this return code
935 * for requests that form part of its
936 * context establishment protocol and
937 * should not be dispatched to the
938 * application.
939 */
940 if (why != RPCSEC_GSS_NODISPATCH)
941 svcerr_auth(r, why);
942 goto call_done;
943 }
944
945 if (!SVCAUTH_UNWRAP(&r->rq_auth, &r->rq_args)) {
946 svcerr_decode(r);
947 goto call_done;
948 }
949
950 /*
951 * Everything checks out, return request to caller.
952 */
953 *rqstp_ret = r;
954 r = NULL;
955 }
956 call_done:
957 if (r) {
958 svc_freereq(r);
959 r = NULL;
960 }
961 if ((stat = SVC_STAT(xprt)) == XPRT_DIED) {
962 TAILQ_FOREACH(s, &pool->sp_lcallouts, slc_link)
963 (*s->slc_dispatch)(xprt);
964 xprt_unregister(xprt);
965 }
966
967 return (stat);
968 }
969
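/*
 * Dispatch a queued request to the callout registered for its program and
 * version. If the program is known but the version is not, reply with
 * PROG_MISMATCH; if the program is unknown, reply with PROG_UNAVAIL.
 */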
970 static void
971 svc_executereq(struct svc_req *rqstp)
972 {
973 SVCXPRT *xprt = rqstp->rq_xprt;
974 SVCPOOL *pool = xprt->xp_pool;
975 int prog_found;
976 rpcvers_t low_vers;
977 rpcvers_t high_vers;
978 struct svc_callout *s;
979
980 /* now match message with a registered service */
981 prog_found = FALSE;
982 low_vers = (rpcvers_t) -1L;
983 high_vers = (rpcvers_t) 0L;
984 TAILQ_FOREACH(s, &pool->sp_callouts, sc_link) {
985 if (s->sc_prog == rqstp->rq_prog) {
986 if (s->sc_vers == rqstp->rq_vers) {
987 /*
988 * We hand ownership of rqstp to the
989 * dispatch method - it must call
990 * svc_freereq().
991 */
992 (*s->sc_dispatch)(rqstp, xprt);
993 return;
994 } /* found correct version */
995 prog_found = TRUE;
996 if (s->sc_vers < low_vers)
997 low_vers = s->sc_vers;
998 if (s->sc_vers > high_vers)
999 high_vers = s->sc_vers;
1000 } /* found correct program */
1001 }
1002
1003 /*
1004 * if we got here, the program or version
1005 * is not served ...
1006 */
1007 if (prog_found)
1008 svcerr_progvers(rqstp, low_vers, high_vers);
1009 else
1010 svcerr_noprog(rqstp);
1011
1012 svc_freereq(rqstp);
1013 }
1014
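/*
 * Unregister and release transports whose idle timeout has expired.
 * Called with the group lock held; the lock is dropped while the
 * transports are released and re-taken before returning.
 */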
1015 static void
1016 svc_checkidle(SVCGROUP *grp)
1017 {
1018 SVCXPRT *xprt, *nxprt;
1019 time_t timo;
1020 struct svcxprt_list cleanup;
1021
1022 TAILQ_INIT(&cleanup);
1023 TAILQ_FOREACH_SAFE(xprt, &grp->sg_xlist, xp_link, nxprt) {
1024 /*
1025 * Only some transports have idle timers. Don't time
1026 * something out which is just waking up.
1027 */
1028 if (!xprt->xp_idletimeout || xprt->xp_thread)
1029 continue;
1030
1031 timo = xprt->xp_lastactive + xprt->xp_idletimeout;
1032 if (time_uptime > timo) {
1033 xprt_unregister_locked(xprt);
1034 TAILQ_INSERT_TAIL(&cleanup, xprt, xp_link);
1035 }
1036 }
1037
1038 mtx_unlock(&grp->sg_lock);
1039 TAILQ_FOREACH_SAFE(xprt, &cleanup, xp_link, nxprt) {
1040 SVC_RELEASE(xprt);
1041 }
1042 mtx_lock(&grp->sg_lock);
1043 }
1044
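/*
 * Hand queued active transports to idle threads until no idle thread can
 * be found. Used when request space becomes available again after
 * throttling.
 */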
1045 static void
1046 svc_assign_waiting_sockets(SVCPOOL *pool)
1047 {
1048 SVCGROUP *grp;
1049 SVCXPRT *xprt;
1050 int g;
1051
1052 for (g = 0; g < pool->sp_groupcount; g++) {
1053 grp = &pool->sp_groups[g];
1054 mtx_lock(&grp->sg_lock);
1055 while ((xprt = TAILQ_FIRST(&grp->sg_active)) != NULL) {
1056 if (xprt_assignthread(xprt))
1057 TAILQ_REMOVE(&grp->sg_active, xprt, xp_alink);
1058 else
1059 break;
1060 }
1061 mtx_unlock(&grp->sg_lock);
1062 }
1063 }
1064
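/*
 * Adjust the count of buffered request space. Crossing the high-water
 * mark turns throttling on; dropping below the low-water mark turns it
 * off again and re-assigns any waiting transports.
 */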
1065 static void
1066 svc_change_space_used(SVCPOOL *pool, int delta)
1067 {
1068 unsigned int value;
1069
1070 value = atomic_fetchadd_int(&pool->sp_space_used, delta) + delta;
1071 if (delta > 0) {
1072 if (value >= pool->sp_space_high && !pool->sp_space_throttled) {
1073 pool->sp_space_throttled = TRUE;
1074 pool->sp_space_throttle_count++;
1075 }
1076 if (value > pool->sp_space_used_highest)
1077 pool->sp_space_used_highest = value;
1078 } else {
1079 if (value < pool->sp_space_low && pool->sp_space_throttled) {
1080 pool->sp_space_throttled = FALSE;
1081 svc_assign_waiting_sockets(pool);
1082 }
1083 }
1084 }
1085
1086 static bool_t
1087 svc_request_space_available(SVCPOOL *pool)
1088 {
1089
1090 if (pool->sp_space_throttled)
1091 return (FALSE);
1092 return (TRUE);
1093 }
1094
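/*
 * Main loop for a service thread: sleep until a transport with pending
 * work is assigned, drain and queue its requests, execute them, and
 * create or retire threads to keep the group within its min/max limits.
 */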
1095 static void
1096 svc_run_internal(SVCGROUP *grp, bool_t ismaster)
1097 {
1098 SVCPOOL *pool = grp->sg_pool;
1099 SVCTHREAD *st, *stpref;
1100 SVCXPRT *xprt;
1101 enum xprt_stat stat;
1102 struct svc_req *rqstp;
1103 size_t sz;
1104 int error;
1105
1106 st = mem_alloc(sizeof(*st));
1107 mtx_init(&st->st_lock, "st_lock", NULL, MTX_DEF);
1108 st->st_pool = pool;
1109 st->st_xprt = NULL;
1110 STAILQ_INIT(&st->st_reqs);
1111 cv_init(&st->st_cond, "rpcsvc");
1112
1113 mtx_lock(&grp->sg_lock);
1114
1115 /*
1116 * If we are a new thread which was spawned to cope with
1117 * increased load, set the state back to SVCPOOL_ACTIVE.
1118 */
1119 if (grp->sg_state == SVCPOOL_THREADSTARTING)
1120 grp->sg_state = SVCPOOL_ACTIVE;
1121
1122 while (grp->sg_state != SVCPOOL_CLOSING) {
1123 /*
1124 * Create new thread if requested.
1125 */
1126 if (grp->sg_state == SVCPOOL_THREADWANTED) {
1127 grp->sg_state = SVCPOOL_THREADSTARTING;
1128 grp->sg_lastcreatetime = time_uptime;
1129 mtx_unlock(&grp->sg_lock);
1130 svc_new_thread(grp);
1131 mtx_lock(&grp->sg_lock);
1132 continue;
1133 }
1134
1135 /*
1136 * Check for idle transports once per second.
1137 */
1138 if (time_uptime > grp->sg_lastidlecheck) {
1139 grp->sg_lastidlecheck = time_uptime;
1140 svc_checkidle(grp);
1141 }
1142
1143 xprt = st->st_xprt;
1144 if (!xprt) {
1145 /*
1146 * Enforce maxthreads count.
1147 */
1148 if (grp->sg_threadcount > grp->sg_maxthreads)
1149 break;
1150
1151 /*
1152 * Before sleeping, see if we can find an
1153 * active transport which isn't being serviced
1154 * by a thread.
1155 */
1156 if (svc_request_space_available(pool) &&
1157 (xprt = TAILQ_FIRST(&grp->sg_active)) != NULL) {
1158 TAILQ_REMOVE(&grp->sg_active, xprt, xp_alink);
1159 SVC_ACQUIRE(xprt);
1160 xprt->xp_thread = st;
1161 st->st_xprt = xprt;
1162 continue;
1163 }
1164
1165 LIST_INSERT_HEAD(&grp->sg_idlethreads, st, st_ilink);
1166 if (ismaster || (!ismaster &&
1167 grp->sg_threadcount > grp->sg_minthreads))
1168 error = cv_timedwait_sig(&st->st_cond,
1169 &grp->sg_lock, 5 * hz);
1170 else
1171 error = cv_wait_sig(&st->st_cond,
1172 &grp->sg_lock);
1173 if (st->st_xprt == NULL)
1174 LIST_REMOVE(st, st_ilink);
1175
1176 /*
1177 * Reduce worker thread count when idle.
1178 */
1179 if (error == EWOULDBLOCK) {
1180 if (!ismaster
1181 && (grp->sg_threadcount
1182 > grp->sg_minthreads)
1183 && !st->st_xprt)
1184 break;
1185 } else if (error) {
1186 mtx_unlock(&grp->sg_lock);
1187 svc_exit(pool);
1188 mtx_lock(&grp->sg_lock);
1189 break;
1190 }
1191 continue;
1192 }
1193 mtx_unlock(&grp->sg_lock);
1194
1195 /*
1196 * Drain the transport socket and queue up any RPCs.
1197 */
1198 xprt->xp_lastactive = time_uptime;
1199 do {
1200 if (!svc_request_space_available(pool))
1201 break;
1202 rqstp = NULL;
1203 stat = svc_getreq(xprt, &rqstp);
1204 if (rqstp) {
1205 svc_change_space_used(pool, rqstp->rq_size);
1206 /*
1207 * See if the application has a preference
1208 * for some other thread.
1209 */
1210 if (pool->sp_assign) {
1211 stpref = pool->sp_assign(st, rqstp);
1212 rqstp->rq_thread = stpref;
1213 STAILQ_INSERT_TAIL(&stpref->st_reqs,
1214 rqstp, rq_link);
1215 mtx_unlock(&stpref->st_lock);
1216 if (stpref != st)
1217 rqstp = NULL;
1218 } else {
1219 rqstp->rq_thread = st;
1220 STAILQ_INSERT_TAIL(&st->st_reqs,
1221 rqstp, rq_link);
1222 }
1223 }
1224 } while (rqstp == NULL && stat == XPRT_MOREREQS
1225 && grp->sg_state != SVCPOOL_CLOSING);
1226
1227 /*
1228 * Move this transport to the end of the active list to
1229 * ensure fairness when multiple transports are active.
1230 * If this was the last queued request, svc_getreq will end
1231 * up calling xprt_inactive to remove from the active list.
1232 */
1233 mtx_lock(&grp->sg_lock);
1234 xprt->xp_thread = NULL;
1235 st->st_xprt = NULL;
1236 if (xprt->xp_active) {
1237 if (!svc_request_space_available(pool) ||
1238 !xprt_assignthread(xprt))
1239 TAILQ_INSERT_TAIL(&grp->sg_active,
1240 xprt, xp_alink);
1241 }
1242 mtx_unlock(&grp->sg_lock);
1243 SVC_RELEASE(xprt);
1244
1245 /*
1246 * Execute what we have queued.
1247 */
1248 sz = 0;
1249 mtx_lock(&st->st_lock);
1250 while ((rqstp = STAILQ_FIRST(&st->st_reqs)) != NULL) {
1251 STAILQ_REMOVE_HEAD(&st->st_reqs, rq_link);
1252 mtx_unlock(&st->st_lock);
1253 sz += rqstp->rq_size;
1254 svc_executereq(rqstp);
1255 mtx_lock(&st->st_lock);
1256 }
1257 mtx_unlock(&st->st_lock);
1258 svc_change_space_used(pool, -sz);
1259 mtx_lock(&grp->sg_lock);
1260 }
1261
1262 if (st->st_xprt) {
1263 xprt = st->st_xprt;
1264 st->st_xprt = NULL;
1265 SVC_RELEASE(xprt);
1266 }
1267 KASSERT(STAILQ_EMPTY(&st->st_reqs), ("stray reqs on exit"));
1268 mtx_destroy(&st->st_lock);
1269 cv_destroy(&st->st_cond);
1270 mem_free(st, sizeof(*st));
1271
1272 grp->sg_threadcount--;
1273 if (!ismaster)
1274 wakeup(grp);
1275 mtx_unlock(&grp->sg_lock);
1276 }
1277
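/*
 * Kernel thread entry point for threads created by svc_new_thread().
 */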
1278 static void
1279 svc_thread_start(void *arg)
1280 {
1281
1282 svc_run_internal((SVCGROUP *) arg, FALSE);
1283 kthread_exit();
1284 }
1285
1286 static void
1287 svc_new_thread(SVCGROUP *grp)
1288 {
1289 SVCPOOL *pool = grp->sg_pool;
1290 struct thread *td;
1291
1292 mtx_lock(&grp->sg_lock);
1293 grp->sg_threadcount++;
1294 mtx_unlock(&grp->sg_lock);
1295 kthread_add(svc_thread_start, grp, pool->sp_proc, &td, 0, 0,
1296 "%s: service", pool->sp_name);
1297 }
1298
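/*
 * Run the pool using the calling thread as the master service thread.
 * The thread groups are sized from the thread limits and CPU count, the
 * minimum number of worker threads is started, and the call returns only
 * after svc_exit() has been called and all service threads have exited.
 */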
1299 void
1300 svc_run(SVCPOOL *pool)
1301 {
1302 int g, i;
1303 struct proc *p;
1304 struct thread *td;
1305 SVCGROUP *grp;
1306
1307 p = curproc;
1308 td = curthread;
1309 snprintf(td->td_name, sizeof(td->td_name),
1310 "%s: master", pool->sp_name);
1311 pool->sp_state = SVCPOOL_ACTIVE;
1312 pool->sp_proc = p;
1313
1314 /* Choose group count based on number of threads and CPUs. */
1315 pool->sp_groupcount = max(1, min(SVC_MAXGROUPS,
1316 min(pool->sp_maxthreads / 2, mp_ncpus) / 6));
1317 for (g = 0; g < pool->sp_groupcount; g++) {
1318 grp = &pool->sp_groups[g];
1319 grp->sg_minthreads = max(1,
1320 pool->sp_minthreads / pool->sp_groupcount);
1321 grp->sg_maxthreads = max(1,
1322 pool->sp_maxthreads / pool->sp_groupcount);
1323 grp->sg_lastcreatetime = time_uptime;
1324 }
1325
1326 /* Starting threads */
1327 pool->sp_groups[0].sg_threadcount++;
1328 for (g = 0; g < pool->sp_groupcount; g++) {
1329 grp = &pool->sp_groups[g];
1330 for (i = ((g == 0) ? 1 : 0); i < grp->sg_minthreads; i++)
1331 svc_new_thread(grp);
1332 }
1333 svc_run_internal(&pool->sp_groups[0], TRUE);
1334
1335 /* Waiting for threads to stop. */
1336 for (g = 0; g < pool->sp_groupcount; g++) {
1337 grp = &pool->sp_groups[g];
1338 mtx_lock(&grp->sg_lock);
1339 while (grp->sg_threadcount > 0)
1340 msleep(grp, &grp->sg_lock, 0, "svcexit", 0);
1341 mtx_unlock(&grp->sg_lock);
1342 }
1343 }
1344
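/*
 * Request an orderly shutdown of the pool: mark every group as closing
 * and wake any idle threads so they can exit.
 */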
1345 void
1346 svc_exit(SVCPOOL *pool)
1347 {
1348 SVCGROUP *grp;
1349 SVCTHREAD *st;
1350 int g;
1351
1352 pool->sp_state = SVCPOOL_CLOSING;
1353 for (g = 0; g < pool->sp_groupcount; g++) {
1354 grp = &pool->sp_groups[g];
1355 mtx_lock(&grp->sg_lock);
1356 if (grp->sg_state != SVCPOOL_CLOSING) {
1357 grp->sg_state = SVCPOOL_CLOSING;
1358 LIST_FOREACH(st, &grp->sg_idlethreads, st_ilink)
1359 cv_signal(&st->st_cond);
1360 }
1361 mtx_unlock(&grp->sg_lock);
1362 }
1363 }
1364
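/*
 * Decode the request arguments with the supplied XDR routine. Ownership
 * of the argument mbuf chain is handed to the XDR stream.
 */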
1365 bool_t
1366 svc_getargs(struct svc_req *rqstp, xdrproc_t xargs, void *args)
1367 {
1368 struct mbuf *m;
1369 XDR xdrs;
1370 bool_t stat;
1371
1372 m = rqstp->rq_args;
1373 rqstp->rq_args = NULL;
1374
1375 xdrmbuf_create(&xdrs, m, XDR_DECODE);
1376 stat = xargs(&xdrs, args);
1377 XDR_DESTROY(&xdrs);
1378
1379 return (stat);
1380 }
1381
1382 bool_t
1383 svc_freeargs(struct svc_req *rqstp, xdrproc_t xargs, void *args)
1384 {
1385 XDR xdrs;
1386
1387 if (rqstp->rq_addr) {
1388 free(rqstp->rq_addr, M_SONAME);
1389 rqstp->rq_addr = NULL;
1390 }
1391
1392 xdrs.x_op = XDR_FREE;
1393 return (xargs(&xdrs, args));
1394 }
1395
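/*
 * Release all resources held by a request: notify the pool's done hook,
 * release the auth handle and transport reference, and free any remaining
 * address, arguments and the request itself.
 */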
1396 void
1397 svc_freereq(struct svc_req *rqstp)
1398 {
1399 SVCTHREAD *st;
1400 SVCPOOL *pool;
1401
1402 st = rqstp->rq_thread;
1403 if (st) {
1404 pool = st->st_pool;
1405 if (pool->sp_done)
1406 pool->sp_done(st, rqstp);
1407 }
1408
1409 if (rqstp->rq_auth.svc_ah_ops)
1410 SVCAUTH_RELEASE(&rqstp->rq_auth);
1411
1412 if (rqstp->rq_xprt) {
1413 SVC_RELEASE(rqstp->rq_xprt);
1414 }
1415
1416 if (rqstp->rq_addr)
1417 free(rqstp->rq_addr, M_SONAME);
1418
1419 if (rqstp->rq_args)
1420 m_freem(rqstp->rq_args);
1421
1422 free(rqstp, M_RPC);
1423 }