FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_sa.c
/*	$NetBSD: kern_sa.c,v 1.60.2.3 2005/10/28 20:07:02 jmc Exp $	*/

/*-
 * Copyright (c) 2001, 2004, 2005 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_sa.c,v 1.60.2.3 2005/10/28 20:07:02 jmc Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/types.h>
#include <sys/ucontext.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

static struct sadata_vp *sa_newsavp(struct sadata *);
static __inline int sa_stackused(struct sastack *, struct sadata *);
static __inline void sa_setstackfree(struct sastack *, struct sadata *);
static struct sastack *sa_getstack(struct sadata *);
static __inline struct sastack *sa_getstack0(struct sadata *);
static __inline int sast_compare(struct sastack *, struct sastack *);
#ifdef MULTIPROCESSOR
static int sa_increaseconcurrency(struct lwp *, int);
#endif
static void sa_setwoken(struct lwp *);
static void sa_switchcall(void *);
static int sa_newcachelwp(struct lwp *);
static __inline void sa_makeupcalls(struct lwp *);
static struct lwp *sa_vp_repossess(struct lwp *l);

static __inline int sa_pagefault(struct lwp *, ucontext_t *);

static void sa_upcall0(struct sadata_upcall *, int, struct lwp *, struct lwp *,
    size_t, void *, void (*)(void *));
static void sa_upcall_getstate(union sau_state *, struct lwp *);

MALLOC_DEFINE(M_SA, "sa", "Scheduler activations");

#define SA_DEBUG

#ifdef SA_DEBUG
#define DPRINTF(x)	do { if (sadebug) printf_nolog x; } while (0)
#define DPRINTFN(n,x)	do { if (sadebug & (1<<(n-1))) printf_nolog x; } while (0)
int	sadebug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif
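
/*
 * Debug output is gated by the sadebug bitmask: DPRINTFN(n, ...)
 * prints only when bit (n - 1) of sadebug is set, while DPRINTF()
 * fires whenever sadebug is nonzero.  For example (values are
 * illustrative), setting sadebug = 1 << 8 from ddb enables just the
 * DPRINTFN(9, ...) messages, and sadebug = ~0 enables every level.
 */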


#define SA_LWP_STATE_LOCK(l, f)	do {	\
	(f) = (l)->l_flag;		\
	(l)->l_flag &= ~L_SA;		\
} while (/*CONSTCOND*/ 0)

#define SA_LWP_STATE_UNLOCK(l, f) do {	\
	(l)->l_flag |= (f) & L_SA;	\
} while (/*CONSTCOND*/ 0)
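
/*
 * These macros temporarily take an LWP out of SA mode: L_SA is saved
 * and cleared by the "lock" and restored by the "unlock", so sleeps
 * inside the bracketed region do not generate BLOCKED/UNBLOCKED
 * upcalls.  A minimal usage sketch, mirroring the pattern used
 * throughout this file:
 *
 *	int f, error;
 *
 *	SA_LWP_STATE_LOCK(l, f);
 *	error = copyin(uaddr, &local, sizeof(local));	(may sleep)
 *	SA_LWP_STATE_UNLOCK(l, f);
 */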

SPLAY_PROTOTYPE(sasttree, sastack, sast_node, sast_compare);
SPLAY_GENERATE(sasttree, sastack, sast_node, sast_compare);


/*
 * sadata_upcall_alloc:
 *
 *	Allocate an sadata_upcall structure.
 */
struct sadata_upcall *
sadata_upcall_alloc(int waitok)
{
	struct sadata_upcall *sau;

	sau = pool_get(&saupcall_pool, waitok ? PR_WAITOK : PR_NOWAIT);
	if (sau) {
		sau->sau_arg = NULL;
	}
	return sau;
}

/*
 * sadata_upcall_free:
 *
 *	Free an sadata_upcall structure and any associated argument data.
 */
void
sadata_upcall_free(struct sadata_upcall *sau)
{

	if (sau == NULL) {
		return;
	}
	if (sau->sau_arg) {
		(*sau->sau_argfreefunc)(sau->sau_arg);
	}
	pool_put(&saupcall_pool, sau);
}
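
/*
 * Allocation and free are symmetric, and the free side also releases
 * any attached argument block through the caller-supplied destructor.
 * A sketch of the non-blocking pattern (illustrative only):
 *
 *	struct sadata_upcall *sau;
 *
 *	sau = sadata_upcall_alloc(0);	PR_NOWAIT: may return NULL
 *	if (sau == NULL)
 *		return (ENOMEM);
 *	...
 *	sadata_upcall_free(sau);	safe whether or not sau_arg is set
 */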

static struct sadata_vp *
sa_newsavp(struct sadata *sa)
{
	struct sadata_vp *vp, *qvp;

	/* Allocate virtual processor data structure */
	vp = pool_get(&savp_pool, PR_WAITOK);
	/* Initialize. */
	memset(vp, 0, sizeof(*vp));
	simple_lock_init(&vp->savp_lock);
	vp->savp_lwp = NULL;
	vp->savp_wokenq_head = NULL;
	vp->savp_faultaddr = 0;
	vp->savp_ofaultaddr = 0;
	LIST_INIT(&vp->savp_lwpcache);
	vp->savp_ncached = 0;
	SIMPLEQ_INIT(&vp->savp_upcalls);

	simple_lock(&sa->sa_lock);
	/* find first free savp_id and add vp to sorted slist */
	if (SLIST_EMPTY(&sa->sa_vps) ||
	    SLIST_FIRST(&sa->sa_vps)->savp_id != 0) {
		vp->savp_id = 0;
		SLIST_INSERT_HEAD(&sa->sa_vps, vp, savp_next);
	} else {
		SLIST_FOREACH(qvp, &sa->sa_vps, savp_next) {
			if (SLIST_NEXT(qvp, savp_next) == NULL ||
			    SLIST_NEXT(qvp, savp_next)->savp_id !=
			    qvp->savp_id + 1)
				break;
		}
		vp->savp_id = qvp->savp_id + 1;
		SLIST_INSERT_AFTER(qvp, vp, savp_next);
	}
	simple_unlock(&sa->sa_lock);

	return (vp);
}
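
/*
 * The insertion above keeps the VP list sorted by savp_id and reuses
 * the first gap.  A worked example: with existing IDs {0, 1, 3}, the
 * walk stops at the VP with ID 1 (its successor has ID 3, not 2), so
 * the new VP gets ID 2 and is inserted between 1 and 3.
 */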

int
sys_sa_register(struct lwp *l, void *v, register_t *retval)
{
	struct sys_sa_register_args /* {
		syscallarg(sa_upcall_t) new;
		syscallarg(sa_upcall_t *) old;
		syscallarg(int) flags;
		syscallarg(ssize_t) stackinfo_offset;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct sadata *sa;
	sa_upcall_t prev;
	int error;

	if (p->p_sa == NULL) {
		/* Allocate scheduler activations data structure */
		sa = pool_get(&sadata_pool, PR_WAITOK);
		/* Initialize. */
		memset(sa, 0, sizeof(*sa));
		simple_lock_init(&sa->sa_lock);
		sa->sa_flag = SCARG(uap, flags) & SA_FLAG_ALL;
		sa->sa_maxconcurrency = 1;
		sa->sa_concurrency = 1;
		SPLAY_INIT(&sa->sa_stackstree);
		sa->sa_stacknext = NULL;
		if (SCARG(uap, flags) & SA_FLAG_STACKINFO)
			sa->sa_stackinfo_offset = SCARG(uap, stackinfo_offset);
		else
			sa->sa_stackinfo_offset = 0;
		sa->sa_nstacks = 0;
		SLIST_INIT(&sa->sa_vps);
		p->p_sa = sa;
		KASSERT(l->l_savp == NULL);
	}
	if (l->l_savp == NULL) {
		l->l_savp = sa_newsavp(p->p_sa);
		sa_newcachelwp(l);
	}

	prev = p->p_sa->sa_upcall;
	p->p_sa->sa_upcall = SCARG(uap, new);
	if (SCARG(uap, old)) {
		error = copyout(&prev, SCARG(uap, old),
		    sizeof(prev));
		if (error)
			return (error);
	}

	return (0);
}
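
/*
 * From userland this is reached through the sa_register(2) stub.  A
 * hedged sketch of a thread library's call (the wrapper prototype and
 * the my_upcall/my_thread names are illustrative assumptions; the
 * argument order mirrors the syscallarg block above):
 *
 *	extern void my_upcall(int type, struct sa_t *sas[], int events,
 *	    int interrupted, void *arg);
 *	sa_upcall_t old;
 *
 *	if (sa_register(my_upcall, &old, SA_FLAG_STACKINFO,
 *	    offsetof(struct my_thread, mt_stackinfo)) != 0)
 *		err(1, "sa_register");
 */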

void
sa_release(struct proc *p)
{
	struct sadata *sa;
	struct sastack *sast, *next;
	struct sadata_vp *vp;
	struct lwp *l;

	sa = p->p_sa;
	KDASSERT(sa != NULL);
	KASSERT(p->p_nlwps <= 1);

	for (sast = SPLAY_MIN(sasttree, &sa->sa_stackstree); sast != NULL;
	     sast = next) {
		next = SPLAY_NEXT(sasttree, &sa->sa_stackstree, sast);
		SPLAY_REMOVE(sasttree, &sa->sa_stackstree, sast);
		pool_put(&sastack_pool, sast);
	}

	p->p_flag &= ~P_SA;
	while ((vp = SLIST_FIRST(&p->p_sa->sa_vps)) != NULL) {
		SLIST_REMOVE_HEAD(&p->p_sa->sa_vps, savp_next);
		pool_put(&savp_pool, vp);
	}
	pool_put(&sadata_pool, sa);
	p->p_sa = NULL;
	l = LIST_FIRST(&p->p_lwps);
	if (l) {
		KASSERT(LIST_NEXT(l, l_sibling) == NULL);
		l->l_savp = NULL;
	}
}


static __inline int
sa_stackused(struct sastack *sast, struct sadata *sa)
{
	unsigned int gen;

	if (copyin((void *)&((struct sa_stackinfo_t *)
	    ((char *)sast->sast_stack.ss_sp +
	    sa->sa_stackinfo_offset))->sasi_stackgen,
	    &gen, sizeof(unsigned int)) != 0) {
#ifdef DIAGNOSTIC
		printf("sa_stackused: couldn't copyin sasi_stackgen\n");
#endif
		sigexit(curlwp, SIGILL);
		/* NOTREACHED */
	}
	return (sast->sast_gen != gen);
}

static __inline void
sa_setstackfree(struct sastack *sast, struct sadata *sa)
{

	if (copyin((void *)&((struct sa_stackinfo_t *)
	    ((char *)sast->sast_stack.ss_sp +
	    sa->sa_stackinfo_offset))->sasi_stackgen,
	    &sast->sast_gen, sizeof(unsigned int)) != 0) {
#ifdef DIAGNOSTIC
		printf("sa_setstackfree: couldn't copyin sasi_stackgen\n");
#endif
		sigexit(curlwp, SIGILL);
		/* NOTREACHED */
	}
}
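
/*
 * The kernel and userland share a generation counter per upcall
 * stack: sasi_stackgen, found sa_stackinfo_offset bytes past the
 * stack base.  sa_getstack() bumps the kernel's cached copy
 * (sast_gen) when it hands a stack out, making the two differ
 * ("used"); userland increments its copy when it is finished with
 * the stack, making them equal again ("free").  A hedged sketch of
 * the userland side:
 *
 *	struct sa_stackinfo_t *si = (struct sa_stackinfo_t *)
 *	    ((char *)stack.ss_sp + stackinfo_offset);
 *
 *	si->sasi_stackgen++;	marks the stack free for reuse
 */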

/*
 * Find next free stack, starting at sa->sa_stacknext.
 */
static struct sastack *
sa_getstack(struct sadata *sa)
{
	struct sastack *sast;

	SCHED_ASSERT_UNLOCKED();

	if ((sast = sa->sa_stacknext) == NULL || sa_stackused(sast, sa))
		sast = sa_getstack0(sa);

	if (sast == NULL)
		return NULL;

	sast->sast_gen++;

	return sast;
}

static __inline struct sastack *
sa_getstack0(struct sadata *sa)
{
	struct sastack *start;

	if (sa->sa_stacknext == NULL) {
		sa->sa_stacknext = SPLAY_MIN(sasttree, &sa->sa_stackstree);
		if (sa->sa_stacknext == NULL)
			return NULL;
	}
	start = sa->sa_stacknext;

	while (sa_stackused(sa->sa_stacknext, sa)) {
		sa->sa_stacknext = SPLAY_NEXT(sasttree, &sa->sa_stackstree,
		    sa->sa_stacknext);
		if (sa->sa_stacknext == NULL)
			sa->sa_stacknext = SPLAY_MIN(sasttree,
			    &sa->sa_stackstree);
		if (sa->sa_stacknext == start)
			return NULL;
	}
	return sa->sa_stacknext;
}

static __inline int
sast_compare(struct sastack *a, struct sastack *b)
{
	if ((vaddr_t)a->sast_stack.ss_sp + a->sast_stack.ss_size <=
	    (vaddr_t)b->sast_stack.ss_sp)
		return (-1);
	if ((vaddr_t)a->sast_stack.ss_sp >=
	    (vaddr_t)b->sast_stack.ss_sp + b->sast_stack.ss_size)
		return (1);
	return (0);
}
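
/*
 * Note that sast_compare() orders stacks by address range and reports
 * any overlap as equality.  That is what makes the single-byte probe
 * in sa_pagefault() work: a key with ss_sp set to the faulting stack
 * pointer and ss_size = 1 compares equal to whichever registered
 * stack contains that address, so SPLAY_FIND() doubles as a
 * containment lookup.
 */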

int
sys_sa_stacks(struct lwp *l, void *v, register_t *retval)
{
	struct sys_sa_stacks_args /* {
		syscallarg(int) num;
		syscallarg(stack_t *) stacks;
	} */ *uap = v;
	struct sadata *sa = l->l_proc->p_sa;
	struct sastack *sast, newsast;
	int count, error, f, i;

	/* We have to be using scheduler activations */
	if (sa == NULL)
		return (EINVAL);

	count = SCARG(uap, num);
	if (count < 0)
		return (EINVAL);

	SA_LWP_STATE_LOCK(l, f);

	error = 0;

	for (i = 0; i < count; i++) {
		error = copyin(SCARG(uap, stacks) + i, &newsast.sast_stack,
		    sizeof(stack_t));
		if (error) {
			count = i;
			break;
		}
		if ((sast = SPLAY_FIND(sasttree, &sa->sa_stackstree, &newsast))) {
			DPRINTFN(9, ("sa_stacks(%d.%d) returning stack %p\n",
			    l->l_proc->p_pid, l->l_lid,
			    newsast.sast_stack.ss_sp));
			if (sa_stackused(sast, sa) == 0) {
				count = i;
				error = EEXIST;
				break;
			}
		} else if (sa->sa_nstacks >= SA_MAXNUMSTACKS * sa->sa_concurrency) {
			DPRINTFN(9, ("sa_stacks(%d.%d) already using %d stacks\n",
			    l->l_proc->p_pid, l->l_lid,
			    SA_MAXNUMSTACKS * sa->sa_concurrency));
			count = i;
			error = ENOMEM;
			break;
		} else {
			DPRINTFN(9, ("sa_stacks(%d.%d) adding stack %p\n",
			    l->l_proc->p_pid, l->l_lid,
			    newsast.sast_stack.ss_sp));
			sast = pool_get(&sastack_pool, PR_WAITOK);
			sast->sast_stack = newsast.sast_stack;
			SPLAY_INSERT(sasttree, &sa->sa_stackstree, sast);
			sa->sa_nstacks++;
		}
		sa_setstackfree(sast, sa);
	}

	SA_LWP_STATE_UNLOCK(l, f);

	*retval = count;
	return (error);
}
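
/*
 * Userland hands upcall stacks to the kernel in batches through the
 * sa_stacks(2) stub.  A hedged sketch of the calling side (STACKS and
 * STACKSIZE are illustrative; allocation policy is the thread
 * library's business):
 *
 *	stack_t stacks[STACKS];
 *	int i, n;
 *
 *	for (i = 0; i < STACKS; i++) {
 *		stacks[i].ss_sp = malloc(STACKSIZE);
 *		stacks[i].ss_size = STACKSIZE;
 *		stacks[i].ss_flags = 0;
 *	}
 *	n = sa_stacks(STACKS, stacks);	number accepted, per *retval
 */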


int
sys_sa_enable(struct lwp *l, void *v, register_t *retval)
{
	struct proc *p = l->l_proc;
	struct sadata *sa = p->p_sa;
	struct sadata_vp *vp = l->l_savp;
	int error;

	DPRINTF(("sys_sa_enable(%d.%d)\n", l->l_proc->p_pid,
	    l->l_lid));

	/* We have to be using scheduler activations */
	if (sa == NULL || vp == NULL)
		return (EINVAL);

	if (p->p_flag & P_SA) /* Already running! */
		return (EBUSY);

	error = sa_upcall(l, SA_UPCALL_NEWPROC, l, NULL, 0, NULL, NULL);
	if (error)
		return (error);

	/* Assign this LWP to the virtual processor */
	vp->savp_lwp = l;

	p->p_flag |= P_SA;
	l->l_flag |= L_SA; /* We are now an activation LWP */

	/* This will not return to the place in user space it came from. */
	return (0);
}
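
/*
 * Bootstrap order expected from a thread library, sketched under the
 * assumption (consistent with sa_upcall() returning ENOMEM when no
 * stack is available) that stacks must be registered before enabling:
 *
 *	1. sa_register(my_upcall, &old, flags, stackinfo_offset);
 *	2. sa_stacks(n, stacks);
 *	3. sa_enable();  does not return to its call site; control next
 *	   appears in the upcall handler with a NEWPROC event.
 */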


#ifdef MULTIPROCESSOR
static int
sa_increaseconcurrency(struct lwp *l, int concurrency)
{
	struct proc *p;
	struct lwp *l2;
	struct sadata *sa;
	vaddr_t uaddr;
	boolean_t inmem;
	int addedconcurrency, error, s;

	p = l->l_proc;
	sa = p->p_sa;

	addedconcurrency = 0;
	simple_lock(&sa->sa_lock);
	while (sa->sa_maxconcurrency < concurrency) {
		sa->sa_maxconcurrency++;
		sa->sa_concurrency++;
		simple_unlock(&sa->sa_lock);

		inmem = uvm_uarea_alloc(&uaddr);
		if (__predict_false(uaddr == 0)) {
			/* reset concurrency */
			simple_lock(&sa->sa_lock);
			sa->sa_maxconcurrency--;
			sa->sa_concurrency--;
			simple_unlock(&sa->sa_lock);
			return (addedconcurrency);
		} else {
			newlwp(l, p, uaddr, inmem, 0, NULL, 0,
			    child_return, 0, &l2);
			l2->l_flag |= L_SA;
			l2->l_savp = sa_newsavp(sa);
			if (l2->l_savp) {
				l2->l_savp->savp_lwp = l2;
				cpu_setfunc(l2, sa_switchcall, NULL);
				error = sa_upcall(l2, SA_UPCALL_NEWPROC,
				    NULL, NULL, 0, NULL, NULL);
				if (error) {
					/* free new savp */
					SLIST_REMOVE(&sa->sa_vps, l2->l_savp,
					    sadata_vp, savp_next);
					pool_put(&savp_pool, l2->l_savp);
				}
			} else
				error = 1;
			if (error) {
				/* put l2 into l's LWP cache */
				l2->l_savp = l->l_savp;
				PHOLD(l2);
				SCHED_LOCK(s);
				sa_putcachelwp(p, l2);
				SCHED_UNLOCK(s);
				/* reset concurrency */
				simple_lock(&sa->sa_lock);
				sa->sa_maxconcurrency--;
				sa->sa_concurrency--;
				simple_unlock(&sa->sa_lock);
				return (addedconcurrency);
			}
			SCHED_LOCK(s);
			setrunnable(l2);
			SCHED_UNLOCK(s);
			addedconcurrency++;
		}
		simple_lock(&sa->sa_lock);
	}
	simple_unlock(&sa->sa_lock);

	return (addedconcurrency);
}
#endif

int
sys_sa_setconcurrency(struct lwp *l, void *v, register_t *retval)
{
	struct sys_sa_setconcurrency_args /* {
		syscallarg(int) concurrency;
	} */ *uap = v;
	struct sadata *sa = l->l_proc->p_sa;
#ifdef MULTIPROCESSOR
	struct sadata_vp *vp = l->l_savp;
	int ncpus, s;
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;
#endif

	DPRINTFN(11,("sys_sa_setconcurrency(%d.%d)\n", l->l_proc->p_pid,
	    l->l_lid));

	/* We have to be using scheduler activations */
	if (sa == NULL)
		return (EINVAL);

	if ((l->l_proc->p_flag & P_SA) == 0)
		return (EINVAL);

	if (SCARG(uap, concurrency) < 1)
		return (EINVAL);

	*retval = 0;
	/*
	 * Concurrency greater than the number of physical CPUs does
	 * not make sense.
	 * XXX Should we ever support hot-plug CPUs, this will need
	 * adjustment.
	 */
#ifdef MULTIPROCESSOR
	if (SCARG(uap, concurrency) > sa->sa_maxconcurrency) {
		ncpus = 0;
		for (CPU_INFO_FOREACH(cii, ci))
			ncpus++;
		*retval += sa_increaseconcurrency(l,
		    min(SCARG(uap, concurrency), ncpus));
	}
#endif

	DPRINTFN(11,("sys_sa_setconcurrency(%d.%d) want %d, have %d, max %d\n",
	    l->l_proc->p_pid, l->l_lid, SCARG(uap, concurrency),
	    sa->sa_concurrency, sa->sa_maxconcurrency));
#ifdef MULTIPROCESSOR
	if (SCARG(uap, concurrency) > sa->sa_concurrency) {
		SCHED_LOCK(s);
		SLIST_FOREACH(vp, &sa->sa_vps, savp_next) {
			if (vp->savp_lwp->l_flag & L_SA_IDLE) {
				vp->savp_lwp->l_flag &=
				    ~(L_SA_IDLE|L_SA_YIELD|L_SINTR);
				SCHED_UNLOCK(s);
				DPRINTFN(11,("sys_sa_setconcurrency(%d.%d) "
				    "NEWPROC vp %d\n",
				    l->l_proc->p_pid, l->l_lid,
				    vp->savp_id));
				cpu_setfunc(vp->savp_lwp, sa_switchcall, NULL);
				/* error = */ sa_upcall(vp->savp_lwp,
				    SA_UPCALL_NEWPROC,
				    NULL, NULL, 0, NULL, NULL);
				SCHED_LOCK(s);
				sa->sa_concurrency++;
				setrunnable(vp->savp_lwp);
				KDASSERT((vp->savp_lwp->l_flag & L_SINTR) == 0);
				(*retval)++;
			}
			if (sa->sa_concurrency == SCARG(uap, concurrency))
				break;
		}
		SCHED_UNLOCK(s);
	}
#endif

	return (0);
}
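
/*
 * A typical caller asks for one virtual processor per CPU; requests
 * beyond the CPU count are clamped above.  A hedged userland sketch
 * (the sysctl name is the usual NetBSD spelling, used here only for
 * illustration):
 *
 *	int ncpu;
 *	size_t len = sizeof(ncpu);
 *
 *	if (sysctlbyname("hw.ncpu", &ncpu, &len, NULL, 0) == 0)
 *		sa_setconcurrency(ncpu);
 */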

int
sys_sa_yield(struct lwp *l, void *v, register_t *retval)
{
	struct proc *p = l->l_proc;

	if (p->p_sa == NULL || !(p->p_flag & P_SA)) {
		DPRINTFN(1,("sys_sa_yield(%d.%d) proc %p not SA (p_sa %p, flag %s)\n",
		    p->p_pid, l->l_lid, p, p->p_sa, p->p_flag & P_SA ? "T" : "F"));
		return (EINVAL);
	}

	sa_yield(l);

	return (EJUSTRETURN);
}

void
sa_yield(struct lwp *l)
{
	struct proc *p = l->l_proc;
	struct sadata *sa = p->p_sa;
	struct sadata_vp *vp = l->l_savp;
	int ret;

	KERNEL_LOCK_ASSERT_LOCKED();

	if (vp->savp_lwp != l) {
		/*
		 * We lost the VP on our way here; this happens, for
		 * instance, when we sleep in systrace.  It will end
		 * in an SA_UPCALL_UNBLOCKED upcall via sa_setwoken().
		 */
		DPRINTFN(1,("sa_yield(%d.%d) lost VP\n",
		    p->p_pid, l->l_lid));
		KDASSERT(l->l_flag & L_SA_BLOCKING);
		return;
	}

	/*
	 * If we're the last running LWP, stick around to receive
	 * signals.
	 */
	KDASSERT((l->l_flag & L_SA_YIELD) == 0);
	DPRINTFN(1,("sa_yield(%d.%d) going dormant\n",
	    p->p_pid, l->l_lid));
	/*
	 * A signal will probably wake us up.  Worst case, the upcall
	 * happens and just causes the process to yield again.
	 */
	/* s = splsched(); */	/* Protect from timer expirations */
	KDASSERT(vp->savp_lwp == l);
	/*
	 * If we were told to make an upcall or exit before the
	 * splsched(), make sure we process it instead of going to
	 * sleep.  It might make more sense for this to be handled
	 * inside of tsleep().
	 */
	ret = 0;
	l->l_flag |= L_SA_YIELD;
	if (l->l_flag & L_SA_UPCALL) {
		/* KERNEL_PROC_UNLOCK(l); in upcallret() */
		upcallret(l);
		KERNEL_PROC_LOCK(l);
	}
	while (l->l_flag & L_SA_YIELD) {
		DPRINTFN(1,("sa_yield(%d.%d) really going dormant\n",
		    p->p_pid, l->l_lid));

		simple_lock(&sa->sa_lock);
		sa->sa_concurrency--;
		simple_unlock(&sa->sa_lock);

		ret = tsleep((caddr_t) l, PUSER | PCATCH, "sawait", 0);

		simple_lock(&sa->sa_lock);
		sa->sa_concurrency++;
		simple_unlock(&sa->sa_lock);

		KDASSERT(vp->savp_lwp == l || p->p_flag & P_WEXIT);

		/* KERNEL_PROC_UNLOCK(l); in upcallret() */
		upcallret(l);
		KERNEL_PROC_LOCK(l);
	}
	/* splx(s); */
	DPRINTFN(1,("sa_yield(%d.%d) returned, ret %d, userret %p\n",
	    p->p_pid, l->l_lid, ret, p->p_userret));
}


int
sys_sa_preempt(struct lwp *l, void *v, register_t *retval)
{

	/* XXX Implement me. */
	return (ENOSYS);
}


/* XXX Hm, naming collision. */
void
sa_preempt(struct lwp *l)
{
	struct proc *p = l->l_proc;
	struct sadata *sa = p->p_sa;

	/*
	 * Defer saving the lwp's state because on some ports
	 * preemption can occur between generating an unblocked upcall
	 * and processing the upcall queue.
	 */
	if (sa->sa_flag & SA_FLAG_PREEMPT)
		sa_upcall(l, SA_UPCALL_PREEMPTED | SA_UPCALL_DEFER_EVENT,
		    l, NULL, 0, NULL, NULL);
}


/*
 * Set up the user-level stack and trapframe to do an upcall.
 *
 * NOTE: This routine WILL FREE "arg" in the case of failure!  Callers
 * should not touch the "arg" pointer once calling sa_upcall().
 */
int
sa_upcall(struct lwp *l, int type, struct lwp *event, struct lwp *interrupted,
	size_t argsize, void *arg, void (*func)(void *))
{
	struct sadata_upcall *sau;
	struct sadata *sa = l->l_proc->p_sa;
	struct sadata_vp *vp = l->l_savp;
	struct sastack *sast;
	int f;

	/* XXX prevent recursive upcalls if we sleep for memory */
	SA_LWP_STATE_LOCK(l, f);
	sast = sa_getstack(sa);
	SA_LWP_STATE_UNLOCK(l, f);
	if (sast == NULL) {
		/* Honor the contract stated above: dispose of "arg". */
		if (arg && func)
			(*func)(arg);
		return (ENOMEM);
	}
	DPRINTFN(9,("sa_upcall(%d.%d) using stack %p\n",
	    l->l_proc->p_pid, l->l_lid, sast->sast_stack.ss_sp));

	SA_LWP_STATE_LOCK(l, f);
	sau = sadata_upcall_alloc(1);
	SA_LWP_STATE_UNLOCK(l, f);
	sa_upcall0(sau, type, event, interrupted, argsize, arg, func);
	sau->sau_stack = sast->sast_stack;

	SIMPLEQ_INSERT_TAIL(&vp->savp_upcalls, sau, sau_next);
	l->l_flag |= L_SA_UPCALL;

	return (0);
}

static void
sa_upcall0(struct sadata_upcall *sau, int type, struct lwp *event,
    struct lwp *interrupted, size_t argsize, void *arg, void (*func)(void *))
{

	KDASSERT((event == NULL) || (event != interrupted));

	sau->sau_flags = 0;

	if (type & SA_UPCALL_DEFER_EVENT) {
		sau->sau_event.ss_deferred.ss_lwp = event;
		sau->sau_flags |= SAU_FLAG_DEFERRED_EVENT;
	} else
		sa_upcall_getstate(&sau->sau_event, event);
	if (type & SA_UPCALL_DEFER_INTERRUPTED) {
		sau->sau_interrupted.ss_deferred.ss_lwp = interrupted;
		sau->sau_flags |= SAU_FLAG_DEFERRED_INTERRUPTED;
	} else
		sa_upcall_getstate(&sau->sau_interrupted, interrupted);

	sau->sau_type = type & SA_UPCALL_TYPE_MASK;
	sau->sau_argsize = argsize;
	sau->sau_arg = arg;
	sau->sau_argfreefunc = func;
}


static void
sa_upcall_getstate(union sau_state *ss, struct lwp *l)
{
	caddr_t sp;
	size_t ucsize;

	if (l) {
		l->l_flag |= L_SA_SWITCHING;
		getucontext(l, &ss->ss_captured.ss_ctx);
		l->l_flag &= ~L_SA_SWITCHING;
		sp = (void *)
		    ((intptr_t)_UC_MACHINE_SP(&ss->ss_captured.ss_ctx));
		sp = STACK_ALIGN(sp, ~_UC_UCONTEXT_ALIGN);
		ucsize = roundup(sizeof(ucontext_t), (~_UC_UCONTEXT_ALIGN) + 1);
		ss->ss_captured.ss_sa.sa_context = (ucontext_t *)
		    STACK_ALLOC(sp, ucsize);
		ss->ss_captured.ss_sa.sa_id = l->l_lid;
		ss->ss_captured.ss_sa.sa_cpu = l->l_savp->savp_id;
	} else
		ss->ss_captured.ss_sa.sa_context = NULL;
}
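
/*
 * Worked example of the pointer arithmetic above, assuming a
 * downward-growing stack, user sp = 0x7f0012f7 and _UC_UCONTEXT_ALIGN
 * == ~0xf (16-byte alignment): STACK_ALIGN() rounds sp down to
 * 0x7f0012f0, ucsize becomes sizeof(ucontext_t) rounded up to a
 * multiple of 16, and STACK_ALLOC() reserves that region immediately
 * below the aligned sp.  The captured context is therefore written to
 * the interrupted LWP's own user stack, just under its stack pointer,
 * when sa_makeupcalls() later copies it out.
 */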


/*
 * Detect double pagefaults and pagefaults on upcalls.
 * - double pagefaults are detected by comparing the previous faultaddr
 *   against the current faultaddr
 * - pagefaults on upcalls are detected by checking if the userspace
 *   thread is running on an upcall stack
 */
static __inline int
sa_pagefault(struct lwp *l, ucontext_t *l_ctx)
{
	struct proc *p;
	struct sadata *sa;
	struct sadata_vp *vp;
	struct sastack sast;

	p = l->l_proc;
	sa = p->p_sa;
	vp = l->l_savp;

	KDASSERT(vp->savp_lwp == l);

	if (vp->savp_faultaddr == vp->savp_ofaultaddr) {
		DPRINTFN(10,("sa_pagefault(%d.%d) double page fault\n",
		    p->p_pid, l->l_lid));
		return 1;
	}

	sast.sast_stack.ss_sp = (void *)(intptr_t)_UC_MACHINE_SP(l_ctx);
	sast.sast_stack.ss_size = 1;

	if (SPLAY_FIND(sasttree, &sa->sa_stackstree, &sast)) {
		DPRINTFN(10,("sa_pagefault(%d.%d) upcall page fault\n",
		    p->p_pid, l->l_lid));
		return 1;
	}

	vp->savp_ofaultaddr = vp->savp_faultaddr;
	return 0;
}


/*
 * Called by tsleep().  Block current LWP and switch to another.
 *
 * WE ARE NOT ALLOWED TO SLEEP HERE!  WE ARE CALLED FROM WITHIN
 * TSLEEP() ITSELF!  We are called with sched_lock held, and must
 * hold it right through the mi_switch() call.
 */

void
sa_switch(struct lwp *l, struct sadata_upcall *sau, int type)
{
	struct proc *p = l->l_proc;
	struct sadata_vp *vp = l->l_savp;
	struct lwp *l2;
	struct sadata_upcall *freesau = NULL;
	int s;

	DPRINTFN(4,("sa_switch(%d.%d type %d VP %d)\n", p->p_pid, l->l_lid,
	    type, vp->savp_lwp ? vp->savp_lwp->l_lid : 0));

	SCHED_ASSERT_LOCKED();

	if (p->p_flag & P_WEXIT) {
		mi_switch(l, NULL);
		sadata_upcall_free(sau);
		return;
	}

	if (l->l_flag & L_SA_YIELD) {

		/*
		 * Case 0: we're blocking in sa_yield
		 */
		if (vp->savp_wokenq_head == NULL && p->p_userret == NULL) {
			l->l_flag |= L_SA_IDLE;
			mi_switch(l, NULL);
		} else {
			/* make us running again. */
			unsleep(l);
			l->l_stat = LSONPROC;
			l->l_proc->p_nrlwps++;
			s = splsched();
			SCHED_UNLOCK(s);
		}
		sadata_upcall_free(sau);
		return;
	} else if (vp->savp_lwp == l) {
		/*
		 * Case 1: we're blocking for the first time; generate
		 * a SA_BLOCKED upcall and allocate resources for the
		 * UNBLOCKED upcall.
		 */

		if (sau == NULL) {
#ifdef DIAGNOSTIC
			printf("sa_switch(%d.%d): no upcall data.\n",
			    p->p_pid, l->l_lid);
#endif
			mi_switch(l, NULL);
			return;
		}

		/*
		 * The process of allocating a new LWP could cause
		 * sleeps.  We're called from inside sleep, so that
		 * would be Bad.  Therefore, we must use a cached new
		 * LWP.  The first thing that this new LWP must do is
		 * allocate another LWP for the cache.
		 */
		l2 = sa_getcachelwp(vp);
		if (l2 == NULL) {
			/* XXXSMP */
			/* No upcall for you! */
			/* XXX The consequences of this are more subtle and
			 * XXX the recovery from this situation deserves
			 * XXX more thought.
			 */

			/* XXXUPSXXX Should only happen with concurrency > 1 */
#ifdef DIAGNOSTIC
			printf("sa_switch(%d.%d): no cached LWP for upcall.\n",
			    p->p_pid, l->l_lid);
#endif
			mi_switch(l, NULL);
			sadata_upcall_free(sau);
			return;
		}

		cpu_setfunc(l2, sa_switchcall, sau);
		sa_upcall0(sau, SA_UPCALL_BLOCKED, l, NULL, 0, NULL, NULL);

		/*
		 * Perform the double/upcall pagefault check.
		 * We do this only here since we need l's ucontext to
		 * get l's userspace stack.  sa_upcall0 above has saved
		 * it for us.
		 * The L_SA_PAGEFAULT flag is set in the MD
		 * pagefault code to indicate a pagefault.  The MD
		 * pagefault code also saves the faultaddr for us.
		 */
		if ((l->l_flag & L_SA_PAGEFAULT) && sa_pagefault(l,
		    &sau->sau_event.ss_captured.ss_ctx) != 0) {
			cpu_setfunc(l2, sa_switchcall, NULL);
			sa_putcachelwp(p, l2); /* PHOLD from sa_getcachelwp */
			mi_switch(l, NULL);
			sadata_upcall_free(sau);
			DPRINTFN(10,("sa_switch(%d.%d) page fault resolved\n",
			    p->p_pid, l->l_lid));
			if (vp->savp_faultaddr == vp->savp_ofaultaddr)
				vp->savp_ofaultaddr = -1;
			return;
		}

		DPRINTFN(8,("sa_switch(%d.%d) blocked upcall %d\n",
		    p->p_pid, l->l_lid, l2->l_lid));

		l->l_flag |= L_SA_BLOCKING;
		l2->l_priority = l2->l_usrpri;
		vp->savp_blocker = l;
		vp->savp_lwp = l2;
		setrunnable(l2);
		PRELE(l2); /* Remove the artificial hold-count */

		KDASSERT(l2 != l);
	} else if (vp->savp_lwp != NULL) {

		/*
		 * Case 2: We've been woken up while another LWP was
		 * on the VP, but we're going back to sleep without
		 * having returned to userland and delivering the
		 * SA_UNBLOCKED upcall (select and poll cause this
		 * kind of behavior a lot).  We just switch back to the
		 * LWP that had been running and let it have another
		 * go.  If the LWP on the VP was idling, don't make it
		 * run again, though.
		 */
		freesau = sau;
		if (vp->savp_lwp->l_flag & L_SA_YIELD)
			l2 = NULL;
		else {
			/* XXXUPSXXX Unfair advantage for l2 ? */
			l2 = vp->savp_lwp;
			if (l2->l_stat != LSRUN || (l2->l_flag & L_INMEM) == 0)
				l2 = NULL;
		}
	} else {
		/* NOTREACHED */
		panic("sa_vp empty");
	}

	DPRINTFN(4,("sa_switch(%d.%d) switching to LWP %d.\n",
	    p->p_pid, l->l_lid, l2 ? l2->l_lid : 0));
	mi_switch(l, l2);
	sadata_upcall_free(freesau);
	DPRINTFN(4,("sa_switch(%d.%d flag %x) returned.\n",
	    p->p_pid, l->l_lid, l->l_flag));
	KDASSERT(l->l_wchan == 0);

	SCHED_ASSERT_UNLOCKED();
}

static void
sa_switchcall(void *arg)
{
	struct lwp *l, *l2;
	struct proc *p;
	struct sadata_vp *vp;
	struct sadata_upcall *sau;
	struct sastack *sast;
	int s;

	l2 = curlwp;
	p = l2->l_proc;
	vp = l2->l_savp;
	sau = arg;

	if (p->p_flag & P_WEXIT) {
		sadata_upcall_free(sau);
		lwp_exit(l2);
	}

	KDASSERT(vp->savp_lwp == l2);
	DPRINTFN(6,("sa_switchcall(%d.%d)\n", p->p_pid, l2->l_lid));

	l2->l_flag &= ~L_SA;
	if (LIST_EMPTY(&vp->savp_lwpcache)) {
		/* Allocate the next cache LWP */
		DPRINTFN(6,("sa_switchcall(%d.%d) allocating LWP\n",
		    p->p_pid, l2->l_lid));
		sa_newcachelwp(l2);
	}
	if (sau) {
		l = vp->savp_blocker;
		sast = sa_getstack(p->p_sa);
		if (sast) {
			sau->sau_stack = sast->sast_stack;
			SIMPLEQ_INSERT_TAIL(&vp->savp_upcalls, sau, sau_next);
			l2->l_flag |= L_SA_UPCALL;
		} else {
#ifdef DIAGNOSTIC
			printf("sa_switchcall(%d.%d flag %x): "
			    "Not enough stacks.\n",
			    p->p_pid, l->l_lid, l->l_flag);
#endif
			sadata_upcall_free(sau);
			PHOLD(l2);
			SCHED_LOCK(s);
			sa_putcachelwp(p, l2); /* sets L_SA */
			vp->savp_lwp = l;
			mi_switch(l2, NULL);
			/* mostly NOTREACHED */
			SCHED_ASSERT_UNLOCKED();
			splx(s);
		}
	}
	l2->l_flag |= L_SA;

	upcallret(l2);
}

static int
sa_newcachelwp(struct lwp *l)
{
	struct proc *p;
	struct lwp *l2;
	vaddr_t uaddr;
	boolean_t inmem;
	int s;

	p = l->l_proc;
	if (p->p_flag & P_WEXIT)
		return (0);

	inmem = uvm_uarea_alloc(&uaddr);
	if (__predict_false(uaddr == 0)) {
		return (ENOMEM);
	} else {
		newlwp(l, p, uaddr, inmem, 0, NULL, 0, child_return, 0, &l2);
		/*
		 * We don't want this LWP on the process's main LWP
		 * list, but newlwp helpfully puts it there.  Unclear
		 * if newlwp should be tweaked.
		 */
		PHOLD(l2);
		SCHED_LOCK(s);
		l2->l_savp = l->l_savp;
		sa_putcachelwp(p, l2);
		SCHED_UNLOCK(s);
	}

	return (0);
}

/*
 * Take a normal process LWP and place it in the SA cache.
 * LWP must not be running!
 */
void
sa_putcachelwp(struct proc *p, struct lwp *l)
{
	struct sadata_vp *vp;

	SCHED_ASSERT_LOCKED();

	vp = l->l_savp;

	LIST_REMOVE(l, l_sibling);
	p->p_nlwps--;
	l->l_stat = LSSUSPENDED;
	l->l_flag |= (L_DETACHED | L_SA);
	/* XXX lock sadata */
	DPRINTFN(5,("sa_putcachelwp(%d.%d) Adding LWP %d to cache\n",
	    p->p_pid, curlwp->l_lid, l->l_lid));
	LIST_INSERT_HEAD(&vp->savp_lwpcache, l, l_sibling);
	vp->savp_ncached++;
	/* XXX unlock */
}

/*
 * Fetch a LWP from the cache.
 */
struct lwp *
sa_getcachelwp(struct sadata_vp *vp)
{
	struct lwp *l;
	struct proc *p;

	SCHED_ASSERT_LOCKED();

	l = NULL;
	/* XXX lock sadata */
	if (vp->savp_ncached > 0) {
		vp->savp_ncached--;
		l = LIST_FIRST(&vp->savp_lwpcache);
		LIST_REMOVE(l, l_sibling);
		p = l->l_proc;
		LIST_INSERT_HEAD(&p->p_lwps, l, l_sibling);
		p->p_nlwps++;
		DPRINTFN(5,("sa_getcachelwp(%d.%d) Got LWP %d from cache.\n",
		    p->p_pid, curlwp->l_lid, l->l_lid));
	}
	/* XXX unlock */
	return l;
}


void
sa_unblock_userret(struct lwp *l)
{
	struct proc *p;
	struct lwp *l2;
	struct sadata *sa;
	struct sadata_vp *vp;
	struct sadata_upcall *sau;
	struct sastack *sast;
	int f, s;

	p = l->l_proc;
	sa = p->p_sa;
	vp = l->l_savp;

	if (p->p_flag & P_WEXIT)
		return;

	SCHED_ASSERT_UNLOCKED();

	KERNEL_PROC_LOCK(l);
	SA_LWP_STATE_LOCK(l, f);

	DPRINTFN(7,("sa_unblock_userret(%d.%d %x) \n", p->p_pid, l->l_lid,
	    l->l_flag));

	sa_setwoken(l);
	/* maybe NOTREACHED */

	SCHED_LOCK(s);
	if (l != vp->savp_lwp) {
		/* Invoke an "unblocked" upcall */
		DPRINTFN(8,("sa_unblock_userret(%d.%d) unblocking\n",
		    p->p_pid, l->l_lid));

		l2 = sa_vp_repossess(l);

		SCHED_UNLOCK(s);

		if (l2 == NULL)
			lwp_exit(l);

		sast = sa_getstack(sa);
		if (p->p_flag & P_WEXIT)
			lwp_exit(l);

		sau = sadata_upcall_alloc(1);
		if (p->p_flag & P_WEXIT) {
			sadata_upcall_free(sau);
			lwp_exit(l);
		}

		PHOLD(l2);

		KDASSERT(sast != NULL);
		DPRINTFN(9,("sa_unblock_userret(%d.%d) using stack %p\n",
		    l->l_proc->p_pid, l->l_lid, sast->sast_stack.ss_sp));

		/*
		 * Defer saving the event lwp's state because a
		 * PREEMPT upcall could be on the queue already.
		 */
		sa_upcall0(sau, SA_UPCALL_UNBLOCKED | SA_UPCALL_DEFER_EVENT,
		    l, l2, 0, NULL, NULL);
		sau->sau_stack = sast->sast_stack;

		SCHED_LOCK(s);
		SIMPLEQ_INSERT_TAIL(&vp->savp_upcalls, sau, sau_next);
		l->l_flag |= L_SA_UPCALL;
		l->l_flag &= ~L_SA_BLOCKING;
		sa_putcachelwp(p, l2);
	}
	SCHED_UNLOCK(s);

	SA_LWP_STATE_UNLOCK(l, f);
	KERNEL_PROC_UNLOCK(l);
}

void
sa_upcall_userret(struct lwp *l)
{
	struct lwp *l2;
	struct proc *p;
	struct sadata *sa;
	struct sadata_vp *vp;
	struct sadata_upcall *sau;
	struct sastack *sast;
	int f, s;

	p = l->l_proc;
	sa = p->p_sa;
	vp = l->l_savp;

	SCHED_ASSERT_UNLOCKED();

	KERNEL_PROC_LOCK(l);
	SA_LWP_STATE_LOCK(l, f);

	DPRINTFN(7,("sa_upcall_userret(%d.%d %x) \n", p->p_pid, l->l_lid,
	    l->l_flag));

	KDASSERT((l->l_flag & L_SA_BLOCKING) == 0);

	sast = NULL;
	if (SIMPLEQ_EMPTY(&vp->savp_upcalls) && vp->savp_wokenq_head != NULL)
		sast = sa_getstack(sa);
	SCHED_LOCK(s);
	if (SIMPLEQ_EMPTY(&vp->savp_upcalls) && vp->savp_wokenq_head != NULL &&
	    sast != NULL) {
		/* Invoke an "unblocked" upcall */
		l2 = vp->savp_wokenq_head;
		vp->savp_wokenq_head = l2->l_forw;

		DPRINTFN(9,("sa_upcall_userret(%d.%d) using stack %p\n",
		    l->l_proc->p_pid, l->l_lid, sast->sast_stack.ss_sp));

		SCHED_UNLOCK(s);

		if (p->p_flag & P_WEXIT)
			lwp_exit(l);

		DPRINTFN(8,("sa_upcall_userret(%d.%d) unblocking %d\n",
		    p->p_pid, l->l_lid, l2->l_lid));

		sau = sadata_upcall_alloc(1);
		if (p->p_flag & P_WEXIT) {
			sadata_upcall_free(sau);
			lwp_exit(l);
		}

		sa_upcall0(sau, SA_UPCALL_UNBLOCKED, l2, l, 0, NULL, NULL);
		sau->sau_stack = sast->sast_stack;

		SIMPLEQ_INSERT_TAIL(&vp->savp_upcalls, sau, sau_next);

		l2->l_flag &= ~L_SA_BLOCKING;
		SCHED_LOCK(s);
		sa_putcachelwp(p, l2); /* PHOLD from sa_setwoken */
		SCHED_UNLOCK(s);
	} else {
		SCHED_UNLOCK(s);
		if (sast)
			sa_setstackfree(sast, sa);
	}

	KDASSERT(vp->savp_lwp == l);

	while (!SIMPLEQ_EMPTY(&vp->savp_upcalls))
		sa_makeupcalls(l);

	if (vp->savp_wokenq_head == NULL)
		l->l_flag &= ~L_SA_UPCALL;

	SA_LWP_STATE_UNLOCK(l, f);
	KERNEL_PROC_UNLOCK(l);
	return;
}

static __inline void
sa_makeupcalls(struct lwp *l)
{
	struct lwp *l2, *eventq;
	struct proc *p;
	struct sadata *sa;
	struct sadata_vp *vp;
	struct sa_t **sapp, *sap;
	struct sa_t self_sa;
	struct sa_t *sas[3], *sasp;
	struct sadata_upcall *sau;
	union sau_state e_ss;
	void *stack, *ap;
	ucontext_t u, *up;
	size_t sz;
	int i, nint, nevents, s, type;

	p = l->l_proc;
	sa = p->p_sa;
	vp = l->l_savp;

	sau = SIMPLEQ_FIRST(&vp->savp_upcalls);
	SIMPLEQ_REMOVE_HEAD(&vp->savp_upcalls, sau_next);

	if (sau->sau_flags & SAU_FLAG_DEFERRED_EVENT)
		sa_upcall_getstate(&sau->sau_event,
		    sau->sau_event.ss_deferred.ss_lwp);
	if (sau->sau_flags & SAU_FLAG_DEFERRED_INTERRUPTED)
		sa_upcall_getstate(&sau->sau_interrupted,
		    sau->sau_interrupted.ss_deferred.ss_lwp);

#ifdef __MACHINE_STACK_GROWS_UP
	stack = sau->sau_stack.ss_sp;
#else
	stack = (caddr_t)sau->sau_stack.ss_sp + sau->sau_stack.ss_size;
#endif
	stack = STACK_ALIGN(stack, ALIGNBYTES);

	self_sa.sa_id = l->l_lid;
	self_sa.sa_cpu = vp->savp_id;
	sas[0] = &self_sa;
	nevents = 0;
	nint = 0;
	if (sau->sau_event.ss_captured.ss_sa.sa_context != NULL) {
		if (copyout(&sau->sau_event.ss_captured.ss_ctx,
		    sau->sau_event.ss_captured.ss_sa.sa_context,
		    sizeof(ucontext_t)) != 0) {
#ifdef DIAGNOSTIC
			printf("sa_makeupcalls(%d.%d): couldn't copyout"
			    " context of event LWP %d\n",
			    p->p_pid, l->l_lid,
			    sau->sau_event.ss_captured.ss_sa.sa_id);
#endif
			sigexit(l, SIGILL);
			/* NOTREACHED */
		}
		sas[1] = &sau->sau_event.ss_captured.ss_sa;
		nevents = 1;
	}
	if (sau->sau_interrupted.ss_captured.ss_sa.sa_context != NULL) {
		KDASSERT(sau->sau_interrupted.ss_captured.ss_sa.sa_context !=
		    sau->sau_event.ss_captured.ss_sa.sa_context);
		if (copyout(&sau->sau_interrupted.ss_captured.ss_ctx,
		    sau->sau_interrupted.ss_captured.ss_sa.sa_context,
		    sizeof(ucontext_t)) != 0) {
#ifdef DIAGNOSTIC
			printf("sa_makeupcalls(%d.%d): couldn't copyout"
			    " context of interrupted LWP %d\n",
			    p->p_pid, l->l_lid,
			    sau->sau_interrupted.ss_captured.ss_sa.sa_id);
#endif
			sigexit(l, SIGILL);
			/* NOTREACHED */
		}
		sas[2] = &sau->sau_interrupted.ss_captured.ss_sa;
		nint = 1;
	}
	eventq = NULL;
	if (sau->sau_type == SA_UPCALL_UNBLOCKED) {
		SCHED_LOCK(s);
		eventq = vp->savp_wokenq_head;
		vp->savp_wokenq_head = NULL;
		SCHED_UNLOCK(s);
		l2 = eventq;
		while (l2 != NULL) {
			nevents++;
			l2 = l2->l_forw;
		}
	}

	/* Copy out the activation's ucontext */
	u.uc_stack = sau->sau_stack;
	u.uc_flags = _UC_STACK;

	up = (void *)STACK_ALLOC(stack, sizeof(ucontext_t));
	stack = STACK_GROW(stack, sizeof(ucontext_t));

	if (copyout(&u, up, sizeof(ucontext_t)) != 0) {
		sadata_upcall_free(sau);
#ifdef DIAGNOSTIC
		printf("sa_makeupcalls: couldn't copyout activation"
		    " ucontext for %d.%d to %p\n", l->l_proc->p_pid, l->l_lid,
		    up);
#endif
		sigexit(l, SIGILL);
		/* NOTREACHED */
	}
	sas[0]->sa_context = up;

	/* Next, copy out the sa_t's and pointers to them. */

	sz = (1 + nevents + nint) * sizeof(struct sa_t);
	sap = (void *)STACK_ALLOC(stack, sz);
	sap += 1 + nevents + nint;
	stack = STACK_GROW(stack, sz);

	sz = (1 + nevents + nint) * sizeof(struct sa_t *);
	sapp = (void *)STACK_ALLOC(stack, sz);
	sapp += 1 + nevents + nint;
	stack = STACK_GROW(stack, sz);

	KDASSERT(nint <= 1);
	for (i = nevents + nint; i >= 0; i--) {
		sap--;
		sapp--;
		if (i == 1 + nevents)	/* interrupted sa */
			sasp = sas[2];
		else if (i <= 1)	/* self_sa and event sa */
			sasp = sas[i];
		else {			/* extra sas */
			KDASSERT(sau->sau_type == SA_UPCALL_UNBLOCKED);
			KDASSERT(eventq != NULL);
			l2 = eventq;
			KDASSERT(l2 != NULL);
			eventq = l2->l_forw;
			DPRINTFN(8,("sa_makeupcalls(%d.%d) unblocking extra %d\n",
			    p->p_pid, l->l_lid, l2->l_lid));
			sa_upcall_getstate(&e_ss, l2);
			SCHED_LOCK(s);
			l2->l_flag &= ~L_SA_BLOCKING;
			sa_putcachelwp(p, l2); /* PHOLD from sa_setwoken */
			SCHED_UNLOCK(s);

			if (copyout(&e_ss.ss_captured.ss_ctx,
			    e_ss.ss_captured.ss_sa.sa_context,
			    sizeof(ucontext_t)) != 0) {
#ifdef DIAGNOSTIC
				printf("sa_makeupcalls(%d.%d): couldn't copyout"
				    " context of event LWP %d\n",
				    p->p_pid, l->l_lid, e_ss.ss_captured.ss_sa.sa_id);
#endif
				sigexit(l, SIGILL);
				/* NOTREACHED */
			}
			sasp = &e_ss.ss_captured.ss_sa;
		}
		if ((copyout(sasp, sap, sizeof(struct sa_t)) != 0) ||
		    (copyout(&sap, sapp, sizeof(struct sa_t *)) != 0)) {
			/* Copying onto the stack didn't work.  Die. */
			sadata_upcall_free(sau);
#ifdef DIAGNOSTIC
			printf("sa_makeupcalls: couldn't copyout sa_t "
			    "%d for %d.%d\n", i, p->p_pid, l->l_lid);
#endif
			sigexit(l, SIGILL);
			/* NOTREACHED */
		}
	}
	KDASSERT(eventq == NULL);

	/* Copy out the arg, if any */
	/*
	 * XXX assume alignment works out; everything so far has been
	 * a structure, so...
	 */
	if (sau->sau_arg) {
		ap = STACK_ALLOC(stack, sau->sau_argsize);
		stack = STACK_GROW(stack, sau->sau_argsize);
		if (copyout(sau->sau_arg, ap, sau->sau_argsize) != 0) {
			/* Copying onto the stack didn't work.  Die. */
			sadata_upcall_free(sau);
#ifdef DIAGNOSTIC
			printf("sa_makeupcalls(%d.%d): couldn't copyout"
			    " sadata_upcall arg %p size %ld to %p \n",
			    p->p_pid, l->l_lid,
			    sau->sau_arg, (long) sau->sau_argsize, ap);
#endif
			sigexit(l, SIGILL);
			/* NOTREACHED */
		}
	} else {
		ap = NULL;
#ifdef __hppa__
		stack = STACK_ALIGN(stack, HPPA_FRAME_SIZE);
#endif
	}
	type = sau->sau_type;

	sadata_upcall_free(sau);

	DPRINTFN(7,("sa_makeupcalls(%d.%d): type %d\n", p->p_pid,
	    l->l_lid, type));

	cpu_upcall(l, type, nevents, nint, sapp, ap, stack, sa->sa_upcall);

	l->l_flag &= ~L_SA_YIELD;
}
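
/*
 * The frame built above, for a downward-growing stack, ends up laid
 * out like this (highest address first):
 *
 *	ucontext_t		activation context (sas[0]->sa_context)
 *	sa_t[1+nevents+nint]	self, event(s), interrupted
 *	sa_t *[1+nevents+nint]	the pointer array handed to the upcall
 *	argument block		optional, sau_argsize bytes
 *
 * cpu_upcall() then arranges for sa->sa_upcall to run on this stack.
 * A hedged sketch of the handler prototype, consistent with the
 * argument order given to cpu_upcall():
 *
 *	void upcall(int type, struct sa_t *sas[], int events,
 *	    int interrupted, void *arg);
 */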

static void
sa_setwoken(struct lwp *l)
{
	struct lwp *l2, *vp_lwp;
	struct proc *p;
	struct sadata *sa;
	struct sadata_vp *vp;
	int s;

	SCHED_LOCK(s);

	if ((l->l_flag & L_SA_BLOCKING) == 0) {
		SCHED_UNLOCK(s);
		return;
	}

	p = l->l_proc;
	sa = p->p_sa;
	vp = l->l_savp;
	vp_lwp = vp->savp_lwp;
	l2 = NULL;

	KDASSERT(vp_lwp != NULL);
	DPRINTFN(3,("sa_setwoken(%d.%d) woken, flags %x, vp %d\n",
	    l->l_proc->p_pid, l->l_lid, l->l_flag,
	    vp_lwp->l_lid));

#if notyet
	if (vp_lwp->l_flag & L_SA_IDLE) {
		KDASSERT((vp_lwp->l_flag & L_SA_UPCALL) == 0);
		KDASSERT(vp->savp_wokenq_head == NULL);
		DPRINTFN(3,("sa_setwoken(%d.%d) repossess: idle vp_lwp %d state %d\n",
		    l->l_proc->p_pid, l->l_lid,
		    vp_lwp->l_lid, vp_lwp->l_stat));
		vp_lwp->l_flag &= ~L_SA_IDLE;
		SCHED_UNLOCK(s);
		return;
	}
#endif

	DPRINTFN(3,("sa_setwoken(%d.%d) put on wokenq: vp_lwp %d state %d\n",
	    l->l_proc->p_pid, l->l_lid, vp_lwp->l_lid,
	    vp_lwp->l_stat));

	PHOLD(l);
	if (vp->savp_wokenq_head == NULL)
		vp->savp_wokenq_head = l;
	else
		*vp->savp_wokenq_tailp = l;
	*(vp->savp_wokenq_tailp = &l->l_forw) = NULL;

	switch (vp_lwp->l_stat) {
	case LSONPROC:
		if (vp_lwp->l_flag & L_SA_UPCALL)
			break;
		vp_lwp->l_flag |= L_SA_UPCALL;
		if (vp_lwp->l_flag & L_SA_YIELD)
			break;
		/* XXX IPI vp_lwp->l_cpu */
		break;
	case LSSLEEP:
		if (vp_lwp->l_flag & L_SA_IDLE) {
			vp_lwp->l_flag &= ~L_SA_IDLE;
			vp_lwp->l_flag |= L_SA_UPCALL;
			setrunnable(vp_lwp);
			break;
		}
		vp_lwp->l_flag |= L_SA_UPCALL;
		break;
	case LSSUSPENDED:
#ifdef DIAGNOSTIC
		printf("sa_setwoken(%d.%d) vp lwp %d LSSUSPENDED\n",
		    l->l_proc->p_pid, l->l_lid, vp_lwp->l_lid);
#endif
		break;
	case LSSTOP:
		vp_lwp->l_flag |= L_SA_UPCALL;
		break;
	case LSRUN:
		if (vp_lwp->l_flag & L_SA_UPCALL)
			break;
		vp_lwp->l_flag |= L_SA_UPCALL;
		if (vp_lwp->l_flag & L_SA_YIELD)
			break;
		if (vp_lwp->l_slptime > 1) {
			void updatepri(struct lwp *);
			updatepri(vp_lwp);
		}
		vp_lwp->l_slptime = 0;
		if (vp_lwp->l_flag & L_INMEM) {
			if (vp_lwp->l_cpu == curcpu())
				l2 = vp_lwp;
			else
				need_resched(vp_lwp->l_cpu);
		} else
			sched_wakeup(&proc0);
		break;
	default:
		panic("sa_vp LWP not sleeping/onproc/runnable");
	}

	l->l_stat = LSSUSPENDED;
	p->p_nrlwps--;
	mi_switch(l, l2);
	/* maybe NOTREACHED */
	SCHED_ASSERT_UNLOCKED();
	splx(s);
	if (p->p_flag & P_WEXIT)
		lwp_exit(l);
}

static struct lwp *
sa_vp_repossess(struct lwp *l)
{
	struct lwp *l2;
	struct proc *p = l->l_proc;
	struct sadata_vp *vp = l->l_savp;

	SCHED_ASSERT_LOCKED();

	/*
	 * Put ourselves on the virtual processor and note that the
	 * previous occupant of that position was interrupted.
	 */
	l2 = vp->savp_lwp;
	vp->savp_lwp = l;

	KDASSERT(l2 != l);
	if (l2) {
		if (l2->l_flag & L_SA_YIELD)
			l2->l_flag &= ~(L_SA_YIELD|L_SA_IDLE);

		DPRINTFN(1,("sa_vp_repossess(%d.%d) vp lwp %d state %d\n",
		    p->p_pid, l->l_lid, l2->l_lid, l2->l_stat));

		switch (l2->l_stat) {
		case LSRUN:
			remrunqueue(l2);
			p->p_nrlwps--;
			break;
		case LSSLEEP:
			unsleep(l2);
			l2->l_flag &= ~L_SINTR;
			break;
		case LSSUSPENDED:
#ifdef DIAGNOSTIC
			printf("sa_vp_repossess(%d.%d) vp lwp %d LSSUSPENDED\n",
			    l->l_proc->p_pid, l->l_lid, l2->l_lid);
#endif
			break;
#ifdef DIAGNOSTIC
		default:
			panic("SA VP %d.%d is in state %d, not running"
			    " or sleeping\n", p->p_pid, l2->l_lid,
			    l2->l_stat);
#endif
		}
		l2->l_stat = LSSUSPENDED;
	}
	return l2;
}



#ifdef DEBUG
int debug_print_sa(struct proc *);
int debug_print_lwp(struct lwp *);
int debug_print_proc(int);

int
debug_print_proc(int pid)
{
	struct proc *p;

	p = pfind(pid);
	if (p == NULL)
		printf("No process %d\n", pid);
	else
		debug_print_sa(p);

	return 0;
}

int
debug_print_sa(struct proc *p)
{
	struct lwp *l;
	struct sadata *sa;
	struct sadata_vp *vp;

	printf("Process %d (%s), state %d, address %p, flags %x\n",
	    p->p_pid, p->p_comm, p->p_stat, p, p->p_flag);
	printf("LWPs: %d (%d running, %d zombies)\n",
	    p->p_nlwps, p->p_nrlwps, p->p_nzlwps);
	LIST_FOREACH(l, &p->p_lwps, l_sibling)
		debug_print_lwp(l);
	sa = p->p_sa;
	if (sa) {
		SLIST_FOREACH(vp, &sa->sa_vps, savp_next) {
			if (vp->savp_lwp)
				printf("SA VP: %d %s\n", vp->savp_lwp->l_lid,
				    vp->savp_lwp->l_flag & L_SA_YIELD ?
				    (vp->savp_lwp->l_flag & L_SA_IDLE ?
				    "idle" : "yielding") : "");
			printf("SAs: %d cached LWPs\n", vp->savp_ncached);
			LIST_FOREACH(l, &vp->savp_lwpcache, l_sibling)
				debug_print_lwp(l);
		}
	}

	return 0;
}

int
debug_print_lwp(struct lwp *l)
{

	printf("LWP %d address %p ", l->l_lid, l);
	printf("state %d flags %x ", l->l_stat, l->l_flag);
	if (l->l_wchan)
		printf("wait %p %s", l->l_wchan, l->l_wmesg);
	printf("\n");

	return 0;
}

#endif