sys/kern/kern_kse.c
/*
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/5.3/sys/kern/kern_kse.c 136588 2004-10-16 08:43:07Z cvs2svn $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/ptrace.h>
#include <sys/smp.h>
#include <sys/sysproto.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/kse.h>
#include <sys/ktr.h>
#include <vm/uma.h>

/*
 * KSEGRP related storage.
 */
static uma_zone_t upcall_zone;

/* DEBUG ONLY */
extern int virtual_cpu;
extern int thread_debug;

extern int max_threads_per_proc;
extern int max_groups_per_proc;
extern int max_threads_hits;
extern struct mtx kse_zombie_lock;


#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
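/*
 * RANGEOF() yields the size in bytes of the span between two members of a
 * struct; e.g. bzero(&kg->kg_startzero, RANGEOF(struct ksegrp,
 * kg_startzero, kg_endzero)) clears a ksegrp's scratch region in one call,
 * as done in kse_create() below.
 */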

TAILQ_HEAD(, kse_upcall) zombie_upcalls =
	TAILQ_HEAD_INITIALIZER(zombie_upcalls);

static int thread_update_usr_ticks(struct thread *td);
static void thread_alloc_spare(struct thread *td);

struct kse_upcall *
upcall_alloc(void)
{
	struct kse_upcall *ku;

	ku = uma_zalloc(upcall_zone, M_WAITOK);
	bzero(ku, sizeof(*ku));
	return (ku);
}

void
upcall_free(struct kse_upcall *ku)
{

	uma_zfree(upcall_zone, ku);
}

void
upcall_link(struct kse_upcall *ku, struct ksegrp *kg)
{

	mtx_assert(&sched_lock, MA_OWNED);
	TAILQ_INSERT_TAIL(&kg->kg_upcalls, ku, ku_link);
	ku->ku_ksegrp = kg;
	kg->kg_numupcalls++;
}

void
upcall_unlink(struct kse_upcall *ku)
{
	struct ksegrp *kg = ku->ku_ksegrp;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(ku->ku_owner == NULL, ("%s: have owner", __func__));
	TAILQ_REMOVE(&kg->kg_upcalls, ku, ku_link);
	kg->kg_numupcalls--;
	upcall_stash(ku);
}

void
upcall_remove(struct thread *td)
{

	if (td->td_upcall) {
		td->td_upcall->ku_owner = NULL;
		upcall_unlink(td->td_upcall);
		td->td_upcall = NULL;
	}
}

#ifndef _SYS_SYSPROTO_H_
struct kse_switchin_args {
	struct kse_thr_mailbox *tmbx;
	int flags;
};
#endif

int
kse_switchin(struct thread *td, struct kse_switchin_args *uap)
{
	struct kse_thr_mailbox tmbx;
	struct kse_upcall *ku;
	int error;

	if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td))
		return (EINVAL);
	error = (uap->tmbx == NULL) ? EINVAL : 0;
	if (!error)
		error = copyin(uap->tmbx, &tmbx, sizeof(tmbx));
	if (!error && (uap->flags & KSE_SWITCHIN_SETTMBX))
		error = (suword(&ku->ku_mailbox->km_curthread,
		    (long)uap->tmbx) != 0 ? EINVAL : 0);
	if (!error)
		error = set_mcontext(td, &tmbx.tm_context.uc_mcontext);
	if (!error) {
		suword32(&uap->tmbx->tm_lwp, td->td_tid);
		if (uap->flags & KSE_SWITCHIN_SETTMBX) {
			td->td_mailbox = uap->tmbx;
			td->td_pflags |= TDP_CAN_UNBIND;
		}
		if (td->td_proc->p_flag & P_TRACED) {
			if (tmbx.tm_dflags & TMDF_SSTEP)
				ptrace_single_step(td);
			else
				ptrace_clear_single_step(td);
			if (tmbx.tm_dflags & TMDF_SUSPEND) {
				mtx_lock_spin(&sched_lock);
				/* fuword can block, check again */
				if (td->td_upcall)
					ku->ku_flags |= KUF_DOUPCALL;
				mtx_unlock_spin(&sched_lock);
			}
		}
	}
	return ((error == 0) ? EJUSTRETURN : error);
}

/*
struct kse_thr_interrupt_args {
	struct kse_thr_mailbox * tmbx;
	int cmd;
	long data;
};
*/
int
kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
{
	struct proc *p;
	struct thread *td2;
	struct kse_upcall *ku;
	struct kse_thr_mailbox *tmbx;
	uint32_t flags;

	p = td->td_proc;

	if (!(p->p_flag & P_SA))
		return (EINVAL);

	switch (uap->cmd) {
	case KSE_INTR_SENDSIG:
		if (uap->data < 0 || uap->data > _SIG_MAXSIG)
			return (EINVAL);
		/* FALLTHROUGH */
	case KSE_INTR_INTERRUPT:
	case KSE_INTR_RESTART:
		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2->td_mailbox == uap->tmbx)
				break;
		}
		if (td2 == NULL) {
			mtx_unlock_spin(&sched_lock);
			PROC_UNLOCK(p);
			return (ESRCH);
		}
		if (uap->cmd == KSE_INTR_SENDSIG) {
			if (uap->data > 0) {
				td2->td_flags &= ~TDF_INTERRUPT;
				mtx_unlock_spin(&sched_lock);
				tdsignal(td2, (int)uap->data, SIGTARGET_TD);
			} else {
				mtx_unlock_spin(&sched_lock);
			}
		} else {
			td2->td_flags |= TDF_INTERRUPT | TDF_ASTPENDING;
			if (TD_CAN_UNBIND(td2))
				td2->td_upcall->ku_flags |= KUF_DOUPCALL;
			if (uap->cmd == KSE_INTR_INTERRUPT)
				td2->td_intrval = EINTR;
			else
				td2->td_intrval = ERESTART;
			if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR))
				sleepq_abort(td2);
			mtx_unlock_spin(&sched_lock);
		}
		PROC_UNLOCK(p);
		break;
	case KSE_INTR_SIGEXIT:
		if (uap->data < 1 || uap->data > _SIG_MAXSIG)
			return (EINVAL);
		PROC_LOCK(p);
		sigexit(td, (int)uap->data);
		break;

	case KSE_INTR_DBSUSPEND:
		/* This sub-command is only for bound threads. */
		if (td->td_pflags & TDP_SA)
			return (EINVAL);
		ku = td->td_upcall;
		tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
		if (tmbx == NULL || tmbx == (void *)-1)
			return (EINVAL);
		flags = 0;
		while ((p->p_flag & P_TRACED) && !(p->p_flag & P_SINGLE_EXIT)) {
			flags = fuword32(&tmbx->tm_dflags);
			if (!(flags & TMDF_SUSPEND))
				break;
			PROC_LOCK(p);
			mtx_lock_spin(&sched_lock);
			thread_stopped(p);
			thread_suspend_one(td);
			PROC_UNLOCK(p);
			mi_switch(SW_VOL, NULL);
			mtx_unlock_spin(&sched_lock);
		}
		return (0);

	default:
		return (EINVAL);
	}
	return (0);
}

/*
struct kse_exit_args {
	register_t dummy;
};
*/
int
kse_exit(struct thread *td, struct kse_exit_args *uap)
{
	struct proc *p;
	struct ksegrp *kg;
	struct kse_upcall *ku, *ku2;
	int error, count;

	p = td->td_proc;
	/*
	 * Ensure that this is only called from the UTS.
	 */
	if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td))
		return (EINVAL);

	kg = td->td_ksegrp;
	count = 0;

	/*
	 * Calculate the existing non-exiting upcalls in this ksegrp.
	 * If we are the last upcall but there are still other threads,
	 * then do not exit. We need the other threads to be able to
	 * complete whatever they are doing.
	 * XXX This relies on the userland knowing what to do if we return.
	 * It may be a better choice to convert ourselves into a kse_release
	 * (or similar) and wait in the kernel to be needed.
	 */
	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	FOREACH_UPCALL_IN_GROUP(kg, ku2) {
		if (ku2->ku_flags & KUF_EXITING)
			count++;
	}
	if ((kg->kg_numupcalls - count) == 1 &&
	    (kg->kg_numthreads > 1)) {
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		return (EDEADLK);
	}
	ku->ku_flags |= KUF_EXITING;
	mtx_unlock_spin(&sched_lock);
	PROC_UNLOCK(p);

	/*
	 * Mark the UTS mailbox as having been finished with.
	 * If that fails then just go for a segfault.
	 * XXX need to check whether it can be delivered without a mailbox.
	 */
	error = suword32(&ku->ku_mailbox->km_flags, ku->ku_mflags|KMF_DONE);
	if (!(td->td_pflags & TDP_SA))
		if (suword32(&td->td_mailbox->tm_lwp, 0))
			error = EFAULT;
	PROC_LOCK(p);
	if (error)
		psignal(p, SIGSEGV);
	mtx_lock_spin(&sched_lock);
	upcall_remove(td);
	if (p->p_numthreads != 1) {
		/*
		 * If we are not the last thread, but we are the last
		 * thread in this ksegrp, then by definition this is not
		 * the last group and we need to clean it up as well.
		 * thread_exit will clean up the kseg as needed.
		 */
		thread_stopped(p);
		thread_exit();
		/* NOTREACHED */
	}
	/*
	 * This is the last thread. Just return to the user.
	 * We know that there is only one ksegrp too, as any others
	 * would have been discarded in previous calls to thread_exit().
	 * Effectively we have left threading mode..
	 * The only real thing left to do is ensure that the
	 * scheduler sets our concurrency back to 1 as that may be a
	 * resource leak otherwise.
	 * This is an A[PB]I issue.. what SHOULD we do?
	 * One possibility is to return to the user. It may not cope well.
	 * The other possibility would be to let the process exit.
	 */
	thread_unthread(td);
	mtx_unlock_spin(&sched_lock);
	PROC_UNLOCK(p);
#if 1
	return (0);
#else
	exit1(td, 0);
#endif
}

/*
 * Either becomes an upcall or waits for an awakening event and
 * then becomes an upcall. Only error cases return.
 */
/*
struct kse_release_args {
	struct timespec *timeout;
};
*/
int
kse_release(struct thread *td, struct kse_release_args *uap)
{
	struct proc *p;
	struct ksegrp *kg;
	struct kse_upcall *ku;
	struct timespec timeout;
	struct timeval tv;
	sigset_t sigset;
	int error;

	p = td->td_proc;
	kg = td->td_ksegrp;
	if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td))
		return (EINVAL);
	if (uap->timeout != NULL) {
		if ((error = copyin(uap->timeout, &timeout, sizeof(timeout))))
			return (error);
		TIMESPEC_TO_TIMEVAL(&tv, &timeout);
	}
	if (td->td_pflags & TDP_SA)
		td->td_pflags |= TDP_UPCALLING;
	else {
		ku->ku_mflags = fuword32(&ku->ku_mailbox->km_flags);
		if (ku->ku_mflags == -1) {
			PROC_LOCK(p);
			sigexit(td, SIGSEGV);
		}
	}
	PROC_LOCK(p);
	if (ku->ku_mflags & KMF_WAITSIGEVENT) {
		/* UTS wants to wait for signal event */
		if (!(p->p_flag & P_SIGEVENT) &&
		    !(ku->ku_flags & KUF_DOUPCALL)) {
			td->td_kflags |= TDK_KSERELSIG;
			error = msleep(&p->p_siglist, &p->p_mtx, PPAUSE|PCATCH,
			    "ksesigwait", (uap->timeout ? tvtohz(&tv) : 0));
			td->td_kflags &= ~(TDK_KSERELSIG | TDK_WAKEUP);
		}
		p->p_flag &= ~P_SIGEVENT;
		sigset = p->p_siglist;
		PROC_UNLOCK(p);
		error = copyout(&sigset, &ku->ku_mailbox->km_sigscaught,
		    sizeof(sigset));
	} else {
		if ((ku->ku_flags & KUF_DOUPCALL) == 0 &&
		    ((ku->ku_mflags & KMF_NOCOMPLETED) ||
		     (kg->kg_completed == NULL))) {
			kg->kg_upsleeps++;
			td->td_kflags |= TDK_KSEREL;
			error = msleep(&kg->kg_completed, &p->p_mtx,
			    PPAUSE|PCATCH, "kserel",
			    (uap->timeout ? tvtohz(&tv) : 0));
			td->td_kflags &= ~(TDK_KSEREL | TDK_WAKEUP);
			kg->kg_upsleeps--;
		}
		PROC_UNLOCK(p);
	}
	if (ku->ku_flags & KUF_DOUPCALL) {
		mtx_lock_spin(&sched_lock);
		ku->ku_flags &= ~KUF_DOUPCALL;
		mtx_unlock_spin(&sched_lock);
	}
	return (0);
}

/* struct kse_wakeup_args {
	struct kse_mailbox *mbx;
}; */
int
kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
{
	struct proc *p;
	struct ksegrp *kg;
	struct kse_upcall *ku;
	struct thread *td2;

	p = td->td_proc;
	td2 = NULL;
	ku = NULL;
	/* KSE-enabled processes only, please. */
	if (!(p->p_flag & P_SA))
		return (EINVAL);
	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	if (uap->mbx) {
		FOREACH_KSEGRP_IN_PROC(p, kg) {
			FOREACH_UPCALL_IN_GROUP(kg, ku) {
				if (ku->ku_mailbox == uap->mbx)
					break;
			}
			if (ku)
				break;
		}
	} else {
		kg = td->td_ksegrp;
		if (kg->kg_upsleeps) {
			mtx_unlock_spin(&sched_lock);
			wakeup(&kg->kg_completed);
			PROC_UNLOCK(p);
			return (0);
		}
		ku = TAILQ_FIRST(&kg->kg_upcalls);
	}
	if (ku == NULL) {
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		return (ESRCH);
	}
	if ((td2 = ku->ku_owner) == NULL) {
		mtx_unlock_spin(&sched_lock);
		panic("%s: no owner", __func__);
	} else if (td2->td_kflags & (TDK_KSEREL | TDK_KSERELSIG)) {
		mtx_unlock_spin(&sched_lock);
		if (!(td2->td_kflags & TDK_WAKEUP)) {
			td2->td_kflags |= TDK_WAKEUP;
			if (td2->td_kflags & TDK_KSEREL)
				sleepq_remove(td2, &kg->kg_completed);
			else
				sleepq_remove(td2, &p->p_siglist);
		}
	} else {
		ku->ku_flags |= KUF_DOUPCALL;
		mtx_unlock_spin(&sched_lock);
	}
	PROC_UNLOCK(p);
	return (0);
}

/*
 * No new KSEG: first call: use current KSE, don't schedule an upcall
 * All other situations, do allocate max new KSEs and schedule an upcall.
 *
 * XXX should be changed so that 'first' behaviour lasts for as long
 * as you have not made a kse in this ksegrp. i.e. as long as we do not have
 * a mailbox..
 */
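/*
 * Illustrative sketch of a userland caller (assumptions: a UTS entry
 * point "uts_entry" and an initialized stack_t "uts_stack"; error
 * handling omitted). A threading library would typically start the SA
 * machinery with something like:
 *
 *	struct kse_mailbox kmbx;
 *
 *	bzero(&kmbx, sizeof(kmbx));
 *	kmbx.km_func = uts_entry;	// upcall entry point
 *	kmbx.km_stack = uts_stack;	// stack for upcalls
 *	kse_create(&kmbx, 0);		// upcall in the current ksegrp
 *
 * Passing newgroup != 0 instead creates a new ksegrp for the mailbox,
 * while setting KMF_BOUND in km_flags (with km_curthread filled in)
 * requests a bound, system-scope style thread.
 */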
/* struct kse_create_args {
	struct kse_mailbox *mbx;
	int newgroup;
}; */
int
kse_create(struct thread *td, struct kse_create_args *uap)
{
	struct ksegrp *newkg;
	struct ksegrp *kg;
	struct proc *p;
	struct kse_mailbox mbx;
	struct kse_upcall *newku;
	int err, ncpus, sa = 0, first = 0;
	struct thread *newtd;

	p = td->td_proc;
	kg = td->td_ksegrp;
	if ((err = copyin(uap->mbx, &mbx, sizeof(mbx))))
		return (err);

	ncpus = mp_ncpus;
	if (virtual_cpu != 0)
		ncpus = virtual_cpu;
	/*
	 * If the new UTS mailbox says that this
	 * will be a BOUND lwp, then it had better
	 * have its thread mailbox already there.
	 * In addition, this ksegrp will be limited to
	 * a concurrency of 1. There is more on this later.
	 */
	if (mbx.km_flags & KMF_BOUND) {
		if (mbx.km_curthread == NULL)
			return (EINVAL);
		ncpus = 1;
	} else {
		sa = TDP_SA;
	}

	PROC_LOCK(p);
	/*
	 * Processes using the other threading model can't
	 * suddenly start calling this one.
	 */
	if ((p->p_flag & (P_SA|P_HADTHREADS)) == P_HADTHREADS) {
		PROC_UNLOCK(p);
		return (EINVAL);
	}

	/*
	 * Limit it to NCPU upcall contexts per ksegrp in any case.
	 * There is a small race here as we don't hold the proc lock
	 * until we inc the ksegrp count; it's not really a big problem
	 * if we get one too many, and we save a proc lock.
	 */
	if ((!uap->newgroup) && (kg->kg_numupcalls >= ncpus)) {
		PROC_UNLOCK(p);
		return (EPROCLIM);
	}

	if (!(p->p_flag & P_SA)) {
		first = 1;
		p->p_flag |= P_SA|P_HADTHREADS;
	}

	PROC_UNLOCK(p);
	/*
	 * Now pay attention!
	 * If we are going to be bound, then we need to be either
	 * a new group, or the first call ever. In either
	 * case we will be creating (or be) the only thread in a group,
	 * and the concurrency will be set to 1.
	 * This is not quite right, as we may still make ourselves
	 * bound after making other ksegrps, but it will do for now.
	 * The library will only try to do this much.
	 */
	if (!sa && !(uap->newgroup || first))
		return (EINVAL);

	if (uap->newgroup) {
		newkg = ksegrp_alloc();
		bzero(&newkg->kg_startzero, RANGEOF(struct ksegrp,
		    kg_startzero, kg_endzero));
		bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
		    RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy));
		sched_init_concurrency(newkg);
		PROC_LOCK(p);
		if (p->p_numksegrps >= max_groups_per_proc) {
			PROC_UNLOCK(p);
			ksegrp_free(newkg);
			return (EPROCLIM);
		}
		ksegrp_link(newkg, p);
		mtx_lock_spin(&sched_lock);
		sched_fork_ksegrp(td, newkg);
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
	} else {
		/*
		 * We want to make a thread in our own ksegrp.
		 * If we are just the first call, either kind
		 * is ok, but if not then either we must be
		 * already an upcallable thread to make another,
		 * or a bound thread to make one of those.
		 * Once again, not quite right but good enough for now.. XXXKSE
		 */
		if (!first && ((td->td_pflags & TDP_SA) != sa))
			return (EINVAL);

		newkg = kg;
	}

	/*
	 * This test is a bit "indirect".
	 * It might simplify things if we made a direct way of testing
	 * if a ksegrp has been worked on before.
	 * In the case of a bound request and the concurrency being set to
	 * one, the concurrency will already be 1 so it's just inefficient
	 * but not dangerous to call this again. XXX
	 */
	if (newkg->kg_numupcalls == 0) {
		/*
		 * Initialize KSE group with the appropriate
		 * concurrency.
		 *
		 * For a multiplexed group, create as much concurrency
		 * as the number of physical cpus.
		 * This increases concurrency in the kernel even if the
		 * userland is not MP safe and can only run on a single CPU.
		 * In an ideal world, every physical cpu should execute a
		 * thread. If there is enough concurrency, threads in the
		 * kernel can be executed in parallel on different cpus at
		 * full speed without being restricted by the number of
		 * upcalls the userland provides.
		 * Adding more upcall structures only increases concurrency
		 * in userland.
		 *
		 * For a bound thread group, because there is only one thread
		 * in the group, we only set the concurrency for the group
		 * to 1. A thread in this kind of group will never schedule
		 * an upcall when blocked. This simulates pthread system
		 * scope thread behaviour.
		 */
		sched_set_concurrency(newkg, ncpus);
	}
	/*
	 * Even bound LWPs get a mailbox and an upcall to hold it.
	 */
	newku = upcall_alloc();
	newku->ku_mailbox = uap->mbx;
	newku->ku_func = mbx.km_func;
	bcopy(&mbx.km_stack, &newku->ku_stack, sizeof(stack_t));

	/*
	 * For the first call this may not have been set.
	 * Of course nor may it actually be needed.
	 */
	if (td->td_standin == NULL)
		thread_alloc_spare(td);

	PROC_LOCK(p);
	if (newkg->kg_numupcalls >= ncpus) {
		PROC_UNLOCK(p);
		upcall_free(newku);
		return (EPROCLIM);
	}

	/*
	 * If this is the first call and we are a normal thread,
	 * then transfer all the signals back to the 'process'.
	 * SA threading will make a special thread to handle them.
	 */
	if (first && sa) {
		SIGSETOR(p->p_siglist, td->td_siglist);
		SIGEMPTYSET(td->td_siglist);
		SIGFILLSET(td->td_sigmask);
		SIG_CANTMASK(td->td_sigmask);
	}

	/*
	 * Make the new upcall available to the ksegrp.
	 * It may or may not use it, but it's available.
	 */
	mtx_lock_spin(&sched_lock);
	PROC_UNLOCK(p);
	upcall_link(newku, newkg);
	if (mbx.km_quantum)
		newkg->kg_upquantum = max(1, mbx.km_quantum/tick);

	/*
	 * Each upcall structure has an owner thread, find which
	 * one owns it.
	 */
	if (uap->newgroup) {
		/*
		 * Because the new ksegrp has no thread,
		 * create an initial upcall thread to own it.
		 */
		newtd = thread_schedule_upcall(td, newku);
	} else {
		/*
		 * If the current thread has no upcall structure,
		 * just assign the upcall to it.
		 * It'll just return.
		 */
		if (td->td_upcall == NULL) {
			newku->ku_owner = td;
			td->td_upcall = newku;
			newtd = td;
		} else {
			/*
			 * Create a new upcall thread to own it.
			 */
			newtd = thread_schedule_upcall(td, newku);
		}
	}
	mtx_unlock_spin(&sched_lock);

	/*
	 * Let the UTS instance know its LWPID.
	 * It doesn't really care. But the debugger will.
	 */
	suword32(&newku->ku_mailbox->km_lwp, newtd->td_tid);

	/*
	 * In the same manner, if the UTS has a current user thread,
	 * then it is also running on this LWP so set it as well.
	 * The library could do that of course.. but why not..
	 */
	if (mbx.km_curthread)
		suword32(&mbx.km_curthread->tm_lwp, newtd->td_tid);

	if (sa) {
		newtd->td_pflags |= TDP_SA;
	} else {
		newtd->td_pflags &= ~TDP_SA;

		/*
		 * Since a library will use the mailbox pointer to
		 * identify even a bound thread, and the mailbox pointer
		 * will never be allowed to change after this syscall
		 * for a bound thread, set it here so the library can
		 * find the thread after the syscall returns.
		 */
		newtd->td_mailbox = mbx.km_curthread;

		if (newtd != td) {
			/*
			 * If we did create a new thread then
			 * make sure it goes to the right place
			 * when it starts up, and make sure that it runs
			 * at full speed when it gets there.
			 * thread_schedule_upcall() copies all cpu state
			 * to the new thread, so we should clear single step
			 * flag here.
			 */
			cpu_set_upcall_kse(newtd, newku);
			if (p->p_flag & P_TRACED)
				ptrace_clear_single_step(newtd);
		}
	}

	/*
	 * If we are starting a new thread, kick it off.
	 */
	if (newtd != td) {
		mtx_lock_spin(&sched_lock);
		setrunqueue(newtd, SRQ_BORING);
		mtx_unlock_spin(&sched_lock);
	}
	return (0);
}

/*
 * Initialize global thread allocation resources.
 */
void
kseinit(void)
{

	upcall_zone = uma_zcreate("UPCALL", sizeof(struct kse_upcall),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
}

/*
 * Stash an embarrassing extra upcall into the zombie upcall queue.
 */
void
upcall_stash(struct kse_upcall *ku)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_upcalls, ku, ku_link);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Reap zombie kse resources.
 */
void
kse_GC(void)
{
	struct kse_upcall *ku_first, *ku_next;

	/*
	 * Don't even bother to lock if none at this instant,
	 * we really don't care about the next instant..
	 */
	if (!TAILQ_EMPTY(&zombie_upcalls)) {
		mtx_lock_spin(&kse_zombie_lock);
		ku_first = TAILQ_FIRST(&zombie_upcalls);
		if (ku_first)
			TAILQ_INIT(&zombie_upcalls);
		mtx_unlock_spin(&kse_zombie_lock);
		while (ku_first) {
			ku_next = TAILQ_NEXT(ku_first, ku_link);
			upcall_free(ku_first);
			ku_first = ku_next;
		}
	}
}

/*
 * Store the thread context in the UTS's mailbox,
 * then add the mailbox at the head of a list we are building in user space.
 * The list is anchored in the ksegrp structure.
 */
int
thread_export_context(struct thread *td, int willexit)
{
	struct proc *p;
	struct ksegrp *kg;
	uintptr_t mbx;
	void *addr;
	int error = 0, sig;
	mcontext_t mc;

	p = td->td_proc;
	kg = td->td_ksegrp;

	/*
	 * Post sync signal, or process SIGKILL and SIGSTOP.
	 * For a sync signal, this is only possible when the signal is not
	 * caught by userland or the process is being debugged.
	 */
	PROC_LOCK(p);
	if (td->td_flags & TDF_NEEDSIGCHK) {
		mtx_lock_spin(&sched_lock);
		td->td_flags &= ~TDF_NEEDSIGCHK;
		mtx_unlock_spin(&sched_lock);
		mtx_lock(&p->p_sigacts->ps_mtx);
		while ((sig = cursig(td)) != 0)
			postsig(sig);
		mtx_unlock(&p->p_sigacts->ps_mtx);
	}
	if (willexit)
		SIGFILLSET(td->td_sigmask);
	PROC_UNLOCK(p);

	/* Export the user/machine context. */
	get_mcontext(td, &mc, 0);
	addr = (void *)(&td->td_mailbox->tm_context.uc_mcontext);
	error = copyout(&mc, addr, sizeof(mcontext_t));
	if (error)
		goto bad;

	addr = (caddr_t)(&td->td_mailbox->tm_lwp);
	if (suword32(addr, 0)) {
		error = EFAULT;
		goto bad;
	}

	/* Get the address of the list pointer in the latest mailbox. */
	addr = (void *)(&td->td_mailbox->tm_next);
	/*
	 * Put the saved address of the previous first
	 * entry into this one.
	 */
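	/*
	 * Lock-free publish: store the current list head into this
	 * mailbox's tm_next with suword(), then re-check the head under
	 * the proc lock. If another thread changed the head in the
	 * meantime, loop and store the new value; otherwise commit this
	 * mailbox as the new head.
	 */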
	for (;;) {
		mbx = (uintptr_t)kg->kg_completed;
		if (suword(addr, mbx)) {
			error = EFAULT;
			goto bad;
		}
		PROC_LOCK(p);
		if (mbx == (uintptr_t)kg->kg_completed) {
			kg->kg_completed = td->td_mailbox;
			/*
			 * The thread context may be taken away by
			 * other upcall threads when we unlock the
			 * process lock; it is no longer valid to
			 * use it again anywhere else.
			 */
			td->td_mailbox = NULL;
			PROC_UNLOCK(p);
			break;
		}
		PROC_UNLOCK(p);
	}
	td->td_usticks = 0;
	return (0);

bad:
	PROC_LOCK(p);
	sigexit(td, SIGILL);
	return (error);
}

/*
 * Take the list of completed mailboxes for this KSEGRP and put them on this
 * upcall's mailbox as it's the next one going up.
 */
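/*
 * This uses the same optimistic suword()-then-recheck pattern as
 * thread_export_context(): write the head pointer into user space
 * without the lock, then verify under the proc lock that the head did
 * not change, retrying if it did.
 */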
static int
thread_link_mboxes(struct ksegrp *kg, struct kse_upcall *ku)
{
	struct proc *p = kg->kg_proc;
	void *addr;
	uintptr_t mbx;

	addr = (void *)(&ku->ku_mailbox->km_completed);
	for (;;) {
		mbx = (uintptr_t)kg->kg_completed;
		if (suword(addr, mbx)) {
			PROC_LOCK(p);
			psignal(p, SIGSEGV);
			PROC_UNLOCK(p);
			return (EFAULT);
		}
		PROC_LOCK(p);
		if (mbx == (uintptr_t)kg->kg_completed) {
			kg->kg_completed = NULL;
			PROC_UNLOCK(p);
			break;
		}
		PROC_UNLOCK(p);
	}
	return (0);
}

/*
 * This function should be called at statclock interrupt time.
 */
int
thread_statclock(int user)
{
	struct thread *td = curthread;

	if (!(td->td_pflags & TDP_SA))
		return (0);
	if (user) {
		/* Currently always done via ast(). */
		mtx_lock_spin(&sched_lock);
		td->td_flags |= TDF_ASTPENDING;
		mtx_unlock_spin(&sched_lock);
		td->td_uuticks++;
	} else if (td->td_mailbox != NULL)
		td->td_usticks++;
	return (0);
}

/*
 * Export stat clock ticks to userland.
 */
static int
thread_update_usr_ticks(struct thread *td)
{
	struct proc *p = td->td_proc;
	caddr_t addr;
	u_int uticks;

	if (td->td_mailbox == NULL)
		return (-1);

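	/*
	 * Fold the tick counts accumulated in the kernel into the mailbox
	 * counters. The fuword32()/suword32() pairs below are plain,
	 * unlocked read-modify-writes of user memory; a failed store is
	 * treated as a fatal mailbox fault.
	 */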
	if ((uticks = td->td_uuticks) != 0) {
		td->td_uuticks = 0;
		addr = (caddr_t)&td->td_mailbox->tm_uticks;
		if (suword32(addr, uticks+fuword32(addr)))
			goto error;
	}
	if ((uticks = td->td_usticks) != 0) {
		td->td_usticks = 0;
		addr = (caddr_t)&td->td_mailbox->tm_sticks;
		if (suword32(addr, uticks+fuword32(addr)))
			goto error;
	}
	return (0);

error:
	PROC_LOCK(p);
	psignal(p, SIGSEGV);
	PROC_UNLOCK(p);
	return (-2);
}

/*
 * This function is intended to be used to initialize a spare thread
 * for upcall. Initialize the thread's large data area outside sched_lock
 * for thread_schedule_upcall(). The crhold is also here to get it out
 * from under the schedlock as it has a mutex op itself.
 * XXX BUG.. we need to get the cr ref after the thread has
 * checked and changed its own, not 6 months before...
 */
void
thread_alloc_spare(struct thread *td)
{
	struct thread *spare;

	if (td->td_standin)
		return;
	spare = thread_alloc();
	td->td_standin = spare;
	bzero(&spare->td_startzero,
	    (unsigned) RANGEOF(struct thread, td_startzero, td_endzero));
	spare->td_proc = td->td_proc;
	spare->td_ucred = crhold(td->td_ucred);
}

/*
 * Create a thread and schedule it for upcall on the KSE given.
 * Use our thread's standin so that we don't have to allocate one.
 */
struct thread *
thread_schedule_upcall(struct thread *td, struct kse_upcall *ku)
{
	struct thread *td2;

	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * Schedule an upcall thread on the specified kse_upcall;
	 * the kse_upcall must be free.
	 * td must have a spare thread.
	 */
	KASSERT(ku->ku_owner == NULL, ("%s: upcall has owner", __func__));
	if ((td2 = td->td_standin) != NULL) {
		td->td_standin = NULL;
	} else {
		panic("no reserve thread when scheduling an upcall");
		return (NULL);
	}
	CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)",
	    td2, td->td_proc->p_pid, td->td_proc->p_comm);
	/*
	 * Bzero already done in thread_alloc_spare() because we can't
	 * do the crhold here: we are already holding the schedlock.
	 */
	bcopy(&td->td_startcopy, &td2->td_startcopy,
	    (unsigned) RANGEOF(struct thread, td_startcopy, td_endcopy));
	thread_link(td2, ku->ku_ksegrp);
	/* Inherit parts of the blocked thread's context as a good template. */
	cpu_set_upcall(td2, td);
	/* Let the new thread become owner of the upcall. */
	ku->ku_owner = td2;
	td2->td_upcall = ku;
	td2->td_flags = 0;
	td2->td_pflags = TDP_SA|TDP_UPCALLING;
	td2->td_state = TDS_CAN_RUN;
	td2->td_inhibitors = 0;
	SIGFILLSET(td2->td_sigmask);
	SIG_CANTMASK(td2->td_sigmask);
	sched_fork_thread(td, td2);
	return (td2);	/* bogus.. should be a void function */
}

/*
 * Only used when a thread has generated a trap and the process is being
 * debugged.
 */
void
thread_signal_add(struct thread *td, int sig)
{
	struct proc *p;
	siginfo_t siginfo;
	struct sigacts *ps;
	int error;

	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	ps = p->p_sigacts;
	mtx_assert(&ps->ps_mtx, MA_OWNED);

	cpu_thread_siginfo(sig, 0, &siginfo);
	mtx_unlock(&ps->ps_mtx);
	SIGADDSET(td->td_sigmask, sig);
	PROC_UNLOCK(p);
	error = copyout(&siginfo, &td->td_mailbox->tm_syncsig, sizeof(siginfo));
	if (error) {
		PROC_LOCK(p);
		sigexit(td, SIGSEGV);
	}
	PROC_LOCK(p);
	mtx_lock(&ps->ps_mtx);
}
#include "opt_sched.h"
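
/*
 * Invoked from the context switch path for an SA thread: decide whether
 * the outgoing thread should surrender its upcall so a new upcall thread
 * can run in its place; see the comment in the body for the exact
 * conditions.
 */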
struct thread *
thread_switchout(struct thread *td, int flags, struct thread *nextthread)
{
	struct kse_upcall *ku;
	struct thread *td2;

	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * If the outgoing thread is in a threaded group and has never
	 * scheduled an upcall, decide whether this is a short
	 * or long term event and thus whether or not to schedule
	 * an upcall.
	 * If it is a short term event, just suspend it in
	 * a way that takes its KSE with it.
	 * Select the events for which we want to schedule upcalls.
	 * For now it's just sleep, or if the thread is suspended but
	 * the process wide suspending flag is not set (debugger
	 * suspends the thread).
	 * XXXKSE eventually almost any inhibition could do.
	 */
	if (TD_CAN_UNBIND(td) && (td->td_standin) &&
	    (TD_ON_SLEEPQ(td) || (TD_IS_SUSPENDED(td) &&
	     !P_SHOULDSTOP(td->td_proc)))) {
		/*
		 * Release ownership of the upcall, and schedule an upcall
		 * thread; this new upcall thread becomes the owner of
		 * the upcall structure. It will be ahead of us in the
		 * run queue, so as we are stopping, it should either
		 * start up immediately, or at least before us if
		 * we release our slot.
		 */
		ku = td->td_upcall;
		ku->ku_owner = NULL;
		td->td_upcall = NULL;
		td->td_pflags &= ~TDP_CAN_UNBIND;
		td2 = thread_schedule_upcall(td, ku);
#ifdef SCHED_4BSD
		if (flags & SW_INVOL || nextthread) {
			setrunqueue(td2, SRQ_YIELDING);
		} else {
			/*
			 * Keep up with reality.. we have one extra thread
			 * in the picture.. and it's 'running'.
			 */
			return (td2);
		}
#else
		setrunqueue(td2, SRQ_YIELDING);
#endif
	}
	return (nextthread);
}

/*
 * Setup done on the thread when it enters the kernel.
 */
void
thread_user_enter(struct thread *td)
{
	struct proc *p = td->td_proc;
	struct ksegrp *kg;
	struct kse_upcall *ku;
	struct kse_thr_mailbox *tmbx;
	uint32_t flags;

	/*
	 * First check that we shouldn't just abort. We
	 * can suspend it here or just exit.
	 */
	if (__predict_false(P_SHOULDSTOP(p))) {
		PROC_LOCK(p);
		thread_suspend_check(0);
		PROC_UNLOCK(p);
	}

	if (!(td->td_pflags & TDP_SA))
		return;

	/*
	 * If we are doing a syscall in a KSE environment,
	 * note where our mailbox is.
	 */

	kg = td->td_ksegrp;
	ku = td->td_upcall;

	KASSERT(ku != NULL, ("no upcall owned"));
	KASSERT(ku->ku_owner == td, ("wrong owner"));
	KASSERT(!TD_CAN_UNBIND(td), ("can unbind"));

	if (td->td_standin == NULL)
		thread_alloc_spare(td);
	ku->ku_mflags = fuword32((void *)&ku->ku_mailbox->km_flags);
	tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
	if ((tmbx == NULL) || (tmbx == (void *)-1L) ||
	    (ku->ku_mflags & KMF_NOUPCALL)) {
		td->td_mailbox = NULL;
	} else {
		flags = fuword32(&tmbx->tm_flags);
		/*
		 * On some architectures the TP register points to the thread
		 * mailbox but not to the kse mailbox; userland cannot
		 * atomically clear km_curthread, but it can use the TP
		 * register and set TMF_NOUPCALL in the thread flags to
		 * indicate a critical region.
		 */
		if (flags & TMF_NOUPCALL) {
			td->td_mailbox = NULL;
		} else {
			td->td_mailbox = tmbx;
			td->td_pflags |= TDP_CAN_UNBIND;
			if (__predict_false(p->p_flag & P_TRACED)) {
				flags = fuword32(&tmbx->tm_dflags);
				if (flags & TMDF_SUSPEND) {
					mtx_lock_spin(&sched_lock);
					/* fuword can block, check again */
					if (td->td_upcall)
						ku->ku_flags |= KUF_DOUPCALL;
					mtx_unlock_spin(&sched_lock);
				}
			}
		}
	}
}

/*
 * The extra work we go through if we are a threaded process when we
 * return to userland.
 *
 * If we are a KSE process and returning to user mode, check for
 * extra work to do before we return (e.g. for more syscalls
 * to complete first). If we were in a critical section, we should
 * just return to let it finish. Same if we were in the UTS (in
 * which case the mailbox's context's busy indicator will be set).
 * The only traps we support will have set the mailbox.
 * We will clear it here.
 */
int
thread_userret(struct thread *td, struct trapframe *frame)
{
	struct kse_upcall *ku;
	struct ksegrp *kg, *kg2;
	struct proc *p;
	struct timespec ts;
	int error = 0, upcalls, uts_crit;

	/* Nothing to do with a bound thread. */
	if (!(td->td_pflags & TDP_SA))
		return (0);

	/*
	 * Update the stat clock count for userland.
	 */
	if (td->td_mailbox != NULL) {
		thread_update_usr_ticks(td);
		uts_crit = 0;
	} else {
		uts_crit = 1;
	}

	p = td->td_proc;
	kg = td->td_ksegrp;
	ku = td->td_upcall;

	/*
	 * Optimisation:
	 * This thread has not started any upcall.
	 * If there is no work to report other than ourself,
	 * then it can return direct to userland.
	 */
	if (TD_CAN_UNBIND(td)) {
		td->td_pflags &= ~TDP_CAN_UNBIND;
		if ((td->td_flags & TDF_NEEDSIGCHK) == 0 &&
		    (kg->kg_completed == NULL) &&
		    (ku->ku_flags & KUF_DOUPCALL) == 0 &&
		    (kg->kg_upquantum && ticks < kg->kg_nextupcall)) {
			nanotime(&ts);
			error = copyout(&ts,
			    (caddr_t)&ku->ku_mailbox->km_timeofday,
			    sizeof(ts));
			td->td_mailbox = NULL;
			ku->ku_mflags = 0;
			if (error)
				goto out;
			return (0);
		}
		thread_export_context(td, 0);
		/*
		 * There is something to report, and we own an upcall
		 * structure, so we can go to userland.
		 * Turn ourself into an upcall thread.
		 */
		td->td_pflags |= TDP_UPCALLING;
	} else if (td->td_mailbox && (ku == NULL)) {
		thread_export_context(td, 1);
		PROC_LOCK(p);
		if (kg->kg_upsleeps)
			wakeup(&kg->kg_completed);
		mtx_lock_spin(&sched_lock);
		thread_stopped(p);
		thread_exit();
		/* NOTREACHED */
	}

	KASSERT(ku != NULL, ("upcall is NULL"));
	KASSERT(TD_CAN_UNBIND(td) == 0, ("can unbind"));

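	/*
	 * Throttle: if the process has grown past max_threads_per_proc,
	 * sleep until enough threads have exited, unless the upcall
	 * count itself accounts for the excess.
	 */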
	if (p->p_numthreads > max_threads_per_proc) {
		max_threads_hits++;
		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		p->p_maxthrwaits++;
		while (p->p_numthreads > max_threads_per_proc) {
			upcalls = 0;
			FOREACH_KSEGRP_IN_PROC(p, kg2) {
				if (kg2->kg_numupcalls == 0)
					upcalls++;
				else
					upcalls += kg2->kg_numupcalls;
			}
			if (upcalls >= max_threads_per_proc)
				break;
			mtx_unlock_spin(&sched_lock);
			if (msleep(&p->p_numthreads, &p->p_mtx, PPAUSE|PCATCH,
			    "maxthreads", 0)) {
				mtx_lock_spin(&sched_lock);
				break;
			} else {
				mtx_lock_spin(&sched_lock);
			}
		}
		p->p_maxthrwaits--;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
	}

	if (td->td_pflags & TDP_UPCALLING) {
		uts_crit = 0;
		kg->kg_nextupcall = ticks+kg->kg_upquantum;
		/*
		 * There is no more work to do and we are going to ride
		 * this thread up to userland as an upcall.
		 * Do the last parts of the setup needed for the upcall.
		 */
		CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
		    td, p->p_pid, td->td_proc->p_comm);

		td->td_pflags &= ~TDP_UPCALLING;
		if (ku->ku_flags & KUF_DOUPCALL) {
			mtx_lock_spin(&sched_lock);
			ku->ku_flags &= ~KUF_DOUPCALL;
			mtx_unlock_spin(&sched_lock);
		}
		/*
		 * Set the user context to the UTS.
		 */
		if (!(ku->ku_mflags & KMF_NOUPCALL)) {
			cpu_set_upcall_kse(td, ku);
			if (p->p_flag & P_TRACED)
				ptrace_clear_single_step(td);
			error = suword32(&ku->ku_mailbox->km_lwp,
			    td->td_tid);
			if (error)
				goto out;
			error = suword(&ku->ku_mailbox->km_curthread, 0);
			if (error)
				goto out;
		}

		/*
		 * Unhook the list of completed threads.
		 * Anything that completes after this gets to
		 * come in next time.
		 * Put the list of completed thread mailboxes on
		 * this KSE's mailbox.
		 */
		if (!(ku->ku_mflags & KMF_NOCOMPLETED) &&
		    (error = thread_link_mboxes(kg, ku)) != 0)
			goto out;
	}
	if (!uts_crit) {
		nanotime(&ts);
		error = copyout(&ts, &ku->ku_mailbox->km_timeofday, sizeof(ts));
	}

out:
	if (error) {
		/*
		 * Things are going to be so screwed we should just kill
		 * the process.
		 * How do we do that?
		 */
		PROC_LOCK(p);
		psignal(p, SIGSEGV);
		PROC_UNLOCK(p);
	} else {
		/*
		 * Optimisation:
		 * Ensure that we have a spare thread available,
		 * for when we re-enter the kernel.
		 */
		if (td->td_standin == NULL)
			thread_alloc_spare(td);
	}

	ku->ku_mflags = 0;
	td->td_mailbox = NULL;
	td->td_usticks = 0;
	return (error);	/* go sync */
}

int
thread_upcall_check(struct thread *td)
{
	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
	if (td->td_kflags & TDK_WAKEUP)
		return (1);
	else
		return (0);
}

/*
 * Called after ptrace has resumed a process: force all virtual CPUs
 * to schedule an upcall for the SA process, because the debugger may
 * have changed something in userland and the UTS should notice it as
 * soon as possible.
 */
void
thread_continued(struct proc *p)
{
	struct ksegrp *kg;
	struct kse_upcall *ku;
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);

	if (!(p->p_flag & P_SA))
		return;

	if (p->p_flag & P_TRACED) {
		FOREACH_KSEGRP_IN_PROC(p, kg) {
			td = TAILQ_FIRST(&kg->kg_threads);
			if (td == NULL)
				continue;
			/* Not an SA group; nothing to do. */
			if (!(td->td_pflags & TDP_SA))
				continue;
			FOREACH_UPCALL_IN_GROUP(kg, ku) {
				ku->ku_flags |= KUF_DOUPCALL;
				wakeup(&kg->kg_completed);
				if (TD_IS_SUSPENDED(ku->ku_owner)) {
					thread_unsuspend_one(ku->ku_owner);
				}
			}
		}
	}
}