/*-
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/7.3/sys/kern/kern_kse.c 192493 2009-05-20 22:30:57Z kmacy $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/imgact.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/ptrace.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/sysproto.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/syslog.h>
#include <sys/kse.h>
#include <sys/ktr.h>
#include <vm/uma.h>

#ifdef KSE
static uma_zone_t upcall_zone;

/* DEBUG ONLY */
extern int virtual_cpu;
extern int thread_debug;

extern int max_threads_per_proc;
extern int max_groups_per_proc;
extern int max_threads_hits;
extern struct mtx kse_lock;


TAILQ_HEAD(, kse_upcall) zombie_upcalls =
	TAILQ_HEAD_INITIALIZER(zombie_upcalls);

static int thread_update_usr_ticks(struct thread *td);
static int thread_alloc_spare(struct thread *td);
static struct thread *thread_schedule_upcall(struct thread *td, struct kse_upcall *ku);
static struct kse_upcall *upcall_alloc(void);


struct mtx kse_lock;
MTX_SYSINIT(kse_lock, &kse_lock, "kse lock", MTX_SPIN);

struct kse_upcall *
upcall_alloc(void)
{
	struct kse_upcall *ku;

	ku = uma_zalloc(upcall_zone, M_WAITOK | M_ZERO);
	return (ku);
}

void
upcall_reap(void)
{
	TAILQ_HEAD(, kse_upcall) zupcalls;
	struct kse_upcall *ku_item, *ku_tmp;

	TAILQ_INIT(&zupcalls);
	mtx_lock_spin(&kse_lock);
	if (!TAILQ_EMPTY(&zombie_upcalls)) {
		TAILQ_CONCAT(&zupcalls, &zombie_upcalls, ku_link);
		TAILQ_INIT(&zombie_upcalls);
	}
	mtx_unlock_spin(&kse_lock);
	TAILQ_FOREACH_SAFE(ku_item, &zupcalls, ku_link, ku_tmp)
		uma_zfree(upcall_zone, ku_item);
}
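
/*
 * The pattern above -- concatenate the zombie list onto a private list
 * under the spin lock, then free the items after dropping it -- is the
 * usual deferred-reclamation idiom.  A hypothetical zone of "struct foo"
 * items (all names illustrative) would be drained the same way:
 *
 *	TAILQ_HEAD(, foo) tmp = TAILQ_HEAD_INITIALIZER(tmp);
 *	struct foo *f, *ftmp;
 *
 *	mtx_lock_spin(&foo_lock);
 *	TAILQ_CONCAT(&tmp, &zombie_foos, f_link);
 *	mtx_unlock_spin(&foo_lock);
 *	TAILQ_FOREACH_SAFE(f, &tmp, f_link, ftmp)
 *		uma_zfree(foo_zone, f);
 */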

void
upcall_remove(struct thread *td)
{

	PROC_SLOCK_ASSERT(td->td_proc, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	if (td->td_upcall != NULL) {
		/*
		 * If we are not a bound thread then decrement the count of
		 * possible upcall sources
		 */
		if (td->td_pflags & TDP_SA)
			td->td_proc->p_numupcalls--;
		mtx_lock_spin(&kse_lock);
		td->td_upcall->ku_owner = NULL;
		TAILQ_REMOVE(&td->td_upcall->ku_proc->p_upcalls, td->td_upcall,
		    ku_link);
		TAILQ_INSERT_HEAD(&zombie_upcalls, td->td_upcall, ku_link);
		mtx_unlock_spin(&kse_lock);
		td->td_upcall = NULL;
	}
}
#endif

#ifndef _SYS_SYSPROTO_H_
struct kse_switchin_args {
	struct kse_thr_mailbox *tmbx;
	int flags;
};
#endif

#ifdef KSE
void
kse_unlink(struct thread *td)
{
	mtx_lock_spin(&kse_lock);
	thread_unlink(td);
	mtx_unlock_spin(&kse_lock);
	upcall_remove(td);
}
#endif

int
kse_switchin(struct thread *td, struct kse_switchin_args *uap)
{
#ifdef KSE
	struct kse_thr_mailbox tmbx, *tmbxp;
	struct kse_upcall *ku;
	int error, flags;

	/*
	 * Put the arguments in local variables, to allow uap to
	 * point into the trapframe. We clobber the trapframe as
	 * part of setting a new context.
	 */
	tmbxp = uap->tmbx;
	flags = uap->flags;

	thread_lock(td);
	if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td)) {
		thread_unlock(td);
		return (EINVAL);
	}
	thread_unlock(td);
	error = (tmbxp == NULL) ? EINVAL : 0;
	if (!error)
		error = copyin(tmbxp, &tmbx, sizeof(tmbx));
	if (!error && (flags & KSE_SWITCHIN_SETTMBX))
		error = (suword(&ku->ku_mailbox->km_curthread,
		    (long)tmbxp) != 0 ? EINVAL : 0);
	if (!error)
		error = set_mcontext(td, &tmbx.tm_context.uc_mcontext);
	if (!error) {
		suword32(&tmbxp->tm_lwp, td->td_tid);
		if (flags & KSE_SWITCHIN_SETTMBX) {
			td->td_mailbox = tmbxp;
			td->td_pflags |= TDP_CAN_UNBIND;
		}
		PROC_LOCK(td->td_proc);
		if (td->td_proc->p_flag & P_TRACED) {
			_PHOLD(td->td_proc);
			if (tmbx.tm_dflags & TMDF_SSTEP)
				ptrace_single_step(td);
			else
				ptrace_clear_single_step(td);
			if (tmbx.tm_dflags & TMDF_SUSPEND) {
				thread_lock(td);
				/* fuword can block, check again */
				if (td->td_upcall)
					ku->ku_flags |= KUF_DOUPCALL;
				thread_unlock(td);
			}
			_PRELE(td->td_proc);
		}
		PROC_UNLOCK(td->td_proc);
	}
	return ((error == 0) ? EJUSTRETURN : error);
#else /* !KSE */
	return (EOPNOTSUPP);
#endif
}
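
/*
 * A hypothetical userland fragment showing how a UTS might resume a
 * previously saved thread context through this syscall.  On success the
 * call does not return here; it returns into the restored context, so
 * anything after it only runs on error.  pick_next_thread() and
 * handle_switch_failure() are illustrative names:
 *
 *	struct kse_thr_mailbox *next;
 *
 *	next = pick_next_thread();
 *	kse_switchin(next, KSE_SWITCHIN_SETTMBX);
 *	handle_switch_failure(next);
 */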

/*
struct kse_thr_interrupt_args {
	struct kse_thr_mailbox * tmbx;
	int cmd;
	long data;
};
*/
int
kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
{
#ifdef KSE
	struct kse_execve_args args;
	struct image_args iargs;
	struct proc *p;
	struct thread *td2;
	struct kse_upcall *ku;
	struct kse_thr_mailbox *tmbx;
	uint32_t flags;
	int error;

	p = td->td_proc;

	PROC_LOCK(p);
	if (!(p->p_flag & P_SA)) {
		PROC_UNLOCK(p);
		return (EINVAL);
	}
	PROC_UNLOCK(p);

	switch (uap->cmd) {
	case KSE_INTR_SENDSIG:
		if (uap->data < 0 || uap->data > _SIG_MAXSIG)
			return (EINVAL);
		/* FALLTHROUGH */
	case KSE_INTR_INTERRUPT:
	case KSE_INTR_RESTART:
		PROC_LOCK(p);
		PROC_SLOCK(p);
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2->td_mailbox == uap->tmbx)
				break;
		}
		if (td2 == NULL) {
			PROC_SUNLOCK(p);
			PROC_UNLOCK(p);
			return (ESRCH);
		}
		thread_lock(td2);
		PROC_SUNLOCK(p);
		if (uap->cmd == KSE_INTR_SENDSIG) {
			if (uap->data > 0) {
				td2->td_flags &= ~TDF_INTERRUPT;
				thread_unlock(td2);
				tdsignal(p, td2, (int)uap->data, NULL);
			} else {
				thread_unlock(td2);
			}
		} else {
			td2->td_flags |= TDF_INTERRUPT | TDF_ASTPENDING;
			if (TD_CAN_UNBIND(td2))
				td2->td_upcall->ku_flags |= KUF_DOUPCALL;
			if (uap->cmd == KSE_INTR_INTERRUPT)
				td2->td_intrval = EINTR;
			else
				td2->td_intrval = ERESTART;
			if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR))
				sleepq_abort(td2, td2->td_intrval);
			thread_unlock(td2);
		}
		PROC_UNLOCK(p);
		break;
	case KSE_INTR_SIGEXIT:
		if (uap->data < 1 || uap->data > _SIG_MAXSIG)
			return (EINVAL);
		PROC_LOCK(p);
		sigexit(td, (int)uap->data);
		break;

	case KSE_INTR_DBSUSPEND:
		/* This sub-function is only for bound threads. */
		if (td->td_pflags & TDP_SA)
			return (EINVAL);
		thread_lock(td);
		ku = td->td_upcall;
		thread_unlock(td);
		tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
		if (tmbx == NULL || tmbx == (void *)-1)
			return (EINVAL);
		flags = 0;
		PROC_LOCK(p);
		while ((p->p_flag & P_TRACED) && !(p->p_flag & P_SINGLE_EXIT)) {
			flags = fuword32(&tmbx->tm_dflags);
			if (!(flags & TMDF_SUSPEND))
				break;
			PROC_SLOCK(p);
			thread_stopped(p);
			PROC_UNLOCK(p);
			thread_lock(td);
			thread_suspend_one(td);
			PROC_SUNLOCK(p);
			mi_switch(SW_VOL, NULL);
			thread_unlock(td);
			PROC_LOCK(p);
		}
		PROC_UNLOCK(p);
		return (0);

	case KSE_INTR_EXECVE:
		error = copyin((void *)uap->data, &args, sizeof(args));
		if (error)
			return (error);
		error = exec_copyin_args(&iargs, args.path, UIO_USERSPACE,
		    args.argv, args.envp);
		if (error == 0)
			error = kern_execve(td, &iargs, NULL);
		if (error == 0) {
			PROC_LOCK(p);
			SIGSETOR(td->td_siglist, args.sigpend);
			PROC_UNLOCK(p);
			kern_sigprocmask(td, SIG_SETMASK, &args.sigmask, NULL,
			    0);
		}
		return (error);

	default:
		return (EINVAL);
	}
	return (0);
#else /* !KSE */
	return (EOPNOTSUPP);
#endif
}
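
/*
 * Hypothetical userland usage: direct a signal at one user thread by its
 * mailbox, or break it out of an interruptible sleep.  "tmbx" is the
 * struct kse_thr_mailbox the UTS registered for that thread; the kernel
 * matches it against td_mailbox as above:
 *
 *	kse_thr_interrupt(tmbx, KSE_INTR_SENDSIG, SIGUSR1);
 *	kse_thr_interrupt(tmbx, KSE_INTR_INTERRUPT, 0);
 */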

/*
struct kse_exit_args {
	register_t dummy;
};
*/
int
kse_exit(struct thread *td, struct kse_exit_args *uap)
{
#ifdef KSE
	struct proc *p;
	struct kse_upcall *ku, *ku2;
	int error, count;

	p = td->td_proc;
	/*
	 * Ensure that this is only called from the UTS
	 */
	thread_lock(td);
	if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td)) {
		thread_unlock(td);
		return (EINVAL);
	}
	thread_unlock(td);

	/*
	 * Calculate the existing non-exiting upcalls in this process.
	 * If we are the last upcall but there are still other threads,
	 * then do not exit. We need the other threads to be able to
	 * complete whatever they are doing.
	 * XXX This relies on the userland knowing what to do if we return.
	 * It may be a better choice to convert ourselves into a kse_release
	 * ( or similar) and wait in the kernel to be needed.
	 * XXX Where are those other threads? I suppose they are waiting in
	 * the kernel. We should wait for them all at the user boundary after
	 * turning into an exit.
	 */
	count = 0;
	PROC_LOCK(p);
	PROC_SLOCK(p);
	FOREACH_UPCALL_IN_PROC(p, ku2) {
		if ((ku2->ku_flags & KUF_EXITING) == 0)
			count++;
	}
	if (count == 1 && (p->p_numthreads > 1)) {
		PROC_SUNLOCK(p);
		PROC_UNLOCK(p);
		return (EDEADLK);
	}
	ku->ku_flags |= KUF_EXITING;
	PROC_SUNLOCK(p);
	PROC_UNLOCK(p);

	/*
	 * Mark the UTS mailbox as having been finished with.
	 * If that fails then just go for a segfault.
	 * XXX need to check whether it can be delivered without a mailbox.
	 */
	error = suword32(&ku->ku_mailbox->km_flags, ku->ku_mflags|KMF_DONE);
	if (!(td->td_pflags & TDP_SA))
		if (suword32(&td->td_mailbox->tm_lwp, 0))
			error = EFAULT;
	PROC_LOCK(p);
	if (error)
		psignal(p, SIGSEGV);
	sigqueue_flush(&td->td_sigqueue);
	PROC_SLOCK(p);
	thread_lock(td);
	upcall_remove(td);
	thread_unlock(td);
	if (p->p_numthreads != 1) {
		thread_stopped(p);
		thread_exit();
		/* NOTREACHED */
	}
	/*
	 * This is the last thread. Just return to the user.
	 * Effectively we have left threading mode..
	 * The only real thing left to do is ensure that the
	 * scheduler sets our concurrency back to 1 as that may be a
	 * resource leak otherwise.
	 * This is an A[PB]I issue.. what SHOULD we do?
	 * One possibility is to return to the user. It may not cope well.
	 * The other possibility would be to let the process exit.
	 */
	thread_unthread(td);
	PROC_SUNLOCK(p);
	PROC_UNLOCK(p);
#if 0
	return (0);
#else
	printf("kse_exit: called on last thread. Calling exit1()\n");
	exit1(td, 0);
#endif
#else /* !KSE */
	return (EOPNOTSUPP);
#endif
}

/*
 * Either becomes an upcall or waits for an awakening event and
 * then becomes an upcall. Only error cases return.
 */
/*
struct kse_release_args {
	struct timespec *timeout;
};
*/
int
kse_release(struct thread *td, struct kse_release_args *uap)
{
#ifdef KSE
	struct proc *p;
	struct kse_upcall *ku;
	struct timespec timeout;
	struct timeval tv;
	sigset_t sigset;
	int error;

	p = td->td_proc;
	thread_lock(td);
	if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td)) {
		thread_unlock(td);
		printf("kse_release: called outside of threading. exiting\n");
		exit1(td, 0);
	}
	thread_unlock(td);
	if (uap->timeout != NULL) {
		if ((error = copyin(uap->timeout, &timeout, sizeof(timeout))))
			return (error);
		TIMESPEC_TO_TIMEVAL(&tv, &timeout);
	}
	if (td->td_pflags & TDP_SA)
		td->td_pflags |= TDP_UPCALLING;
	else {
		ku->ku_mflags = fuword32(&ku->ku_mailbox->km_flags);
		if (ku->ku_mflags == -1) {
			PROC_LOCK(p);
			sigexit(td, SIGSEGV);
		}
	}
	PROC_LOCK(p);
	if (ku->ku_mflags & KMF_WAITSIGEVENT) {
		/* UTS wants to wait for signal event */
		if (!(p->p_flag & P_SIGEVENT) &&
		    !(ku->ku_flags & KUF_DOUPCALL)) {
			td->td_kflags |= TDK_KSERELSIG;
			error = msleep(&p->p_siglist, &p->p_mtx, PPAUSE|PCATCH,
			    "ksesigwait", (uap->timeout ? tvtohz(&tv) : 0));
			td->td_kflags &= ~(TDK_KSERELSIG | TDK_WAKEUP);
		}
		p->p_flag &= ~P_SIGEVENT;
		sigset = p->p_siglist;
		PROC_UNLOCK(p);
		error = copyout(&sigset, &ku->ku_mailbox->km_sigscaught,
		    sizeof(sigset));
	} else {
		if ((ku->ku_flags & KUF_DOUPCALL) == 0 &&
		    ((ku->ku_mflags & KMF_NOCOMPLETED) ||
		    (p->p_completed == NULL))) {
			p->p_upsleeps++;
			td->td_kflags |= TDK_KSEREL;
			error = msleep(&p->p_completed, &p->p_mtx,
			    PPAUSE|PCATCH, "kserel",
			    (uap->timeout ? tvtohz(&tv) : 0));
			td->td_kflags &= ~(TDK_KSEREL | TDK_WAKEUP);
			p->p_upsleeps--;
		}
		PROC_UNLOCK(p);
	}
	if (ku->ku_flags & KUF_DOUPCALL) {
		PROC_SLOCK(p);
		ku->ku_flags &= ~KUF_DOUPCALL;
		PROC_SUNLOCK(p);
	}
	return (0);
#else /* !KSE */
	return (EOPNOTSUPP);
#endif
}
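
/*
 * Hypothetical UTS idle loop: when there is nothing to run, hand the LWP
 * back to the kernel until an event or a timeout, after which control
 * normally comes back as an upcall rather than a return.  runq_empty()
 * and the 50ms value are illustrative:
 *
 *	struct timespec ts = { 0, 50000000 };
 *
 *	while (runq_empty())
 *		kse_release(&ts);
 */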

/* struct kse_wakeup_args {
	struct kse_mailbox *mbx;
}; */
int
kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
{
#ifdef KSE
	struct proc *p;
	struct kse_upcall *ku;
	struct thread *td2;

	p = td->td_proc;
	td2 = NULL;
	ku = NULL;
	/* KSE-enabled processes only, please. */
	PROC_LOCK(p);
	if (!(p->p_flag & P_SA)) {
		PROC_UNLOCK(p);
		return (EINVAL);
	}
	PROC_SLOCK(p);
	if (uap->mbx) {
		FOREACH_UPCALL_IN_PROC(p, ku) {
			if (ku->ku_mailbox == uap->mbx)
				break;
		}
	} else {
		if (p->p_upsleeps) {
			PROC_SUNLOCK(p);
			wakeup(&p->p_completed);
			PROC_UNLOCK(p);
			return (0);
		}
		ku = TAILQ_FIRST(&p->p_upcalls);
	}
	if (ku == NULL) {
		PROC_SUNLOCK(p);
		PROC_UNLOCK(p);
		return (ESRCH);
	}
	mtx_lock_spin(&kse_lock);
	if ((td2 = ku->ku_owner) == NULL) {
		mtx_unlock_spin(&kse_lock);
		PROC_SUNLOCK(p);
		PROC_UNLOCK(p);
		panic("%s: no owner", __func__);
	} else if (td2->td_kflags & (TDK_KSEREL | TDK_KSERELSIG)) {
		mtx_unlock_spin(&kse_lock);
		if (!(td2->td_kflags & TDK_WAKEUP)) {
			td2->td_kflags |= TDK_WAKEUP;
			if (td2->td_kflags & TDK_KSEREL)
				sleepq_remove(td2, &p->p_completed);
			else
				sleepq_remove(td2, &p->p_siglist);
		}
	} else {
		ku->ku_flags |= KUF_DOUPCALL;
		mtx_unlock_spin(&kse_lock);
	}
	PROC_SUNLOCK(p);
	PROC_UNLOCK(p);
	return (0);
#else /* !KSE */
	return (EOPNOTSUPP);
#endif
}
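
/*
 * Hypothetical usage: after queueing work for the UTS, wake whichever
 * upcall LWP is sleeping in kse_release().  A NULL argument wakes any
 * sleeping upcall; a specific struct kse_mailbox targets one KSE.
 * enqueue_runnable() and thr are illustrative:
 *
 *	enqueue_runnable(thr);
 *	kse_wakeup(NULL);
 */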

/*
 * newgroup == 0: first call: use current KSE, don't schedule an upcall
 * All other situations, do allocate max new KSEs and schedule an upcall.
 *
 * XXX should be changed so that 'first' behaviour lasts for as long
 * as you have not made a thread in this proc. i.e. as long as we do not have
 * a mailbox..
 */
/* struct kse_create_args {
	struct kse_mailbox *mbx;
	int newgroup;
}; */
int
kse_create(struct thread *td, struct kse_create_args *uap)
{
#ifdef KSE
	struct proc *p;
	struct kse_mailbox mbx;
	struct kse_upcall *newku;
	int err, ncpus, sa = 0, first = 0;
	struct thread *newtd;

	p = td->td_proc;

	/*
	 * Processes using the other threading model can't
	 * suddenly start calling this one
	 * XXX maybe...
	 */
	PROC_LOCK(p);
	if ((p->p_flag & (P_SA|P_HADTHREADS)) == P_HADTHREADS) {
		PROC_UNLOCK(p);
		return (EINVAL);
	}
	if (!(p->p_flag & P_SA)) {
		first = 1;
		p->p_flag |= P_SA|P_HADTHREADS;
	}
	PROC_UNLOCK(p);

	if ((err = copyin(uap->mbx, &mbx, sizeof(mbx))))
		return (err);

	ncpus = mp_ncpus;
	if (virtual_cpu != 0)
		ncpus = virtual_cpu;
	/*
	 * If the new UTS mailbox says that this
	 * will be a BOUND lwp, then it had better
	 * have its thread mailbox already there.
	 */
	if ((mbx.km_flags & KMF_BOUND) || uap->newgroup) {
		/* It's a bound thread (1:1) */
		if (mbx.km_curthread == NULL)
			return (EINVAL);
		ncpus = 1;
		if (!(uap->newgroup || first))
			return (EINVAL);
	} else {
		/* It's an upcall capable thread */
		sa = TDP_SA;
		PROC_LOCK(p);
		/*
		 * Limit it to NCPU upcall contexts per proc in any case.
		 * numupcalls will soon be numkse or something
		 * as it will represent the number of
		 * non-bound upcalls available. (i.e. ones that can
		 * actually call up).
		 */
		if (p->p_numupcalls >= ncpus) {
			PROC_UNLOCK(p);
			return (EPROCLIM);
		}
		p->p_numupcalls++;
		PROC_UNLOCK(p);
	}

	/*
	 * For the first call this may not have been set.
	 * Of course nor may it actually be needed.
	 * thread_schedule_upcall() will look for it.
	 */
	if (td->td_standin == NULL) {
		if (!thread_alloc_spare(td))
			return (ENOMEM);
	}

	/*
	 * Even bound LWPs get a mailbox and an upcall to hold it.
	 * XXX This should change.
	 */
	newku = upcall_alloc();
	newku->ku_mailbox = uap->mbx;
	newku->ku_func = mbx.km_func;
	bcopy(&mbx.km_stack, &newku->ku_stack, sizeof(stack_t));

	PROC_LOCK(p);
	PROC_SLOCK(p);
	/*
	 * If this is the first call and we are a normal thread,
	 * then transfer all the signals back to the 'process'.
	 * SA threading will make a special thread to handle them.
	 */
	if (first) {
		sigqueue_move_set(&td->td_sigqueue, &p->p_sigqueue,
		    &td->td_sigqueue.sq_signals);
		SIGFILLSET(td->td_sigmask);
		SIG_CANTMASK(td->td_sigmask);
	}

	/*
	 * Make the new upcall available to the process.
	 * It may or may not use it, but it's available.
	 */
	TAILQ_INSERT_TAIL(&p->p_upcalls, newku, ku_link);
	newku->ku_proc = p;
	PROC_UNLOCK(p);
	if (mbx.km_quantum)
		/* XXX should this be in the thread? */
		p->p_upquantum = max(1, mbx.km_quantum / tick);

	/*
	 * Each upcall structure has an owner thread, find which
	 * one owns it.
	 */
	thread_lock(td);
	mtx_lock_spin(&kse_lock);
	if (uap->newgroup) {
		/*
		 * The newgroup parameter now means
		 * "bound, non SA, system scope"
		 * It is only used for the interrupt thread at the
		 * moment I think.. (or system scope threads dopey).
		 * We'll rename it later.
		 */
		newtd = thread_schedule_upcall(td, newku);
	} else {
		/*
		 * If the current thread doesn't have an upcall structure,
		 * just assign the upcall to it.
		 * It'll just return.
		 */
		if (td->td_upcall == NULL) {
			newku->ku_owner = td;
			td->td_upcall = newku;
			newtd = td;
		} else {
			/*
			 * Create a new upcall thread to own it.
			 */
			newtd = thread_schedule_upcall(td, newku);
		}
	}
	mtx_unlock_spin(&kse_lock);
	thread_unlock(td);
	PROC_SUNLOCK(p);

	/*
	 * Let the UTS instance know its LWPID.
	 * It doesn't really care. But the debugger will.
	 * XXX warning.. remember that this moves.
	 */
	suword32(&newku->ku_mailbox->km_lwp, newtd->td_tid);

	/*
	 * In the same manner, if the UTS has a current user thread,
	 * then it is also running on this LWP so set it as well.
	 * The library could do that of course.. but why not..
	 * XXX I'm not sure this can ever happen but ...
	 * XXX does the UTS ever set this in the mailbox before calling this?
	 */
	if (mbx.km_curthread)
		suword32(&mbx.km_curthread->tm_lwp, newtd->td_tid);

	if (sa) {
		newtd->td_pflags |= TDP_SA;
		/*
		 * If we are starting a new thread, kick it off.
		 */
		if (newtd != td) {
			thread_lock(newtd);
			sched_add(newtd, SRQ_BORING);
			thread_unlock(newtd);
		}
	} else {
		newtd->td_pflags &= ~TDP_SA;

		/*
		 * Since a library will use the mailbox pointer to
		 * identify even a bound thread, and the mailbox pointer
		 * will never be allowed to change after this syscall
		 * for a bound thread, set it here so the library can
		 * find the thread after the syscall returns.
		 */
		newtd->td_mailbox = mbx.km_curthread;

		if (newtd != td) {
			/*
			 * If we did create a new thread then
			 * make sure it goes to the right place
			 * when it starts up, and make sure that it runs
			 * at full speed when it gets there.
			 * thread_schedule_upcall() copies all cpu state
			 * to the new thread, so we should clear single step
			 * flag here.
			 */
			cpu_set_upcall_kse(newtd, newku->ku_func,
			    newku->ku_mailbox, &newku->ku_stack);
			PROC_LOCK(p);
			if (p->p_flag & P_TRACED) {
				_PHOLD(p);
				ptrace_clear_single_step(newtd);
				_PRELE(p);
			}
			PROC_UNLOCK(p);
			thread_lock(newtd);
			sched_add(newtd, SRQ_BORING);
			thread_unlock(newtd);
		}
	}
	return (0);
#else /* !KSE */
	return (EOPNOTSUPP);
#endif
}
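
/*
 * Hypothetical startup fragment for an SA-mode UTS.  The mailbox must
 * outlive the KSE; uts_entry, uts_stack and UTS_STACK_SIZE are
 * illustrative names:
 *
 *	static struct kse_mailbox kse_mbx;
 *
 *	bzero(&kse_mbx, sizeof(kse_mbx));
 *	kse_mbx.km_func = uts_entry;
 *	kse_mbx.km_stack.ss_sp = uts_stack;
 *	kse_mbx.km_stack.ss_size = UTS_STACK_SIZE;
 *	if (kse_create(&kse_mbx, 0) != 0)
 *		err(1, "kse_create");
 */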

#ifdef KSE
/*
 * Initialize global thread allocation resources.
 */
void
kseinit(void)
{

	upcall_zone = uma_zcreate("UPCALL", sizeof(struct kse_upcall),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
}

/*
 * Store the thread context in the UTS's mailbox, then add the mailbox
 * to the head of a list we are building in user space.
 * The list is anchored in the proc structure.
 */
int
thread_export_context(struct thread *td, int willexit)
{
	struct proc *p;
	uintptr_t mbx;
	void *addr;
	int error = 0, sig;
	mcontext_t mc;

	p = td->td_proc;

	/*
	 * Post sync signal, or process SIGKILL and SIGSTOP.
	 * For sync signal, it is only possible when the signal is not
	 * caught by userland or process is being debugged.
	 */
	PROC_LOCK(p);
	if (td->td_flags & TDF_NEEDSIGCHK) {
		thread_lock(td);
		td->td_flags &= ~TDF_NEEDSIGCHK;
		thread_unlock(td);
		mtx_lock(&p->p_sigacts->ps_mtx);
		while ((sig = cursig(td)) != 0)
			postsig(sig);
		mtx_unlock(&p->p_sigacts->ps_mtx);
	}
	if (willexit)
		SIGFILLSET(td->td_sigmask);
	PROC_UNLOCK(p);

	/* Export the user/machine context. */
	get_mcontext(td, &mc, 0);
	addr = (void *)(&td->td_mailbox->tm_context.uc_mcontext);
	error = copyout(&mc, addr, sizeof(mcontext_t));
	if (error)
		goto bad;

	addr = (caddr_t)(&td->td_mailbox->tm_lwp);
	if (suword32(addr, 0)) {
		error = EFAULT;
		goto bad;
	}

	/* Get address in latest mbox of list pointer */
	addr = (void *)(&td->td_mailbox->tm_next);
	/*
	 * Put the saved address of the previous first
	 * entry into this one
	 */
	for (;;) {
		mbx = (uintptr_t)p->p_completed;
		if (suword(addr, mbx)) {
			error = EFAULT;
			goto bad;
		}
		PROC_LOCK(p);
		if (mbx == (uintptr_t)p->p_completed) {
			thread_lock(td);
			p->p_completed = td->td_mailbox;
			/*
			 * The thread context may be taken away by
			 * other upcall threads when we unlock the
			 * process lock.  It is no longer valid to
			 * use it again in any other place.
			 */
			td->td_mailbox = NULL;
			thread_unlock(td);
			PROC_UNLOCK(p);
			break;
		}
		PROC_UNLOCK(p);
	}
	td->td_usticks = 0;
	return (0);

bad:
	PROC_LOCK(p);
	sigexit(td, SIGILL);
	return (error);
}
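
/*
 * The loop above publishes td_mailbox on the user-visible completed list
 * with a store-and-revalidate scheme instead of a lock.  A hypothetical
 * UTS consumes the chain it is handed in km_completed (linked through
 * tm_next) like this; make_runnable() is illustrative:
 *
 *	struct kse_thr_mailbox *tmbx, *next;
 *
 *	for (tmbx = kmbx->km_completed; tmbx != NULL; tmbx = next) {
 *		next = tmbx->tm_next;
 *		make_runnable(tmbx);
 *	}
 */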

/*
 * Take the list of completed mailboxes for this Process and put them on this
 * upcall's mailbox as it's the next one going up.
 */
static int
thread_link_mboxes(struct proc *p, struct kse_upcall *ku)
{
	void *addr;
	uintptr_t mbx;

	addr = (void *)(&ku->ku_mailbox->km_completed);
	for (;;) {
		mbx = (uintptr_t)p->p_completed;
		if (suword(addr, mbx)) {
			PROC_LOCK(p);
			psignal(p, SIGSEGV);
			PROC_UNLOCK(p);
			return (EFAULT);
		}
		PROC_LOCK(p);
		if (mbx == (uintptr_t)p->p_completed) {
			p->p_completed = NULL;
			PROC_UNLOCK(p);
			break;
		}
		PROC_UNLOCK(p);
	}
	return (0);
}

/*
 * This function should be called at statclock interrupt time
 */
int
thread_statclock(int user)
{
	struct thread *td = curthread;

	if (!(td->td_pflags & TDP_SA))
		return (0);
	if (user) {
		/* Currently always done via ast() */
		thread_lock(td);
		td->td_flags |= TDF_ASTPENDING;
		thread_unlock(td);
		td->td_uuticks++;
	} else if (td->td_mailbox != NULL)
		td->td_usticks++;
	return (0);
}

/*
 * Export stat clock ticks for userland
 */
static int
thread_update_usr_ticks(struct thread *td)
{
	struct proc *p = td->td_proc;
	caddr_t addr;
	u_int uticks;

	thread_lock(td);
	if (td->td_mailbox == NULL) {
		thread_unlock(td);
		return (-1);
	}
	thread_unlock(td);

	if ((uticks = td->td_uuticks) != 0) {
		td->td_uuticks = 0;
		addr = (caddr_t)&td->td_mailbox->tm_uticks;
		if (suword32(addr, uticks+fuword32(addr)))
			goto error;
	}
	if ((uticks = td->td_usticks) != 0) {
		td->td_usticks = 0;
		addr = (caddr_t)&td->td_mailbox->tm_sticks;
		if (suword32(addr, uticks+fuword32(addr)))
			goto error;
	}
	return (0);

error:
	PROC_LOCK(p);
	psignal(p, SIGSEGV);
	PROC_UNLOCK(p);
	return (-2);
}

/*
 * This function is intended to be used to initialize a spare thread
 * for upcall. Initialize thread's large data area outside the thread lock
 * for thread_schedule_upcall(). The crhold is also here to get it out
 * from the schedlock as it has a mutex op itself.
 * XXX BUG.. we need to get the cr ref after the thread has
 * checked and changed its own, not 6 months before...
 */
int
thread_alloc_spare(struct thread *td)
{
	struct thread *spare;

	if (td->td_standin)
		return (1);
	spare = thread_alloc();
	if (spare == NULL)
		return (0);
	td->td_standin = spare;
	bzero(&spare->td_startzero,
	    __rangeof(struct thread, td_startzero, td_endzero));
	bzero(&spare->td_osd, sizeof(struct osd));
	spare->td_proc = td->td_proc;
	spare->td_ucred = crhold(td->td_ucred);
	spare->td_flags = TDF_INMEM;
	return (1);
}

/*
 * Create a thread and schedule it for upcall on the KSE given.
 * Use our thread's standin so that we don't have to allocate one.
 */
struct thread *
thread_schedule_upcall(struct thread *td, struct kse_upcall *ku)
{
	struct thread *td2;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	mtx_assert(&kse_lock, MA_OWNED);
	/*
	 * Schedule an upcall thread on specified kse_upcall,
	 * the kse_upcall must be free.
	 * td must have a spare thread.
	 */
	KASSERT(ku->ku_owner == NULL, ("%s: upcall has owner", __func__));
	if ((td2 = td->td_standin) != NULL) {
		td->td_standin = NULL;
	} else {
		panic("no reserve thread when scheduling an upcall");
		return (NULL);
	}
	CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)",
	    td2, td->td_proc->p_pid, td->td_proc->p_comm);
	/*
	 * The bzero was already done in thread_alloc_spare() because
	 * we can't do the crhold here, as we are in schedlock already.
	 */
	bcopy(&td->td_startcopy, &td2->td_startcopy,
	    __rangeof(struct thread, td_startcopy, td_endcopy));
	sched_fork_thread(td, td2);
	thread_link(td2, ku->ku_proc);
	/* inherit parts of blocked thread's context as a good template */
	cpu_set_upcall(td2, td);
	/* Let the new thread become owner of the upcall */
	ku->ku_owner = td2;
	td2->td_upcall = ku;
	td2->td_pflags = TDP_SA|TDP_UPCALLING;
	td2->td_state = TDS_CAN_RUN;
	td2->td_inhibitors = 0;
	SIGFILLSET(td2->td_sigmask);
	SIG_CANTMASK(td2->td_sigmask);
	return (td2);	/* bogus.. should be a void function */
}

/*
 * It is only used when a thread has generated a trap and the process is
 * being debugged.
 */
void
thread_signal_add(struct thread *td, ksiginfo_t *ksi)
{
	struct proc *p;
	struct sigacts *ps;
	int error;

	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	ps = p->p_sigacts;
	mtx_assert(&ps->ps_mtx, MA_OWNED);

	mtx_unlock(&ps->ps_mtx);
	SIGADDSET(td->td_sigmask, ksi->ksi_signo);
	PROC_UNLOCK(p);
	error = copyout(&ksi->ksi_info, &td->td_mailbox->tm_syncsig,
	    sizeof(siginfo_t));
	if (error) {
		PROC_LOCK(p);
		sigexit(td, SIGSEGV);
	}
	PROC_LOCK(p);
	mtx_lock(&ps->ps_mtx);
}
#include "opt_sched.h"
struct thread *
thread_switchout(struct thread *td, int flags, struct thread *nextthread)
{
	struct kse_upcall *ku;
	struct thread *td2;

	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/*
	 * If the outgoing thread is in threaded group and has never
	 * scheduled an upcall, decide whether this is a short
	 * or long term event and thus whether or not to schedule
	 * an upcall.
	 * If it is a short term event, just suspend it in
	 * a way that takes its KSE with it.
	 * Select the events for which we want to schedule upcalls.
	 * For now it's just sleep or if thread is suspended but
	 * process wide suspending flag is not set (debugger
	 * suspends thread).
	 * XXXKSE eventually almost any inhibition could do.
	 */
	if (TD_CAN_UNBIND(td) && (td->td_standin) &&
	    (TD_ON_SLEEPQ(td) || (TD_IS_SUSPENDED(td) &&
	    !P_SHOULDSTOP(td->td_proc)))) {
		/*
		 * Release ownership of upcall, and schedule an upcall
		 * thread, this new upcall thread becomes the owner of
		 * the upcall structure. It will be ahead of us in the
		 * run queue, so as we are stopping, it should either
		 * start up immediately, or at least before us if
		 * we release our slot.
		 */
		mtx_lock_spin(&kse_lock);
		ku = td->td_upcall;
		ku->ku_owner = NULL;
		td->td_upcall = NULL;
		td->td_pflags &= ~TDP_CAN_UNBIND;
		td2 = thread_schedule_upcall(td, ku);
		mtx_unlock_spin(&kse_lock);
		if (flags & SW_INVOL || nextthread) {
			thread_lock(td2);
			sched_add(td2, SRQ_YIELDING);
			thread_unlock(td2);
		} else {
			/* Keep up with reality.. we have one extra thread
			 * in the picture.. and it's 'running'.
			 */
			return td2;
		}
	}
	return (nextthread);
}

/*
 * Setup done on the thread when it enters the kernel.
 */
void
thread_user_enter(struct thread *td)
{
	struct proc *p = td->td_proc;
	struct kse_upcall *ku;
	struct kse_thr_mailbox *tmbx;
	uint32_t flags;

	/*
	 * First check that we shouldn't just abort.  We
	 * can suspend the thread here or just exit.
	 */
	if (__predict_false(P_SHOULDSTOP(p))) {
		PROC_LOCK(p);
		thread_suspend_check(0);
		PROC_UNLOCK(p);
	}

	if (!(td->td_pflags & TDP_SA))
		return;

	/*
	 * If we are doing a syscall in a KSE environment,
	 * note where our mailbox is.
	 */

	thread_lock(td);
	ku = td->td_upcall;
	thread_unlock(td);

	KASSERT(ku != NULL, ("no upcall owned"));
	KASSERT(ku->ku_owner == td, ("wrong owner"));
	KASSERT(!TD_CAN_UNBIND(td), ("can unbind"));

	if (td->td_standin == NULL) {
		if (!thread_alloc_spare(td)) {
			PROC_LOCK(p);
			if (kern_logsigexit)
				log(LOG_INFO,
				    "pid %d (%s), uid %d: thread_alloc_spare failed\n",
				    p->p_pid, p->p_comm,
				    td->td_ucred ? td->td_ucred->cr_uid : -1);
			sigexit(td, SIGSEGV);	/* XXX ? */
			/* panic("thread_user_enter: thread_alloc_spare failed"); */
		}
	}
	ku->ku_mflags = fuword32((void *)&ku->ku_mailbox->km_flags);
	tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
	if ((tmbx == NULL) || (tmbx == (void *)-1L) ||
	    (ku->ku_mflags & KMF_NOUPCALL)) {
		td->td_mailbox = NULL;
	} else {
		flags = fuword32(&tmbx->tm_flags);
		/*
		 * On some architectures, the TP register points to the
		 * thread mailbox but not to the kse mailbox, and userland
		 * cannot atomically clear km_curthread; instead it uses
		 * the TP register and sets TMF_NOUPCALL in the thread
		 * flags to indicate a critical region.
		 */
		if (flags & TMF_NOUPCALL) {
			td->td_mailbox = NULL;
		} else {
			td->td_mailbox = tmbx;
			td->td_pflags |= TDP_CAN_UNBIND;
			PROC_LOCK(p);
			if (__predict_false(p->p_flag & P_TRACED)) {
				flags = fuword32(&tmbx->tm_dflags);
				if (flags & TMDF_SUSPEND) {
					thread_lock(td);
					/* fuword can block, check again */
					if (td->td_upcall)
						ku->ku_flags |= KUF_DOUPCALL;
					thread_unlock(td);
				}
			}
			PROC_UNLOCK(p);
		}
	}
}
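
/*
 * Hypothetical userland counterpart to the TMF_NOUPCALL handling above:
 * a library enters a critical region by flagging its own thread mailbox,
 * which makes the kernel leave the thread bound until the flag is
 * cleared:
 *
 *	tmbx->tm_flags |= TMF_NOUPCALL;
 *	... touch UTS-private state ...
 *	tmbx->tm_flags &= ~TMF_NOUPCALL;
 */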

/*
 * The extra work we go through if we are a threaded process when we
 * return to userland.
 *
 * If we are a KSE process and returning to user mode, check for
 * extra work to do before we return (e.g. for more syscalls
 * to complete first). If we were in a critical section, we should
 * just return to let it finish. Same if we were in the UTS (in
 * which case the mailbox's context's busy indicator will be set).
 * The only traps we support will have set the mailbox.
 * We will clear it here.
 */
int
thread_userret(struct thread *td, struct trapframe *frame)
{
	struct kse_upcall *ku;
	struct proc *p;
	struct timespec ts;
	int error = 0, uts_crit;

	/* Nothing to do with bound thread */
	if (!(td->td_pflags & TDP_SA))
		return (0);

	/*
	 * Update stat clock count for userland
	 */
	if (td->td_mailbox != NULL) {
		thread_update_usr_ticks(td);
		uts_crit = 0;
	} else {
		uts_crit = 1;
	}

	p = td->td_proc;
	thread_lock(td);
	ku = td->td_upcall;

	/*
	 * Optimisation:
	 * This thread has not started any upcall.
	 * If there is no work to report other than ourself,
	 * then it can return direct to userland.
	 */
	if (TD_CAN_UNBIND(td)) {
		thread_unlock(td);
		td->td_pflags &= ~TDP_CAN_UNBIND;
		if ((td->td_flags & TDF_NEEDSIGCHK) == 0 &&
		    (p->p_completed == NULL) &&
		    (ku->ku_flags & KUF_DOUPCALL) == 0 &&
		    (p->p_upquantum && ticks < p->p_nextupcall)) {
			nanotime(&ts);
			error = copyout(&ts,
			    (caddr_t)&ku->ku_mailbox->km_timeofday,
			    sizeof(ts));
			td->td_mailbox = 0;
			ku->ku_mflags = 0;
			if (error)
				goto out;
			return (0);
		}
		thread_export_context(td, 0);
		/*
		 * There is something to report, and we own an upcall
		 * structure, we can go to userland.
		 * Turn ourself into an upcall thread.
		 */
		td->td_pflags |= TDP_UPCALLING;
	} else if (td->td_mailbox && (ku == NULL)) {
		thread_unlock(td);
		thread_export_context(td, 1);
		PROC_LOCK(p);
		if (p->p_upsleeps)
			wakeup(&p->p_completed);
		WITNESS_WARN(WARN_PANIC, &p->p_mtx.lock_object,
		    "thread exiting in userret");
		sigqueue_flush(&td->td_sigqueue);
		PROC_SLOCK(p);
		thread_stopped(p);
		thread_exit();
		/* NOTREACHED */
	} else
		thread_unlock(td);

	KASSERT(ku != NULL, ("upcall is NULL"));
	KASSERT(TD_CAN_UNBIND(td) == 0, ("can unbind"));

	PROC_LOCK(p);
	PROC_SLOCK(p);
	if (p->p_numthreads > max_threads_per_proc) {
		max_threads_hits++;
		while (p->p_numthreads > max_threads_per_proc) {
			if (p->p_numupcalls >= max_threads_per_proc)
				break;
			PROC_SUNLOCK(p);
			if (msleep(&p->p_numthreads, &p->p_mtx, PPAUSE|PCATCH,
			    "maxthreads", hz/10) != EWOULDBLOCK) {
				PROC_SLOCK(p);
				break;
			} else
				PROC_SLOCK(p);
		}
	}
	PROC_SUNLOCK(p);
	PROC_UNLOCK(p);

	if (td->td_pflags & TDP_UPCALLING) {
		uts_crit = 0;
		p->p_nextupcall = ticks + p->p_upquantum;
		/*
		 * There is no more work to do and we are going to ride
		 * this thread up to userland as an upcall.
		 * Do the last parts of the setup needed for the upcall.
		 */
		CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
		    td, td->td_proc->p_pid, td->td_proc->p_comm);

		td->td_pflags &= ~TDP_UPCALLING;
		if (ku->ku_flags & KUF_DOUPCALL) {
			PROC_SLOCK(p);
			ku->ku_flags &= ~KUF_DOUPCALL;
			PROC_SUNLOCK(p);
		}
		/*
		 * Set user context to the UTS
		 */
		if (!(ku->ku_mflags & KMF_NOUPCALL)) {
			cpu_set_upcall_kse(td, ku->ku_func, ku->ku_mailbox,
			    &ku->ku_stack);
			PROC_LOCK(p);
			if (p->p_flag & P_TRACED) {
				_PHOLD(p);
				ptrace_clear_single_step(td);
				_PRELE(p);
			}
			PROC_UNLOCK(p);
			error = suword32(&ku->ku_mailbox->km_lwp,
			    td->td_tid);
			if (error)
				goto out;
			error = suword(&ku->ku_mailbox->km_curthread, 0);
			if (error)
				goto out;
		}

		/*
		 * Unhook the list of completed threads.
		 * anything that completes after this gets to
		 * come in next time.
		 * Put the list of completed thread mailboxes on
		 * this KSE's mailbox.
		 */
		if (!(ku->ku_mflags & KMF_NOCOMPLETED) &&
		    (error = thread_link_mboxes(p, ku)) != 0)
			goto out;
	}
	if (!uts_crit) {
		nanotime(&ts);
		error = copyout(&ts, &ku->ku_mailbox->km_timeofday, sizeof(ts));
	}

out:
	if (error) {
		/*
		 * Things are going to be so screwed we should just kill
		 * the process.
		 * how do we do that?
		 */
		PROC_LOCK(p);
		psignal(p, SIGSEGV);
		PROC_UNLOCK(p);
	} else {
		/*
		 * Optimisation:
		 * Ensure that we have a spare thread available,
		 * for when we re-enter the kernel.
		 */
		if (td->td_standin == NULL)
			thread_alloc_spare(td);	/* XXX care of failure ? */
	}

	ku->ku_mflags = 0;
	td->td_mailbox = NULL;
	td->td_usticks = 0;
	return (error);	/* go sync */
}

/*
 * Called after ptrace has resumed a process: force all virtual CPUs to
 * schedule an upcall for the SA process, because the debugger may have
 * changed something in userland and the UTS should notice it as soon
 * as possible.
 */
void
thread_continued(struct proc *p)
{
	struct kse_upcall *ku;
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(P_SHOULDSTOP(p), ("process not stopped"));

	if (!(p->p_flag & P_SA))
		return;

	if (p->p_flag & P_TRACED) {
		td = TAILQ_FIRST(&p->p_threads);
		if (td && (td->td_pflags & TDP_SA)) {
			FOREACH_UPCALL_IN_PROC(p, ku) {
				PROC_SLOCK(p);
				ku->ku_flags |= KUF_DOUPCALL;
				PROC_SUNLOCK(p);
				wakeup(&p->p_completed);
			}
		}
	}
}
#endif