FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_kse.c
1 /*-
2 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice(s), this list of conditions and the following disclaimer as
10 * the first lines of this file unmodified other than the possible
11 * addition of one or more copyright notices.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice(s), this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
17 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
20 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
26 * DAMAGE.
27 */
28
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/kernel.h>
35 #include <sys/imgact.h>
36 #include <sys/lock.h>
37 #include <sys/mutex.h>
38 #include <sys/proc.h>
39 #include <sys/ptrace.h>
40 #include <sys/smp.h>
41 #include <sys/syscallsubr.h>
42 #include <sys/sysproto.h>
43 #include <sys/sched.h>
44 #include <sys/signalvar.h>
45 #include <sys/sleepqueue.h>
46 #include <sys/syslog.h>
47 #include <sys/kse.h>
48 #include <sys/ktr.h>
49 #include <vm/uma.h>
50
#ifdef KSE
/* UMA zone backing all struct kse_upcall allocations. */
static uma_zone_t upcall_zone;

/* DEBUG ONLY */
extern int virtual_cpu;
extern int thread_debug;

extern int max_threads_per_proc;
extern int max_groups_per_proc;
extern int max_threads_hits;
extern struct mtx kse_lock;


/* Upcalls retired by upcall_remove() park here until upcall_reap() frees them. */
TAILQ_HEAD(, kse_upcall) zombie_upcalls =
	TAILQ_HEAD_INITIALIZER(zombie_upcalls);

static int thread_update_usr_ticks(struct thread *td);
static int thread_alloc_spare(struct thread *td);
static struct thread *thread_schedule_upcall(struct thread *td, struct kse_upcall *ku);
static struct kse_upcall *upcall_alloc(void);


/* Spin lock protecting upcall ownership links and the zombie list. */
struct mtx kse_lock;
MTX_SYSINIT(kse_lock, &kse_lock, "kse lock", MTX_SPIN);
75
76 struct kse_upcall *
77 upcall_alloc(void)
78 {
79 struct kse_upcall *ku;
80
81 ku = uma_zalloc(upcall_zone, M_WAITOK | M_ZERO);
82 return (ku);
83 }
84
85 void
86 upcall_reap(void)
87 {
88 TAILQ_HEAD(, kse_upcall) zupcalls;
89 struct kse_upcall *ku_item, *ku_tmp;
90
91 TAILQ_INIT(&zupcalls);
92 mtx_lock_spin(&kse_lock);
93 if (!TAILQ_EMPTY(&zombie_upcalls)) {
94 TAILQ_CONCAT(&zupcalls, &zombie_upcalls, ku_link);
95 TAILQ_INIT(&zombie_upcalls);
96 }
97 mtx_unlock_spin(&kse_lock);
98 TAILQ_FOREACH_SAFE(ku_item, &zupcalls, ku_link, ku_tmp)
99 uma_zfree(upcall_zone, ku_item);
100 }
101
102 void
103 upcall_remove(struct thread *td)
104 {
105
106 PROC_SLOCK_ASSERT(td->td_proc, MA_OWNED);
107 THREAD_LOCK_ASSERT(td, MA_OWNED);
108 if (td->td_upcall != NULL) {
109 /*
110 * If we are not a bound thread then decrement the count of
111 * possible upcall sources
112 */
113 if (td->td_pflags & TDP_SA)
114 td->td_proc->p_numupcalls--;
115 mtx_lock_spin(&kse_lock);
116 td->td_upcall->ku_owner = NULL;
117 TAILQ_REMOVE(&td->td_upcall->ku_proc->p_upcalls, td->td_upcall,
118 ku_link);
119 TAILQ_INSERT_HEAD(&zombie_upcalls, td->td_upcall, ku_link);
120 mtx_unlock_spin(&kse_lock);
121 td->td_upcall = NULL;
122 }
123 }
124 #endif
125
#ifndef _SYS_SYSPROTO_H_
/* System call argument layout for kse_switchin(2). */
struct kse_switchin_args {
	struct kse_thr_mailbox *tmbx;
	int flags;
};
#endif
132
#ifdef KSE
/*
 * Remove thread td from its process' thread list and retire its
 * upcall.  The process spin lock and thread lock must be held by the
 * caller (asserted inside upcall_remove()).
 */
void
kse_unlink(struct thread *td)
{
	mtx_lock_spin(&kse_lock);
	thread_unlink(td);
	mtx_unlock_spin(&kse_lock);
	upcall_remove(td);
}
#endif
143
/*
 * kse_switchin(2) -- switch this LWP to the user thread context stored
 * in uap->tmbx.  Only valid for a thread that owns an upcall and is not
 * currently unbindable.  On success the register state loaded by
 * set_mcontext() must be preserved, so EJUSTRETURN is returned to tell
 * the syscall layer not to overwrite it.
 */
int
kse_switchin(struct thread *td, struct kse_switchin_args *uap)
{
#ifdef KSE
	struct kse_thr_mailbox tmbx;
	struct kse_upcall *ku;
	int error;

	thread_lock(td);
	if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td)) {
		thread_unlock(td);
		return (EINVAL);
	}
	thread_unlock(td);
	error = (uap->tmbx == NULL) ? EINVAL : 0;
	if (!error)
		error = copyin(uap->tmbx, &tmbx, sizeof(tmbx));
	if (!error && (uap->flags & KSE_SWITCHIN_SETTMBX))
		error = (suword(&ku->ku_mailbox->km_curthread,
		    (long)uap->tmbx) != 0 ? EINVAL : 0);
	if (!error)
		error = set_mcontext(td, &tmbx.tm_context.uc_mcontext);
	if (!error) {
		/* Publish our LWP id in the user thread mailbox. */
		suword32(&uap->tmbx->tm_lwp, td->td_tid);
		if (uap->flags & KSE_SWITCHIN_SETTMBX) {
			td->td_mailbox = uap->tmbx;
			td->td_pflags |= TDP_CAN_UNBIND;
		}
		PROC_LOCK(td->td_proc);
		if (td->td_proc->p_flag & P_TRACED) {
			/* Honour the debugger flags from the mailbox. */
			_PHOLD(td->td_proc);
			if (tmbx.tm_dflags & TMDF_SSTEP)
				ptrace_single_step(td);
			else
				ptrace_clear_single_step(td);
			if (tmbx.tm_dflags & TMDF_SUSPEND) {
				thread_lock(td);
				/* fuword can block, check again */
				if (td->td_upcall)
					ku->ku_flags |= KUF_DOUPCALL;
				thread_unlock(td);
			}
			_PRELE(td->td_proc);
		}
		PROC_UNLOCK(td->td_proc);
	}
	return ((error == 0) ? EJUSTRETURN : error);
#else /* !KSE */
	return (EOPNOTSUPP);
#endif
}
195
/*
struct kse_thr_interrupt_args {
	struct kse_thr_mailbox * tmbx;
	int cmd;
	long data;
};
*/
/*
 * kse_thr_interrupt(2) -- operate on the user thread identified by the
 * thread mailbox pointer in uap->tmbx.  The sub-command in uap->cmd
 * selects: signal delivery, interrupt/restart of a sleeping thread,
 * process exit via signal, debugger suspension of a bound thread, or
 * an execve() performed on behalf of the UTS.
 */
int
kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
{
#ifdef KSE
	struct kse_execve_args args;
	struct image_args iargs;
	struct proc *p;
	struct thread *td2;
	struct kse_upcall *ku;
	struct kse_thr_mailbox *tmbx;
	uint32_t flags;
	int error;

	p = td->td_proc;

	/* Only processes in SA threading mode may use this call. */
	PROC_LOCK(p);
	if (!(p->p_flag & P_SA)) {
		PROC_UNLOCK(p);
		return (EINVAL);
	}
	PROC_UNLOCK(p);

	switch (uap->cmd) {
	case KSE_INTR_SENDSIG:
		if (uap->data < 0 || uap->data > _SIG_MAXSIG)
			return (EINVAL);
		/* FALLTHROUGH -- shares the mailbox lookup below. */
	case KSE_INTR_INTERRUPT:
	case KSE_INTR_RESTART:
		PROC_LOCK(p);
		PROC_SLOCK(p);
		/* Locate the thread owning the given mailbox. */
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2->td_mailbox == uap->tmbx)
				break;
		}
		if (td2 == NULL) {
			PROC_SUNLOCK(p);
			PROC_UNLOCK(p);
			return (ESRCH);
		}
		thread_lock(td2);
		PROC_SUNLOCK(p);
		if (uap->cmd == KSE_INTR_SENDSIG) {
			if (uap->data > 0) {
				td2->td_flags &= ~TDF_INTERRUPT;
				thread_unlock(td2);
				tdsignal(p, td2, (int)uap->data, NULL);
			} else {
				/* Signal 0: existence check only. */
				thread_unlock(td2);
			}
		} else {
			/* Interrupt or restart an in-kernel sleep. */
			td2->td_flags |= TDF_INTERRUPT | TDF_ASTPENDING;
			if (TD_CAN_UNBIND(td2))
				td2->td_upcall->ku_flags |= KUF_DOUPCALL;
			if (uap->cmd == KSE_INTR_INTERRUPT)
				td2->td_intrval = EINTR;
			else
				td2->td_intrval = ERESTART;
			if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR))
				sleepq_abort(td2, td2->td_intrval);
			thread_unlock(td2);
		}
		PROC_UNLOCK(p);
		break;
	case KSE_INTR_SIGEXIT:
		if (uap->data < 1 || uap->data > _SIG_MAXSIG)
			return (EINVAL);
		PROC_LOCK(p);
		/* sigexit() exits the process; it does not return. */
		sigexit(td, (int)uap->data);
		break;

	case KSE_INTR_DBSUSPEND:
		/* this sub-function is only for bound thread */
		if (td->td_pflags & TDP_SA)
			return (EINVAL);
		thread_lock(td);
		ku = td->td_upcall;
		thread_unlock(td);
		/*
		 * NOTE(review): ku is dereferenced without a NULL check;
		 * presumably a bound thread always owns an upcall -- confirm.
		 */
		tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
		if (tmbx == NULL || tmbx == (void *)-1)
			return (EINVAL);
		flags = 0;
		PROC_LOCK(p);
		/*
		 * While traced and the mailbox requests suspension, park
		 * this thread for the debugger.  The flag is re-read on
		 * each pass because fuword32() may sleep.
		 */
		while ((p->p_flag & P_TRACED) && !(p->p_flag & P_SINGLE_EXIT)) {
			flags = fuword32(&tmbx->tm_dflags);
			if (!(flags & TMDF_SUSPEND))
				break;
			PROC_SLOCK(p);
			thread_stopped(p);
			PROC_UNLOCK(p);
			thread_lock(td);
			thread_suspend_one(td);
			PROC_SUNLOCK(p);
			mi_switch(SW_VOL, NULL);
			thread_unlock(td);
			PROC_LOCK(p);
		}
		PROC_UNLOCK(p);
		return (0);

	case KSE_INTR_EXECVE:
		/* Copy in the exec arguments and perform the exec. */
		error = copyin((void *)uap->data, &args, sizeof(args));
		if (error)
			return (error);
		error = exec_copyin_args(&iargs, args.path, UIO_USERSPACE,
		    args.argv, args.envp);
		if (error == 0)
			error = kern_execve(td, &iargs, NULL);
		if (error == 0) {
			/* Restore the pending signal set and signal mask. */
			PROC_LOCK(p);
			SIGSETOR(td->td_siglist, args.sigpend);
			PROC_UNLOCK(p);
			kern_sigprocmask(td, SIG_SETMASK, &args.sigmask, NULL,
			    0);
		}
		return (error);

	default:
		return (EINVAL);
	}
	return (0);
#else /* !KSE */
	return (EOPNOTSUPP);
#endif
}
327
/*
struct kse_exit_args {
	register_t dummy;
};
*/
/*
 * kse_exit(2) -- called by the UTS when an upcall LWP is finished.
 * Refuses (EDEADLK) to retire the last non-exiting upcall while other
 * threads still exist; otherwise marks the UTS mailbox KMF_DONE,
 * retires the upcall and exits the thread.  For the very last thread
 * the process leaves threading mode and currently exits entirely.
 */
int
kse_exit(struct thread *td, struct kse_exit_args *uap)
{
#ifdef KSE
	struct proc *p;
	struct kse_upcall *ku, *ku2;
	int error, count;

	p = td->td_proc;
	/*
	 * Ensure that this is only called from the UTS
	 */
	thread_lock(td);
	if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td)) {
		thread_unlock(td);
		return (EINVAL);
	}
	thread_unlock(td);

	/*
	 * Calculate the existing non-exiting upcalls in this process.
	 * If we are the last upcall but there are still other threads,
	 * then do not exit. We need the other threads to be able to
	 * complete whatever they are doing.
	 * XXX This relies on the userland knowing what to do if we return.
	 * It may be a better choice to convert ourselves into a kse_release
	 * ( or similar) and wait in the kernel to be needed.
	 * XXX Where are those other threads? I suppose they are waiting in
	 * the kernel. We should wait for them all at the user boundary after
	 * turning into an exit.
	 */
	count = 0;
	PROC_LOCK(p);
	PROC_SLOCK(p);
	FOREACH_UPCALL_IN_PROC(p, ku2) {
		if ((ku2->ku_flags & KUF_EXITING) == 0)
			count++;
	}
	if (count == 1 && (p->p_numthreads > 1)) {
		PROC_SUNLOCK(p);
		PROC_UNLOCK(p);
		return (EDEADLK);
	}
	ku->ku_flags |= KUF_EXITING;
	PROC_SUNLOCK(p);
	PROC_UNLOCK(p);

	/*
	 * Mark the UTS mailbox as having been finished with.
	 * If that fails then just go for a segfault.
	 * XXX need to check it that can be deliverred without a mailbox.
	 */
	error = suword32(&ku->ku_mailbox->km_flags, ku->ku_mflags|KMF_DONE);
	if (!(td->td_pflags & TDP_SA))
		if (suword32(&td->td_mailbox->tm_lwp, 0))
			error = EFAULT;
	PROC_LOCK(p);
	if (error)
		psignal(p, SIGSEGV);
	sigqueue_flush(&td->td_sigqueue);
	PROC_SLOCK(p);
	thread_lock(td);
	upcall_remove(td);
	thread_unlock(td);
	if (p->p_numthreads != 1) {
		/* Other threads remain: just exit this one. */
		thread_stopped(p);
		thread_exit();
		/* NOTREACHED */
	}
	/*
	 * This is the last thread. Just return to the user.
	 * Effectively we have left threading mode..
	 * The only real thing left to do is ensure that the
	 * scheduler sets out concurrency back to 1 as that may be a
	 * resource leak otherwise.
	 * This is an A[PB]I issue.. what SHOULD we do?
	 * One possibility is to return to the user. It may not cope well.
	 * The other possibility would be to let the process exit.
	 */
	thread_unthread(td);
	PROC_SUNLOCK(p);
	PROC_UNLOCK(p);
#if 0
	return (0);
#else
	printf("kse_exit: called on last thread. Calling exit1()");
	exit1(td, 0);
#endif
#else /* !KSE */
	return (EOPNOTSUPP);
#endif
}
425
/*
 * Either becomes an upcall or waits for an awakening event and
 * then becomes an upcall. Only error cases return.
 */
/*
struct kse_release_args {
	struct timespec *timeout;
};
*/
int
kse_release(struct thread *td, struct kse_release_args *uap)
{
#ifdef KSE
	struct proc *p;
	struct kse_upcall *ku;
	struct timespec timeout;
	struct timeval tv;
	sigset_t sigset;
	int error;

	p = td->td_proc;
	thread_lock(td);
	if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td)) {
		thread_unlock(td);
		printf("kse_release: called outside of threading. exiting");
		exit1(td, 0);
	}
	thread_unlock(td);
	/* An optional timeout bounds the sleeps below. */
	if (uap->timeout != NULL) {
		if ((error = copyin(uap->timeout, &timeout, sizeof(timeout))))
			return (error);
		TIMESPEC_TO_TIMEVAL(&tv, &timeout);
	}
	if (td->td_pflags & TDP_SA)
		td->td_pflags |= TDP_UPCALLING;
	else {
		ku->ku_mflags = fuword32(&ku->ku_mailbox->km_flags);
		if (ku->ku_mflags == -1) {
			/* Mailbox unreadable: kill the process. */
			PROC_LOCK(p);
			sigexit(td, SIGSEGV);
		}
	}
	PROC_LOCK(p);
	if (ku->ku_mflags & KMF_WAITSIGEVENT) {
		/* UTS wants to wait for signal event */
		if (!(p->p_flag & P_SIGEVENT) &&
		    !(ku->ku_flags & KUF_DOUPCALL)) {
			td->td_kflags |= TDK_KSERELSIG;
			error = msleep(&p->p_siglist, &p->p_mtx, PPAUSE|PCATCH,
			    "ksesigwait", (uap->timeout ? tvtohz(&tv) : 0));
			td->td_kflags &= ~(TDK_KSERELSIG | TDK_WAKEUP);
		}
		p->p_flag &= ~P_SIGEVENT;
		sigset = p->p_siglist;
		PROC_UNLOCK(p);
		/* Report the signals caught while we slept. */
		error = copyout(&sigset, &ku->ku_mailbox->km_sigscaught,
		    sizeof(sigset));
	} else {
		/* Sleep until a completed context or an upcall request. */
		if ((ku->ku_flags & KUF_DOUPCALL) == 0 &&
		    ((ku->ku_mflags & KMF_NOCOMPLETED) ||
		    (p->p_completed == NULL))) {
			p->p_upsleeps++;
			td->td_kflags |= TDK_KSEREL;
			error = msleep(&p->p_completed, &p->p_mtx,
			    PPAUSE|PCATCH, "kserel",
			    (uap->timeout ? tvtohz(&tv) : 0));
			td->td_kflags &= ~(TDK_KSEREL | TDK_WAKEUP);
			p->p_upsleeps--;
		}
		PROC_UNLOCK(p);
	}
	if (ku->ku_flags & KUF_DOUPCALL) {
		PROC_SLOCK(p);
		ku->ku_flags &= ~KUF_DOUPCALL;
		PROC_SUNLOCK(p);
	}
	return (0);
#else /* !KSE */
	return (EOPNOTSUPP);
#endif
}
507
/* struct kse_wakeup_args {
	struct kse_mailbox *mbx;
}; */
/*
 * kse_wakeup(2) -- wake the upcall owning the given KSE mailbox (or,
 * when mbx is NULL, any kse_release() sleeper / the first upcall in
 * the process) so it can perform an upcall.
 */
int
kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
{
#ifdef KSE
	struct proc *p;
	struct kse_upcall *ku;
	struct thread *td2;

	p = td->td_proc;
	td2 = NULL;
	ku = NULL;
	/* KSE-enabled processes only, please. */
	PROC_LOCK(p);
	if (!(p->p_flag & P_SA)) {
		PROC_UNLOCK(p);
		return (EINVAL);
	}
	PROC_SLOCK(p);
	if (uap->mbx) {
		/* Wake the upcall matching this specific mailbox. */
		FOREACH_UPCALL_IN_PROC(p, ku) {
			if (ku->ku_mailbox == uap->mbx)
				break;
		}
	} else {
		/* No mailbox given: wake any kse_release() sleeper... */
		if (p->p_upsleeps) {
			PROC_SUNLOCK(p);
			wakeup(&p->p_completed);
			PROC_UNLOCK(p);
			return (0);
		}
		/* ...or fall back to the first upcall in the process. */
		ku = TAILQ_FIRST(&p->p_upcalls);
	}
	if (ku == NULL) {
		PROC_SUNLOCK(p);
		PROC_UNLOCK(p);
		return (ESRCH);
	}
	mtx_lock_spin(&kse_lock);
	if ((td2 = ku->ku_owner) == NULL) {
		mtx_unlock_spin(&kse_lock);
		PROC_SUNLOCK(p);
		PROC_UNLOCK(p);
		panic("%s: no owner", __func__);
	} else if (td2->td_kflags & (TDK_KSEREL | TDK_KSERELSIG)) {
		mtx_unlock_spin(&kse_lock);
		/* Owner is asleep in kse_release(); pull it off its sleepq. */
		if (!(td2->td_kflags & TDK_WAKEUP)) {
			td2->td_kflags |= TDK_WAKEUP;
			if (td2->td_kflags & TDK_KSEREL)
				sleepq_remove(td2, &p->p_completed);
			else
				sleepq_remove(td2, &p->p_siglist);
		}
	} else {
		/* Owner is active; request an upcall at the next boundary. */
		ku->ku_flags |= KUF_DOUPCALL;
		mtx_unlock_spin(&kse_lock);
	}
	PROC_SUNLOCK(p);
	PROC_UNLOCK(p);
	return (0);
#else /* !KSE */
	return (EOPNOTSUPP);
#endif
}
574
/*
 * newgroup == 0: first call: use current KSE, don't schedule an upcall
 * All other situations, do allocate max new KSEs and schedule an upcall.
 *
 * XXX should be changed so that 'first' behaviour lasts for as long
 * as you have not made a thread in this proc. i.e. as long as we do not have
 * a mailbox..
 */
/* struct kse_create_args {
	struct kse_mailbox *mbx;
	int newgroup;
}; */
/*
 * kse_create(2) -- register a new KSE mailbox with the process and,
 * unless the calling thread adopts the upcall itself, create and start
 * a new LWP for it (bound 1:1 or SA/upcall-capable depending on the
 * mailbox flags and the newgroup argument).
 */
int
kse_create(struct thread *td, struct kse_create_args *uap)
{
#ifdef KSE
	struct proc *p;
	struct kse_mailbox mbx;
	struct kse_upcall *newku;
	int err, ncpus, sa = 0, first = 0;
	struct thread *newtd;

	p = td->td_proc;

	/*
	 * Processes using the other threading model can't
	 * suddenly start calling this one
	 * XXX maybe...
	 */
	PROC_LOCK(p);
	if ((p->p_flag & (P_SA|P_HADTHREADS)) == P_HADTHREADS) {
		PROC_UNLOCK(p);
		return (EINVAL);
	}
	if (!(p->p_flag & P_SA)) {
		/* First kse_create(): switch the process into SA mode. */
		first = 1;
		p->p_flag |= P_SA|P_HADTHREADS;
	}
	PROC_UNLOCK(p);

	if ((err = copyin(uap->mbx, &mbx, sizeof(mbx))))
		return (err);

	ncpus = mp_ncpus;
	if (virtual_cpu != 0)
		ncpus = virtual_cpu;
	/*
	 * If the new UTS mailbox says that this
	 * will be a BOUND lwp, then it had better
	 * have its thread mailbox already there.
	 */
	if ((mbx.km_flags & KMF_BOUND) || uap->newgroup) {
		/* It's a bound thread (1:1) */
		if (mbx.km_curthread == NULL)
			return (EINVAL);
		ncpus = 1;
		if (!(uap->newgroup || first))
			return (EINVAL);
	} else {
		/* It's an upcall capable thread */
		sa = TDP_SA;
		PROC_LOCK(p);
		/*
		 * Limit it to NCPU upcall contexts per proc in any case.
		 * numupcalls will soon be numkse or something
		 * as it will represent the number of
		 * non-bound upcalls available. (i.e. ones that can
		 * actually call up).
		 */
		if (p->p_numupcalls >= ncpus) {
			PROC_UNLOCK(p);
			return (EPROCLIM);
		}
		p->p_numupcalls++;
		PROC_UNLOCK(p);
	}

	/*
	 * For the first call this may not have been set.
	 * Of course nor may it actually be needed.
	 * thread_schedule_upcall() will look for it.
	 */
	if (td->td_standin == NULL) {
		if (!thread_alloc_spare(td))
			return (ENOMEM);
	}

	/*
	 * Even bound LWPs get a mailbox and an upcall to hold it.
	 * XXX This should change.
	 */
	newku = upcall_alloc();
	newku->ku_mailbox = uap->mbx;
	newku->ku_func = mbx.km_func;
	bcopy(&mbx.km_stack, &newku->ku_stack, sizeof(stack_t));

	PROC_LOCK(p);
	PROC_SLOCK(p);
	/*
	 * If we are the first time, and a normal thread,
	 * then transfer all the signals back to the 'process'.
	 * SA threading will make a special thread to handle them.
	 */
	if (first) {
		sigqueue_move_set(&td->td_sigqueue, &p->p_sigqueue,
		    &td->td_sigqueue.sq_signals);
		SIGFILLSET(td->td_sigmask);
		SIG_CANTMASK(td->td_sigmask);
	}

	/*
	 * Make the new upcall available to the process.
	 * It may or may not use it, but it's available.
	 */
	TAILQ_INSERT_TAIL(&p->p_upcalls, newku, ku_link);
	newku->ku_proc = p;
	PROC_UNLOCK(p);
	/*
	 * NOTE(review): p_upquantum is written after PROC_UNLOCK();
	 * presumably the proc spin lock (still held) covers it -- confirm.
	 */
	if (mbx.km_quantum)
		/* XXX should this be in the thread? */
		p->p_upquantum = max(1, mbx.km_quantum / tick);

	/*
	 * Each upcall structure has an owner thread, find which
	 * one owns it.
	 */
	thread_lock(td);
	mtx_lock_spin(&kse_lock);
	if (uap->newgroup) {
		/*
		 * The newgroup parameter now means
		 * "bound, non SA, system scope"
		 * It is only used for the interrupt thread at the
		 * moment I think.. (or system scope threads dopey).
		 * We'll rename it later.
		 */
		newtd = thread_schedule_upcall(td, newku);
	} else {
		/*
		 * If the current thread hasn't an upcall structure,
		 * just assign the upcall to it.
		 * It'll just return.
		 */
		if (td->td_upcall == NULL) {
			newku->ku_owner = td;
			td->td_upcall = newku;
			newtd = td;
		} else {
			/*
			 * Create a new upcall thread to own it.
			 */
			newtd = thread_schedule_upcall(td, newku);
		}
	}
	mtx_unlock_spin(&kse_lock);
	thread_unlock(td);
	PROC_SUNLOCK(p);

	/*
	 * Let the UTS instance know its LWPID.
	 * It doesn't really care. But the debugger will.
	 * XXX warning.. remember that this moves.
	 */
	suword32(&newku->ku_mailbox->km_lwp, newtd->td_tid);

	/*
	 * In the same manner, if the UTS has a current user thread,
	 * then it is also running on this LWP so set it as well.
	 * The library could do that of course.. but why not..
	 * XXX I'm not sure this can ever happen but ...
	 * XXX does the UTS ever set this in the mailbox before calling this?
	 */
	if (mbx.km_curthread)
		suword32(&mbx.km_curthread->tm_lwp, newtd->td_tid);

	if (sa) {
		newtd->td_pflags |= TDP_SA;
		/*
		 * If we are starting a new thread, kick it off.
		 */
		if (newtd != td) {
			thread_lock(newtd);
			sched_add(newtd, SRQ_BORING);
			thread_unlock(newtd);
		}
	} else {
		newtd->td_pflags &= ~TDP_SA;

		/*
		 * Since a library will use the mailbox pointer to
		 * identify even a bound thread, and the mailbox pointer
		 * will never be allowed to change after this syscall
		 * for a bound thread, set it here so the library can
		 * find the thread after the syscall returns.
		 */
		newtd->td_mailbox = mbx.km_curthread;

		if (newtd != td) {
			/*
			 * If we did create a new thread then
			 * make sure it goes to the right place
			 * when it starts up, and make sure that it runs
			 * at full speed when it gets there.
			 * thread_schedule_upcall() copies all cpu state
			 * to the new thread, so we should clear single step
			 * flag here.
			 */
			cpu_set_upcall_kse(newtd, newku->ku_func,
			    newku->ku_mailbox, &newku->ku_stack);
			PROC_LOCK(p);
			if (p->p_flag & P_TRACED) {
				_PHOLD(p);
				ptrace_clear_single_step(newtd);
				_PRELE(p);
			}
			PROC_UNLOCK(p);
			thread_lock(newtd);
			sched_add(newtd, SRQ_BORING);
			thread_unlock(newtd);
		}
	}
	return (0);
#else /* !KSE */
	return (EOPNOTSUPP);
#endif
}
800
#ifdef KSE
/*
 * Initialize global thread allocation resources.
 * Creates the UMA zone from which all kse_upcall structures are drawn.
 */
void
kseinit(void)
{

	upcall_zone = uma_zcreate("UPCALL", sizeof(struct kse_upcall),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
}
812
/*
 * Store the thread context in the UTS's mailbox.
 * then add the mailbox at the head of a list we are building in user space.
 * The list is anchored in the proc structure.
 * On any failure to write user memory the process is killed with SIGILL.
 */
int
thread_export_context(struct thread *td, int willexit)
{
	struct proc *p;
	uintptr_t mbx;
	void *addr;
	int error = 0, sig;
	mcontext_t mc;

	p = td->td_proc;

	/*
	 * Post sync signal, or process SIGKILL and SIGSTOP.
	 * For sync signal, it is only possible when the signal is not
	 * caught by userland or process is being debugged.
	 */
	PROC_LOCK(p);
	if (td->td_flags & TDF_NEEDSIGCHK) {
		thread_lock(td);
		td->td_flags &= ~TDF_NEEDSIGCHK;
		thread_unlock(td);
		mtx_lock(&p->p_sigacts->ps_mtx);
		while ((sig = cursig(td)) != 0)
			postsig(sig);
		mtx_unlock(&p->p_sigacts->ps_mtx);
	}
	if (willexit)
		SIGFILLSET(td->td_sigmask);
	PROC_UNLOCK(p);

	/* Export the user/machine context. */
	get_mcontext(td, &mc, 0);
	addr = (void *)(&td->td_mailbox->tm_context.uc_mcontext);
	error = copyout(&mc, addr, sizeof(mcontext_t));
	if (error)
		goto bad;

	/* Mark the mailbox as no longer attached to an LWP. */
	addr = (caddr_t)(&td->td_mailbox->tm_lwp);
	if (suword32(addr, 0)) {
		error = EFAULT;
		goto bad;
	}

	/* Get address in latest mbox of list pointer */
	addr = (void *)(&td->td_mailbox->tm_next);
	/*
	 * Put the saved address of the previous first
	 * entry into this one.
	 * Retry loop: store the current completed-list head into the
	 * mailbox, then re-check under the proc lock that the head did
	 * not move while we were touching pageable user memory.
	 */
	for (;;) {
		mbx = (uintptr_t)p->p_completed;
		if (suword(addr, mbx)) {
			error = EFAULT;
			goto bad;
		}
		PROC_LOCK(p);
		if (mbx == (uintptr_t)p->p_completed) {
			thread_lock(td);
			p->p_completed = td->td_mailbox;
			/*
			 * The thread context may be taken away by
			 * other upcall threads when we unlock
			 * process lock. it's no longer valid to
			 * use it again in any other places.
			 */
			td->td_mailbox = NULL;
			thread_unlock(td);
			PROC_UNLOCK(p);
			break;
		}
		PROC_UNLOCK(p);
	}
	td->td_usticks = 0;
	return (0);

bad:
	PROC_LOCK(p);
	sigexit(td, SIGILL);
	return (error);
}
898
/*
 * Take the list of completed mailboxes for this Process and put them on this
 * upcall's mailbox as it's the next one going up.
 * Returns 0 on success; on a userland store failure the process is
 * signalled with SIGSEGV and EFAULT is returned.
 */
static int
thread_link_mboxes(struct proc *p, struct kse_upcall *ku)
{
	void *addr;
	uintptr_t mbx;

	addr = (void *)(&ku->ku_mailbox->km_completed);
	/*
	 * Retry loop: publish the current completed-list head into the
	 * upcall mailbox, then re-check under the proc lock that the
	 * head did not change while we wrote to user memory.
	 */
	for (;;) {
		mbx = (uintptr_t)p->p_completed;
		if (suword(addr, mbx)) {
			PROC_LOCK(p);
			psignal(p, SIGSEGV);
			PROC_UNLOCK(p);
			return (EFAULT);
		}
		PROC_LOCK(p);
		if (mbx == (uintptr_t)p->p_completed) {
			p->p_completed = NULL;
			PROC_UNLOCK(p);
			break;
		}
		PROC_UNLOCK(p);
	}
	return (0);
}
928
929 /*
930 * This function should be called at statclock interrupt time
931 */
932 int
933 thread_statclock(int user)
934 {
935 struct thread *td = curthread;
936
937 if (!(td->td_pflags & TDP_SA))
938 return (0);
939 if (user) {
940 /* Current always do via ast() */
941 thread_lock(td);
942 td->td_flags |= TDF_ASTPENDING;
943 thread_unlock(td);
944 td->td_uuticks++;
945 } else if (td->td_mailbox != NULL)
946 td->td_usticks++;
947 return (0);
948 }
949
/*
 * Export state clock ticks for userland.
 * Adds the accumulated user (td_uuticks) and system (td_usticks) tick
 * counts into the thread mailbox and zeroes the kernel-side counters.
 * Returns 0 on success, -1 when the thread has no mailbox, or -2
 * (after raising SIGSEGV) when the mailbox cannot be written.
 */
static int
thread_update_usr_ticks(struct thread *td)
{
	struct proc *p = td->td_proc;
	caddr_t addr;
	u_int uticks;

	thread_lock(td);
	if (td->td_mailbox == NULL) {
		thread_unlock(td);
		return (-1);
	}
	thread_unlock(td);
	/*
	 * NOTE(review): td_mailbox is re-read below after the thread
	 * lock is dropped; presumably only curthread clears its own
	 * mailbox, making this safe -- confirm.
	 */

	if ((uticks = td->td_uuticks) != 0) {
		td->td_uuticks = 0;
		addr = (caddr_t)&td->td_mailbox->tm_uticks;
		if (suword32(addr, uticks+fuword32(addr)))
			goto error;
	}
	if ((uticks = td->td_usticks) != 0) {
		td->td_usticks = 0;
		addr = (caddr_t)&td->td_mailbox->tm_sticks;
		if (suword32(addr, uticks+fuword32(addr)))
			goto error;
	}
	return (0);

error:
	PROC_LOCK(p);
	psignal(p, SIGSEGV);
	PROC_UNLOCK(p);
	return (-2);
}
987
988 /*
989 * This function is intended to be used to initialize a spare thread
990 * for upcall. Initialize thread's large data area outside the thread lock
991 * for thread_schedule_upcall(). The crhold is also here to get it out
992 * from the schedlock as it has a mutex op itself.
993 * XXX BUG.. we need to get the cr ref after the thread has
994 * checked and chenged its own, not 6 months before...
995 */
996 int
997 thread_alloc_spare(struct thread *td)
998 {
999 struct thread *spare;
1000
1001 if (td->td_standin)
1002 return (1);
1003 spare = thread_alloc();
1004 if (spare == NULL)
1005 return (0);
1006 td->td_standin = spare;
1007 bzero(&spare->td_startzero,
1008 __rangeof(struct thread, td_startzero, td_endzero));
1009 spare->td_proc = td->td_proc;
1010 spare->td_ucred = crhold(td->td_ucred);
1011 spare->td_flags = TDF_INMEM;
1012 return (1);
1013 }
1014
1015 /*
1016 * Create a thread and schedule it for upcall on the KSE given.
1017 * Use our thread's standin so that we don't have to allocate one.
1018 */
1019 struct thread *
1020 thread_schedule_upcall(struct thread *td, struct kse_upcall *ku)
1021 {
1022 struct thread *td2;
1023
1024 THREAD_LOCK_ASSERT(td, MA_OWNED);
1025 mtx_assert(&kse_lock, MA_OWNED);
1026 /*
1027 * Schedule an upcall thread on specified kse_upcall,
1028 * the kse_upcall must be free.
1029 * td must have a spare thread.
1030 */
1031 KASSERT(ku->ku_owner == NULL, ("%s: upcall has owner", __func__));
1032 if ((td2 = td->td_standin) != NULL) {
1033 td->td_standin = NULL;
1034 } else {
1035 panic("no reserve thread when scheduling an upcall");
1036 return (NULL);
1037 }
1038 CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)",
1039 td2, td->td_proc->p_pid, td->td_proc->p_comm);
1040 /*
1041 * Bzero already done in thread_alloc_spare() because we can't
1042 * do the crhold here because we are in schedlock already.
1043 */
1044 bcopy(&td->td_startcopy, &td2->td_startcopy,
1045 __rangeof(struct thread, td_startcopy, td_endcopy));
1046 sched_fork_thread(td, td2);
1047 thread_link(td2, ku->ku_proc);
1048 /* inherit parts of blocked thread's context as a good template */
1049 cpu_set_upcall(td2, td);
1050 /* Let the new thread become owner of the upcall */
1051 ku->ku_owner = td2;
1052 td2->td_upcall = ku;
1053 td2->td_pflags = TDP_SA|TDP_UPCALLING;
1054 td2->td_state = TDS_CAN_RUN;
1055 td2->td_inhibitors = 0;
1056 SIGFILLSET(td2->td_sigmask);
1057 SIG_CANTMASK(td2->td_sigmask);
1058 return (td2); /* bogus.. should be a void function */
1059 }
1060
/*
 * It is only used when thread generated a trap and process is being
 * debugged.
 * Copies the siginfo of the synchronous signal into the thread's
 * mailbox and blocks the signal; kills the process with SIGSEGV if
 * the mailbox cannot be written.  Entered with the proc lock and
 * ps_mtx held; both are dropped around the copyout and reacquired
 * before returning.
 */
void
thread_signal_add(struct thread *td, ksiginfo_t *ksi)
{
	struct proc *p;
	struct sigacts *ps;
	int error;

	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	ps = p->p_sigacts;
	mtx_assert(&ps->ps_mtx, MA_OWNED);

	mtx_unlock(&ps->ps_mtx);
	SIGADDSET(td->td_sigmask, ksi->ksi_signo);
	PROC_UNLOCK(p);
	error = copyout(&ksi->ksi_info, &td->td_mailbox->tm_syncsig,
	    sizeof(siginfo_t));
	if (error) {
		PROC_LOCK(p);
		sigexit(td, SIGSEGV);
	}
	PROC_LOCK(p);
	mtx_lock(&ps->ps_mtx);
}
#include "opt_sched.h"
/*
 * As td blocks or suspends, decide whether to hand its upcall to a
 * freshly scheduled upcall thread so the process keeps a runnable
 * upcall source.  Returns the thread the caller should run next
 * (either nextthread, or the new upcall thread when the caller should
 * account for it as already running).
 */
struct thread *
thread_switchout(struct thread *td, int flags, struct thread *nextthread)
{
	struct kse_upcall *ku;
	struct thread *td2;

	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/*
	 * If the outgoing thread is in threaded group and has never
	 * scheduled an upcall, decide whether this is a short
	 * or long term event and thus whether or not to schedule
	 * an upcall.
	 * If it is a short term event, just suspend it in
	 * a way that takes its KSE with it.
	 * Select the events for which we want to schedule upcalls.
	 * For now it's just sleep or if thread is suspended but
	 * process wide suspending flag is not set (debugger
	 * suspends thread).
	 * XXXKSE eventually almost any inhibition could do.
	 */
	if (TD_CAN_UNBIND(td) && (td->td_standin) &&
	    (TD_ON_SLEEPQ(td) || (TD_IS_SUSPENDED(td) &&
	    !P_SHOULDSTOP(td->td_proc)))) {
		/*
		 * Release ownership of upcall, and schedule an upcall
		 * thread, this new upcall thread becomes the owner of
		 * the upcall structure. It will be ahead of us in the
		 * run queue, so as we are stopping, it should either
		 * start up immediatly, or at least before us if
		 * we release our slot.
		 */
		mtx_lock_spin(&kse_lock);
		ku = td->td_upcall;
		ku->ku_owner = NULL;
		td->td_upcall = NULL;
		td->td_pflags &= ~TDP_CAN_UNBIND;
		td2 = thread_schedule_upcall(td, ku);
		mtx_unlock_spin(&kse_lock);
		if (flags & SW_INVOL || nextthread) {
			thread_lock(td2);
			sched_add(td2, SRQ_YIELDING);
			thread_unlock(td2);
		} else {
			/* Keep up with reality.. we have one extra thread
			 * in the picture.. and it's 'running'.
			 */
			return td2;
		}
	}
	return (nextthread);
}
1142
/*
 * Setup done on the thread when it enters the kernel.
 * For an SA thread this locates its thread mailbox through the upcall
 * mailbox and decides whether the thread may unbind from its LWP while
 * in the kernel (TDP_CAN_UNBIND).
 */
void
thread_user_enter(struct thread *td)
{
	struct proc *p = td->td_proc;
	struct kse_upcall *ku;
	struct kse_thr_mailbox *tmbx;
	uint32_t flags;

	/*
	 * First check that we shouldn't just abort. we
	 * can suspend it here or just exit.
	 */
	if (__predict_false(P_SHOULDSTOP(p))) {
		PROC_LOCK(p);
		thread_suspend_check(0);
		PROC_UNLOCK(p);
	}

	if (!(td->td_pflags & TDP_SA))
		return;

	/*
	 * If we are doing a syscall in a KSE environment,
	 * note where our mailbox is.
	 */

	thread_lock(td);
	ku = td->td_upcall;
	thread_unlock(td);

	KASSERT(ku != NULL, ("no upcall owned"));
	KASSERT(ku->ku_owner == td, ("wrong owner"));
	KASSERT(!TD_CAN_UNBIND(td), ("can unbind"));

	/* Make sure a spare thread exists for a later upcall. */
	if (td->td_standin == NULL) {
		if (!thread_alloc_spare(td)) {
			PROC_LOCK(p);
			if (kern_logsigexit)
				log(LOG_INFO,
			 "pid %d (%s), uid %d: thread_alloc_spare failed\n",
				    p->p_pid, p->p_comm,
				    td->td_ucred ? td->td_ucred->cr_uid : -1);
			sigexit(td, SIGSEGV);	/* XXX ? */
			/* panic("thread_user_enter: thread_alloc_spare failed"); */
		}
	}
	ku->ku_mflags = fuword32((void *)&ku->ku_mailbox->km_flags);
	tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
	if ((tmbx == NULL) || (tmbx == (void *)-1L) ||
	    (ku->ku_mflags & KMF_NOUPCALL)) {
		td->td_mailbox = NULL;
	} else {
		flags = fuword32(&tmbx->tm_flags);
		/*
		 * On some architectures, TP register points to thread
		 * mailbox but not points to kse mailbox, and userland
		 * can not atomically clear km_curthread, but can
		 * use TP register, and set TMF_NOUPCALL in thread
		 * flag to indicate a critical region.
		 */
		if (flags & TMF_NOUPCALL) {
			td->td_mailbox = NULL;
		} else {
			td->td_mailbox = tmbx;
			td->td_pflags |= TDP_CAN_UNBIND;
			PROC_LOCK(p);
			if (__predict_false(p->p_flag & P_TRACED)) {
				flags = fuword32(&tmbx->tm_dflags);
				if (flags & TMDF_SUSPEND) {
					thread_lock(td);
					/* fuword can block, check again */
					if (td->td_upcall)
						ku->ku_flags |= KUF_DOUPCALL;
					thread_unlock(td);
				}
			}
			PROC_UNLOCK(p);
		}
	}
}
1226
1227 /*
1228 * The extra work we go through if we are a threaded process when we
1229 * return to userland.
1230 *
1231 * If we are a KSE process and returning to user mode, check for
1232 * extra work to do before we return (e.g. for more syscalls
1233 * to complete first). If we were in a critical section, we should
1234 * just return to let it finish. Same if we were in the UTS (in
1235 * which case the mailbox's context's busy indicator will be set).
1236 * The only traps we support will have set the mailbox.
1237 * We will clear it here.
1238 */
/*
 * Paths through this function (SA threads only; bound threads return
 * immediately):
 *  1. TD_CAN_UNBIND with nothing to report: refresh km_timeofday and
 *     return straight to userland (fast path).
 *  2. TD_CAN_UNBIND with work pending (signal check, completed
 *     threads, KUF_DOUPCALL, or expired upcall quantum): export our
 *     context and turn ourselves into the upcall thread
 *     (TDP_UPCALLING).
 *  3. td_mailbox set but no upcall owned: this user thread has
 *     completed; export its state, wake a sleeping UTS, and
 *     thread_exit() -- does not return.
 * An upcalling thread is then aimed at the UTS entry point
 * (cpu_set_upcall_kse), completed-thread mailboxes are linked into the
 * KSE mailbox, and km_timeofday is refreshed.  Any userland copy
 * failure kills the process with SIGSEGV.
 */
1239 int
1240 thread_userret(struct thread *td, struct trapframe *frame)
1241 {
1242 struct kse_upcall *ku;
1243 struct proc *p;
1244 struct timespec ts;
1245 int error = 0, uts_crit;
1246 
1247 /* Nothing to do with bound thread */
1248 if (!(td->td_pflags & TDP_SA))
1249 return (0);
1250 
1251 /*
1252 * Update stat clock count for userland
1253 */
/* A NULL mailbox means userland was in a critical section / the UTS. */
1254 if (td->td_mailbox != NULL) {
1255 thread_update_usr_ticks(td);
1256 uts_crit = 0;
1257 } else {
1258 uts_crit = 1;
1259 }
1260 
1261 p = td->td_proc;
1262 thread_lock(td);
1263 ku = td->td_upcall;
1264 
1265 /*
1266 * Optimisation:
1267 * This thread has not started any upcall.
1268 * If there is no work to report other than ourself,
1269 * then it can return direct to userland.
1270 */
1271 if (TD_CAN_UNBIND(td)) {
1272 thread_unlock(td);
1273 td->td_pflags &= ~TDP_CAN_UNBIND;
/*
 * Fast path: no pending signal check, no completed threads,
 * no forced upcall, and the upcall quantum has not expired.
 */
1274 if ((td->td_flags & TDF_NEEDSIGCHK) == 0 &&
1275 (p->p_completed == NULL) &&
1276 (ku->ku_flags & KUF_DOUPCALL) == 0 &&
1277 (p->p_upquantum && ticks < p->p_nextupcall)) {
1278 nanotime(&ts);
1279 error = copyout(&ts,
1280 (caddr_t)&ku->ku_mailbox->km_timeofday,
1281 sizeof(ts));
1282 td->td_mailbox = 0;
1283 ku->ku_mflags = 0;
1284 if (error)
1285 goto out;
1286 return (0);
1287 }
1288 thread_export_context(td, 0);
1289 /*
1290 * There is something to report, and we own an upcall
1291 * structure, we can go to userland.
1292 * Turn ourself into an upcall thread.
1293 */
1294 td->td_pflags |= TDP_UPCALLING;
1295 } else if (td->td_mailbox && (ku == NULL)) {
/*
 * Completed user thread with no upcall to ride out on:
 * publish its final state and exit the kernel thread.
 */
1296 thread_unlock(td);
1297 thread_export_context(td, 1);
1298 PROC_LOCK(p);
1299 if (p->p_upsleeps)
1300 wakeup(&p->p_completed);
1301 WITNESS_WARN(WARN_PANIC, &p->p_mtx.lock_object,
1302 "thread exiting in userret");
1303 sigqueue_flush(&td->td_sigqueue);
1304 PROC_SLOCK(p);
1305 thread_stopped(p);
1306 thread_exit();
1307 /* NOTREACHED */
1308 } else
1309 thread_unlock(td);
1310 
1311 KASSERT(ku != NULL, ("upcall is NULL"));
1312 KASSERT(TD_CAN_UNBIND(td) == 0, ("can unbind"));
1313 
/*
 * Throttle: while over the per-process thread limit, stall briefly
 * (interruptible, hz/10 at a time) waiting for threads to exit,
 * unless the process is mostly upcalls already.
 */
1314 PROC_LOCK(p);
1315 PROC_SLOCK(p);
1316 if (p->p_numthreads > max_threads_per_proc) {
1317 max_threads_hits++;
1318 while (p->p_numthreads > max_threads_per_proc) {
1319 if (p->p_numupcalls >= max_threads_per_proc)
1320 break;
1321 PROC_SUNLOCK(p);
1322 if (msleep(&p->p_numthreads, &p->p_mtx, PPAUSE|PCATCH,
1323 "maxthreads", hz/10) != EWOULDBLOCK) {
1324 PROC_SLOCK(p);
1325 break;
1326 } else
1327 PROC_SLOCK(p);
1328 }
1329 }
1330 PROC_SUNLOCK(p);
1331 PROC_UNLOCK(p);
1332 
1333 if (td->td_pflags & TDP_UPCALLING) {
1334 uts_crit = 0;
/* Restart the upcall quantum used by the fast path above. */
1335 p->p_nextupcall = ticks + p->p_upquantum;
1336 /*
1337 * There is no more work to do and we are going to ride
1338 * this thread up to userland as an upcall.
1339 * Do the last parts of the setup needed for the upcall.
1340 */
1341 CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
1342 td, td->td_proc->p_pid, td->td_proc->p_comm);
1343 
1344 td->td_pflags &= ~TDP_UPCALLING;
1345 if (ku->ku_flags & KUF_DOUPCALL) {
/* ku_flags is manipulated under the process spinlock. */
1346 PROC_SLOCK(p);
1347 ku->ku_flags &= ~KUF_DOUPCALL;
1348 PROC_SUNLOCK(p);
1349 }
1350 /*
1351 * Set user context to the UTS
1352 */
1353 if (!(ku->ku_mflags & KMF_NOUPCALL)) {
1354 cpu_set_upcall_kse(td, ku->ku_func, ku->ku_mailbox,
1355 &ku->ku_stack);
1356 PROC_LOCK(p);
1357 if (p->p_flag & P_TRACED) {
1358 _PHOLD(p);
1359 ptrace_clear_single_step(td);
1360 _PRELE(p);
1361 }
1362 PROC_UNLOCK(p);
/*
 * Tell userland which kernel thread carries the upcall
 * (km_lwp) and mark the mailbox slot free (km_curthread
 * = 0).  suword/suword32 may fault; bail to 'out'.
 */
1363 error = suword32(&ku->ku_mailbox->km_lwp,
1364 td->td_tid);
1365 if (error)
1366 goto out;
1367 error = suword(&ku->ku_mailbox->km_curthread, 0);
1368 if (error)
1369 goto out;
1370 }
1371 
1372 /*
1373 * Unhook the list of completed threads.
1374 * anything that completes after this gets to
1375 * come in next time.
1376 * Put the list of completed thread mailboxes on
1377 * this KSE's mailbox.
1378 */
1379 if (!(ku->ku_mflags & KMF_NOCOMPLETED) &&
1380 (error = thread_link_mboxes(p, ku)) != 0)
1381 goto out;
1382 }
1383 if (!uts_crit) {
1384 nanotime(&ts);
1385 error = copyout(&ts, &ku->ku_mailbox->km_timeofday, sizeof(ts));
1386 }
1387 
1388 out:
1389 if (error) {
1390 /*
1391 * Things are going to be so screwed we should just kill
1392 * the process.
1393 * how do we do that?
1394 */
1395 PROC_LOCK(p);
1396 psignal(p, SIGSEGV);
1397 PROC_UNLOCK(p);
1398 } else {
1399 /*
1400 * Optimisation:
1401 * Ensure that we have a spare thread available,
1402 * for when we re-enter the kernel.
1403 */
1404 if (td->td_standin == NULL)
1405 thread_alloc_spare(td); /* XXX care of failure ? */
1406 }
1407 
/* Reset per-entry state regardless of outcome. */
1408 ku->ku_mflags = 0;
1409 td->td_mailbox = NULL;
1410 td->td_usticks = 0;
1411 return (error); /* go sync */
1412 }
1413
1414 /*
1415 * called after ptrace resumed a process, force all
1416 * virtual CPUs to schedule upcall for SA process,
1417 * because debugger may have changed something in userland,
1418 * we should notice UTS as soon as possible.
1419 */
/*
 * Caller must hold the proc lock, and the process must still be marked
 * stopped (asserted below).  For a traced SA process, flag every upcall
 * with KUF_DOUPCALL and wake anything sleeping on p_completed so the
 * userland scheduler re-examines state on resume.  Non-SA processes
 * are a no-op.
 */
1420 void
1421 thread_continued(struct proc *p)
1422 {
1423 struct kse_upcall *ku;
1424 struct thread *td;
1425 
1426 PROC_LOCK_ASSERT(p, MA_OWNED);
1427 KASSERT(P_SHOULDSTOP(p), ("process not stopped"));
1428 
1429 if (!(p->p_flag & P_SA))
1430 return;
1431 
1432 if (p->p_flag & P_TRACED) {
/* Only bother if the first thread is actually running in SA mode. */
1433 td = TAILQ_FIRST(&p->p_threads);
1434 if (td && (td->td_pflags & TDP_SA)) {
1435 FOREACH_UPCALL_IN_PROC(p, ku) {
/* ku_flags is protected by the process spinlock. */
1436 PROC_SLOCK(p);
1437 ku->ku_flags |= KUF_DOUPCALL;
1438 PROC_SUNLOCK(p);
1439 wakeup(&p->p_completed);
1440 }
1441 }
1442 }
1443 }
1444 #endif
Cache object: 288159ffee22df4f4e13ea73fe318a13
|