/*-
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_hwpmc_hooks.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/7.4/sys/kern/kern_thread.c 207744 2010-05-07 11:17:20Z kib $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/turnstile.h>
#include <sys/ktr.h>
#include <sys/umtx.h>
#include <sys/cpuset.h>
#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <sys/eventhandler.h>

/*
 * thread related storage.
 */
static uma_zone_t thread_zone;

SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");

int max_threads_per_proc = 1500;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
    &max_threads_per_proc, 0, "Limit on threads per proc");

int max_threads_hits;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
    &max_threads_hits, 0, "kern.threads.max_threads_per_proc hit count");

#ifdef KSE
int virtual_cpu;

#endif
TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
static struct mtx zombie_lock;
MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN);
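
/*
 * Note: zombie_lock is a spin mutex because thread_zombie() can be
 * reached from context-switch paths (via thread_stash()), where blocking
 * on a default (sleep) mutex is not permitted.
 */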

static void thread_zombie(struct thread *);

#ifdef KSE
static int
sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;
	int def_val;

	def_val = mp_ncpus;
	if (virtual_cpu == 0)
		new_val = def_val;
	else
		new_val = virtual_cpu;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < 0)
		return (EINVAL);
	virtual_cpu = new_val;
	return (0);
}

/* DEBUG ONLY */
SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW,
    0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I",
    "debug virtual cpus");
#endif

struct mtx tid_lock;
static struct unrhdr *tid_unrhdr;

/*
 * Prepare a thread for use.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_oncpu = NOCPU;

	td->td_tid = alloc_unr(tid_unrhdr);
	td->td_syscalls = 0;
	td->td_incruntime = 0;

	/*
	 * Note that td_critnest begins life as 1 because the thread is not
	 * running and is thereby implicitly waiting to be on the receiving
	 * end of a context switch.
	 */
	td->td_critnest = 1;
	EVENTHANDLER_INVOKE(thread_ctor, td);
#ifdef AUDIT
	audit_thread_alloc(td);
#endif
	/* Free all OSD associated with this thread. */
	osd_thread_exit(td);

	umtx_thread_alloc(td);
	return (0);
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
#ifdef AUDIT
	audit_thread_free(td);
#endif
	EVENTHANDLER_INVOKE(thread_dtor, td);
	free_unr(tid_unrhdr, td->td_tid);
	sched_newthread(td);
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static int
thread_init(void *mem, int size, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;

	td->td_sleepqueue = sleepq_alloc();
	td->td_turnstile = turnstile_alloc();
	EVENTHANDLER_INVOKE(thread_init, td);
	td->td_sched = (struct td_sched *)&td[1];
	sched_newthread(td);
	umtx_thread_init(td);
	td->td_kstack = 0;
	td->td_fpop = NULL;
	return (0);
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	EVENTHANDLER_INVOKE(thread_fini, td);
	turnstile_free(td->td_turnstile);
	sleepq_free(td->td_sleepqueue);
	umtx_thread_fini(td);
}
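
/*
 * To summarize the UMA life cycle of a thread: thread_init()/thread_fini()
 * run once, when an item first enters or finally leaves the zone, while
 * thread_ctor()/thread_dtor() run on every allocation and free.  The
 * expensive, type-stable pieces (sleep queue, turnstile) therefore live
 * in init/fini and survive recycling through the zone.
 */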

/*
 * For a newly created process, link up all the structures and its initial
 * threads, etc.
 * Called from:
 *	{arch}/{arch}/machdep.c		ia64_init(), init386() etc.
 *	proc_dtor() (should go away)
 *	proc_init()
 */
void
proc_linkup0(struct proc *p, struct thread *td)
{
	TAILQ_INIT(&p->p_threads);	/* all threads in proc */
	proc_linkup(p, td);
}

void
proc_linkup(struct proc *p, struct thread *td)
{

#ifdef KSE
	TAILQ_INIT(&p->p_upcalls);	/* upcall list */
#endif
	sigqueue_init(&p->p_sigqueue, p);
	p->p_ksi = ksiginfo_alloc(1);
	if (p->p_ksi != NULL) {
		/* XXX p_ksi may be null if ksiginfo zone is not ready */
		p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
	}
	LIST_INIT(&p->p_mqnotifier);
	p->p_numthreads = 0;
	thread_link(td, p);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

	mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);
	/* leave one number for thread0 */
	tid_unrhdr = new_unrhdr(PID_MAX + 2, INT_MAX, &tid_lock);

	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    16 - 1, 0);
#ifdef KSE
	kseinit();	/* set up kse specific stuff e.g. upcall zone */
#endif
}
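
/*
 * The TID allocator deliberately starts above PID_MAX so that thread IDs
 * never collide with process IDs; PID_MAX + 1 itself is skipped because
 * it is reserved for thread0 (hence the "+ 2" above).
 */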

/*
 * Place an unused thread on the zombie list.
 * Use the slpq as that must be unused by now.
 */
void
thread_zombie(struct thread *td)
{
	mtx_lock_spin(&zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq);
	mtx_unlock_spin(&zombie_lock);
}
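
/*
 * A dying thread cannot free itself: its stack is in use until
 * cpu_throw() has switched away from it.  It is parked on the zombie
 * list instead and reclaimed later by thread_reap().
 */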

/*
 * Release a thread that has exited after cpu_throw().
 */
void
thread_stash(struct thread *td)
{
	atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
	thread_zombie(td);
}

/*
 * Reap resources held by zombie threads (and, under KSE, zombie upcalls).
 */
void
thread_reap(void)
{
	struct thread *td_first, *td_next;

	/*
	 * Don't even bother to lock if the list is empty at this instant;
	 * we really don't care about the next instant.
	 */
	if (!TAILQ_EMPTY(&zombie_threads)) {
		mtx_lock_spin(&zombie_lock);
		td_first = TAILQ_FIRST(&zombie_threads);
		if (td_first)
			TAILQ_INIT(&zombie_threads);
		mtx_unlock_spin(&zombie_lock);
		while (td_first) {
			td_next = TAILQ_NEXT(td_first, td_slpq);
			if (td_first->td_ucred)
				crfree(td_first->td_ucred);
			thread_free(td_first);
			td_first = td_next;
		}
	}
#ifdef KSE
	upcall_reap();
#endif
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(void)
{
	struct thread *td;

	thread_reap();	/* check if any zombies to get */

	td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK);
	KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
	if (!vm_thread_new(td, 0)) {
		uma_zfree(thread_zone, td);
		return (NULL);
	}
	cpu_thread_alloc(td);
	return (td);
}
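
/*
 * A minimal usage sketch (illustrative only; real callers such as
 * thr_create() perform considerably more setup):
 *
 *	newtd = thread_alloc();
 *	if (newtd == NULL)
 *		return (ENOMEM);
 *	... initialize pcb, entry point, TLS ...
 *	thread_link(newtd, p);		(under the appropriate proc locks)
 */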

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{
	if (td->td_cpuset)
		cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_free(td);
	if (td->td_altkstack != 0)
		vm_thread_dispose_altkstack(td);
	if (td->td_kstack != 0)
		vm_thread_dispose(td);
	uma_zfree(thread_zone, td);
}

/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder.  This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().  This may not be needed now as we are under schedlock.
 * Maybe we can just do a thread_stash() as thr_exit1 does.
 */
/* XXX
 * libthr expects its thread exit to return for the last
 * thread, meaning that the program is back to non-threaded
 * mode, I guess.  Because we do this (cpu_throw) unconditionally
 * here, they have their own version of it (thr_exit1())
 * that doesn't do it all if this was the last thread.
 * It is also called from thread_suspend_check().
 * Of course in the end, they end up coming here through exit1()
 * anyhow.  After fixing 'thr' to play by the rules we should be able
 * to merge these two functions together.
 *
 * Called from:
 *	exit1()
 *	kse_exit()
 *	thr_exit()
 *	ifdef KSE
 *	thread_user_enter()
 *	thread_userret()
 *	endif
 *	thread_suspend_check()
 */
void
thread_exit(void)
{
	uint64_t new_switchtime;
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&Giant, MA_NOTOWNED);

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
	    (long)p->p_pid, p->p_comm);
	KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));

#ifdef AUDIT
	AUDIT_SYSCALL_EXIT(0, td);
#endif

#ifdef KSE
	if (td->td_standin != NULL) {
		/*
		 * Note that we don't need to free the cred here as it
		 * is done in thread_reap().
		 */
		thread_zombie(td->td_standin);
		td->td_standin = NULL;
	}
#endif

	umtx_thread_exit(td);

	/*
	 * drop FPU & debug register state storage, or any other
	 * architecture specific resources that
	 * would not be on a new untouched process.
	 */
	cpu_thread_exit(td);	/* XXXSMP */

	/* Do the same timestamp bookkeeping that mi_switch() would do. */
	new_switchtime = cpu_ticks();
	p->p_rux.rux_runtime += (new_switchtime - PCPU_GET(switchtime));
	PCPU_SET(switchtime, new_switchtime);
	PCPU_SET(switchticks, ticks);
	PCPU_INC(cnt.v_swtch);
	/* Save our resource usage in our process. */
	td->td_ru.ru_nvcsw++;
	rucollect(&p->p_ru, &td->td_ru);
	/*
	 * The last thread is left attached to the process so that the
	 * whole bundle gets recycled.  Skip all this stuff if we never
	 * had threads.  exit1() clears all signs of other threads when
	 * it goes to single threading, so the last thread always
	 * takes the short path.
	 */
	if (p->p_flag & P_HADTHREADS) {
		if (p->p_numthreads > 1) {
			thread_lock(td);
#ifdef KSE
			kse_unlink(td);
#else
			thread_unlink(td);
#endif
			thread_unlock(td);
			td2 = FIRST_THREAD_IN_PROC(p);
			sched_exit_thread(td2, td);

			/*
			 * The test below is NOT true if we are the
			 * sole exiting thread. P_STOPPED_SINGLE is unset
			 * in exit1() after it is the only survivor.
			 */
			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
				if (p->p_numthreads == p->p_suspcount) {
					thread_lock(p->p_singlethread);
					wakeup_swapper = thread_unsuspend_one(
					    p->p_singlethread);
					thread_unlock(p->p_singlethread);
					if (wakeup_swapper)
						kick_proc0();
				}
			}

			atomic_add_int(&td->td_proc->p_exitthreads, 1);
			PCPU_SET(deadthread, td);
		} else {
			/*
			 * The last thread is exiting, but it did not come
			 * here via exit(); what should we do?
			 * Theoretically this can't happen:
			 *	exit1() - clears threading flags before coming here
			 *	kse_exit() - treats last thread specially
			 *	thr_exit() - treats last thread specially
			 *	ifdef KSE
			 *	thread_user_enter() - only if more exist
			 *	thread_userret() - only if more exist
			 *	endif
			 *	thread_suspend_check() - only if more exist
			 */
			panic("thread_exit: Last thread exiting on its own");
		}
	}
#ifdef HWPMC_HOOKS
	/*
	 * If this thread is part of a process that is being tracked by
	 * hwpmc(4), inform the module of the thread's impending exit.
	 */
	if (PMC_PROC_IS_USING_PMCS(td->td_proc))
		PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
#endif
	PROC_UNLOCK(p);
	thread_lock(td);
	/* Save our tick information with both the thread and proc locked */
	ruxagg(&p->p_rux, td);
	PROC_SUNLOCK(p);
	td->td_state = TDS_INACTIVE;
	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
	sched_throw(td);
	panic("I'm a teapot!");
	/* NOTREACHED */
}

/*
 * Do any thread specific cleanups that may be needed in wait().
 * Called with Giant, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	mtx_assert(&Giant, MA_NOTOWNED);
	KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
	td = FIRST_THREAD_IN_PROC(p);
#ifdef KSE
	if (td->td_standin != NULL) {
		if (td->td_standin->td_ucred != NULL) {
			crfree(td->td_standin->td_ucred);
			td->td_standin->td_ucred = NULL;
		}
		thread_free(td->td_standin);
		td->td_standin = NULL;
	}
#endif
	/* Lock the last thread so we spin until it exits cpu_throw(). */
	thread_lock(td);
	thread_unlock(td);
	/* Wait for any remaining threads to exit cpu_throw(). */
	while (p->p_exitthreads)
		sched_relinquish(curthread);
	cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_clean(td);
	crfree(td->td_ucred);
	thread_reap();	/* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 *
 * Note that we do not link to the proc's ucred here.
 * The thread is linked as if running but no KSE assigned.
 * Called from:
 *	proc_linkup()
 *	thread_schedule_upcall()
 *	thr_create()
 */
void
thread_link(struct thread *td, struct proc *p)
{

	/*
	 * XXX This can't be enabled because it's called for proc0 before
	 * its spinlock has been created.
	 * PROC_SLOCK_ASSERT(p, MA_OWNED);
	 */
	td->td_state = TDS_INACTIVE;
	td->td_proc = p;
	td->td_flags = TDF_INMEM;

	LIST_INIT(&td->td_contested);
	sigqueue_init(&td->td_sigqueue, p);
	callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
	p->p_numthreads++;
}

/*
 * Convert a process with one thread to an unthreaded process.
 * Called from:
 *	thread_single(exit)  (called from execve and exit)
 *	kse_exit() XXX may need cleaning up wrt KSE stuff
 */
void
thread_unthread(struct thread *td)
{
	struct proc *p = td->td_proc;

	KASSERT((p->p_numthreads == 1), ("Unthreading with >1 threads"));
#ifdef KSE
	thread_lock(td);
	upcall_remove(td);
	thread_unlock(td);
	p->p_flag &= ~(P_SA|P_HADTHREADS);
	td->td_mailbox = NULL;
	td->td_pflags &= ~(TDP_SA | TDP_CAN_UNBIND);
	if (td->td_standin != NULL) {
		thread_zombie(td->td_standin);
		td->td_standin = NULL;
	}
#else
	p->p_flag &= ~P_HADTHREADS;
#endif
}

/*
 * Called from:
 *	thread_exit()
 */
void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	/* could clear a few other things here */
	/* Must NOT clear links to proc! */
}

/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar).  Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single threaded in the suspend mode when
 * there are no threads in user mode.  Threads in the kernel must be
 * allowed to continue until they get to the user boundary.  They may even
 * copy out their return values and data before suspending.  They may
 * however be accelerated in reaching the user boundary as we will wake up
 * any sleeping threads that are interruptible (PCATCH).
 */
int
thread_single(int mode)
{
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int remaining, wakeup_swapper;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((td != NULL), ("curthread is NULL"));

	if ((p->p_flag & P_HADTHREADS) == 0)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread != NULL && p->p_singlethread != td)
		return (1);

	if (mode == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
		p->p_flag &= ~P_SINGLE_BOUNDARY;
	} else {
		p->p_flag &= ~P_SINGLE_EXIT;
		if (mode == SINGLE_BOUNDARY)
			p->p_flag |= P_SINGLE_BOUNDARY;
		else
			p->p_flag &= ~P_SINGLE_BOUNDARY;
	}
	p->p_flag |= P_STOPPED_SINGLE;
	PROC_SLOCK(p);
	p->p_singlethread = td;
	if (mode == SINGLE_EXIT)
		remaining = p->p_numthreads;
	else if (mode == SINGLE_BOUNDARY)
		remaining = p->p_numthreads - p->p_boundary_count;
	else
		remaining = p->p_numthreads - p->p_suspcount;
	while (remaining != 1) {
		if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
			goto stopme;
		wakeup_swapper = 0;
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			td2->td_flags |= TDF_ASTPENDING;
			if (TD_IS_INHIBITED(td2)) {
				switch (mode) {
				case SINGLE_EXIT:
					if (td->td_flags & TDF_DBSUSPEND)
						td->td_flags &= ~TDF_DBSUSPEND;
					if (TD_IS_SUSPENDED(td2))
						wakeup_swapper |=
						    thread_unsuspend_one(td2);
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR))
						wakeup_swapper |=
						    sleepq_abort(td2, EINTR);
					break;
				case SINGLE_BOUNDARY:
					if (TD_IS_SUSPENDED(td2) &&
					    !(td2->td_flags & TDF_BOUNDARY))
						wakeup_swapper |=
						    thread_unsuspend_one(td2);
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR))
						wakeup_swapper |=
						    sleepq_abort(td2, ERESTART);
					break;
				default:
					if (TD_IS_SUSPENDED(td2)) {
						thread_unlock(td2);
						continue;
					}
					/*
					 * maybe other inhibited states too?
					 */
					if ((td2->td_flags & TDF_SINTR) &&
					    (td2->td_inhibitors &
					    (TDI_SLEEPING | TDI_SWAPPED)))
						thread_suspend_one(td2);
					break;
				}
			}
#ifdef SMP
			else if (TD_IS_RUNNING(td2) && td != td2) {
				forward_signal(td2);
			}
#endif
			thread_unlock(td2);
		}
		if (wakeup_swapper)
			kick_proc0();
		if (mode == SINGLE_EXIT)
			remaining = p->p_numthreads;
		else if (mode == SINGLE_BOUNDARY)
			remaining = p->p_numthreads - p->p_boundary_count;
		else
			remaining = p->p_numthreads - p->p_suspcount;

		/*
		 * Maybe we suspended some threads; was it enough?
		 */
		if (remaining == 1)
			break;

stopme:
		/*
		 * Wake us up when everyone else has suspended.
		 * In the mean time we suspend as well.
		 */
		thread_suspend_switch(td);
		if (mode == SINGLE_EXIT)
			remaining = p->p_numthreads;
		else if (mode == SINGLE_BOUNDARY)
			remaining = p->p_numthreads - p->p_boundary_count;
		else
			remaining = p->p_numthreads - p->p_suspcount;
	}
	if (mode == SINGLE_EXIT) {
		/*
		 * We have gotten rid of all the other threads and we
		 * are about to either exit or exec.  In either case,
		 * we try our utmost to revert to being a non-threaded
		 * process.
		 */
		p->p_singlethread = NULL;
		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT);
		thread_unthread(td);
	}
	PROC_SUNLOCK(p);
	return (0);
}
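
/*
 * Typical call pattern (a hedged sketch; see exit1() and the execve()
 * path for the real thing):
 *
 *	PROC_LOCK(p);
 *	error = thread_single(SINGLE_EXIT);	(or SINGLE_BOUNDARY etc.)
 *
 * The caller proceeds to tear down or replace the address space only
 * once this returns 0.
 */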

/*
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is nonzero, the thread must be able to
 * accept 0 (caller may continue) or a nonzero error (caller must abort)
 * as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          |   returns 0 or error
 *               | when ST ends       |   immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       |   returns error
 *               |                    |   immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is in effect, even a single-threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (P_SHOULDSTOP(p) ||
	    ((p->p_flag & P_TRACED) && (td->td_flags & TDF_DBSUSPEND))) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading. Single threader need not stop.
			 * XXX Should be safe to access unlocked
			 * as it can only be set to be true by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
			return (EINTR);

		/* Should we goto user boundary if we didn't come from there? */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
			return (ERESTART);

		/* If thread will exit, flush its pending signals */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
			sigqueue_flush(&td->td_sigqueue);

		PROC_SLOCK(p);
		thread_stopped(p);
		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
			thread_exit();
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount + 1) {
				thread_lock(p->p_singlethread);
				wakeup_swapper =
				    thread_unsuspend_one(p->p_singlethread);
				thread_unlock(p->p_singlethread);
				if (wakeup_swapper)
					kick_proc0();
			}
		}
		PROC_UNLOCK(p);
		thread_lock(td);
		/*
		 * When a thread suspends, it just
		 * gets taken off all queues.
		 */
		thread_suspend_one(td);
		if (return_instead == 0) {
			p->p_boundary_count++;
			td->td_flags |= TDF_BOUNDARY;
		}
		PROC_SUNLOCK(p);
		mi_switch(SW_INVOL, NULL);
		if (return_instead == 0)
			td->td_flags &= ~TDF_BOUNDARY;
		thread_unlock(td);
		PROC_LOCK(p);
		if (return_instead == 0)
			p->p_boundary_count--;
	}
	return (0);
}
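
/*
 * Illustrative caller (hedged sketch; userret() and interruptible sleep
 * paths are the real consumers):
 *
 *	PROC_LOCK(p);
 *	error = thread_suspend_check(1);
 *	PROC_UNLOCK(p);
 *	if (error != 0)
 *		return (error);		(EINTR or ERESTART: back out)
 */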

void
thread_suspend_switch(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	/*
	 * We implement thread_suspend_one in stages here to avoid
	 * dropping the proc lock while the thread lock is owned.
	 */
	thread_stopped(p);
	p->p_suspcount++;
	PROC_UNLOCK(p);
	thread_lock(td);
	sched_sleep(td);
	TD_SET_SUSPENDED(td);
	PROC_SUNLOCK(p);
	DROP_GIANT();
	mi_switch(SW_VOL, NULL);
	thread_unlock(td);
	PICKUP_GIANT();
	PROC_LOCK(p);
	PROC_SLOCK(p);
}

void
thread_suspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	sched_sleep(td);
	TD_SET_SUSPENDED(td);
}

int
thread_unsuspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
	TD_CLR_SUSPENDED(td);
	p->p_suspcount--;
	return (setrunnable(td));
}
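
/*
 * Note the calling convention above: callers accumulate the return
 * values of one or more thread_unsuspend_one() calls and invoke
 * kick_proc0() only after the thread locks are dropped, so that waking
 * the swapper never happens with a thread lock held.
 */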

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	wakeup_swapper = 0;
	if (!P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td);
			}
			thread_unlock(td);
		}
	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
	    (p->p_numthreads == p->p_suspcount)) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request. Now we've downgraded to single-threaded,
		 * let it continue.
		 */
		thread_lock(p->p_singlethread);
		wakeup_swapper = thread_unsuspend_one(p->p_singlethread);
		thread_unlock(p->p_singlethread);
	}
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * End the single-threading mode.
 */
void
thread_single_end(void)
{
	struct thread *td;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY);
	PROC_SLOCK(p);
	p->p_singlethread = NULL;
	wakeup_swapper = 0;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process. The single threader must be allowed
	 * to continue however as this is a bad place to stop.
	 */
	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td);
			}
			thread_unlock(td);
		}
	}
	PROC_SUNLOCK(p);
	if (wakeup_swapper)
		kick_proc0();
}

struct thread *
thread_find(struct proc *p, lwpid_t tid)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK(p);
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_tid == tid)
			break;
	}
	PROC_SUNLOCK(p);
	return (td);
}
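
/*
 * Illustrative lookup (hedged sketch; the thr_*() system calls follow
 * this pattern):
 *
 *	PROC_LOCK(p);
 *	ttd = thread_find(p, tid);
 *	if (ttd == NULL)
 *		error = ESRCH;
 *	...
 *	PROC_UNLOCK(p);
 */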