/*-
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 *  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_witness.h"
#include "opt_hwpmc_hooks.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/8.3/sys/kern/kern_thread.c 230080 2012-01-13 20:15:49Z jhb $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/selinfo.h>
#include <sys/turnstile.h>
#include <sys/ktr.h>
#include <sys/umtx.h>
#include <sys/cpuset.h>
#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <sys/eventhandler.h>

/*
 * thread related storage.
 */
static uma_zone_t thread_zone;

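/*
 * Threads that have finished cpu_throw() but cannot yet be freed are
 * parked on this list by thread_zombie(); thread_reap() (called from
 * thread_alloc() and thread_wait()) eventually releases them.
 */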
TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
static struct mtx zombie_lock;
MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN);

static void thread_zombie(struct thread *);

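/*
 * Thread IDs are handed out by a unr(9) unit-number allocator protected
 * by tid_lock; see threadinit() for the range setup.
 */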
struct mtx tid_lock;
static struct unrhdr *tid_unrhdr;

/*
 * Prepare a thread for use.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_oncpu = NOCPU;

	td->td_tid = alloc_unr(tid_unrhdr);
	td->td_syscalls = 0;

	/*
	 * Note that td_critnest begins life as 1 because the thread is not
	 * running and is thereby implicitly waiting to be on the receiving
	 * end of a context switch.
	 */
	td->td_critnest = 1;
	EVENTHANDLER_INVOKE(thread_ctor, td);
#ifdef AUDIT
	audit_thread_alloc(td);
#endif
	umtx_thread_alloc(td);
	return (0);
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
#ifdef AUDIT
	audit_thread_free(td);
#endif
	/* Free all OSD associated with this thread. */
	osd_thread_exit(td);

	EVENTHANDLER_INVOKE(thread_dtor, td);
	free_unr(tid_unrhdr, td->td_tid);
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static int
thread_init(void *mem, int size, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;

	td->td_sleepqueue = sleepq_alloc();
	td->td_turnstile = turnstile_alloc();
	EVENTHANDLER_INVOKE(thread_init, td);
	/* Scheduler-private state lives directly after the struct thread. */
	td->td_sched = (struct td_sched *)&td[1];
	umtx_thread_init(td);
	td->td_kstack = 0;
	return (0);
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	EVENTHANDLER_INVOKE(thread_fini, td);
	turnstile_free(td->td_turnstile);
	sleepq_free(td->td_sleepqueue);
	umtx_thread_fini(td);
	seltdfini(td);
}

/*
 * For a newly created process, link up all the structures and its
 * initial threads etc.
 * Called from:
 *	{arch}/{arch}/machdep.c		ia64_init(), init386() etc.
 *	proc_dtor() (should go away)
 *	proc_init()
 */
void
proc_linkup0(struct proc *p, struct thread *td)
{
	TAILQ_INIT(&p->p_threads);	/* all threads in proc */
	proc_linkup(p, td);
}

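/*
 * Initialize the per-process pieces (signal queue, message queue
 * notifications, thread count) and link in the first thread.
 */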
void
proc_linkup(struct proc *p, struct thread *td)
{

	sigqueue_init(&p->p_sigqueue, p);
	p->p_ksi = ksiginfo_alloc(1);
	if (p->p_ksi != NULL) {
		/* XXX p_ksi may be null if ksiginfo zone is not ready */
		p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
	}
	LIST_INIT(&p->p_mqnotifier);
	p->p_numthreads = 0;
	thread_link(td, p);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

	mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);
	/* leave one number for thread0 */
	tid_unrhdr = new_unrhdr(PID_MAX + 2, INT_MAX, &tid_lock);

	/* The UMA align argument is a mask; 16 - 1 requests 16-byte items. */
	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    16 - 1, 0);
}

/*
 * Place an unused thread on the zombie list.
 * Use the slpq as that must be unused by now.
 */
void
thread_zombie(struct thread *td)
{
	mtx_lock_spin(&zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq);
	mtx_unlock_spin(&zombie_lock);
}

/*
 * Release a thread that has exited after cpu_throw().
 */
void
thread_stash(struct thread *td)
{
	atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
	thread_zombie(td);
}

/*
 * Reap zombie resources.
 */
void
thread_reap(void)
{
	struct thread *td_first, *td_next;

	/*
	 * Don't even bother to lock if none at this instant;
	 * we really don't care about the next instant.
	 */
	if (!TAILQ_EMPTY(&zombie_threads)) {
		mtx_lock_spin(&zombie_lock);
		td_first = TAILQ_FIRST(&zombie_threads);
		if (td_first)
			TAILQ_INIT(&zombie_threads);
		mtx_unlock_spin(&zombie_lock);
		while (td_first) {
			td_next = TAILQ_NEXT(td_first, td_slpq);
			if (td_first->td_ucred)
				crfree(td_first->td_ucred);
			thread_free(td_first);
			td_first = td_next;
		}
	}
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(int pages)
{
	struct thread *td;

	thread_reap();	/* check if any zombies to get */

	td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK);
	KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
	if (!vm_thread_new(td, pages)) {
		uma_zfree(thread_zone, td);
		return (NULL);
	}
	cpu_thread_alloc(td);
	return (td);
}

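/*
 * Give a stackless thread a kernel stack of 'pages' pages.
 * Returns 1 on success, 0 if the stack could not be allocated.
 */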
int
thread_alloc_stack(struct thread *td, int pages)
{

	KASSERT(td->td_kstack == 0,
	    ("thread_alloc_stack called on a thread with kstack"));
	if (!vm_thread_new(td, pages))
		return (0);
	cpu_thread_alloc(td);
	return (1);
}

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{

	lock_profile_thread_exit(td);
	if (td->td_cpuset)
		cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_free(td);
	if (td->td_kstack != 0)
		vm_thread_dispose(td);
	uma_zfree(thread_zone, td);
}

/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder. This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 */
void
thread_exit(void)
{
	uint64_t runtime, new_switchtime;
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&Giant, MA_NOTOWNED);

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
	    (long)p->p_pid, td->td_name);
	KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));

#ifdef AUDIT
	AUDIT_SYSCALL_EXIT(0, td);
#endif
	umtx_thread_exit(td);
	/*
	 * Drop FPU & debug register state storage, or any other
	 * architecture specific resources that
	 * would not be on a new untouched process.
	 */
	cpu_thread_exit(td);	/* XXXSMP */

	/*
	 * The last thread is left attached to the process
	 * so that the whole bundle gets recycled. Skip
	 * all this stuff if we never had threads.
	 * EXIT clears all signs of other threads when
	 * it goes to single threading, so the last thread always
	 * takes the short path.
	 */
	if (p->p_flag & P_HADTHREADS) {
		if (p->p_numthreads > 1) {
			thread_unlink(td);
			td2 = FIRST_THREAD_IN_PROC(p);
			sched_exit_thread(td2, td);

			/*
			 * The test below is NOT true if we are the
			 * sole exiting thread. P_STOPPED_SINGLE is unset
			 * in exit1() after it is the only survivor.
			 */
			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
				if (p->p_numthreads == p->p_suspcount) {
					thread_lock(p->p_singlethread);
					wakeup_swapper = thread_unsuspend_one(
						p->p_singlethread);
					thread_unlock(p->p_singlethread);
					if (wakeup_swapper)
						kick_proc0();
				}
			}

			atomic_add_int(&td->td_proc->p_exitthreads, 1);
			PCPU_SET(deadthread, td);
		} else {
			/*
			 * The last thread is exiting, but not through exit().
			 */
			panic("thread_exit: Last thread exiting on its own");
		}
	}
#ifdef HWPMC_HOOKS
	/*
	 * If this thread is part of a process that is being tracked
	 * by hwpmc(4), inform the module of the thread's impending exit.
	 */
	if (PMC_PROC_IS_USING_PMCS(td->td_proc))
		PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
#endif
	PROC_UNLOCK(p);

	/* Do the same timestamp bookkeeping that mi_switch() would do. */
	new_switchtime = cpu_ticks();
	runtime = new_switchtime - PCPU_GET(switchtime);
	td->td_runtime += runtime;
	td->td_incruntime += runtime;
	PCPU_SET(switchtime, new_switchtime);
	PCPU_SET(switchticks, ticks);
	PCPU_INC(cnt.v_swtch);

	/* Save our resource usage in our process. */
	td->td_ru.ru_nvcsw++;
	ruxagg(p, td);
	rucollect(&p->p_ru, &td->td_ru);

	thread_lock(td);
	PROC_SUNLOCK(p);
	td->td_state = TDS_INACTIVE;
#ifdef WITNESS
	witness_thread_exit(td);
#endif
	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
	sched_throw(td);
	panic("I'm a teapot!");
	/* NOTREACHED */
}

/*
 * Do any thread specific cleanups that may be needed in wait().
 * Called with Giant, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	mtx_assert(&Giant, MA_NOTOWNED);
	KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
	td = FIRST_THREAD_IN_PROC(p);
	/* Lock the last thread so we spin until it exits cpu_throw(). */
	thread_lock(td);
	thread_unlock(td);
	/* Wait for any remaining threads to exit cpu_throw(). */
	while (p->p_exitthreads)
		sched_relinquish(curthread);
	lock_profile_thread_exit(td);
	cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_clean(td);
	crfree(td->td_ucred);
	thread_reap();	/* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 */
void
thread_link(struct thread *td, struct proc *p)
{

	/*
	 * XXX This can't be enabled because it's called for proc0 before
	 * its lock has been created.
	 * PROC_LOCK_ASSERT(p, MA_OWNED);
	 */
	td->td_state = TDS_INACTIVE;
	td->td_proc = p;
	td->td_flags = TDF_INMEM;

	LIST_INIT(&td->td_contested);
	LIST_INIT(&td->td_lprof[0]);
	LIST_INIT(&td->td_lprof[1]);
	sigqueue_init(&td->td_sigqueue, p);
	callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
	p->p_numthreads++;
}

/*
 * Convert a process with one thread to an unthreaded process.
 */
void
thread_unthread(struct thread *td)
{
	struct proc *p = td->td_proc;

	KASSERT((p->p_numthreads == 1), ("Unthreading with >1 threads"));
	p->p_flag &= ~P_HADTHREADS;
}

/*
 * Called from:
 *	thread_exit()
 */
void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	/* could clear a few other things here */
	/* Must NOT clear links to proc! */
}

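/*
 * Count how many threads still need to suspend or exit before the
 * single-threading request expressed by 'mode' is satisfied;
 * thread_single() loops until this drops to 1 (just the caller).
 */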
static int
calc_remaining(struct proc *p, int mode)
{
	int remaining;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	if (mode == SINGLE_EXIT)
		remaining = p->p_numthreads;
	else if (mode == SINGLE_BOUNDARY)
		remaining = p->p_numthreads - p->p_boundary_count;
	else if (mode == SINGLE_NO_EXIT)
		remaining = p->p_numthreads - p->p_suspcount;
	else
		panic("calc_remaining: wrong mode %d", mode);
	return (remaining);
}

/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar). Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single threaded in the suspend mode when
 * there are no threads in user mode. Threads in the kernel must be
 * allowed to continue until they get to the user boundary. They may even
 * copy out their return values and data before suspending. They may
 * however be accelerated in reaching the user boundary as we will wake
 * up any sleeping threads that are interruptible (PCATCH).
 */
int
thread_single(int mode)
{
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int remaining, wakeup_swapper;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((td != NULL), ("curthread is NULL"));

	if ((p->p_flag & P_HADTHREADS) == 0)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread != NULL && p->p_singlethread != td)
		return (1);

	if (mode == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
		p->p_flag &= ~P_SINGLE_BOUNDARY;
	} else {
		p->p_flag &= ~P_SINGLE_EXIT;
		if (mode == SINGLE_BOUNDARY)
			p->p_flag |= P_SINGLE_BOUNDARY;
		else
			p->p_flag &= ~P_SINGLE_BOUNDARY;
	}
	p->p_flag |= P_STOPPED_SINGLE;
	PROC_SLOCK(p);
	p->p_singlethread = td;
	remaining = calc_remaining(p, mode);
	while (remaining != 1) {
		if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
			goto stopme;
		wakeup_swapper = 0;
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
			if (TD_IS_INHIBITED(td2)) {
				switch (mode) {
				case SINGLE_EXIT:
					if (TD_IS_SUSPENDED(td2))
						wakeup_swapper |=
						    thread_unsuspend_one(td2);
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR))
						wakeup_swapper |=
						    sleepq_abort(td2, EINTR);
					break;
				case SINGLE_BOUNDARY:
					if (TD_IS_SUSPENDED(td2) &&
					    !(td2->td_flags & TDF_BOUNDARY))
						wakeup_swapper |=
						    thread_unsuspend_one(td2);
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR))
						wakeup_swapper |=
						    sleepq_abort(td2, ERESTART);
					break;
				case SINGLE_NO_EXIT:
					if (TD_IS_SUSPENDED(td2) &&
					    !(td2->td_flags & TDF_BOUNDARY))
						wakeup_swapper |=
						    thread_unsuspend_one(td2);
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR))
						wakeup_swapper |=
						    sleepq_abort(td2, ERESTART);
					break;
				default:
					break;
				}
			}
#ifdef SMP
			else if (TD_IS_RUNNING(td2) && td != td2) {
				forward_signal(td2);
			}
#endif
			thread_unlock(td2);
		}
		if (wakeup_swapper)
			kick_proc0();
		remaining = calc_remaining(p, mode);

		/*
		 * Maybe we suspended some threads; was it enough?
		 */
		if (remaining == 1)
			break;

stopme:
		/*
		 * Wake us up when everyone else has suspended.
		 * In the meantime we suspend as well.
		 */
		thread_suspend_switch(td);
		remaining = calc_remaining(p, mode);
	}
	if (mode == SINGLE_EXIT) {
		/*
		 * We have gotten rid of all the other threads and we
		 * are about to either exit or exec. In either case,
		 * we try our utmost to revert to being a non-threaded
		 * process.
		 */
		p->p_singlethread = NULL;
		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT);
		thread_unthread(td);
	}
	PROC_SUNLOCK(p);
	return (0);
}

/*
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is non-zero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          |   returns 0 or 1
 *               | when ST ends       |   immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       |   returns 1
 *               |                    |   immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (P_SHOULDSTOP(p) ||
	    ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_SUSPEND))) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading. Single threader need not stop.
			 * XXX Should be safe to access unlocked
			 * as it can only be set to be true by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
			return (EINTR);

		/* Should we goto user boundary if we didn't come from there? */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
			return (ERESTART);

		/* If thread will exit, flush its pending signals */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
			sigqueue_flush(&td->td_sigqueue);

		PROC_SLOCK(p);
		thread_stopped(p);
		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
			thread_exit();
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount + 1) {
				thread_lock(p->p_singlethread);
				wakeup_swapper =
				    thread_unsuspend_one(p->p_singlethread);
				thread_unlock(p->p_singlethread);
				if (wakeup_swapper)
					kick_proc0();
			}
		}
		PROC_UNLOCK(p);
		thread_lock(td);
		/*
		 * When a thread suspends, it just
		 * gets taken off all queues.
		 */
		thread_suspend_one(td);
		if (return_instead == 0) {
			p->p_boundary_count++;
			td->td_flags |= TDF_BOUNDARY;
		}
		PROC_SUNLOCK(p);
		mi_switch(SW_INVOL | SWT_SUSPEND, NULL);
		if (return_instead == 0)
			td->td_flags &= ~TDF_BOUNDARY;
		thread_unlock(td);
		PROC_LOCK(p);
		if (return_instead == 0) {
			PROC_SLOCK(p);
			p->p_boundary_count--;
			PROC_SUNLOCK(p);
		}
	}
	return (0);
}

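/*
 * Suspend the current thread and switch away, like thread_suspend_one()
 * plus the context switch. The proc lock and its spinlock are dropped
 * around mi_switch() and reacquired before returning to the caller.
 */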
void
thread_suspend_switch(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	/*
	 * We implement thread_suspend_one in stages here to avoid
	 * dropping the proc lock while the thread lock is owned.
	 */
	thread_stopped(p);
	p->p_suspcount++;
	PROC_UNLOCK(p);
	thread_lock(td);
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
	PROC_SUNLOCK(p);
	DROP_GIANT();
	mi_switch(SW_VOL | SWT_SUSPEND, NULL);
	thread_unlock(td);
	PICKUP_GIANT();
	PROC_LOCK(p);
	PROC_SLOCK(p);
}

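/*
 * Mark a thread suspended without switching; the caller holds the
 * process spinlock and the thread lock and is responsible for any
 * context switch that must follow.
 */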
void
thread_suspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
}

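/*
 * Clear a thread's suspension and make it runnable again. Returns
 * the value of setrunnable(), which is non-zero when the swapper
 * (proc0) must be woken; callers accumulate this into a local
 * 'wakeup_swapper' flag and call kick_proc0() once locks are dropped.
 */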
int
thread_unsuspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
	TD_CLR_SUSPENDED(td);
	p->p_suspcount--;
	return (setrunnable(td));
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	wakeup_swapper = 0;
	if (!P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td);
			}
			thread_unlock(td);
		}
	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
	    (p->p_numthreads == p->p_suspcount)) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request. Now we've downgraded to single-threaded,
		 * let it continue.
		 */
		thread_lock(p->p_singlethread);
		wakeup_swapper = thread_unsuspend_one(p->p_singlethread);
		thread_unlock(p->p_singlethread);
	}
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * End the single threading mode.
 */
void
thread_single_end(void)
{
	struct thread *td;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY);
	PROC_SLOCK(p);
	p->p_singlethread = NULL;
	wakeup_swapper = 0;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process. The single threader must be allowed
	 * to continue however as this is a bad place to stop.
	 */
	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td);
			}
			thread_unlock(td);
		}
	}
	PROC_SUNLOCK(p);
	if (wakeup_swapper)
		kick_proc0();
}

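/*
 * Look up a thread in process 'p' by its thread ID. Returns the
 * matching thread, or NULL if no thread has that tid. The caller
 * must hold the process lock.
 */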
struct thread *
thread_find(struct proc *p, lwpid_t tid)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_tid == tid)
			break;
	}
	return (td);
}