/*-
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_witness.h"
#include "opt_hwpmc_hooks.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/8.1/sys/kern/kern_thread.c 208580 2010-05-26 19:26:28Z kib $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/selinfo.h>
#include <sys/turnstile.h>
#include <sys/ktr.h>
#include <sys/umtx.h>
#include <sys/cpuset.h>
#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <sys/eventhandler.h>

/*
 * thread related storage.
 */
static uma_zone_t thread_zone;

SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");

int max_threads_per_proc = 1500;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
    &max_threads_per_proc, 0, "Limit on threads per proc");

int max_threads_hits;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
    &max_threads_hits, 0, "Number of times the per-process thread limit was hit");

TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
static struct mtx zombie_lock;
MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN);

static void thread_zombie(struct thread *);

struct mtx tid_lock;
static struct unrhdr *tid_unrhdr;

/*
 * Prepare a thread for use.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_oncpu = NOCPU;

	td->td_tid = alloc_unr(tid_unrhdr);
	td->td_syscalls = 0;

	/*
	 * Note that td_critnest begins life as 1 because the thread is not
	 * running and is thereby implicitly waiting to be on the receiving
	 * end of a context switch.
	 */
	td->td_critnest = 1;
	EVENTHANDLER_INVOKE(thread_ctor, td);
#ifdef AUDIT
	audit_thread_alloc(td);
#endif
	umtx_thread_alloc(td);
	return (0);
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
#ifdef AUDIT
	audit_thread_free(td);
#endif
	/* Free all OSD associated to this thread. */
	osd_thread_exit(td);

	EVENTHANDLER_INVOKE(thread_dtor, td);
	free_unr(tid_unrhdr, td->td_tid);
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static int
thread_init(void *mem, int size, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;

	td->td_sleepqueue = sleepq_alloc();
	td->td_turnstile = turnstile_alloc();
	EVENTHANDLER_INVOKE(thread_init, td);
	td->td_sched = (struct td_sched *)&td[1];
	umtx_thread_init(td);
	td->td_kstack = 0;
	return (0);
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	EVENTHANDLER_INVOKE(thread_fini, td);
	turnstile_free(td->td_turnstile);
	sleepq_free(td->td_sleepqueue);
	umtx_thread_fini(td);
	seltdfini(td);
}
/*
 * For a newly created process, link up all the structures and its
 * initial thread.  Called from:
 *	{arch}/{arch}/machdep.c		ia64_init(), init386() etc.
 *	proc_dtor() (should go away)
 *	proc_init()
 */
void
proc_linkup0(struct proc *p, struct thread *td)
{
	TAILQ_INIT(&p->p_threads);	/* all threads in proc */
	proc_linkup(p, td);
}

void
proc_linkup(struct proc *p, struct thread *td)
{

	sigqueue_init(&p->p_sigqueue, p);
	p->p_ksi = ksiginfo_alloc(1);
	if (p->p_ksi != NULL) {
		/* XXX p_ksi may be null if ksiginfo zone is not ready */
		p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
	}
	LIST_INIT(&p->p_mqnotifier);
	p->p_numthreads = 0;
	thread_link(td, p);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

	mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);
	/* leave one number for thread0 */
	tid_unrhdr = new_unrhdr(PID_MAX + 2, INT_MAX, &tid_lock);

	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    16 - 1, 0);		/* 16-byte alignment mask, no zone flags */
}
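
/*
 * Illustrative sketch (guarded out, not part of this file): the TID
 * allocation above follows the generic unit-number-allocator pattern
 * from subr_unit.c.  All names below are hypothetical and exist only
 * to show the new_unrhdr()/alloc_unr()/free_unr() API shape.
 */
#if 0
static struct mtx example_unr_mtx;
static struct unrhdr *example_unrhdr;

static void
example_unr_setup(void)
{
	mtx_init(&example_unr_mtx, "example unr", NULL, MTX_DEF);
	/* Hand out unit numbers in [100, 199]; the mutex serializes it. */
	example_unrhdr = new_unrhdr(100, 199, &example_unr_mtx);
}

static void
example_unr_cycle(void)
{
	int unit;

	unit = alloc_unr(example_unrhdr);	/* returns -1 when exhausted */
	if (unit != -1)
		free_unr(example_unrhdr, unit);	/* numbers are recycled */
}
#endif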

/*
 * Place an unused thread on the zombie list.
 * Use the slpq as that must be unused by now.
 */
void
thread_zombie(struct thread *td)
{
	mtx_lock_spin(&zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq);
	mtx_unlock_spin(&zombie_lock);
}

/*
 * Release a thread that has exited after cpu_throw().
 */
void
thread_stash(struct thread *td)
{
	atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
	thread_zombie(td);
}

/*
 * Reap zombie resources.
 */
void
thread_reap(void)
{
	struct thread *td_first, *td_next;

	/*
	 * Don't even bother to lock if none at this instant;
	 * we really don't care about the next instant.
	 */
	if (!TAILQ_EMPTY(&zombie_threads)) {
		mtx_lock_spin(&zombie_lock);
		td_first = TAILQ_FIRST(&zombie_threads);
		if (td_first)
			TAILQ_INIT(&zombie_threads);
		mtx_unlock_spin(&zombie_lock);
		while (td_first) {
			td_next = TAILQ_NEXT(td_first, td_slpq);
			if (td_first->td_ucred)
				crfree(td_first->td_ucred);
			thread_free(td_first);
			td_first = td_next;
		}
	}
}
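
/*
 * The reap pattern above generalizes: test for emptiness without the
 * lock (a stale answer only defers work to the next call), detach the
 * entire queue in one short lock hold, then free the detached entries
 * with no lock held.  A minimal sketch with hypothetical names:
 */
#if 0
struct item {
	TAILQ_ENTRY(item) link;
};
static TAILQ_HEAD(, item) pending = TAILQ_HEAD_INITIALIZER(pending);
static struct mtx pending_lock;			/* initialized MTX_SPIN */

static void
drain_pending(void)
{
	struct item *it, *next;

	if (TAILQ_EMPTY(&pending))		/* unlocked fast path */
		return;
	mtx_lock_spin(&pending_lock);
	it = TAILQ_FIRST(&pending);
	TAILQ_INIT(&pending);			/* detach everything at once */
	mtx_unlock_spin(&pending_lock);
	for (; it != NULL; it = next) {		/* walk without the lock */
		next = TAILQ_NEXT(it, link);
		free(it, M_TEMP);
	}
}
#endif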

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(int pages)
{
	struct thread *td;

	thread_reap();		/* check if any zombies to get */

	td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK);
	KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
	if (!vm_thread_new(td, pages)) {
		uma_zfree(thread_zone, td);
		return (NULL);
	}
	cpu_thread_alloc(td);
	return (td);
}
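
/*
 * Typical consumer pattern (sketch only; cf. thr_new() in kern_thr.c
 * and kthread_add() in kern_kthread.c): allocate, link to a process,
 * and release with thread_free() on error paths.  Passing 0 pages is
 * assumed to select the default kernel stack size (KSTACK_PAGES).
 */
#if 0
	struct thread *newtd;

	newtd = thread_alloc(0);
	if (newtd == NULL)
		return (ENOMEM);
	/* ... copy credentials and initialize the pcb/trapframe ... */
	PROC_LOCK(p);
	thread_link(newtd, p);		/* proc lock held across linking */
	PROC_UNLOCK(p);
	/* A failure before the thread ever runs unwinds with: */
	thread_free(newtd);
#endif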

int
thread_alloc_stack(struct thread *td, int pages)
{

	KASSERT(td->td_kstack == 0,
	    ("thread_alloc_stack called on a thread with kstack"));
	if (!vm_thread_new(td, pages))
		return (0);
	cpu_thread_alloc(td);
	return (1);
}

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{

	lock_profile_thread_exit(td);
	if (td->td_cpuset)
		cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_free(td);
	if (td->td_kstack != 0)
		vm_thread_dispose(td);
	uma_zfree(thread_zone, td);
}

/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder.  This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 */
void
thread_exit(void)
{
	uint64_t new_switchtime;
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&Giant, MA_NOTOWNED);

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
	    (long)p->p_pid, td->td_name);
	KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));

#ifdef AUDIT
	AUDIT_SYSCALL_EXIT(0, td);
#endif
	umtx_thread_exit(td);
	/*
	 * Drop FPU & debug register state storage, or any other
	 * architecture-specific resources that would not be carried
	 * over to a new, untouched process.
	 */
	cpu_thread_exit(td);	/* XXXSMP */

	/* Do the same timestamp bookkeeping that mi_switch() would do. */
	new_switchtime = cpu_ticks();
	p->p_rux.rux_runtime += (new_switchtime - PCPU_GET(switchtime));
	PCPU_SET(switchtime, new_switchtime);
	PCPU_SET(switchticks, ticks);
	PCPU_INC(cnt.v_swtch);
	/* Save our resource usage in our process. */
	td->td_ru.ru_nvcsw++;
	rucollect(&p->p_ru, &td->td_ru);
	/*
	 * The last thread is left attached to the process so that the
	 * whole bundle gets recycled.  Skip all this stuff if we never
	 * had threads.  exit1() clears all signs of other threads when
	 * it goes to single threading, so the last thread always takes
	 * the short path.
	 */
	if (p->p_flag & P_HADTHREADS) {
		if (p->p_numthreads > 1) {
			thread_unlink(td);
			td2 = FIRST_THREAD_IN_PROC(p);
			sched_exit_thread(td2, td);

			/*
			 * The test below is NOT true if we are the
			 * sole exiting thread.  P_STOPPED_SINGLE is unset
			 * in exit1() after it is the only survivor.
			 */
			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
				if (p->p_numthreads == p->p_suspcount) {
					thread_lock(p->p_singlethread);
					wakeup_swapper = thread_unsuspend_one(
					    p->p_singlethread);
					thread_unlock(p->p_singlethread);
					if (wakeup_swapper)
						kick_proc0();
				}
			}

			atomic_add_int(&td->td_proc->p_exitthreads, 1);
			PCPU_SET(deadthread, td);
		} else {
			/*
			 * The last thread is exiting, but not through exit().
			 */
			panic("thread_exit: Last thread exiting on its own");
		}
	}
#ifdef HWPMC_HOOKS
	/*
	 * If this thread is part of a process that is being tracked by
	 * hwpmc(4), inform the module of the thread's impending exit.
	 */
	if (PMC_PROC_IS_USING_PMCS(td->td_proc))
		PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
#endif
	PROC_UNLOCK(p);
	ruxagg(p, td);
	thread_lock(td);
	PROC_SUNLOCK(p);
	td->td_state = TDS_INACTIVE;
#ifdef WITNESS
	witness_thread_exit(td);
#endif
	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
	sched_throw(td);
	panic("I'm a teapot!");
	/* NOTREACHED */
}
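
/*
 * Condensed, unverified sketch of how the deadthread handoff above
 * completes (cf. fork_exit() in kern_fork.c, which is assumed to do
 * this when the next thread starts running on this CPU): the stashed
 * thread moves to the zombie list and a later thread_reap() frees it.
 */
#if 0
	struct thread *dtd;

	if ((dtd = PCPU_GET(deadthread)) != NULL) {
		PCPU_SET(deadthread, NULL);
		thread_stash(dtd);	/* zombie list; freed in thread_reap() */
	}
#endif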

/*
 * Do any thread-specific cleanups that may be needed in wait().
 * Called with Giant, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	mtx_assert(&Giant, MA_NOTOWNED);
	KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
	td = FIRST_THREAD_IN_PROC(p);
	/* Lock the last thread so we spin until it exits cpu_throw(). */
	thread_lock(td);
	thread_unlock(td);
	/* Wait for any remaining threads to exit cpu_throw(). */
	while (p->p_exitthreads)
		sched_relinquish(curthread);
	lock_profile_thread_exit(td);
	cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_clean(td);
	crfree(td->td_ucred);
	thread_reap();	/* check for zombie threads etc. */
}
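
/*
 * The bare thread_lock()/thread_unlock() pair above is a handshake,
 * not data protection: an exiting thread keeps its thread lock across
 * the final context switch, and the lock is released on its behalf
 * only once that switch completes, so acquiring the lock here proves
 * the thread is fully off-CPU.  Idiom sketch (hypothetical victim):
 */
#if 0
	thread_lock(victim);	/* spins until victim's last switch is done */
	thread_unlock(victim);
	/* victim's stack and CPU state are now safe to tear down */
#endif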

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 */
void
thread_link(struct thread *td, struct proc *p)
{

	/*
	 * XXX This can't be enabled because it's called for proc0 before
	 * its lock has been created.
	 * PROC_LOCK_ASSERT(p, MA_OWNED);
	 */
	td->td_state = TDS_INACTIVE;
	td->td_proc = p;
	td->td_flags = TDF_INMEM;

	LIST_INIT(&td->td_contested);
	LIST_INIT(&td->td_lprof[0]);
	LIST_INIT(&td->td_lprof[1]);
	sigqueue_init(&td->td_sigqueue, p);
	callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
	p->p_numthreads++;
}

/*
 * Convert a process with one thread to an unthreaded process.
 */
void
thread_unthread(struct thread *td)
{
	struct proc *p = td->td_proc;

	KASSERT((p->p_numthreads == 1), ("Unthreading with >1 threads"));
	p->p_flag &= ~P_HADTHREADS;
}

/*
 * Called from:
 *	thread_exit()
 */
void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	/* could clear a few other things here */
	/* Must NOT clear links to proc! */
}

static int
calc_remaining(struct proc *p, int mode)
{
	int remaining;

	if (mode == SINGLE_EXIT)
		remaining = p->p_numthreads;
	else if (mode == SINGLE_BOUNDARY)
		remaining = p->p_numthreads - p->p_boundary_count;
	else if (mode == SINGLE_NO_EXIT)
		remaining = p->p_numthreads - p->p_suspcount;
	else
		panic("calc_remaining: wrong mode %d", mode);
	return (remaining);
}
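
/*
 * Worked example with hypothetical counts: for p_numthreads = 4,
 * p_suspcount = 2 and p_boundary_count = 1, calc_remaining() returns
 * 4 for SINGLE_EXIT (every other thread must go away), 3 for
 * SINGLE_BOUNDARY (the thread parked at the boundary no longer
 * counts) and 2 for SINGLE_NO_EXIT (the two suspended threads no
 * longer count).  thread_single() below loops until the value
 * reaches 1, i.e. only the requesting thread remains.
 */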

/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar).  Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single threaded in the suspend mode when
 * there are no threads in user mode.  Threads in the kernel must be
 * allowed to continue until they get to the user boundary.  They may even
 * copy out their return values and data before suspending.  They may
 * however be accelerated in reaching the user boundary as we will wake
 * up any sleeping threads that are interruptible (PCATCH).
 */
int
thread_single(int mode)
{
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int remaining, wakeup_swapper;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((td != NULL), ("curthread is NULL"));

	if ((p->p_flag & P_HADTHREADS) == 0)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread != NULL && p->p_singlethread != td)
		return (1);

	if (mode == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
		p->p_flag &= ~P_SINGLE_BOUNDARY;
	} else {
		p->p_flag &= ~P_SINGLE_EXIT;
		if (mode == SINGLE_BOUNDARY)
			p->p_flag |= P_SINGLE_BOUNDARY;
		else
			p->p_flag &= ~P_SINGLE_BOUNDARY;
	}
	p->p_flag |= P_STOPPED_SINGLE;
	PROC_SLOCK(p);
	p->p_singlethread = td;
	remaining = calc_remaining(p, mode);
	while (remaining != 1) {
		if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
			goto stopme;
		wakeup_swapper = 0;
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
			if (TD_IS_INHIBITED(td2)) {
				switch (mode) {
				case SINGLE_EXIT:
					if (TD_IS_SUSPENDED(td2))
						wakeup_swapper |=
						    thread_unsuspend_one(td2);
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR))
						wakeup_swapper |=
						    sleepq_abort(td2, EINTR);
					break;
				case SINGLE_BOUNDARY:
					if (TD_IS_SUSPENDED(td2) &&
					    !(td2->td_flags & TDF_BOUNDARY))
						wakeup_swapper |=
						    thread_unsuspend_one(td2);
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR))
						wakeup_swapper |=
						    sleepq_abort(td2, ERESTART);
					break;
				case SINGLE_NO_EXIT:
					if (TD_IS_SUSPENDED(td2) &&
					    !(td2->td_flags & TDF_BOUNDARY))
						wakeup_swapper |=
						    thread_unsuspend_one(td2);
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR))
						wakeup_swapper |=
						    sleepq_abort(td2, ERESTART);
					break;
				default:
					break;
				}
			}
#ifdef SMP
			else if (TD_IS_RUNNING(td2) && td != td2) {
				forward_signal(td2);
			}
#endif
			thread_unlock(td2);
		}
		if (wakeup_swapper)
			kick_proc0();
		remaining = calc_remaining(p, mode);

		/*
		 * Maybe we suspended some threads; was it enough?
		 */
		if (remaining == 1)
			break;

stopme:
		/*
		 * Wake us up when everyone else has suspended.
		 * In the mean time we suspend as well.
		 */
		thread_suspend_switch(td);
		remaining = calc_remaining(p, mode);
	}
	if (mode == SINGLE_EXIT) {
		/*
		 * We have gotten rid of all the other threads and we
		 * are about to either exit or exec.  In either case,
		 * we try our utmost to revert to being a non-threaded
		 * process.
		 */
		p->p_singlethread = NULL;
		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT);
		thread_unthread(td);
	}
	PROC_SUNLOCK(p);
	return (0);
}
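
/*
 * Typical call pattern (sketch; cf. exit1() and the execve() path):
 * the proc lock is held across the request, and a successful
 * non-exiting single-threading is paired with thread_single_end().
 */
#if 0
	PROC_LOCK(p);
	if (thread_single(SINGLE_BOUNDARY)) {
		/* Another thread is already single-threading us; back off. */
		PROC_UNLOCK(p);
		return (ERESTART);
	}
	/* ... operate on the now-quiescent process ... */
	thread_single_end();
	PROC_UNLOCK(p);
#endif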

/*
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is non-zero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          |   returns 0 or 1
 *               | when ST ends       |   immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       |   returns 1
 *               |                    |   immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (P_SHOULDSTOP(p) ||
	    ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_SUSPEND))) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading.  The single threader need
			 * not stop.
			 * XXX Should be safe to access unlocked
			 * as it can only be set to be true by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
			return (EINTR);

		/* Should we go to the user boundary if we didn't come from there? */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
			return (ERESTART);

		/* If thread will exit, flush its pending signals. */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
			sigqueue_flush(&td->td_sigqueue);

		PROC_SLOCK(p);
		thread_stopped(p);
		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
			thread_exit();
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount + 1) {
				thread_lock(p->p_singlethread);
				wakeup_swapper =
				    thread_unsuspend_one(p->p_singlethread);
				thread_unlock(p->p_singlethread);
				if (wakeup_swapper)
					kick_proc0();
			}
		}
		PROC_UNLOCK(p);
		thread_lock(td);
		/*
		 * When a thread suspends, it just
		 * gets taken off all queues.
		 */
		thread_suspend_one(td);
		if (return_instead == 0) {
			p->p_boundary_count++;
			td->td_flags |= TDF_BOUNDARY;
		}
		PROC_SUNLOCK(p);
		mi_switch(SW_INVOL | SWT_SUSPEND, NULL);
		if (return_instead == 0)
			td->td_flags &= ~TDF_BOUNDARY;
		thread_unlock(td);
		PROC_LOCK(p);
		if (return_instead == 0)
			p->p_boundary_count--;
	}
	return (0);
}
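
/*
 * Caller sketch: userret()-style code that may park here passes
 * return_instead = 0; code holding state it cannot suspend with
 * passes 1 and unwinds on error, trusting the check to be repeated
 * at the user boundary.  Hypothetical fragment:
 */
#if 0
	int error;

	PROC_LOCK(p);
	error = thread_suspend_check(1);	/* must not block here */
	PROC_UNLOCK(p);
	if (error != 0)
		return (error);		/* EINTR or ERESTART; retried later */
#endif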

void
thread_suspend_switch(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	/*
	 * We implement thread_suspend_one in stages here to avoid
	 * dropping the proc lock while the thread lock is owned.
	 */
	thread_stopped(p);
	p->p_suspcount++;
	PROC_UNLOCK(p);
	thread_lock(td);
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
	PROC_SUNLOCK(p);
	DROP_GIANT();
	mi_switch(SW_VOL | SWT_SUSPEND, NULL);
	thread_unlock(td);
	PICKUP_GIANT();
	PROC_LOCK(p);
	PROC_SLOCK(p);
}

void
thread_suspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
}

int
thread_unsuspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
	TD_CLR_SUSPENDED(td);
	p->p_suspcount--;
	return (setrunnable(td));
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	wakeup_swapper = 0;
	if (!P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td);
			}
			thread_unlock(td);
		}
	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
	    (p->p_numthreads == p->p_suspcount)) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request.  Now we've downgraded to single-threaded,
		 * let it continue.
		 */
		thread_lock(p->p_singlethread);
		wakeup_swapper = thread_unsuspend_one(p->p_singlethread);
		thread_unlock(p->p_singlethread);
	}
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * End the single-threading mode.
 */
void
thread_single_end(void)
{
	struct thread *td;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY);
	PROC_SLOCK(p);
	p->p_singlethread = NULL;
	wakeup_swapper = 0;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process.  The single threader must be allowed
	 * to continue however as this is a bad place to stop.
	 */
	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td);
			}
			thread_unlock(td);
		}
	}
	PROC_SUNLOCK(p);
	if (wakeup_swapper)
		kick_proc0();
}

struct thread *
thread_find(struct proc *p, lwpid_t tid)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_tid == tid)
			break;
	}
	return (td);
}
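
/*
 * Usage sketch (hypothetical caller): the proc lock both stabilizes
 * the p_threads list during the walk and keeps the returned thread
 * from being unlinked while the caller uses it.
 */
#if 0
	struct thread *ttd;

	PROC_LOCK(p);
	ttd = thread_find(p, tid);
	if (ttd == NULL) {
		PROC_UNLOCK(p);
		return (ESRCH);
	}
	/* ... use ttd while the proc lock is held ... */
	PROC_UNLOCK(p);
#endif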