/*-
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/turnstile.h>
#include <sys/ktr.h>
#include <sys/umtx.h>

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <sys/eventhandler.h>

/*
 * thread related storage.
 */
static uma_zone_t thread_zone;

SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");

int max_threads_per_proc = 1500;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
    &max_threads_per_proc, 0, "Limit on threads per proc");

int max_threads_hits;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
    &max_threads_hits, 0, "Count of times the per-process thread limit was hit");
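
/*
 * Illustrative usage from userland via sysctl(8); the MIB names follow
 * from the SYSCTL_NODE and SYSCTL_INT declarations above:
 *
 *	$ sysctl kern.threads.max_threads_per_proc
 *	# sysctl kern.threads.max_threads_per_proc=5000
 */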

#ifdef KSE
int virtual_cpu;
#endif

TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
static struct mtx zombie_lock;
MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN);

static void thread_zombie(struct thread *);

#ifdef KSE
static int
sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;
	int def_val;

	def_val = mp_ncpus;
	if (virtual_cpu == 0)
		new_val = def_val;
	else
		new_val = virtual_cpu;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < 0)
		return (EINVAL);
	virtual_cpu = new_val;
	return (0);
}

/* DEBUG ONLY */
SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW,
    0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I",
    "debug virtual cpus");
#endif
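
/*
 * The handler above follows the usual two-phase sysctl protocol:
 * sysctl_handle_int() first copies the current value out to the
 * request, then, only if req->newptr is non-NULL, copies a new value
 * in, after which the handler validates and commits it.  A minimal
 * handler built on the same protocol might look like this (a sketch
 * only, not part of this file's interface; "example_val" is a
 * hypothetical variable):
 *
 *	static int
 *	sysctl_example(SYSCTL_HANDLER_ARGS)
 *	{
 *		int error, val;
 *
 *		val = example_val;
 *		error = sysctl_handle_int(oidp, &val, 0, req);
 *		if (error != 0 || req->newptr == NULL)
 *			return (error);
 *		example_val = val;
 *		return (0);
 *	}
 */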

struct mtx tid_lock;
static struct unrhdr *tid_unrhdr;

/*
 * Prepare a thread for use.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_oncpu = NOCPU;

	td->td_tid = alloc_unr(tid_unrhdr);
	td->td_syscalls = 0;

	/*
	 * Note that td_critnest begins life as 1 because the thread is not
	 * running and is thereby implicitly waiting to be on the receiving
	 * end of a context switch.
	 */
	td->td_critnest = 1;
	EVENTHANDLER_INVOKE(thread_ctor, td);
#ifdef AUDIT
	audit_thread_alloc(td);
#endif
	umtx_thread_alloc(td);
	return (0);
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
#ifdef AUDIT
	audit_thread_free(td);
#endif
	EVENTHANDLER_INVOKE(thread_dtor, td);
	free_unr(tid_unrhdr, td->td_tid);
	sched_newthread(td);
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static int
thread_init(void *mem, int size, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;

	td->td_sleepqueue = sleepq_alloc();
	td->td_turnstile = turnstile_alloc();
	EVENTHANDLER_INVOKE(thread_init, td);
	td->td_sched = (struct td_sched *)&td[1];
	sched_newthread(td);
	umtx_thread_init(td);
	td->td_kstack = 0;
	return (0);
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	EVENTHANDLER_INVOKE(thread_fini, td);
	turnstile_free(td->td_turnstile);
	sleepq_free(td->td_sleepqueue);
	umtx_thread_fini(td);
}

/*
 * For a newly created process, link up all the structures and its initial
 * threads etc.
 * Called from:
 *	{arch}/{arch}/machdep.c		ia64_init(), init386() etc.
 *	proc_dtor()			(should go away)
 *	proc_init()
 */
void
proc_linkup0(struct proc *p, struct thread *td)
{
	TAILQ_INIT(&p->p_threads);	/* all threads in proc */
	proc_linkup(p, td);
}

void
proc_linkup(struct proc *p, struct thread *td)
{

#ifdef KSE
	TAILQ_INIT(&p->p_upcalls);	/* upcall list */
#endif
	sigqueue_init(&p->p_sigqueue, p);
	p->p_ksi = ksiginfo_alloc(1);
	if (p->p_ksi != NULL) {
		/* XXX p_ksi may be null if ksiginfo zone is not ready */
		p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
	}
	LIST_INIT(&p->p_mqnotifier);
	p->p_numthreads = 0;
	thread_link(td, p);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

	mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);
	tid_unrhdr = new_unrhdr(PID_MAX + 1, INT_MAX, &tid_lock);

	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    16 - 1, 0);
#ifdef KSE
	kseinit();	/* set up KSE-specific stuff, e.g. the upcall zone */
#endif
}
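
/*
 * A note on how the four zone callbacks registered above interact (a
 * summary of the UMA contract, not code from this file):
 * thread_init()/thread_fini() run once per item as backing memory
 * enters or leaves the zone, so they manage the type-stable pieces
 * (sleep queue, turnstile).  thread_ctor()/thread_dtor() run on every
 * uma_zalloc()/uma_zfree(), so they manage per-incarnation state such
 * as the TID.  Roughly:
 *
 *	td = uma_zalloc(thread_zone, M_WAITOK);
 *		-> thread_init(td)	(only for fresh slab memory)
 *		-> thread_ctor(td)	(every allocation)
 *	uma_zfree(thread_zone, td);
 *		-> thread_dtor(td)	(every free)
 *		-> thread_fini(td)	(only when the slab is reclaimed)
 */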

/*
 * Place an unused thread on the zombie list.
 * Use the slpq as that must be unused by now.
 */
void
thread_zombie(struct thread *td)
{
	mtx_lock_spin(&zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq);
	mtx_unlock_spin(&zombie_lock);
}

/*
 * Release a thread that has exited after cpu_throw().
 */
void
thread_stash(struct thread *td)
{
	atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
	thread_zombie(td);
}

/*
 * Reap zombie threads (and, under KSE, zombie upcall resources).
 */
void
thread_reap(void)
{
	struct thread *td_first, *td_next;

	/*
	 * Don't even bother to lock if none at this instant;
	 * we really don't care about the next instant.
	 */
	if (!TAILQ_EMPTY(&zombie_threads)) {
		mtx_lock_spin(&zombie_lock);
		td_first = TAILQ_FIRST(&zombie_threads);
		if (td_first)
			TAILQ_INIT(&zombie_threads);
		mtx_unlock_spin(&zombie_lock);
		while (td_first) {
			td_next = TAILQ_NEXT(td_first, td_slpq);
			if (td_first->td_ucred)
				crfree(td_first->td_ucred);
			thread_free(td_first);
			td_first = td_next;
		}
	}
#ifdef KSE
	upcall_reap();
#endif
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(void)
{
	struct thread *td;

	thread_reap(); /* check if any zombies to get */

	td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK);
	KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
	if (!vm_thread_new(td, 0)) {
		uma_zfree(thread_zone, td);
		return (NULL);
	}
	cpu_thread_setup(td);
	return (td);
}
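
/*
 * Typical caller pattern, as an illustrative sketch only (proc_init()
 * and thr_create() are among the real consumers, and they do
 * considerably more setup than shown here):
 *
 *	struct thread *newtd;
 *
 *	newtd = thread_alloc();
 *	if (newtd == NULL)
 *		return (ENOMEM);	(kernel stack allocation failed)
 *	(initialize pcb, ucred, start address, ...)
 *	thread_link(newtd, p);
 */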

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{

	cpu_thread_clean(td);
	if (td->td_altkstack != 0)
		vm_thread_dispose_altkstack(td);
	if (td->td_kstack != 0)
		vm_thread_dispose(td);
	uma_zfree(thread_zone, td);
}

/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's dead-thread holder.  This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().  This may not be needed now as we are under schedlock.
 * Maybe we can just do a thread_stash() as thr_exit1 does.
 */
/* XXX
 * libthr expects its thread exit to return for the last
 * thread, meaning that the program is back to non-threaded
 * mode.  Because we do this (cpu_throw) unconditionally here,
 * libthr has its own version of it, thr_exit1(), that doesn't
 * do it all if this was the last thread.
 * It is also called from thread_suspend_check().
 * Of course in the end they end up coming here through exit1()
 * anyhow.  After fixing 'thr' to play by the rules we should be
 * able to merge these two functions together.
 *
 * called from:
 * exit1()
 * kse_exit()
 * thr_exit()
 * ifdef KSE
 * thread_user_enter()
 * thread_userret()
 * endif
 * thread_suspend_check()
 */
void
thread_exit(void)
{
	uint64_t new_switchtime;
	struct thread *td;
	struct thread *td2;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	KASSERT(p != NULL, ("thread exiting without a process"));

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
	    (long)p->p_pid, p->p_comm);
	KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));

#ifdef AUDIT
	AUDIT_SYSCALL_EXIT(0, td);
#endif

#ifdef KSE
	if (td->td_standin != NULL) {
		/*
		 * Note that we don't need to free the cred here as it
		 * is done in thread_reap().
		 */
		thread_zombie(td->td_standin);
		td->td_standin = NULL;
	}
#endif

	umtx_thread_exit(td);

	/*
	 * Drop FPU & debug register state storage, or any other
	 * architecture-specific resources that
	 * would not be on a new untouched process.
	 */
	cpu_thread_exit(td);	/* XXXSMP */

	/* Do the same timestamp bookkeeping that mi_switch() would do. */
	new_switchtime = cpu_ticks();
	p->p_rux.rux_runtime += (new_switchtime - PCPU_GET(switchtime));
	PCPU_SET(switchtime, new_switchtime);
	PCPU_SET(switchticks, ticks);
	PCPU_INC(cnt.v_swtch);
	/* Save our resource usage in our process. */
	td->td_ru.ru_nvcsw++;
	rucollect(&p->p_ru, &td->td_ru);
	/*
	 * The last thread is left attached to the process
	 * so that the whole bundle gets recycled.  Skip
	 * all this stuff if we never had threads.
	 * EXIT clears all sign of other threads when
	 * it goes to single threading, so the last thread always
	 * takes the short path.
	 */
	if (p->p_flag & P_HADTHREADS) {
		if (p->p_numthreads > 1) {
			thread_lock(td);
#ifdef KSE
			kse_unlink(td);
#else
			thread_unlink(td);
#endif
			thread_unlock(td);
			td2 = FIRST_THREAD_IN_PROC(p);
			sched_exit_thread(td2, td);

			/*
			 * The test below is NOT true if we are the
			 * sole exiting thread.  P_STOPPED_SINGLE is unset
			 * in exit1() after it is the only survivor.
			 */
			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
				if (p->p_numthreads == p->p_suspcount) {
					thread_lock(p->p_singlethread);
					thread_unsuspend_one(p->p_singlethread);
					thread_unlock(p->p_singlethread);
				}
			}

			atomic_add_int(&td->td_proc->p_exitthreads, 1);
			PCPU_SET(deadthread, td);
		} else {
			/*
			 * The last thread is exiting, but not through exit();
			 * what should we do?  Theoretically this can't happen:
			 * exit1() - clears threading flags before coming here
			 * kse_exit() - treats last thread specially
			 * thr_exit() - treats last thread specially
			 * ifdef KSE
			 * thread_user_enter() - only if more exist
			 * thread_userret() - only if more exist
			 * endif
			 * thread_suspend_check() - only if more exist
			 */
			panic("thread_exit: Last thread exiting on its own");
		}
	}
	PROC_UNLOCK(p);
	thread_lock(td);
	/* Save our tick information with both the thread and proc locked. */
	ruxagg(&p->p_rux, td);
	PROC_SUNLOCK(p);
	td->td_state = TDS_INACTIVE;
	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
	sched_throw(td);
	panic("I'm a teapot!");
	/* NOTREACHED */
}

/*
 * Do any thread-specific cleanups that may be needed in wait().
 * Called with Giant, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	mtx_assert(&Giant, MA_NOTOWNED);
	KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
	td = FIRST_THREAD_IN_PROC(p);
#ifdef KSE
	if (td->td_standin != NULL) {
		if (td->td_standin->td_ucred != NULL) {
			crfree(td->td_standin->td_ucred);
			td->td_standin->td_ucred = NULL;
		}
		thread_free(td->td_standin);
		td->td_standin = NULL;
	}
#endif
	/* Lock the last thread so we spin until it exits cpu_throw(). */
	thread_lock(td);
	thread_unlock(td);
	/* Wait for any remaining threads to exit cpu_throw(). */
	while (p->p_exitthreads)
		sched_relinquish(curthread);
	cpu_thread_clean(td);
	crfree(td->td_ucred);
	thread_reap();	/* check for zombie threads etc. */
}
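
/*
 * A note on the lock/unlock handshake above (an interpretation of the
 * locking protocol, not extra behavior): the dying thread still owns
 * its thread lock while it switches away for the last time, and that
 * lock is only released once the thread is truly off-CPU.  Acquiring
 * and immediately dropping the lock here therefore spins until the
 * final context switch has completed, after which it is safe to free
 * the thread's resources.
 */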

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 *
 * Note that we do not link to the proc's ucred here.
 * The thread is linked as if running but no KSE assigned.
 * Called from:
 *	proc_linkup()
 *	thread_schedule_upcall()
 *	thr_create()
 */
void
thread_link(struct thread *td, struct proc *p)
{

	/*
	 * XXX This can't be enabled because it's called for proc0 before
	 * its spinlock has been created.
	 * PROC_SLOCK_ASSERT(p, MA_OWNED);
	 */
	td->td_state = TDS_INACTIVE;
	td->td_proc = p;
	td->td_flags = TDF_INMEM;

	LIST_INIT(&td->td_contested);
	sigqueue_init(&td->td_sigqueue, p);
	callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
	p->p_numthreads++;
}

/*
 * Convert a process with one thread to an unthreaded process.
 * Called from:
 *	thread_single(exit)  (called from execve and exit)
 *	kse_exit()	XXX may need cleaning up wrt KSE stuff
 */
void
thread_unthread(struct thread *td)
{
	struct proc *p = td->td_proc;

	KASSERT((p->p_numthreads == 1), ("Unthreading with >1 threads"));
#ifdef KSE
	thread_lock(td);
	upcall_remove(td);
	thread_unlock(td);
	p->p_flag &= ~(P_SA|P_HADTHREADS);
	td->td_mailbox = NULL;
	td->td_pflags &= ~(TDP_SA | TDP_CAN_UNBIND);
	if (td->td_standin != NULL) {
		thread_zombie(td->td_standin);
		td->td_standin = NULL;
	}
#else
	p->p_flag &= ~P_HADTHREADS;
#endif
}

/*
 * Called from:
 *	thread_exit()
 */
void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	/* could clear a few other things here */
	/* Must NOT clear links to proc! */
}

/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar).  Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single-threaded in the suspend mode when
 * there are no threads in user mode.  Threads in the kernel must be
 * allowed to continue until they get to the user boundary.  They may even
 * copy out their return values and data before suspending.  They may
 * however be accelerated in reaching the user boundary as we will wake
 * up any sleeping threads that are interruptible (PCATCH).
 */
int
thread_single(int mode)
{
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int remaining;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((td != NULL), ("curthread is NULL"));

	if ((p->p_flag & P_HADTHREADS) == 0)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread != NULL && p->p_singlethread != td)
		return (1);

	if (mode == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
		p->p_flag &= ~P_SINGLE_BOUNDARY;
	} else {
		p->p_flag &= ~P_SINGLE_EXIT;
		if (mode == SINGLE_BOUNDARY)
			p->p_flag |= P_SINGLE_BOUNDARY;
		else
			p->p_flag &= ~P_SINGLE_BOUNDARY;
	}
	p->p_flag |= P_STOPPED_SINGLE;
	PROC_SLOCK(p);
	p->p_singlethread = td;
	if (mode == SINGLE_EXIT)
		remaining = p->p_numthreads;
	else if (mode == SINGLE_BOUNDARY)
		remaining = p->p_numthreads - p->p_boundary_count;
	else
		remaining = p->p_numthreads - p->p_suspcount;
	while (remaining != 1) {
		if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
			goto stopme;
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			td2->td_flags |= TDF_ASTPENDING;
			if (TD_IS_INHIBITED(td2)) {
				switch (mode) {
				case SINGLE_EXIT:
					/*
					 * Clear the target thread's debugger
					 * suspension so it can proceed to
					 * the exit point.
					 */
					if (td2->td_flags & TDF_DBSUSPEND)
						td2->td_flags &= ~TDF_DBSUSPEND;
					if (TD_IS_SUSPENDED(td2))
						thread_unsuspend_one(td2);
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR))
						sleepq_abort(td2, EINTR);
					break;
				case SINGLE_BOUNDARY:
					if (TD_IS_SUSPENDED(td2) &&
					    !(td2->td_flags & TDF_BOUNDARY))
						thread_unsuspend_one(td2);
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR))
						sleepq_abort(td2, ERESTART);
					break;
				default:
					if (TD_IS_SUSPENDED(td2)) {
						thread_unlock(td2);
						continue;
					}
					/*
					 * maybe other inhibited states too?
					 */
					if ((td2->td_flags & TDF_SINTR) &&
					    (td2->td_inhibitors &
					    (TDI_SLEEPING | TDI_SWAPPED)))
						thread_suspend_one(td2);
					break;
				}
			}
#ifdef SMP
			else if (TD_IS_RUNNING(td2) && td != td2) {
				forward_signal(td2);
			}
#endif
			thread_unlock(td2);
		}
		if (mode == SINGLE_EXIT)
			remaining = p->p_numthreads;
		else if (mode == SINGLE_BOUNDARY)
			remaining = p->p_numthreads - p->p_boundary_count;
		else
			remaining = p->p_numthreads - p->p_suspcount;

		/*
		 * Maybe we suspended some threads.. was it enough?
		 */
		if (remaining == 1)
			break;

stopme:
		/*
		 * Wake us up when everyone else has suspended.
		 * In the mean time we suspend as well.
		 */
		thread_suspend_switch(td);
		if (mode == SINGLE_EXIT)
			remaining = p->p_numthreads;
		else if (mode == SINGLE_BOUNDARY)
			remaining = p->p_numthreads - p->p_boundary_count;
		else
			remaining = p->p_numthreads - p->p_suspcount;
	}
	if (mode == SINGLE_EXIT) {
		/*
		 * We have gotten rid of all the other threads and we
		 * are about to either exit or exec.  In either case,
		 * we try our utmost to revert to being a non-threaded
		 * process.
		 */
		p->p_singlethread = NULL;
		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT);
		thread_unthread(td);
	}
	PROC_SUNLOCK(p);
	return (0);
}
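
/*
 * Caller protocol for thread_single(), sketched (illustrative only;
 * see exit1() and kern_execve() for the real uses):
 *
 *	PROC_LOCK(p);
 *	if (thread_single(SINGLE_BOUNDARY)) {
 *		PROC_UNLOCK(p);
 *		return (ERESTART);	(another thread won the race)
 *	}
 *	(operate on the now single-threaded process)
 *	thread_single_end();
 *	PROC_UNLOCK(p);
 *
 * SINGLE_EXIT callers never reach thread_single_end(); thread_single()
 * itself reverts the process to unthreaded via thread_unthread().
 */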

/*
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is non-zero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          | returns 0 or 1
 *               | when ST ends       | immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       | returns 1
 *               |                    | immediately
 *
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (P_SHOULDSTOP(p) ||
	    ((p->p_flag & P_TRACED) && (td->td_flags & TDF_DBSUSPEND))) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading.  Single threader need not stop.
			 * XXX Should be safe to access unlocked
			 * as it can only be set to be true by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
			return (EINTR);

		/* Should we goto user boundary if we didn't come from there? */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
			return (ERESTART);

		/* If thread will exit, flush its pending signals */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
			sigqueue_flush(&td->td_sigqueue);

		PROC_SLOCK(p);
		thread_stopped(p);
		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
			thread_exit();
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount + 1) {
				thread_lock(p->p_singlethread);
				thread_unsuspend_one(p->p_singlethread);
				thread_unlock(p->p_singlethread);
			}
		}
		PROC_UNLOCK(p);
		thread_lock(td);
		/*
		 * When a thread suspends, it just
		 * gets taken off all queues.
		 */
		thread_suspend_one(td);
		if (return_instead == 0) {
			p->p_boundary_count++;
			td->td_flags |= TDF_BOUNDARY;
		}
		PROC_SUNLOCK(p);
		mi_switch(SW_INVOL, NULL);
		if (return_instead == 0)
			td->td_flags &= ~TDF_BOUNDARY;
		thread_unlock(td);
		PROC_LOCK(p);
		if (return_instead == 0)
			p->p_boundary_count--;
	}
	return (0);
}
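
/*
 * Sketch of the canonical call site (illustrative; the real checks in
 * userret() and ast() are gated on P_SHOULDSTOP()-style flag tests
 * before taking the proc lock):
 *
 *	PROC_LOCK(p);
 *	thread_suspend_check(0);	(may suspend here, or never
 *					 return if the process is
 *					 single-threading for exit)
 *	PROC_UNLOCK(p);
 */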

void
thread_suspend_switch(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	/*
	 * We implement thread_suspend_one in stages here to avoid
	 * dropping the proc lock while the thread lock is owned.
	 */
	thread_stopped(p);
	p->p_suspcount++;
	PROC_UNLOCK(p);
	thread_lock(td);
	sched_sleep(td);
	TD_SET_SUSPENDED(td);
	PROC_SUNLOCK(p);
	DROP_GIANT();
	mi_switch(SW_VOL, NULL);
	thread_unlock(td);
	PICKUP_GIANT();
	PROC_LOCK(p);
	PROC_SLOCK(p);
}

void
thread_suspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	sched_sleep(td);
	TD_SET_SUSPENDED(td);
}

void
thread_unsuspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
	TD_CLR_SUSPENDED(td);
	p->p_suspcount--;
	setrunnable(td);
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	if (!P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				thread_unsuspend_one(td);
			}
			thread_unlock(td);
		}
	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
	    (p->p_numthreads == p->p_suspcount)) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request.  Now we've downgraded to single-threaded,
		 * let it continue.
		 */
		thread_lock(p->p_singlethread);
		thread_unsuspend_one(p->p_singlethread);
		thread_unlock(p->p_singlethread);
	}
}

/*
 * End the single threading mode.
 */
void
thread_single_end(void)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY);
	PROC_SLOCK(p);
	p->p_singlethread = NULL;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process.  The single threader must be allowed
	 * to continue however as this is a bad place to stop.
	 */
	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				thread_unsuspend_one(td);
			}
			thread_unlock(td);
		}
	}
	PROC_SUNLOCK(p);
}

struct thread *
thread_find(struct proc *p, lwpid_t tid)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK(p);
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_tid == tid)
			break;
	}
	PROC_SUNLOCK(p);
	return (td);
}
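
/*
 * Usage sketch for thread_find() (illustrative; the kern_thr.c
 * syscalls follow this shape).  Note that the loop above leaves td
 * NULL when no thread matches:
 *
 *	PROC_LOCK(p);
 *	ttd = thread_find(p, tid);
 *	if (ttd == NULL) {
 *		PROC_UNLOCK(p);
 *		return (ESRCH);
 *	}
 *	(use ttd; the proc lock keeps it from being unlinked)
 *	PROC_UNLOCK(p);
 */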