/*-
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/turnstile.h>
#include <sys/ktr.h>
#include <sys/umtx.h>
#include <sys/cpuset.h>

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <sys/eventhandler.h>
/*
 * Thread-related storage.
 */
static uma_zone_t thread_zone;

SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");

int max_threads_per_proc = 1500;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
    &max_threads_per_proc, 0, "Limit on threads per proc");
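
/*
 * The limit above can be retuned at run time, e.g. (illustrative):
 *	sysctl kern.threads.max_threads_per_proc=5000
 */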

int max_threads_hits;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
    &max_threads_hits, 0, "Number of times the per-process thread limit was hit");

#ifdef KSE
int virtual_cpu;
#endif
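
/*
 * Threads that have exited but not yet been reclaimed are parked on
 * this list until thread_reap() frees them.  A spin mutex protects the
 * list because threads are queued from contexts, such as the scheduler,
 * where sleeping is not permitted.
 */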
TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
static struct mtx zombie_lock;
MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN);

static void thread_zombie(struct thread *);

#ifdef KSE
static int
sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;
	int def_val;

	def_val = mp_ncpus;
	if (virtual_cpu == 0)
		new_val = def_val;
	else
		new_val = virtual_cpu;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < 0)
		return (EINVAL);
	virtual_cpu = new_val;
	return (0);
}

/* DEBUG ONLY */
SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW,
    0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I",
    "debug virtual cpus");
#endif

struct mtx tid_lock;
static struct unrhdr *tid_unrhdr;

/*
 * Prepare a thread for use.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_oncpu = NOCPU;

	td->td_tid = alloc_unr(tid_unrhdr);
	td->td_syscalls = 0;
	td->td_incruntime = 0;

	/*
	 * Note that td_critnest begins life as 1 because the thread is not
	 * running and is thereby implicitly waiting to be on the receiving
	 * end of a context switch.
	 */
	td->td_critnest = 1;
	EVENTHANDLER_INVOKE(thread_ctor, td);
#ifdef AUDIT
	audit_thread_alloc(td);
#endif
	umtx_thread_alloc(td);
	return (0);
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
#ifdef AUDIT
	audit_thread_free(td);
#endif
	EVENTHANDLER_INVOKE(thread_dtor, td);
	free_unr(tid_unrhdr, td->td_tid);
	sched_newthread(td);
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static int
thread_init(void *mem, int size, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;

	td->td_sleepqueue = sleepq_alloc();
	td->td_turnstile = turnstile_alloc();
	EVENTHANDLER_INVOKE(thread_init, td);
	td->td_sched = (struct td_sched *)&td[1];
	sched_newthread(td);
	umtx_thread_init(td);
	td->td_kstack = 0;
	td->td_fpop = NULL;
	return (0);
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	EVENTHANDLER_INVOKE(thread_fini, td);
	turnstile_free(td->td_turnstile);
	sleepq_free(td->td_sleepqueue);
	umtx_thread_fini(td);
}

/*
 * For a newly created process, link up all the structures and the
 * initial thread.  Called from:
 *	{arch}/{arch}/machdep.c  (ia64_init(), init386() etc.)
 *	proc_dtor() (should go away)
 *	proc_init()
 */
void
proc_linkup0(struct proc *p, struct thread *td)
{
	TAILQ_INIT(&p->p_threads);		/* all threads in proc */
	proc_linkup(p, td);
}

void
proc_linkup(struct proc *p, struct thread *td)
{

#ifdef KSE
	TAILQ_INIT(&p->p_upcalls);		/* upcall list */
#endif
	sigqueue_init(&p->p_sigqueue, p);
	p->p_ksi = ksiginfo_alloc(1);
	if (p->p_ksi != NULL) {
		/* XXX p_ksi may be null if ksiginfo zone is not ready */
		p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
	}
	LIST_INIT(&p->p_mqnotifier);
	p->p_numthreads = 0;
	thread_link(td, p);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

	mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);
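	/*
	 * TIDs are allocated from above PID_MAX so that a thread ID
	 * can never collide with a process ID.
	 */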
	tid_unrhdr = new_unrhdr(PID_MAX + 1, INT_MAX, &tid_lock);

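	/*
	 * The '16 - 1' argument is UMA's alignment mask, i.e. thread
	 * structures are aligned to 16-byte boundaries.
	 */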
	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    16 - 1, 0);
#ifdef KSE
	kseinit();	/* set up kse specific stuff, e.g. upcall zone */
#endif
}

/*
 * Place an unused thread on the zombie list.
 * Use the slpq as that must be unused by now.
 */
void
thread_zombie(struct thread *td)
{
	mtx_lock_spin(&zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq);
	mtx_unlock_spin(&zombie_lock);
}

/*
 * Release a thread that has exited after cpu_throw().
 */
void
thread_stash(struct thread *td)
{
	atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
	thread_zombie(td);
}

/*
 * Reap zombie threads (and, under KSE, zombie upcall resources).
 */
void
thread_reap(void)
{
	struct thread *td_first, *td_next;

	/*
	 * Don't even bother to lock if none at this instant;
	 * we really don't care about the next instant.
	 */
	if (!TAILQ_EMPTY(&zombie_threads)) {
		mtx_lock_spin(&zombie_lock);
		td_first = TAILQ_FIRST(&zombie_threads);
		if (td_first)
			TAILQ_INIT(&zombie_threads);
		mtx_unlock_spin(&zombie_lock);
		while (td_first) {
			td_next = TAILQ_NEXT(td_first, td_slpq);
			if (td_first->td_ucred)
				crfree(td_first->td_ucred);
			thread_free(td_first);
			td_first = td_next;
		}
	}
#ifdef KSE
	upcall_reap();
#endif
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(void)
{
	struct thread *td;

	thread_reap();	/* check if any zombies to get */

	td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK);
	KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
	if (!vm_thread_new(td, 0)) {
		uma_zfree(thread_zone, td);
		return (NULL);
	}
	cpu_thread_alloc(td);
	return (td);
}
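
/*
 * Illustrative sketch of the expected pairing (not a real caller; see
 * thr_create() for the genuine sequence, including the exact locking):
 *
 *	newtd = thread_alloc();
 *	if (newtd == NULL)
 *		return (ENOMEM);
 *	cpu_set_upcall(newtd, td);
 *	PROC_LOCK(p);
 *	PROC_SLOCK(p);
 *	thread_link(newtd, p);
 *	PROC_SUNLOCK(p);
 *	PROC_UNLOCK(p);
 */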

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{
	if (td->td_cpuset)
		cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_free(td);
	if (td->td_altkstack != 0)
		vm_thread_dispose_altkstack(td);
	if (td->td_kstack != 0)
		vm_thread_dispose(td);
	uma_zfree(thread_zone, td);
}

/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder.  This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().  This may not be needed now as we are under schedlock.
 * Maybe we can just do a thread_stash() as thr_exit1 does.
 */
/* XXX
 * libthr expects its thread exit to return for the last
 * thread, meaning that the program is back to non-threaded
 * mode.  Because we do this (cpu_throw) unconditionally here,
 * libthr has its own version of it (thr_exit1()) that skips
 * it when this was the last thread.
 * It is also called from thread_suspend_check().
 * Of course in the end they still come here through exit1()
 * anyhow.  After fixing 'thr' to play by the rules we should
 * be able to merge these two functions together.
 *
 * called from:
 *	exit1()
 *	kse_exit()
 *	thr_exit()
 *	ifdef KSE
 *	thread_user_enter()
 *	thread_userret()
 *	endif
 *	thread_suspend_check()
 */
void
thread_exit(void)
{
	uint64_t new_switchtime;
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&Giant, MA_NOTOWNED);

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
	    (long)p->p_pid, p->p_comm);
	KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));

#ifdef AUDIT
	AUDIT_SYSCALL_EXIT(0, td);
#endif

#ifdef KSE
	if (td->td_standin != NULL) {
		/*
		 * Note that we don't need to free the cred here as it
		 * is done in thread_reap().
		 */
		thread_zombie(td->td_standin);
		td->td_standin = NULL;
	}
#endif

	umtx_thread_exit(td);

	/*
	 * Drop FPU and debug register state storage, or any other
	 * architecture-specific resources that
	 * would not be on a new untouched process.
	 */
	cpu_thread_exit(td);	/* XXXSMP */

	/* Do the same timestamp bookkeeping that mi_switch() would do. */
	new_switchtime = cpu_ticks();
	p->p_rux.rux_runtime += (new_switchtime - PCPU_GET(switchtime));
	PCPU_SET(switchtime, new_switchtime);
	PCPU_SET(switchticks, ticks);
	PCPU_INC(cnt.v_swtch);
	/* Save our resource usage in our process. */
	td->td_ru.ru_nvcsw++;
	rucollect(&p->p_ru, &td->td_ru);
	/*
	 * The last thread is left attached to the process so that the
	 * whole bundle gets recycled.  Skip all this stuff if we never
	 * had threads.  exit1() clears all signs of other threads when
	 * it goes to single threading, so the last thread always takes
	 * the short path.
	 */
	if (p->p_flag & P_HADTHREADS) {
		if (p->p_numthreads > 1) {
			thread_lock(td);
#ifdef KSE
			kse_unlink(td);
#else
			thread_unlink(td);
#endif
			thread_unlock(td);
			td2 = FIRST_THREAD_IN_PROC(p);
			sched_exit_thread(td2, td);

			/*
			 * The test below is NOT true if we are the
			 * sole exiting thread.  P_STOPPED_SINGLE is unset
			 * in exit1() after it is the only survivor.
			 */
			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
				if (p->p_numthreads == p->p_suspcount) {
					thread_lock(p->p_singlethread);
					wakeup_swapper = thread_unsuspend_one(
					    p->p_singlethread);
					thread_unlock(p->p_singlethread);
					if (wakeup_swapper)
						kick_proc0();
				}
			}

			atomic_add_int(&td->td_proc->p_exitthreads, 1);
			PCPU_SET(deadthread, td);
		} else {
			/*
			 * The last thread is exiting, but not through exit().
			 * What should we do?  Theoretically this can't happen:
			 *  exit1() - clears threading flags before coming here
			 *  kse_exit() - treats last thread specially
			 *  thr_exit() - treats last thread specially
			 *  ifdef KSE
			 *  thread_user_enter() - only if more exist
			 *  thread_userret() - only if more exist
			 *  endif
			 *  thread_suspend_check() - only if more exist
			 */
			panic("thread_exit: Last thread exiting on its own");
		}
	}
	PROC_UNLOCK(p);
	thread_lock(td);
	/* Save our tick information with both the thread and proc locked */
	ruxagg(&p->p_rux, td);
	PROC_SUNLOCK(p);
	td->td_state = TDS_INACTIVE;
	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
	sched_throw(td);
	panic("I'm a teapot!");
	/* NOTREACHED */
}

/*
 * Do any thread-specific cleanups that may be needed in wait().
 * Called with Giant, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	mtx_assert(&Giant, MA_NOTOWNED);
	KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
	td = FIRST_THREAD_IN_PROC(p);
#ifdef KSE
	if (td->td_standin != NULL) {
		if (td->td_standin->td_ucred != NULL) {
			crfree(td->td_standin->td_ucred);
			td->td_standin->td_ucred = NULL;
		}
		thread_free(td->td_standin);
		td->td_standin = NULL;
	}
#endif
	/* Lock the last thread so we spin until it exits cpu_throw(). */
	thread_lock(td);
	thread_unlock(td);
	/* Wait for any remaining threads to exit cpu_throw(). */
	while (p->p_exitthreads)
		sched_relinquish(curthread);
	cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_clean(td);
	crfree(td->td_ucred);
	thread_reap();	/* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 *
 * Note that we do not link to the proc's ucred here.
 * The thread is linked as if running but no KSE assigned.
 * Called from:
 *	proc_linkup()
 *	thread_schedule_upcall()
 *	thr_create()
 */
void
thread_link(struct thread *td, struct proc *p)
{

	/*
	 * XXX This can't be enabled because it's called for proc0 before
	 * its spinlock has been created.
	 * PROC_SLOCK_ASSERT(p, MA_OWNED);
	 */
	td->td_state = TDS_INACTIVE;
	td->td_proc = p;
	td->td_flags = TDF_INMEM;

	LIST_INIT(&td->td_contested);
	sigqueue_init(&td->td_sigqueue, p);
	callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
	p->p_numthreads++;
}

/*
 * Convert a process with one thread to an unthreaded process.
 * Called from:
 *	thread_single(exit)  (called from execve and exit)
 *	kse_exit() XXX may need cleaning up wrt KSE stuff
 */
void
thread_unthread(struct thread *td)
{
	struct proc *p = td->td_proc;

	KASSERT((p->p_numthreads == 1), ("Unthreading with >1 threads"));
#ifdef KSE
	thread_lock(td);
	upcall_remove(td);
	thread_unlock(td);
	p->p_flag &= ~(P_SA|P_HADTHREADS);
	td->td_mailbox = NULL;
	td->td_pflags &= ~(TDP_SA | TDP_CAN_UNBIND);
	if (td->td_standin != NULL) {
		thread_zombie(td->td_standin);
		td->td_standin = NULL;
	}
#else
	p->p_flag &= ~P_HADTHREADS;
#endif
}

/*
 * Called from:
 *	thread_exit()
 */
void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	/* could clear a few other things here */
	/* Must NOT clear links to proc! */
}

/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar).  Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single-threaded in suspend mode when
 * there are no threads in user mode.  Threads in the kernel must be
 * allowed to continue until they get to the user boundary.  They may
 * even copy out their return values and data before suspending.  They
 * may however be accelerated in reaching the user boundary as we will
 * wake up any sleeping threads that are interruptible (PCATCH).
 */
int
thread_single(int mode)
{
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int remaining, wakeup_swapper;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((td != NULL), ("curthread is NULL"));

	if ((p->p_flag & P_HADTHREADS) == 0)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread != NULL && p->p_singlethread != td)
		return (1);

	if (mode == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
		p->p_flag &= ~P_SINGLE_BOUNDARY;
	} else {
		p->p_flag &= ~P_SINGLE_EXIT;
		if (mode == SINGLE_BOUNDARY)
			p->p_flag |= P_SINGLE_BOUNDARY;
		else
			p->p_flag &= ~P_SINGLE_BOUNDARY;
	}
	p->p_flag |= P_STOPPED_SINGLE;
	PROC_SLOCK(p);
	p->p_singlethread = td;
	if (mode == SINGLE_EXIT)
		remaining = p->p_numthreads;
	else if (mode == SINGLE_BOUNDARY)
		remaining = p->p_numthreads - p->p_boundary_count;
	else
		remaining = p->p_numthreads - p->p_suspcount;
	while (remaining != 1) {
		if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
			goto stopme;
		wakeup_swapper = 0;
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			td2->td_flags |= TDF_ASTPENDING;
			if (TD_IS_INHIBITED(td2)) {
				switch (mode) {
				case SINGLE_EXIT:
					if (td->td_flags & TDF_DBSUSPEND)
						td->td_flags &= ~TDF_DBSUSPEND;
					if (TD_IS_SUSPENDED(td2))
						wakeup_swapper |=
						    thread_unsuspend_one(td2);
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR))
						wakeup_swapper |=
						    sleepq_abort(td2, EINTR);
					break;
				case SINGLE_BOUNDARY:
					if (TD_IS_SUSPENDED(td2) &&
					    !(td2->td_flags & TDF_BOUNDARY))
						wakeup_swapper |=
						    thread_unsuspend_one(td2);
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR))
						wakeup_swapper |=
						    sleepq_abort(td2, ERESTART);
					break;
				default:
					if (TD_IS_SUSPENDED(td2)) {
						thread_unlock(td2);
						continue;
					}
					/*
					 * maybe other inhibited states too?
					 */
					if ((td2->td_flags & TDF_SINTR) &&
					    (td2->td_inhibitors &
					    (TDI_SLEEPING | TDI_SWAPPED)))
						thread_suspend_one(td2);
					break;
				}
			}
#ifdef SMP
			else if (TD_IS_RUNNING(td2) && td != td2) {
				forward_signal(td2);
			}
#endif
			thread_unlock(td2);
		}
		if (wakeup_swapper)
			kick_proc0();
		if (mode == SINGLE_EXIT)
			remaining = p->p_numthreads;
		else if (mode == SINGLE_BOUNDARY)
			remaining = p->p_numthreads - p->p_boundary_count;
		else
			remaining = p->p_numthreads - p->p_suspcount;

		/*
		 * Maybe we suspended some threads; was it enough?
		 */
		if (remaining == 1)
			break;

stopme:
		/*
		 * Wake us up when everyone else has suspended.
		 * In the meantime we suspend as well.
		 */
		thread_suspend_switch(td);
		if (mode == SINGLE_EXIT)
			remaining = p->p_numthreads;
		else if (mode == SINGLE_BOUNDARY)
			remaining = p->p_numthreads - p->p_boundary_count;
		else
			remaining = p->p_numthreads - p->p_suspcount;
	}
	if (mode == SINGLE_EXIT) {
		/*
		 * We have gotten rid of all the other threads and we
		 * are about to either exit or exec.  In either case,
		 * we try our utmost to revert to being a non-threaded
		 * process.
		 */
		p->p_singlethread = NULL;
		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT);
		thread_unthread(td);
	}
	PROC_SUNLOCK(p);
	return (0);
}

/*
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is non-zero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          |   returns 0 or 1
 *               | when ST ends       |   immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       |   returns 1
 *               |                    |   immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is in effect, even a single-threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
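/*
 * Typical caller pattern, as an illustrative sketch only (userret()
 * and the signal-delivery path do the real equivalent of this):
 *
 *	PROC_LOCK(p);
 *	error = thread_suspend_check(1);
 *	PROC_UNLOCK(p);
 *	if (error != 0)
 *		return (error);
 */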
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (P_SHOULDSTOP(p) ||
	    ((p->p_flag & P_TRACED) && (td->td_flags & TDF_DBSUSPEND))) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading.  Single threader need not stop.
			 * XXX Should be safe to access unlocked
			 * as it can only be set to be true by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
			return (EINTR);

		/* Should we goto user boundary if we didn't come from there? */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
			return (ERESTART);

		/* If thread will exit, flush its pending signals */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
			sigqueue_flush(&td->td_sigqueue);

		PROC_SLOCK(p);
		thread_stopped(p);
		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
			thread_exit();
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount + 1) {
				thread_lock(p->p_singlethread);
				wakeup_swapper =
				    thread_unsuspend_one(p->p_singlethread);
				thread_unlock(p->p_singlethread);
				if (wakeup_swapper)
					kick_proc0();
			}
		}
		PROC_UNLOCK(p);
		thread_lock(td);
		/*
		 * When a thread suspends, it just
		 * gets taken off all queues.
		 */
		thread_suspend_one(td);
		if (return_instead == 0) {
			p->p_boundary_count++;
			td->td_flags |= TDF_BOUNDARY;
		}
		PROC_SUNLOCK(p);
		mi_switch(SW_INVOL, NULL);
		if (return_instead == 0)
			td->td_flags &= ~TDF_BOUNDARY;
		thread_unlock(td);
		PROC_LOCK(p);
		if (return_instead == 0)
			p->p_boundary_count--;
	}
	return (0);
}

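/*
 * Suspend the current thread and switch away.  Both the proc lock and
 * the proc spinlock must be held; they are dropped across the context
 * switch and reacquired before returning.
 */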
void
thread_suspend_switch(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	/*
	 * We implement thread_suspend_one in stages here to avoid
	 * dropping the proc lock while the thread lock is owned.
	 */
	thread_stopped(p);
	p->p_suspcount++;
	PROC_UNLOCK(p);
	thread_lock(td);
	sched_sleep(td);
	TD_SET_SUSPENDED(td);
	PROC_SUNLOCK(p);
	DROP_GIANT();
	mi_switch(SW_VOL, NULL);
	thread_unlock(td);
	PICKUP_GIANT();
	PROC_LOCK(p);
	PROC_SLOCK(p);
}

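/*
 * Mark a thread as suspended.  The thread lock and the proc spinlock
 * must be held; the actual context switch is left to the caller.
 */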
void
thread_suspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	sched_sleep(td);
	TD_SET_SUSPENDED(td);
}

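/*
 * Clear a thread's suspension and make it runnable again.  Returns
 * non-zero if the swapper needs waking, in which case the caller is
 * expected to call kick_proc0().
 */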
int
thread_unsuspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
	TD_CLR_SUSPENDED(td);
	p->p_suspcount--;
	return (setrunnable(td));
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	wakeup_swapper = 0;
	if (!P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td);
			}
			thread_unlock(td);
		}
	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
	    (p->p_numthreads == p->p_suspcount)) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request.  Now we've downgraded to single-threaded,
		 * let it continue.
		 */
		thread_lock(p->p_singlethread);
		wakeup_swapper = thread_unsuspend_one(p->p_singlethread);
		thread_unlock(p->p_singlethread);
	}
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * End the single-threading mode.
 */
void
thread_single_end(void)
{
	struct thread *td;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY);
	PROC_SLOCK(p);
	p->p_singlethread = NULL;
	wakeup_swapper = 0;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process.  The single threader must be allowed
	 * to continue however as this is a bad place to stop.
	 */
	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td);
			}
			thread_unlock(td);
		}
	}
	PROC_SUNLOCK(p);
	if (wakeup_swapper)
		kick_proc0();
}

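/*
 * Find a thread in process p by its thread ID.  Returns NULL if no
 * thread matches.  Called with the proc lock held.
 */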
struct thread *
thread_find(struct proc *p, lwpid_t tid)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK(p);
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_tid == tid)
			break;
	}
	PROC_SUNLOCK(p);
	return (td);
}