/*-
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_witness.h"
#include "opt_kdtrace.h"
#include "opt_hwpmc_hooks.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.2/sys/kern/kern_thread.c 283765 2015-05-30 08:54:42Z kib $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rangelock.h>
#include <sys/resourcevar.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/selinfo.h>
#include <sys/turnstile.h>
#include <sys/ktr.h>
#include <sys/rwlock.h>
#include <sys/umtx.h>
#include <sys/cpuset.h>
#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <sys/eventhandler.h>

SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE(proc, , , lwp__exit);

/*
 * thread related storage.
 */
static uma_zone_t thread_zone;

TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
static struct mtx zombie_lock;
MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN);

static void thread_zombie(struct thread *);
static int thread_unsuspend_one(struct thread *td, struct proc *p,
    bool boundary);

#define TID_BUFFER_SIZE	1024

struct mtx tid_lock;
static struct unrhdr *tid_unrhdr;
static lwpid_t tid_buffer[TID_BUFFER_SIZE];
static int tid_head, tid_tail;
static MALLOC_DEFINE(M_TIDHASH, "tidhash", "thread hash");

struct tidhashhead *tidhashtbl;
u_long tidhash;
struct rwlock tidhash_lock;

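/*
 * Allocate a new thread ID.  IDs normally come from the unit number
 * allocator; once that is exhausted, fall back on the ring buffer of
 * recently released IDs maintained by tid_free() below.  Returns -1
 * if no ID is available at all.
 */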
static lwpid_t
tid_alloc(void)
{
	lwpid_t tid;

	tid = alloc_unr(tid_unrhdr);
	if (tid != -1)
		return (tid);
	mtx_lock(&tid_lock);
	if (tid_head == tid_tail) {
		mtx_unlock(&tid_lock);
		return (-1);
	}
	tid = tid_buffer[tid_head];
	tid_head = (tid_head + 1) % TID_BUFFER_SIZE;
	mtx_unlock(&tid_lock);
	return (tid);
}

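/*
 * Release a thread ID.  The ID is stashed in a fixed-size ring buffer
 * so that recently used IDs are not handed out again immediately; only
 * when the ring is full is the oldest entry returned to the unit
 * number allocator.
 */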
static void
tid_free(lwpid_t tid)
{
	lwpid_t tmp_tid = -1;

	mtx_lock(&tid_lock);
	if ((tid_tail + 1) % TID_BUFFER_SIZE == tid_head) {
		tmp_tid = tid_buffer[tid_head];
		tid_head = (tid_head + 1) % TID_BUFFER_SIZE;
	}
	tid_buffer[tid_tail] = tid;
	tid_tail = (tid_tail + 1) % TID_BUFFER_SIZE;
	mtx_unlock(&tid_lock);
	if (tmp_tid != -1)
		free_unr(tid_unrhdr, tmp_tid);
}

/*
 * Prepare a thread for use.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_oncpu = NOCPU;

	td->td_tid = tid_alloc();

	/*
	 * Note that td_critnest begins life as 1 because the thread is not
	 * running and is thereby implicitly waiting to be on the receiving
	 * end of a context switch.
	 */
	td->td_critnest = 1;
	td->td_lend_user_pri = PRI_MAX;
	EVENTHANDLER_INVOKE(thread_ctor, td);
#ifdef AUDIT
	audit_thread_alloc(td);
#endif
	umtx_thread_alloc(td);
	return (0);
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
#ifdef AUDIT
	audit_thread_free(td);
#endif
	/* Free all OSD associated to this thread. */
	osd_thread_exit(td);

	EVENTHANDLER_INVOKE(thread_dtor, td);
	tid_free(td->td_tid);
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static int
thread_init(void *mem, int size, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;

	td->td_sleepqueue = sleepq_alloc();
	td->td_turnstile = turnstile_alloc();
	td->td_rlqe = NULL;
	EVENTHANDLER_INVOKE(thread_init, td);
	td->td_sched = (struct td_sched *)&td[1];
	umtx_thread_init(td);
	td->td_kstack = 0;
	td->td_sel = NULL;
	return (0);
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	EVENTHANDLER_INVOKE(thread_fini, td);
	rlqentry_free(td->td_rlqe);
	turnstile_free(td->td_turnstile);
	sleepq_free(td->td_sleepqueue);
	umtx_thread_fini(td);
	seltdfini(td);
}

/*
 * For a newly created process, link up all the structures and its
 * initial thread, etc.
 * Called from:
 *  {arch}/{arch}/machdep.c  ia64_init(), init386() etc.
 *  proc_dtor() (should go away)
 *  proc_init()
 */
void
proc_linkup0(struct proc *p, struct thread *td)
{
	TAILQ_INIT(&p->p_threads);		/* all threads in proc */
	proc_linkup(p, td);
}

void
proc_linkup(struct proc *p, struct thread *td)
{

	sigqueue_init(&p->p_sigqueue, p);
	p->p_ksi = ksiginfo_alloc(1);
	if (p->p_ksi != NULL) {
		/* XXX p_ksi may be null if ksiginfo zone is not ready */
		p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
	}
	LIST_INIT(&p->p_mqnotifier);
	p->p_numthreads = 0;
	thread_link(td, p);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

	mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);

	/*
	 * Thread IDs start above the pid range (pid_max cannot be
	 * greater than PID_MAX), leaving one number below the range
	 * start for thread0.
	 */
	tid_unrhdr = new_unrhdr(PID_MAX + 2, INT_MAX, &tid_lock);

	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    16 - 1, 0);
	tidhashtbl = hashinit(maxproc / 2, M_TIDHASH, &tidhash);
	rw_init(&tidhash_lock, "tidhash");
}

/*
 * Place an unused thread on the zombie list.
 * Use the slpq as that must be unused by now.
 */
void
thread_zombie(struct thread *td)
{
	mtx_lock_spin(&zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq);
	mtx_unlock_spin(&zombie_lock);
}

/*
 * Release a thread that has exited after cpu_throw().
 */
void
thread_stash(struct thread *td)
{
	atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
	thread_zombie(td);
}

/*
 * Reap zombie resources.
 */
void
thread_reap(void)
{
	struct thread *td_first, *td_next;

	/*
	 * Don't even bother to lock if the list is empty at this instant;
	 * we really don't care about the next instant.
	 */
	if (!TAILQ_EMPTY(&zombie_threads)) {
		mtx_lock_spin(&zombie_lock);
		td_first = TAILQ_FIRST(&zombie_threads);
		if (td_first)
			TAILQ_INIT(&zombie_threads);
		mtx_unlock_spin(&zombie_lock);
		while (td_first) {
			td_next = TAILQ_NEXT(td_first, td_slpq);
			if (td_first->td_ucred)
				crfree(td_first->td_ucred);
			thread_free(td_first);
			td_first = td_next;
		}
	}
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(int pages)
{
	struct thread *td;

	thread_reap();		/* check if any zombies to get */

	td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK);
	KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
	if (!vm_thread_new(td, pages)) {
		uma_zfree(thread_zone, td);
		return (NULL);
	}
	cpu_thread_alloc(td);
	return (td);
}

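/*
 * Allocate a kernel stack for a thread that was created without one.
 * Returns 1 on success and 0 if the stack could not be allocated.
 */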
int
thread_alloc_stack(struct thread *td, int pages)
{

	KASSERT(td->td_kstack == 0,
	    ("thread_alloc_stack called on a thread with kstack"));
	if (!vm_thread_new(td, pages))
		return (0);
	cpu_thread_alloc(td);
	return (1);
}

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{

	lock_profile_thread_exit(td);
	if (td->td_cpuset)
		cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_free(td);
	if (td->td_kstack != 0)
		vm_thread_dispose(td);
	uma_zfree(thread_zone, td);
}

/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder.  This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 */
void
thread_exit(void)
{
	uint64_t runtime, new_switchtime;
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&Giant, MA_NOTOWNED);

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
	    (long)p->p_pid, td->td_name);
	KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));

#ifdef AUDIT
	AUDIT_SYSCALL_EXIT(0, td);
#endif
	/*
	 * Drop FPU and debug register state storage, or any other
	 * architecture-specific resources that would not be on a
	 * new untouched process.
	 */
	cpu_thread_exit(td);	/* XXXSMP */

	/*
	 * The last thread is left attached to the process so that the
	 * whole bundle gets recycled.  Skip all this stuff if we never
	 * had threads.  exit1() clears all signs of other threads when
	 * it goes to single threading, so the last thread always takes
	 * the short path.
	 */
	if (p->p_flag & P_HADTHREADS) {
		if (p->p_numthreads > 1) {
			atomic_add_int(&td->td_proc->p_exitthreads, 1);
			thread_unlink(td);
			td2 = FIRST_THREAD_IN_PROC(p);
			sched_exit_thread(td2, td);

			/*
			 * The test below is NOT true if we are the
			 * sole exiting thread.  P_STOPPED_SINGLE is unset
			 * in exit1() after it is the only survivor.
			 */
			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
				if (p->p_numthreads == p->p_suspcount) {
					thread_lock(p->p_singlethread);
					wakeup_swapper = thread_unsuspend_one(
					    p->p_singlethread, p, false);
					thread_unlock(p->p_singlethread);
					if (wakeup_swapper)
						kick_proc0();
				}
			}

			PCPU_SET(deadthread, td);
		} else {
			/*
			 * The last thread is exiting, but it did not
			 * come here via exit().
			 */
			panic("thread_exit: Last thread exiting on its own");
		}
	}
#ifdef HWPMC_HOOKS
	/*
	 * If this thread is part of a process that is being tracked by
	 * hwpmc(4), inform the module of the thread's impending exit.
	 */
	if (PMC_PROC_IS_USING_PMCS(td->td_proc))
		PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
#endif
	PROC_UNLOCK(p);

	/* Do the same timestamp bookkeeping that mi_switch() would do. */
	new_switchtime = cpu_ticks();
	runtime = new_switchtime - PCPU_GET(switchtime);
	td->td_runtime += runtime;
	td->td_incruntime += runtime;
	PCPU_SET(switchtime, new_switchtime);
	PCPU_SET(switchticks, ticks);
	PCPU_INC(cnt.v_swtch);

	/* Save our resource usage in our process. */
	td->td_ru.ru_nvcsw++;
	ruxagg(p, td);
	rucollect(&p->p_ru, &td->td_ru);

	thread_lock(td);
	PROC_SUNLOCK(p);
	td->td_state = TDS_INACTIVE;
#ifdef WITNESS
	witness_thread_exit(td);
#endif
	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
	sched_throw(td);
	panic("I'm a teapot!");
	/* NOTREACHED */
}

/*
 * Do any thread-specific cleanups that may be needed in wait().
 * Called with Giant, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	mtx_assert(&Giant, MA_NOTOWNED);
	KASSERT(p->p_numthreads == 1, ("multiple threads in thread_wait()"));
	KASSERT(p->p_exitthreads == 0, ("p_exitthreads leaking"));
	td = FIRST_THREAD_IN_PROC(p);
	/* Lock the last thread so we spin until it exits cpu_throw(). */
	thread_lock(td);
	thread_unlock(td);
	lock_profile_thread_exit(td);
	cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_clean(td);
	crfree(td->td_ucred);
	thread_reap();	/* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 */
void
thread_link(struct thread *td, struct proc *p)
{

	/*
	 * XXX This can't be enabled because it's called for proc0 before
	 * its lock has been created.
	 * PROC_LOCK_ASSERT(p, MA_OWNED);
	 */
	td->td_state = TDS_INACTIVE;
	td->td_proc = p;
	td->td_flags = TDF_INMEM;

	LIST_INIT(&td->td_contested);
	LIST_INIT(&td->td_lprof[0]);
	LIST_INIT(&td->td_lprof[1]);
	sigqueue_init(&td->td_sigqueue, p);
	callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
	p->p_numthreads++;
}

/*
 * Called from:
 *  thread_exit()
 */
void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	/* could clear a few other things here */
	/* Must NOT clear links to proc! */
}

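/*
 * Count how many threads still have to be dealt with before the
 * single-threading request in the given mode is satisfied.
 */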
static int
calc_remaining(struct proc *p, int mode)
{
	int remaining;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	if (mode == SINGLE_EXIT)
		remaining = p->p_numthreads;
	else if (mode == SINGLE_BOUNDARY)
		remaining = p->p_numthreads - p->p_boundary_count;
	else if (mode == SINGLE_NO_EXIT || mode == SINGLE_ALLPROC)
		remaining = p->p_numthreads - p->p_suspcount;
	else
		panic("calc_remaining: wrong mode %d", mode);
	return (remaining);
}

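/*
 * The target value that calc_remaining() must reach.  For
 * SINGLE_ALLPROC the requesting thread belongs to another process, so
 * every thread of p must park; in the other modes the requesting
 * thread itself remains running.
 */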
static int
remain_for_mode(int mode)
{

	return (mode == SINGLE_ALLPROC ? 0 : 1);
}

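/*
 * Kick an inhibited (suspended or sleeping) thread towards the goal of
 * the single-threading request, in a mode-dependent way.  Returns
 * nonzero if the swapper needs to be woken up.
 */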
static int
weed_inhib(int mode, struct thread *td2, struct proc *p)
{
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td2, MA_OWNED);

	wakeup_swapper = 0;
	switch (mode) {
	case SINGLE_EXIT:
		if (TD_IS_SUSPENDED(td2))
			wakeup_swapper |= thread_unsuspend_one(td2, p, true);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
			wakeup_swapper |= sleepq_abort(td2, EINTR);
		break;
	case SINGLE_BOUNDARY:
		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & TDF_BOUNDARY) == 0)
			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
			wakeup_swapper |= sleepq_abort(td2, ERESTART);
		break;
	case SINGLE_NO_EXIT:
		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & TDF_BOUNDARY) == 0)
			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
			wakeup_swapper |= sleepq_abort(td2, ERESTART);
		break;
	case SINGLE_ALLPROC:
		/*
		 * ALLPROC suspend tries to avoid spurious EINTR for
		 * threads sleeping interruptibly, by suspending the
		 * thread directly, similarly to sig_suspend_threads().
		 * Since such sleep is not performed at the user
		 * boundary, TDF_BOUNDARY flag is not set, and TDF_ALLPROCSUSP
		 * is used to avoid immediate un-suspend.
		 */
		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & (TDF_BOUNDARY |
		    TDF_ALLPROCSUSP)) == 0)
			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0) {
			if ((td2->td_flags & TDF_SBDRY) == 0) {
				thread_suspend_one(td2);
				td2->td_flags |= TDF_ALLPROCSUSP;
			} else {
				wakeup_swapper |= sleepq_abort(td2, ERESTART);
			}
		}
		break;
	}
	return (wakeup_swapper);
}

/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar).  Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single-threaded in the suspend mode when
 * there are no threads in user mode.  Threads in the kernel must be
 * allowed to continue until they get to the user boundary.  They may
 * even copy out their return values and data before suspending.  They
 * may however be accelerated in reaching the user boundary, as we will
 * wake up any sleeping threads that are interruptible (PCATCH).
 */
int
thread_single(struct proc *p, int mode)
{
	struct thread *td;
	struct thread *td2;
	int remaining, wakeup_swapper;

	td = curthread;
	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
	    ("invalid mode %d", mode));
	/*
	 * If allowing non-ALLPROC singlethreading for non-curproc
	 * callers, calc_remaining() and remain_for_mode() should be
	 * adjusted to also account for td->td_proc != p.  For now
	 * this is not implemented because it is not used.
	 */
	KASSERT((mode == SINGLE_ALLPROC && td->td_proc != p) ||
	    (mode != SINGLE_ALLPROC && td->td_proc == p),
	    ("mode %d proc %p curproc %p", mode, p, td->td_proc));
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);

	if ((p->p_flag & P_HADTHREADS) == 0 && mode != SINGLE_ALLPROC)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread != NULL && p->p_singlethread != td)
		return (1);

	if (mode == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
		p->p_flag &= ~P_SINGLE_BOUNDARY;
	} else {
		p->p_flag &= ~P_SINGLE_EXIT;
		if (mode == SINGLE_BOUNDARY)
			p->p_flag |= P_SINGLE_BOUNDARY;
		else
			p->p_flag &= ~P_SINGLE_BOUNDARY;
	}
	if (mode == SINGLE_ALLPROC)
		p->p_flag |= P_TOTAL_STOP;
	p->p_flag |= P_STOPPED_SINGLE;
	PROC_SLOCK(p);
	p->p_singlethread = td;
	remaining = calc_remaining(p, mode);
	while (remaining != remain_for_mode(mode)) {
		if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
			goto stopme;
		wakeup_swapper = 0;
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
			if (TD_IS_INHIBITED(td2)) {
				wakeup_swapper |= weed_inhib(mode, td2, p);
#ifdef SMP
			} else if (TD_IS_RUNNING(td2) && td != td2) {
				forward_signal(td2);
#endif
			}
			thread_unlock(td2);
		}
		if (wakeup_swapper)
			kick_proc0();
		remaining = calc_remaining(p, mode);

		/*
		 * Maybe we suspended some threads; was it enough?
		 */
		if (remaining == remain_for_mode(mode))
			break;

stopme:
		/*
		 * Wake us up when everyone else has suspended.
		 * In the meantime we suspend as well.
		 */
		thread_suspend_switch(td, p);
		remaining = calc_remaining(p, mode);
	}
	if (mode == SINGLE_EXIT) {
		/*
		 * Convert the process to an unthreaded process.  The
		 * SINGLE_EXIT is called by exit1() or execve(), in
		 * both cases other threads must be retired.
		 */
		KASSERT(p->p_numthreads == 1, ("Unthreading with >1 threads"));
		p->p_singlethread = NULL;
		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_HADTHREADS);

		/*
		 * Wait for any remaining threads to exit cpu_throw().
		 */
		while (p->p_exitthreads != 0) {
			PROC_SUNLOCK(p);
			PROC_UNLOCK(p);
			sched_relinquish(td);
			PROC_LOCK(p);
			PROC_SLOCK(p);
		}
	} else if (mode == SINGLE_BOUNDARY) {
		/*
		 * Wait until all suspended threads are removed from
		 * the processors.  The thread_suspend_check()
		 * increments p_boundary_count while it is still
		 * running, which makes it possible for the execve()
		 * to destroy vmspace while our other threads are
		 * still using the address space.
		 *
		 * We lock the thread, which is only allowed to
		 * succeed after context switch code finished using
		 * the address space.
		 */
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			KASSERT((td2->td_flags & TDF_BOUNDARY) != 0,
			    ("td %p not on boundary", td2));
			KASSERT(TD_IS_SUSPENDED(td2),
			    ("td %p is not suspended", td2));
			thread_unlock(td2);
		}
	}
	PROC_SUNLOCK(p);
	return (0);
}

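/*
 * Returns true if the current thread must call thread_suspend_check():
 * either the process is stopping, or the debugger requested a stop of
 * this particular thread.
 */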
bool
thread_suspend_check_needed(void)
{
	struct proc *p;
	struct thread *td;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	return (P_SHOULDSTOP(p) || ((p->p_flag & P_TRACED) != 0 &&
	    (td->td_dbgflags & TDB_SUSPEND) != 0));
}

/*
 * Called from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is nonzero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          |   returns 0 or 1
 *               | when ST ends       |   immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       |   returns 1
 *               |                    |   immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (thread_suspend_check_needed()) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading.  Single threader need not stop.
			 * XXX Should be safe to access unlocked
			 * as it can only be set to be true by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
			return (EINTR);

		/* Should we go to the user boundary if we didn't come from there? */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
			return (ERESTART);

		/*
		 * Ignore suspend requests if they are deferred.
		 */
		if ((td->td_flags & TDF_SBDRY) != 0) {
			KASSERT(return_instead,
			    ("TDF_SBDRY set for unsafe thread_suspend_check"));
			return (0);
		}

		/*
		 * If the process is waiting for us to exit,
		 * this thread should just exit.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
			PROC_UNLOCK(p);
			tidhash_remove(td);
			PROC_LOCK(p);
			tdsigcleanup(td);
			umtx_thread_exit(td);
			PROC_SLOCK(p);
			thread_stopped(p);
			thread_exit();
		}

		PROC_SLOCK(p);
		thread_stopped(p);
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount + 1) {
				thread_lock(p->p_singlethread);
				wakeup_swapper = thread_unsuspend_one(
				    p->p_singlethread, p, false);
				thread_unlock(p->p_singlethread);
				if (wakeup_swapper)
					kick_proc0();
			}
		}
		PROC_UNLOCK(p);
		thread_lock(td);
		/*
		 * When a thread suspends, it just
		 * gets taken off all queues.
		 */
		thread_suspend_one(td);
		if (return_instead == 0) {
			p->p_boundary_count++;
			td->td_flags |= TDF_BOUNDARY;
		}
		PROC_SUNLOCK(p);
		mi_switch(SW_INVOL | SWT_SUSPEND, NULL);
		thread_unlock(td);
		PROC_LOCK(p);
	}
	return (0);
}

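/*
 * Suspend the current thread (td) on behalf of the single-threading
 * request against process p (which may differ from td's own process in
 * the ALLPROC case) and switch away.  Returns with both the process
 * lock and the process spinlock reacquired.
 */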
void
thread_suspend_switch(struct thread *td, struct proc *p)
{

	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	/*
	 * We implement thread_suspend_one in stages here to avoid
	 * dropping the proc lock while the thread lock is owned.
	 */
	if (p == td->td_proc) {
		thread_stopped(p);
		p->p_suspcount++;
	}
	PROC_UNLOCK(p);
	thread_lock(td);
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
	PROC_SUNLOCK(p);
	DROP_GIANT();
	mi_switch(SW_VOL | SWT_SUSPEND, NULL);
	thread_unlock(td);
	PICKUP_GIANT();
	PROC_LOCK(p);
	PROC_SLOCK(p);
}

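/*
 * Mark a thread suspended and account for it in its process.  The
 * caller holds the process spinlock and the thread lock; the thread
 * does not run again until it is unsuspended.
 */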
void
thread_suspend_one(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
}

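/*
 * Clear a thread's suspension and make it runnable again.  If
 * 'boundary' is true the thread is also released from a user-boundary
 * stop.  Returns nonzero if the swapper needs to be woken up.
 */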
static int
thread_unsuspend_one(struct thread *td, struct proc *p, bool boundary)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
	TD_CLR_SUSPENDED(td);
	td->td_flags &= ~TDF_ALLPROCSUSP;
	if (td->td_proc == p) {
		PROC_SLOCK_ASSERT(p, MA_OWNED);
		p->p_suspcount--;
		if (boundary && (td->td_flags & TDF_BOUNDARY) != 0) {
			td->td_flags &= ~TDF_BOUNDARY;
			p->p_boundary_count--;
		}
	}
	return (setrunnable(td));
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	wakeup_swapper = 0;
	if (!P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td, p,
				    true);
			}
			thread_unlock(td);
		}
	} else if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
	    p->p_numthreads == p->p_suspcount) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request.  Now we've downgraded to single-threaded,
		 * let it continue.
		 */
		if (p->p_singlethread->td_proc == p) {
			thread_lock(p->p_singlethread);
			wakeup_swapper = thread_unsuspend_one(
			    p->p_singlethread, p, false);
			thread_unlock(p->p_singlethread);
		}
	}
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * End the single threading mode.
 */
void
thread_single_end(struct proc *p, int mode)
{
	struct thread *td;
	int wakeup_swapper;

	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
	    ("invalid mode %d", mode));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((mode == SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) != 0) ||
	    (mode != SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) == 0),
	    ("mode %d does not match P_TOTAL_STOP", mode));
	KASSERT(mode == SINGLE_ALLPROC || p->p_singlethread == curthread,
	    ("thread_single_end from other thread %p %p",
	    curthread, p->p_singlethread));
	KASSERT(mode != SINGLE_BOUNDARY ||
	    (p->p_flag & P_SINGLE_BOUNDARY) != 0,
	    ("mis-matched SINGLE_BOUNDARY flags %x", p->p_flag));
	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY |
	    P_TOTAL_STOP);
	PROC_SLOCK(p);
	p->p_singlethread = NULL;
	wakeup_swapper = 0;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process.  The single threader must be allowed
	 * to continue however as this is a bad place to stop.
	 */
	if (p->p_numthreads != remain_for_mode(mode) && !P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td, p,
				    mode == SINGLE_BOUNDARY);
			}
			thread_unlock(td);
		}
	}
	KASSERT(mode != SINGLE_BOUNDARY || p->p_boundary_count == 0,
	    ("inconsistent boundary count %d", p->p_boundary_count));
	PROC_SUNLOCK(p);
	if (wakeup_swapper)
		kick_proc0();
}

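/*
 * Locate a thread by its TID within a process whose lock is held.
 * Returns NULL if no thread with that TID belongs to the process.
 */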
struct thread *
thread_find(struct proc *p, lwpid_t tid)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_tid == tid)
			break;
	}
	return (td);
}

/* Locate a thread by number; return with proc lock held. */
struct thread *
tdfind(lwpid_t tid, pid_t pid)
{
#define RUN_THRESH	16
	struct thread *td;
	int run = 0;

	rw_rlock(&tidhash_lock);
	LIST_FOREACH(td, TIDHASH(tid), td_hash) {
		if (td->td_tid == tid) {
			if (pid != -1 && td->td_proc->p_pid != pid) {
				td = NULL;
				break;
			}
			PROC_LOCK(td->td_proc);
			if (td->td_proc->p_state == PRS_NEW) {
				PROC_UNLOCK(td->td_proc);
				td = NULL;
				break;
			}
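			/*
			 * The walk to this entry was long; move it to
			 * the front of its hash chain so the next
			 * lookup is cheaper.  This needs the write
			 * lock, so skip the optimization on contention.
			 */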
			if (run > RUN_THRESH) {
				if (rw_try_upgrade(&tidhash_lock)) {
					LIST_REMOVE(td, td_hash);
					LIST_INSERT_HEAD(TIDHASH(td->td_tid),
					    td, td_hash);
					rw_wunlock(&tidhash_lock);
					return (td);
				}
			}
			break;
		}
		run++;
	}
	rw_runlock(&tidhash_lock);
	return (td);
}

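/*
 * Insert a thread into, or remove it from, the global TID hash table.
 */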
void
tidhash_add(struct thread *td)
{

	rw_wlock(&tidhash_lock);
	LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash);
	rw_wunlock(&tidhash_lock);
}

void
tidhash_remove(struct thread *td)
{

	rw_wlock(&tidhash_lock);
	LIST_REMOVE(td, td_hash);
	rw_wunlock(&tidhash_lock);
}