/*-
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 * notice(s), this list of conditions and the following disclaimer as
 * the first lines of this file unmodified other than the possible
 * addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice(s), this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_witness.h"
#include "opt_kdtrace.h"
#include "opt_hwpmc_hooks.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.3/sys/kern/kern_thread.c 294614 2016-01-23 01:21:11Z jhb $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rangelock.h>
#include <sys/resourcevar.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/selinfo.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/turnstile.h>
#include <sys/ktr.h>
#include <sys/rwlock.h>
#include <sys/umtx.h>
#include <sys/cpuset.h>
#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <sys/eventhandler.h>

SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE(proc, , , lwp__exit);

/*
 * Thread related storage.
 */
static uma_zone_t thread_zone;

TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
static struct mtx zombie_lock;
MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN);

static void thread_zombie(struct thread *);
static int thread_unsuspend_one(struct thread *td, struct proc *p,
    bool boundary);

#define	TID_BUFFER_SIZE	1024

struct mtx tid_lock;
static struct unrhdr *tid_unrhdr;
static lwpid_t tid_buffer[TID_BUFFER_SIZE];
static int tid_head, tid_tail;
static MALLOC_DEFINE(M_TIDHASH, "tidhash", "thread hash");

struct tidhashhead *tidhashtbl;
u_long tidhash;
struct rwlock tidhash_lock;

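/*
 * TID allocation is layered over the generic unit-number allocator:
 * tid_alloc() normally takes a fresh ID from tid_unrhdr, and tid_free()
 * parks released IDs in tid_buffer, a ring indexed by tid_head/tid_tail.
 * A freed TID is only handed back to the unr allocator once roughly
 * TID_BUFFER_SIZE further frees have occurred, which delays its reuse
 * and makes stale references to a dead thread easier to catch.  The
 * buffered IDs also serve as a reserve for tid_alloc() should the unr
 * space ever be exhausted.
 */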
static lwpid_t
tid_alloc(void)
{
	lwpid_t tid;

	tid = alloc_unr(tid_unrhdr);
	if (tid != -1)
		return (tid);
	mtx_lock(&tid_lock);
	if (tid_head == tid_tail) {
		mtx_unlock(&tid_lock);
		return (-1);
	}
	tid = tid_buffer[tid_head];
	tid_head = (tid_head + 1) % TID_BUFFER_SIZE;
	mtx_unlock(&tid_lock);
	return (tid);
}

static void
tid_free(lwpid_t tid)
{
	lwpid_t tmp_tid = -1;

	mtx_lock(&tid_lock);
	if ((tid_tail + 1) % TID_BUFFER_SIZE == tid_head) {
		tmp_tid = tid_buffer[tid_head];
		tid_head = (tid_head + 1) % TID_BUFFER_SIZE;
	}
	tid_buffer[tid_tail] = tid;
	tid_tail = (tid_tail + 1) % TID_BUFFER_SIZE;
	mtx_unlock(&tid_lock);
	if (tmp_tid != -1)
		free_unr(tid_unrhdr, tmp_tid);
}

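/*
 * The following four functions are the UMA callbacks for thread_zone.
 * thread_init() and thread_fini() run when an item enters or leaves the
 * zone's cache of initialized items, so they manage the type-stable
 * pieces (sleep queue, turnstile, range-lock entry).  thread_ctor() and
 * thread_dtor() run on every uma_zalloc()/uma_zfree(), so they handle
 * per-incarnation state such as the TID.
 */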
/*
 * Prepare a thread for use.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_oncpu = NOCPU;

	td->td_tid = tid_alloc();

	/*
	 * Note that td_critnest begins life as 1 because the thread is not
	 * running and is thereby implicitly waiting to be on the receiving
	 * end of a context switch.
	 */
	td->td_critnest = 1;
	td->td_lend_user_pri = PRI_MAX;
	EVENTHANDLER_INVOKE(thread_ctor, td);
#ifdef AUDIT
	audit_thread_alloc(td);
#endif
	umtx_thread_alloc(td);
	return (0);
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
#ifdef AUDIT
	audit_thread_free(td);
#endif
	/* Free all OSD associated with this thread. */
	osd_thread_exit(td);

	EVENTHANDLER_INVOKE(thread_dtor, td);
	tid_free(td->td_tid);
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static int
thread_init(void *mem, int size, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;

	td->td_sleepqueue = sleepq_alloc();
	td->td_turnstile = turnstile_alloc();
	td->td_rlqe = NULL;
	EVENTHANDLER_INVOKE(thread_init, td);
	td->td_sched = (struct td_sched *)&td[1];
	umtx_thread_init(td);
	td->td_kstack = 0;
	td->td_sel = NULL;
	return (0);
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	EVENTHANDLER_INVOKE(thread_fini, td);
	rlqentry_free(td->td_rlqe);
	turnstile_free(td->td_turnstile);
	sleepq_free(td->td_sleepqueue);
	umtx_thread_fini(td);
	seltdfini(td);
}

/*
 * For a newly created process, link up all the structures and its
 * initial thread, etc.
 * Called from:
 *	{arch}/{arch}/machdep.c		ia64_init(), init386() etc.
 *	proc_dtor()			(should go away)
 *	proc_init()
 */
void
proc_linkup0(struct proc *p, struct thread *td)
{
	TAILQ_INIT(&p->p_threads);	/* all threads in proc */
	proc_linkup(p, td);
}

void
proc_linkup(struct proc *p, struct thread *td)
{

	sigqueue_init(&p->p_sigqueue, p);
	p->p_ksi = ksiginfo_alloc(1);
	if (p->p_ksi != NULL) {
		/* XXX p_ksi may be null if ksiginfo zone is not ready */
		p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
	}
	LIST_INIT(&p->p_mqnotifier);
	p->p_numthreads = 0;
	thread_link(td, p);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

	mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);

	/*
	 * Thread IDs live above the pid space (pid_max cannot be greater
	 * than PID_MAX), and one number above that is left free for
	 * thread0, so dynamic TIDs start at PID_MAX + 2.
	 */
	tid_unrhdr = new_unrhdr(PID_MAX + 2, INT_MAX, &tid_lock);

	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    16 - 1, UMA_ZONE_NOFREE);
	tidhashtbl = hashinit(maxproc / 2, M_TIDHASH, &tidhash);
	rw_init(&tidhash_lock, "tidhash");
}

/*
 * Place an unused thread on the zombie list.
 * Use the slpq as that must be unused by now.
 */
void
thread_zombie(struct thread *td)
{
	mtx_lock_spin(&zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq);
	mtx_unlock_spin(&zombie_lock);
}

/*
 * Release a thread that has exited after cpu_throw().
 */
void
thread_stash(struct thread *td)
{
	atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
	thread_zombie(td);
}

/*
 * Reap zombie resources.
 */
void
thread_reap(void)
{
	struct thread *td_first, *td_next;

	/*
	 * Don't even bother to lock if the list is empty at this instant;
	 * we really don't care about the next instant.  A thread queued
	 * concurrently will simply be reaped on a later call.
	 */
	if (!TAILQ_EMPTY(&zombie_threads)) {
		mtx_lock_spin(&zombie_lock);
		td_first = TAILQ_FIRST(&zombie_threads);
		if (td_first)
			TAILQ_INIT(&zombie_threads);
		mtx_unlock_spin(&zombie_lock);
		while (td_first) {
			td_next = TAILQ_NEXT(td_first, td_slpq);
			if (td_first->td_ucred)
				crfree(td_first->td_ucred);
			thread_free(td_first);
			td_first = td_next;
		}
	}
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(int pages)
{
	struct thread *td;

	thread_reap();		/* check if any zombies to get */

	td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK);
	KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
	if (!vm_thread_new(td, pages)) {
		uma_zfree(thread_zone, td);
		return (NULL);
	}
	cpu_thread_alloc(td);
	return (td);
}

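/*
 * Attach a kernel stack to a thread that was allocated without one.
 * Returns 1 on success, 0 if the stack could not be allocated.
 */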
int
thread_alloc_stack(struct thread *td, int pages)
{

	KASSERT(td->td_kstack == 0,
	    ("thread_alloc_stack called on a thread with kstack"));
	if (!vm_thread_new(td, pages))
		return (0);
	cpu_thread_alloc(td);
	return (1);
}

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{

	lock_profile_thread_exit(td);
	if (td->td_cpuset)
		cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_free(td);
	if (td->td_kstack != 0)
		vm_thread_dispose(td);
	uma_zfree(thread_zone, td);
}

/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder.  This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 */
void
thread_exit(void)
{
	uint64_t runtime, new_switchtime;
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&Giant, MA_NOTOWNED);

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
	    (long)p->p_pid, td->td_name);
	KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));

#ifdef AUDIT
	AUDIT_SYSCALL_EXIT(0, td);
#endif
	/*
	 * Drop FPU & debug register state storage, and any other
	 * architecture-specific resources that would not be present
	 * in a freshly created process.
	 */
	cpu_thread_exit(td);	/* XXXSMP */

	/*
	 * The last thread is left attached to the process so that the
	 * whole bundle gets recycled.  Skip all this stuff if we never
	 * had threads.  The exit() code clears all sign of other threads
	 * when it goes to single threading, so the last thread always
	 * takes the short path.
	 */
	if (p->p_flag & P_HADTHREADS) {
		if (p->p_numthreads > 1) {
			atomic_add_int(&td->td_proc->p_exitthreads, 1);
			thread_unlink(td);
			td2 = FIRST_THREAD_IN_PROC(p);
			sched_exit_thread(td2, td);

			/*
			 * The test below is NOT true if we are the
			 * sole exiting thread.  P_STOPPED_SINGLE is unset
			 * in exit1() after it is the only survivor.
			 */
			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
				if (p->p_numthreads == p->p_suspcount) {
					thread_lock(p->p_singlethread);
					wakeup_swapper = thread_unsuspend_one(
					    p->p_singlethread, p, false);
					thread_unlock(p->p_singlethread);
					if (wakeup_swapper)
						kick_proc0();
				}
			}

			PCPU_SET(deadthread, td);
		} else {
			/*
			 * The last thread is exiting... but not through
			 * exit().
			 */
			panic("thread_exit: Last thread exiting on its own");
		}
	}
#ifdef HWPMC_HOOKS
	/*
	 * If this thread is part of a process that is being tracked by
	 * hwpmc(4), inform the module of the thread's impending exit.
	 */
	if (PMC_PROC_IS_USING_PMCS(td->td_proc))
		PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
#endif
	PROC_UNLOCK(p);
	PROC_STATLOCK(p);
	thread_lock(td);
	PROC_SUNLOCK(p);

	/* Do the same timestamp bookkeeping that mi_switch() would do. */
	new_switchtime = cpu_ticks();
	runtime = new_switchtime - PCPU_GET(switchtime);
	td->td_runtime += runtime;
	td->td_incruntime += runtime;
	PCPU_SET(switchtime, new_switchtime);
	PCPU_SET(switchticks, ticks);
	PCPU_INC(cnt.v_swtch);

	/* Save our resource usage in our process. */
	td->td_ru.ru_nvcsw++;
	ruxagg(p, td);
	rucollect(&p->p_ru, &td->td_ru);
	PROC_STATUNLOCK(p);

	td->td_state = TDS_INACTIVE;
#ifdef WITNESS
	witness_thread_exit(td);
#endif
	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
	sched_throw(td);
	panic("I'm a teapot!");
	/* NOTREACHED */
}

/*
 * Perform any thread-specific cleanup that may be needed in wait().
 * Called with Giant, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	mtx_assert(&Giant, MA_NOTOWNED);
	KASSERT(p->p_numthreads == 1, ("multiple threads in thread_wait()"));
	KASSERT(p->p_exitthreads == 0, ("p_exitthreads leaking"));
	td = FIRST_THREAD_IN_PROC(p);
	/* Lock the last thread so we spin until it exits cpu_throw(). */
	thread_lock(td);
	thread_unlock(td);
	lock_profile_thread_exit(td);
	cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_clean(td);
	crfree(td->td_ucred);
	thread_reap();	/* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 */
void
thread_link(struct thread *td, struct proc *p)
{

	/*
	 * XXX This can't be enabled because it's called for proc0 before
	 * its lock has been created.
	 * PROC_LOCK_ASSERT(p, MA_OWNED);
	 */
	td->td_state = TDS_INACTIVE;
	td->td_proc = p;
	td->td_flags = TDF_INMEM;

	LIST_INIT(&td->td_contested);
	LIST_INIT(&td->td_lprof[0]);
	LIST_INIT(&td->td_lprof[1]);
	sigqueue_init(&td->td_sigqueue, p);
	callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
	p->p_numthreads++;
}

/*
 * Called from:
 *	thread_exit()
 */
void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	/* could clear a few other things here */
	/* Must NOT clear links to proc! */
}

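/*
 * Compute how many threads still stand in the way of the
 * single-threading request: all of them for SINGLE_EXIT, all not yet
 * parked at the user boundary for SINGLE_BOUNDARY, and all not yet
 * suspended for SINGLE_NO_EXIT and SINGLE_ALLPROC.
 */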
static int
calc_remaining(struct proc *p, int mode)
{
	int remaining;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	if (mode == SINGLE_EXIT)
		remaining = p->p_numthreads;
	else if (mode == SINGLE_BOUNDARY)
		remaining = p->p_numthreads - p->p_boundary_count;
	else if (mode == SINGLE_NO_EXIT || mode == SINGLE_ALLPROC)
		remaining = p->p_numthreads - p->p_suspcount;
	else
		panic("calc_remaining: wrong mode %d", mode);
	return (remaining);
}

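/*
 * The count calc_remaining() must reach for single-threading to be
 * complete: zero for an ALLPROC stop, otherwise one, since the
 * requesting thread itself keeps running.
 */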
static int
remain_for_mode(int mode)
{

	return (mode == SINGLE_ALLPROC ? 0 : 1);
}

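/*
 * Push one inhibited thread towards the state the single-threading
 * mode requires: wake it from an interruptible sleep and/or lift its
 * suspension so that it can reach the exit path or the user boundary.
 * Returns non-zero if the caller must kick the swapper.
 */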
static int
weed_inhib(int mode, struct thread *td2, struct proc *p)
{
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td2, MA_OWNED);

	wakeup_swapper = 0;
	switch (mode) {
	case SINGLE_EXIT:
		if (TD_IS_SUSPENDED(td2))
			wakeup_swapper |= thread_unsuspend_one(td2, p, true);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
			wakeup_swapper |= sleepq_abort(td2, EINTR);
		break;
	case SINGLE_BOUNDARY:
		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & TDF_BOUNDARY) == 0)
			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
			wakeup_swapper |= sleepq_abort(td2, ERESTART);
		break;
	case SINGLE_NO_EXIT:
		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & TDF_BOUNDARY) == 0)
			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
			wakeup_swapper |= sleepq_abort(td2, ERESTART);
		break;
	case SINGLE_ALLPROC:
		/*
		 * ALLPROC suspend tries to avoid spurious EINTR for
		 * threads sleeping interruptibly, by suspending the
		 * thread directly, similarly to sig_suspend_threads().
		 * Since such a sleep is not performed at the user
		 * boundary, the TDF_BOUNDARY flag is not set, and
		 * TDF_ALLPROCSUSP is used to avoid an immediate
		 * un-suspend.
		 */
		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & (TDF_BOUNDARY |
		    TDF_ALLPROCSUSP)) == 0)
			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0) {
			if ((td2->td_flags & TDF_SBDRY) == 0) {
				thread_suspend_one(td2);
				td2->td_flags |= TDF_ALLPROCSUSP;
			} else {
				wakeup_swapper |= sleepq_abort(td2, ERESTART);
			}
		}
		break;
	}
	return (wakeup_swapper);
}

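/*
 * A sketch of the typical call pattern (exit1() and the execve() path
 * are the usual callers, as noted below; the error handling here is
 * illustrative only):
 *
 *	PROC_LOCK(p);
 *	if (thread_single(p, SINGLE_EXIT) != 0) {
 *		... another thread is already single-threading; abort ...
 *	}
 *	... all other threads have now been retired ...
 */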
/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar).  Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single-threaded in the suspend mode when
 * there are no threads in user mode.  Threads in the kernel must be
 * allowed to continue until they get to the user boundary.  They may even
 * copy out their return values and data before suspending.  They may,
 * however, be accelerated in reaching the user boundary, as we will wake
 * up any sleeping threads that are interruptible (PCATCH).
 */
int
thread_single(struct proc *p, int mode)
{
	struct thread *td;
	struct thread *td2;
	int remaining, wakeup_swapper;

	td = curthread;
	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
	    ("invalid mode %d", mode));
	/*
	 * If allowing non-ALLPROC singlethreading for non-curproc
	 * callers, calc_remaining() and remain_for_mode() should be
	 * adjusted to also account for td->td_proc != p.  For now
	 * this is not implemented because it is not used.
	 */
	KASSERT((mode == SINGLE_ALLPROC && td->td_proc != p) ||
	    (mode != SINGLE_ALLPROC && td->td_proc == p),
	    ("mode %d proc %p curproc %p", mode, p, td->td_proc));
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);

	if ((p->p_flag & P_HADTHREADS) == 0 && mode != SINGLE_ALLPROC)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread != NULL && p->p_singlethread != td)
		return (1);

	if (mode == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
		p->p_flag &= ~P_SINGLE_BOUNDARY;
	} else {
		p->p_flag &= ~P_SINGLE_EXIT;
		if (mode == SINGLE_BOUNDARY)
			p->p_flag |= P_SINGLE_BOUNDARY;
		else
			p->p_flag &= ~P_SINGLE_BOUNDARY;
	}
	if (mode == SINGLE_ALLPROC)
		p->p_flag |= P_TOTAL_STOP;
	p->p_flag |= P_STOPPED_SINGLE;
	PROC_SLOCK(p);
	p->p_singlethread = td;
	remaining = calc_remaining(p, mode);
	while (remaining != remain_for_mode(mode)) {
		if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
			goto stopme;
		wakeup_swapper = 0;
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
			if (TD_IS_INHIBITED(td2)) {
				wakeup_swapper |= weed_inhib(mode, td2, p);
#ifdef SMP
			} else if (TD_IS_RUNNING(td2) && td != td2) {
				forward_signal(td2);
#endif
			}
			thread_unlock(td2);
		}
		if (wakeup_swapper)
			kick_proc0();
		remaining = calc_remaining(p, mode);

		/*
		 * Maybe we suspended some threads... was it enough?
		 */
		if (remaining == remain_for_mode(mode))
			break;

stopme:
		/*
		 * Wake us up when everyone else has suspended.
		 * In the meantime we suspend as well.
		 */
		thread_suspend_switch(td, p);
		remaining = calc_remaining(p, mode);
	}
	if (mode == SINGLE_EXIT) {
		/*
		 * Convert the process to an unthreaded process.  The
		 * SINGLE_EXIT is called by exit1() or execve(), in
		 * both cases other threads must be retired.
		 */
		KASSERT(p->p_numthreads == 1, ("Unthreading with >1 threads"));
		p->p_singlethread = NULL;
		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_HADTHREADS);

		/*
		 * Wait for any remaining threads to exit cpu_throw().
		 */
		while (p->p_exitthreads != 0) {
			PROC_SUNLOCK(p);
			PROC_UNLOCK(p);
			sched_relinquish(td);
			PROC_LOCK(p);
			PROC_SLOCK(p);
		}
	} else if (mode == SINGLE_BOUNDARY) {
		/*
		 * Wait until all suspended threads are removed from
		 * the processors.  The thread_suspend_check()
		 * increments p_boundary_count while it is still
		 * running, which makes it possible for the execve()
		 * to destroy vmspace while our other threads are
		 * still using the address space.
		 *
		 * We lock the thread, which is only allowed to
		 * succeed after context switch code finished using
		 * the address space.
		 */
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			KASSERT((td2->td_flags & TDF_BOUNDARY) != 0,
			    ("td %p not on boundary", td2));
			KASSERT(TD_IS_SUSPENDED(td2),
			    ("td %p is not suspended", td2));
			thread_unlock(td2);
		}
	}
	PROC_SUNLOCK(p);
	return (0);
}

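/*
 * Report whether the current thread has a pending reason to pass
 * through thread_suspend_check(): either the process is stopping, or
 * the debugger has asked this particular thread to suspend.
 */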
bool
thread_suspend_check_needed(void)
{
	struct proc *p;
	struct thread *td;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	return (P_SHOULDSTOP(p) || ((p->p_flag & P_TRACED) != 0 &&
	    (td->td_dbgflags & TDB_SUSPEND) != 0));
}

/*
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is non-zero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          | returns 0 or 1
 *               | when ST ends       | immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       | returns 1
 *               |                    | immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (thread_suspend_check_needed()) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading.  Single threader need not stop.
			 * XXX Should be safe to access unlocked
			 * as it can only be set to be true by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
			return (EINTR);

		/* Should we go to the user boundary if we didn't come from there? */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
			return (ERESTART);

		/*
		 * Ignore suspend requests if they are deferred.
		 */
		if ((td->td_flags & TDF_SBDRY) != 0) {
			KASSERT(return_instead,
			    ("TDF_SBDRY set for unsafe thread_suspend_check"));
			return (0);
		}

		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
			PROC_UNLOCK(p);

			/*
			 * Allow Linux emulation layer to do some work
			 * before thread suicide.
			 */
			if (__predict_false(p->p_sysent->sv_thread_detach != NULL))
				(p->p_sysent->sv_thread_detach)(td);
			kern_thr_exit(td);
			panic("stopped thread did not exit");
		}

		PROC_SLOCK(p);
		thread_stopped(p);
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount + 1) {
				thread_lock(p->p_singlethread);
				wakeup_swapper = thread_unsuspend_one(
				    p->p_singlethread, p, false);
				thread_unlock(p->p_singlethread);
				if (wakeup_swapper)
					kick_proc0();
			}
		}
		PROC_UNLOCK(p);
		thread_lock(td);
		/*
		 * When a thread suspends, it just
		 * gets taken off all queues.
		 */
		thread_suspend_one(td);
		if (return_instead == 0) {
			p->p_boundary_count++;
			td->td_flags |= TDF_BOUNDARY;
		}
		PROC_SUNLOCK(p);
		mi_switch(SW_INVOL | SWT_SUSPEND, NULL);
		thread_unlock(td);
		PROC_LOCK(p);
	}
	return (0);
}

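/*
 * Suspend the thread td (the caller) and switch away.  Used by
 * thread_single() while it waits for the other threads to comply; both
 * the proc lock and the proc spinlock are held on entry and re-taken
 * before returning.
 */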
void
thread_suspend_switch(struct thread *td, struct proc *p)
{

	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	/*
	 * We implement thread_suspend_one in stages here to avoid
	 * dropping the proc lock while the thread lock is owned.
	 */
	if (p == td->td_proc) {
		thread_stopped(p);
		p->p_suspcount++;
	}
	PROC_UNLOCK(p);
	thread_lock(td);
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
	PROC_SUNLOCK(p);
	DROP_GIANT();
	mi_switch(SW_VOL | SWT_SUSPEND, NULL);
	thread_unlock(td);
	PICKUP_GIANT();
	PROC_LOCK(p);
	PROC_SLOCK(p);
}

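/*
 * Mark a thread suspended.  The caller holds both the thread lock and
 * the proc spinlock, and the thread must not already be suspended.
 */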
void
thread_suspend_one(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
}

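/*
 * Undo a suspension and make the thread runnable again, optionally
 * releasing it from a user-boundary wait.  Returns non-zero if the
 * swapper must be kicked to bring the thread back in.
 */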
static int
thread_unsuspend_one(struct thread *td, struct proc *p, bool boundary)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
	TD_CLR_SUSPENDED(td);
	td->td_flags &= ~TDF_ALLPROCSUSP;
	if (td->td_proc == p) {
		PROC_SLOCK_ASSERT(p, MA_OWNED);
		p->p_suspcount--;
		if (boundary && (td->td_flags & TDF_BOUNDARY) != 0) {
			td->td_flags &= ~TDF_BOUNDARY;
			p->p_boundary_count--;
		}
	}
	return (setrunnable(td));
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	wakeup_swapper = 0;
	if (!P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td, p,
				    true);
			}
			thread_unlock(td);
		}
	} else if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
	    p->p_numthreads == p->p_suspcount) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request.  Now we've downgraded to
		 * single-threaded, let it continue.
		 */
		if (p->p_singlethread->td_proc == p) {
			thread_lock(p->p_singlethread);
			wakeup_swapper = thread_unsuspend_one(
			    p->p_singlethread, p, false);
			thread_unlock(p->p_singlethread);
		}
	}
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * End the single threading mode.
 */
void
thread_single_end(struct proc *p, int mode)
{
	struct thread *td;
	int wakeup_swapper;

	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
	    ("invalid mode %d", mode));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((mode == SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) != 0) ||
	    (mode != SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) == 0),
	    ("mode %d does not match P_TOTAL_STOP", mode));
	KASSERT(mode == SINGLE_ALLPROC || p->p_singlethread == curthread,
	    ("thread_single_end from other thread %p %p",
	    curthread, p->p_singlethread));
	KASSERT(mode != SINGLE_BOUNDARY ||
	    (p->p_flag & P_SINGLE_BOUNDARY) != 0,
	    ("mis-matched SINGLE_BOUNDARY flags %x", p->p_flag));
	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY |
	    P_TOTAL_STOP);
	PROC_SLOCK(p);
	p->p_singlethread = NULL;
	wakeup_swapper = 0;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process.  The single threader must be allowed
	 * to continue however as this is a bad place to stop.
	 */
	if (p->p_numthreads != remain_for_mode(mode) && !P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td, p,
				    mode == SINGLE_BOUNDARY);
			}
			thread_unlock(td);
		}
	}
	KASSERT(mode != SINGLE_BOUNDARY || p->p_boundary_count == 0,
	    ("inconsistent boundary count %d", p->p_boundary_count));
	PROC_SUNLOCK(p);
	if (wakeup_swapper)
		kick_proc0();
}

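/*
 * Locate a thread by number within a locked process; returns NULL if
 * no thread with that tid belongs to the process.
 */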
struct thread *
thread_find(struct proc *p, lwpid_t tid)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_tid == tid)
			break;
	}
	return (td);
}

/*
 * Locate a thread by number and return with its proc lock held.  An
 * entry found deep in its hash chain (more than RUN_THRESH links in) is
 * moved to the head of the chain to speed up future lookups.
 */
struct thread *
tdfind(lwpid_t tid, pid_t pid)
{
#define	RUN_THRESH	16
	struct thread *td;
	int run = 0;

	rw_rlock(&tidhash_lock);
	LIST_FOREACH(td, TIDHASH(tid), td_hash) {
		if (td->td_tid == tid) {
			if (pid != -1 && td->td_proc->p_pid != pid) {
				td = NULL;
				break;
			}
			PROC_LOCK(td->td_proc);
			if (td->td_proc->p_state == PRS_NEW) {
				PROC_UNLOCK(td->td_proc);
				td = NULL;
				break;
			}
			if (run > RUN_THRESH) {
				if (rw_try_upgrade(&tidhash_lock)) {
					LIST_REMOVE(td, td_hash);
					LIST_INSERT_HEAD(TIDHASH(td->td_tid),
					    td, td_hash);
					rw_wunlock(&tidhash_lock);
					return (td);
				}
			}
			break;
		}
		run++;
	}
	rw_runlock(&tidhash_lock);
	return (td);
}

void
tidhash_add(struct thread *td)
{
	rw_wlock(&tidhash_lock);
	LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash);
	rw_wunlock(&tidhash_lock);
}

void
tidhash_remove(struct thread *td)
{
	rw_wlock(&tidhash_lock);
	LIST_REMOVE(td, td_hash);
	rw_wunlock(&tidhash_lock);
}