/*-
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_witness.h"
#include "opt_hwpmc_hooks.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/11.2/sys/kern/kern_thread.c 331727 2018-03-29 04:41:45Z mjoras $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rangelock.h>
#include <sys/resourcevar.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/selinfo.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/turnstile.h>
#include <sys/ktr.h>
#include <sys/rwlock.h>
#include <sys/umtx.h>
#include <sys/vmmeter.h>
#include <sys/cpuset.h>
#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <vm/vm_domain.h>
#include <sys/eventhandler.h>

/*
 * The asserts below verify the stability of the struct thread and
 * struct proc layout, as exposed by the KBI to modules.  On head, the
 * KBI is allowed to drift; changes to the structures must be
 * accompanied by matching updates to the asserts.
 *
 * On the stable branches after KBI freeze, the conditions must not be
 * violated.  Typically new fields are moved to the end of the
 * structures.
 */
#ifdef __amd64__
_Static_assert(offsetof(struct thread, td_flags) == 0xe4,
    "struct thread KBI td_flags");
_Static_assert(offsetof(struct thread, td_pflags) == 0xec,
    "struct thread KBI td_pflags");
_Static_assert(offsetof(struct thread, td_frame) == 0x418,
    "struct thread KBI td_frame");
_Static_assert(offsetof(struct thread, td_emuldata) == 0x4c0,
    "struct thread KBI td_emuldata");
_Static_assert(offsetof(struct proc, p_flag) == 0xb0,
    "struct proc KBI p_flag");
_Static_assert(offsetof(struct proc, p_pid) == 0xbc,
    "struct proc KBI p_pid");
_Static_assert(offsetof(struct proc, p_filemon) == 0x3c0,
    "struct proc KBI p_filemon");
_Static_assert(offsetof(struct proc, p_comm) == 0x3d0,
    "struct proc KBI p_comm");
_Static_assert(offsetof(struct proc, p_emuldata) == 0x4a0,
    "struct proc KBI p_emuldata");
#endif
#ifdef __i386__
_Static_assert(offsetof(struct thread, td_flags) == 0x8c,
    "struct thread KBI td_flags");
_Static_assert(offsetof(struct thread, td_pflags) == 0x94,
    "struct thread KBI td_pflags");
_Static_assert(offsetof(struct thread, td_frame) == 0x2c0,
    "struct thread KBI td_frame");
_Static_assert(offsetof(struct thread, td_emuldata) == 0x30c,
    "struct thread KBI td_emuldata");
_Static_assert(offsetof(struct proc, p_flag) == 0x68,
    "struct proc KBI p_flag");
_Static_assert(offsetof(struct proc, p_pid) == 0x74,
    "struct proc KBI p_pid");
_Static_assert(offsetof(struct proc, p_filemon) == 0x268,
    "struct proc KBI p_filemon");
_Static_assert(offsetof(struct proc, p_comm) == 0x274,
    "struct proc KBI p_comm");
_Static_assert(offsetof(struct proc, p_emuldata) == 0x2f4,
    "struct proc KBI p_emuldata");
#endif

SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE(proc, , , lwp__exit);

/*
 * thread related storage.
 */
static uma_zone_t thread_zone;

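/*
 * Exited threads are parked on this list by thread_zombie() until
 * thread_reap() frees them; the spin mutex below protects the list.
 */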
TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
static struct mtx zombie_lock;
MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN);

static void thread_zombie(struct thread *);
static int thread_unsuspend_one(struct thread *td, struct proc *p,
    bool boundary);

#define	TID_BUFFER_SIZE	1024

struct mtx tid_lock;
static struct unrhdr *tid_unrhdr;
static lwpid_t tid_buffer[TID_BUFFER_SIZE];
static int tid_head, tid_tail;
static MALLOC_DEFINE(M_TIDHASH, "tidhash", "thread hash");

struct tidhashhead *tidhashtbl;
u_long tidhash;
struct rwlock tidhash_lock;

EVENTHANDLER_LIST_DEFINE(thread_ctor);
EVENTHANDLER_LIST_DEFINE(thread_dtor);
EVENTHANDLER_LIST_DEFINE(thread_init);
EVENTHANDLER_LIST_DEFINE(thread_fini);

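/*
 * Allocate a thread ID, trying the unit-number allocator first and
 * falling back to the buffer of recently freed TIDs.
 */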
static lwpid_t
tid_alloc(void)
{
	lwpid_t tid;

	tid = alloc_unr(tid_unrhdr);
	if (tid != -1)
		return (tid);
	mtx_lock(&tid_lock);
	if (tid_head == tid_tail) {
		mtx_unlock(&tid_lock);
		return (-1);
	}
	tid = tid_buffer[tid_head];
	tid_head = (tid_head + 1) % TID_BUFFER_SIZE;
	mtx_unlock(&tid_lock);
	return (tid);
}

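/*
 * Release a thread ID.  Freed TIDs go into a small ring buffer rather
 * than straight back to the allocator, which delays their reuse; once
 * the buffer is full, the oldest entry is handed back to the
 * unit-number allocator.
 */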
static void
tid_free(lwpid_t tid)
{
	lwpid_t tmp_tid = -1;

	mtx_lock(&tid_lock);
	if ((tid_tail + 1) % TID_BUFFER_SIZE == tid_head) {
		tmp_tid = tid_buffer[tid_head];
		tid_head = (tid_head + 1) % TID_BUFFER_SIZE;
	}
	tid_buffer[tid_tail] = tid;
	tid_tail = (tid_tail + 1) % TID_BUFFER_SIZE;
	mtx_unlock(&tid_lock);
	if (tmp_tid != -1)
		free_unr(tid_unrhdr, tmp_tid);
}

/*
 * Prepare a thread for use.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_oncpu = NOCPU;

	td->td_tid = tid_alloc();

	/*
	 * Note that td_critnest begins life as 1 because the thread is not
	 * running and is thereby implicitly waiting to be on the receiving
	 * end of a context switch.
	 */
	td->td_critnest = 1;
	td->td_lend_user_pri = PRI_MAX;
	EVENTHANDLER_DIRECT_INVOKE(thread_ctor, td);
#ifdef AUDIT
	audit_thread_alloc(td);
#endif
	umtx_thread_alloc(td);
	return (0);
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
#ifdef AUDIT
	audit_thread_free(td);
#endif
	/* Free all OSD associated with this thread. */
	osd_thread_exit(td);
	td_softdep_cleanup(td);
	MPASS(td->td_su == NULL);

	EVENTHANDLER_DIRECT_INVOKE(thread_dtor, td);
	tid_free(td->td_tid);
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static int
thread_init(void *mem, int size, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;

	td->td_sleepqueue = sleepq_alloc();
	td->td_turnstile = turnstile_alloc();
	td->td_rlqe = NULL;
	EVENTHANDLER_DIRECT_INVOKE(thread_init, td);
	umtx_thread_init(td);
	td->td_kstack = 0;
	td->td_sel = NULL;
	return (0);
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	EVENTHANDLER_DIRECT_INVOKE(thread_fini, td);
	rlqentry_free(td->td_rlqe);
	turnstile_free(td->td_turnstile);
	sleepq_free(td->td_sleepqueue);
	umtx_thread_fini(td);
	seltdfini(td);
}

/*
 * For a newly created process, link up all the structures and its
 * initial thread.  Called from:
 *    {arch}/{arch}/machdep.c   {arch}_init(), init386() etc.
 *    proc_dtor() (should go away)
 *    proc_init()
 */
void
proc_linkup0(struct proc *p, struct thread *td)
{
	TAILQ_INIT(&p->p_threads);		/* all threads in proc */
	proc_linkup(p, td);
}

void
proc_linkup(struct proc *p, struct thread *td)
{

	sigqueue_init(&p->p_sigqueue, p);
	p->p_ksi = ksiginfo_alloc(1);
	if (p->p_ksi != NULL) {
		/* XXX p_ksi may be null if ksiginfo zone is not ready */
		p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
	}
	LIST_INIT(&p->p_mqnotifier);
	p->p_numthreads = 0;
	thread_link(td, p);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

	mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);

	/*
	 * pid_max cannot be greater than PID_MAX, so TIDs allocated here
	 * never collide with process IDs; one number (PID_MAX + 1) is
	 * left free for thread0.
	 */
	tid_unrhdr = new_unrhdr(PID_MAX + 2, INT_MAX, &tid_lock);

	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    32 - 1, UMA_ZONE_NOFREE);
	tidhashtbl = hashinit(maxproc / 2, M_TIDHASH, &tidhash);
	rw_init(&tidhash_lock, "tidhash");
}

/*
 * Place an unused thread on the zombie list.
 * Use the slpq as that must be unused by now.
 */
void
thread_zombie(struct thread *td)
{
	mtx_lock_spin(&zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq);
	mtx_unlock_spin(&zombie_lock);
}

/*
 * Release a thread that has exited after cpu_throw().
 */
void
thread_stash(struct thread *td)
{
	atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
	thread_zombie(td);
}

/*
 * Reap zombie resources.
 */
void
thread_reap(void)
{
	struct thread *td_first, *td_next;

	/*
	 * Don't even bother to lock if none at this instant,
	 * we really don't care about the next instant.
	 */
	if (!TAILQ_EMPTY(&zombie_threads)) {
		mtx_lock_spin(&zombie_lock);
		td_first = TAILQ_FIRST(&zombie_threads);
		if (td_first)
			TAILQ_INIT(&zombie_threads);
		mtx_unlock_spin(&zombie_lock);
		while (td_first) {
			td_next = TAILQ_NEXT(td_first, td_slpq);
			thread_cow_free(td_first);
			thread_free(td_first);
			td_first = td_next;
		}
	}
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(int pages)
{
	struct thread *td;

	thread_reap();		/* check if any zombies to get */

	td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK);
	KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
	if (!vm_thread_new(td, pages)) {
		uma_zfree(thread_zone, td);
		return (NULL);
	}
	cpu_thread_alloc(td);
	vm_domain_policy_init(&td->td_vm_dom_policy);
	return (td);
}

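/*
 * Attach a kernel stack to a thread that was allocated without one.
 * Returns 1 on success and 0 if the stack could not be allocated.
 */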
int
thread_alloc_stack(struct thread *td, int pages)
{

	KASSERT(td->td_kstack == 0,
	    ("thread_alloc_stack called on a thread with kstack"));
	if (!vm_thread_new(td, pages))
		return (0);
	cpu_thread_alloc(td);
	return (1);
}

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{

	lock_profile_thread_exit(td);
	if (td->td_cpuset)
		cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_free(td);
	if (td->td_kstack != 0)
		vm_thread_dispose(td);
	vm_domain_policy_cleanup(&td->td_vm_dom_policy);
	callout_drain(&td->td_slpcallout);
	uma_zfree(thread_zone, td);
}

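/*
 * Threads cache references to the process credentials and resource
 * limits ("copy-on-write" state).  p_cowgen changes whenever the
 * process-wide copies do, and thread_cow_update() below resyncs a
 * thread whose td_cowgen has fallen behind.
 */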
void
thread_cow_get_proc(struct thread *newtd, struct proc *p)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	newtd->td_ucred = crhold(p->p_ucred);
	newtd->td_limit = lim_hold(p->p_limit);
	newtd->td_cowgen = p->p_cowgen;
}

void
thread_cow_get(struct thread *newtd, struct thread *td)
{

	newtd->td_ucred = crhold(td->td_ucred);
	newtd->td_limit = lim_hold(td->td_limit);
	newtd->td_cowgen = td->td_cowgen;
}

void
thread_cow_free(struct thread *td)
{

	if (td->td_ucred != NULL)
		crfree(td->td_ucred);
	if (td->td_limit != NULL)
		lim_free(td->td_limit);
}

void
thread_cow_update(struct thread *td)
{
	struct proc *p;
	struct ucred *oldcred;
	struct plimit *oldlimit;

	p = td->td_proc;
	oldcred = NULL;
	oldlimit = NULL;
	PROC_LOCK(p);
	if (td->td_ucred != p->p_ucred) {
		oldcred = td->td_ucred;
		td->td_ucred = crhold(p->p_ucred);
	}
	if (td->td_limit != p->p_limit) {
		oldlimit = td->td_limit;
		td->td_limit = lim_hold(p->p_limit);
	}
	td->td_cowgen = p->p_cowgen;
	PROC_UNLOCK(p);
	if (oldcred != NULL)
		crfree(oldcred);
	if (oldlimit != NULL)
		lim_free(oldlimit);
}

/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder.  This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 */
void
thread_exit(void)
{
	uint64_t runtime, new_switchtime;
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&Giant, MA_NOTOWNED);

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
	    (long)p->p_pid, td->td_name);
	SDT_PROBE0(proc, , , lwp__exit);
	KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));

#ifdef AUDIT
	AUDIT_SYSCALL_EXIT(0, td);
#endif
	/*
	 * Drop FPU and debug register state storage, and any other
	 * architecture-specific resources that would not be present
	 * in a new, untouched process.
	 */
	cpu_thread_exit(td);

	/*
	 * The last thread is left attached to the process so that the
	 * whole bundle gets recycled.  Skip all this stuff if we never
	 * had threads.  exit1() clears all signs of other threads when
	 * it goes to single threading, so the last thread always takes
	 * the short path.
	 */
	if (p->p_flag & P_HADTHREADS) {
		if (p->p_numthreads > 1) {
			atomic_add_int(&td->td_proc->p_exitthreads, 1);
			thread_unlink(td);
			td2 = FIRST_THREAD_IN_PROC(p);
			sched_exit_thread(td2, td);

			/*
			 * The test below is NOT true if we are the
			 * sole exiting thread.  P_STOPPED_SINGLE is unset
			 * in exit1() after it is the only survivor.
			 */
			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
				if (p->p_numthreads == p->p_suspcount) {
					thread_lock(p->p_singlethread);
					wakeup_swapper = thread_unsuspend_one(
					    p->p_singlethread, p, false);
					thread_unlock(p->p_singlethread);
					if (wakeup_swapper)
						kick_proc0();
				}
			}

			PCPU_SET(deadthread, td);
		} else {
			/*
			 * The last thread is exiting, but not through exit().
			 */
			panic("thread_exit: Last thread exiting on its own");
		}
	}
#ifdef HWPMC_HOOKS
	/*
	 * If this thread is part of a process that is being tracked by
	 * hwpmc(4), inform the module of the thread's impending exit.
	 */
	if (PMC_PROC_IS_USING_PMCS(td->td_proc))
		PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
#endif
	PROC_UNLOCK(p);
	PROC_STATLOCK(p);
	thread_lock(td);
	PROC_SUNLOCK(p);

	/* Do the same timestamp bookkeeping that mi_switch() would do. */
	new_switchtime = cpu_ticks();
	runtime = new_switchtime - PCPU_GET(switchtime);
	td->td_runtime += runtime;
	td->td_incruntime += runtime;
	PCPU_SET(switchtime, new_switchtime);
	PCPU_SET(switchticks, ticks);
	PCPU_INC(cnt.v_swtch);

	/* Save our resource usage in our process. */
	td->td_ru.ru_nvcsw++;
	ruxagg(p, td);
	rucollect(&p->p_ru, &td->td_ru);
	PROC_STATUNLOCK(p);

	td->td_state = TDS_INACTIVE;
#ifdef WITNESS
	witness_thread_exit(td);
#endif
	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
	sched_throw(td);
	panic("I'm a teapot!");
	/* NOTREACHED */
}

/*
 * Do any thread-specific cleanups that may be needed in wait().
 * Called with Giant, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	mtx_assert(&Giant, MA_NOTOWNED);
	KASSERT(p->p_numthreads == 1, ("multiple threads in thread_wait()"));
	KASSERT(p->p_exitthreads == 0, ("p_exitthreads leaking"));
	td = FIRST_THREAD_IN_PROC(p);
	/* Lock the last thread so we spin until it exits cpu_throw(). */
	thread_lock(td);
	thread_unlock(td);
	lock_profile_thread_exit(td);
	cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_clean(td);
	thread_cow_free(td);
	callout_drain(&td->td_slpcallout);
	thread_reap();		/* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 */
void
thread_link(struct thread *td, struct proc *p)
{

	/*
	 * XXX This can't be enabled because it's called for proc0 before
	 * its lock has been created.
	 * PROC_LOCK_ASSERT(p, MA_OWNED);
	 */
	td->td_state = TDS_INACTIVE;
	td->td_proc = p;
	td->td_flags = TDF_INMEM;

	LIST_INIT(&td->td_contested);
	LIST_INIT(&td->td_lprof[0]);
	LIST_INIT(&td->td_lprof[1]);
	sigqueue_init(&td->td_sigqueue, p);
	callout_init(&td->td_slpcallout, 1);
	TAILQ_INSERT_TAIL(&p->p_threads, td, td_plist);
	p->p_numthreads++;
}

/*
 * Called from:
 *  thread_exit()
 */
void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	/* could clear a few other things here */
	/* Must NOT clear links to proc! */
}

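/*
 * Compute the number of threads that still keep the process from
 * being single-threaded under the given mode.
 */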
static int
calc_remaining(struct proc *p, int mode)
{
	int remaining;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	if (mode == SINGLE_EXIT)
		remaining = p->p_numthreads;
	else if (mode == SINGLE_BOUNDARY)
		remaining = p->p_numthreads - p->p_boundary_count;
	else if (mode == SINGLE_NO_EXIT || mode == SINGLE_ALLPROC)
		remaining = p->p_numthreads - p->p_suspcount;
	else
		panic("calc_remaining: wrong mode %d", mode);
	return (remaining);
}

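/*
 * The number of threads expected to remain once single-threading is
 * achieved: zero for SINGLE_ALLPROC, where the requesting thread
 * belongs to another process, and otherwise just the caller.
 */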
static int
remain_for_mode(int mode)
{

	return (mode == SINGLE_ALLPROC ? 0 : 1);
}

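/*
 * Nudge an inhibited thread toward the single-threading goal by
 * unsuspending it or aborting its sleep, as the mode requires.
 */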
static int
weed_inhib(int mode, struct thread *td2, struct proc *p)
{
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td2, MA_OWNED);

	wakeup_swapper = 0;
	switch (mode) {
	case SINGLE_EXIT:
		if (TD_IS_SUSPENDED(td2))
			wakeup_swapper |= thread_unsuspend_one(td2, p, true);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
			wakeup_swapper |= sleepq_abort(td2, EINTR);
		break;
	case SINGLE_BOUNDARY:
	case SINGLE_NO_EXIT:
		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & TDF_BOUNDARY) == 0)
			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
			wakeup_swapper |= sleepq_abort(td2, ERESTART);
		break;
	case SINGLE_ALLPROC:
		/*
		 * ALLPROC suspend tries to avoid spurious EINTR for
		 * threads sleeping interruptibly, by suspending the
		 * thread directly, similarly to sig_suspend_threads().
		 * Since such a sleep is not performed at the user
		 * boundary, the TDF_BOUNDARY flag is not set, and
		 * TDF_ALLPROCSUSP is used to avoid immediate un-suspend.
		 */
		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & (TDF_BOUNDARY |
		    TDF_ALLPROCSUSP)) == 0)
			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0) {
			if ((td2->td_flags & TDF_SBDRY) == 0) {
				thread_suspend_one(td2);
				td2->td_flags |= TDF_ALLPROCSUSP;
			} else {
				wakeup_swapper |= sleepq_abort(td2, ERESTART);
			}
		}
		break;
	}
	return (wakeup_swapper);
}

/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar).  Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single-threaded in the suspend mode when
 * there are no threads in user mode.  Threads in the kernel must be
 * allowed to continue until they get to the user boundary.  They may even
 * copy out their return values and data before suspending.  They may
 * however be accelerated in reaching the user boundary as we will wake
 * up any sleeping threads that are interruptible (PCATCH).
 */
int
thread_single(struct proc *p, int mode)
{
	struct thread *td;
	struct thread *td2;
	int remaining, wakeup_swapper;

	td = curthread;
	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
	    ("invalid mode %d", mode));
	/*
	 * If allowing non-ALLPROC singlethreading for non-curproc
	 * callers, calc_remaining() and remain_for_mode() should be
	 * adjusted to also account for td->td_proc != p.  For now
	 * this is not implemented because it is not used.
	 */
	KASSERT((mode == SINGLE_ALLPROC && td->td_proc != p) ||
	    (mode != SINGLE_ALLPROC && td->td_proc == p),
	    ("mode %d proc %p curproc %p", mode, p, td->td_proc));
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);

	if ((p->p_flag & P_HADTHREADS) == 0 && mode != SINGLE_ALLPROC)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread != NULL && p->p_singlethread != td)
		return (1);

	if (mode == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
		p->p_flag &= ~P_SINGLE_BOUNDARY;
	} else {
		p->p_flag &= ~P_SINGLE_EXIT;
		if (mode == SINGLE_BOUNDARY)
			p->p_flag |= P_SINGLE_BOUNDARY;
		else
			p->p_flag &= ~P_SINGLE_BOUNDARY;
	}
	if (mode == SINGLE_ALLPROC)
		p->p_flag |= P_TOTAL_STOP;
	p->p_flag |= P_STOPPED_SINGLE;
	PROC_SLOCK(p);
	p->p_singlethread = td;
	remaining = calc_remaining(p, mode);
	while (remaining != remain_for_mode(mode)) {
		if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
			goto stopme;
		wakeup_swapper = 0;
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
			if (TD_IS_INHIBITED(td2)) {
				wakeup_swapper |= weed_inhib(mode, td2, p);
#ifdef SMP
			} else if (TD_IS_RUNNING(td2) && td != td2) {
				forward_signal(td2);
#endif
			}
			thread_unlock(td2);
		}
		if (wakeup_swapper)
			kick_proc0();
		remaining = calc_remaining(p, mode);

		/*
		 * Maybe we suspended some threads.. was it enough?
		 */
		if (remaining == remain_for_mode(mode))
			break;

stopme:
		/*
		 * Wake us up when everyone else has suspended.
		 * In the mean time we suspend as well.
		 */
		thread_suspend_switch(td, p);
		remaining = calc_remaining(p, mode);
	}
	if (mode == SINGLE_EXIT) {
		/*
		 * Convert the process to an unthreaded process.
		 * SINGLE_EXIT is requested from exit1() or execve();
		 * in both cases the other threads must be retired.
		 */
		KASSERT(p->p_numthreads == 1, ("Unthreading with >1 threads"));
		p->p_singlethread = NULL;
		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_HADTHREADS);

		/*
		 * Wait for any remaining threads to exit cpu_throw().
		 */
		while (p->p_exitthreads != 0) {
			PROC_SUNLOCK(p);
			PROC_UNLOCK(p);
			sched_relinquish(td);
			PROC_LOCK(p);
			PROC_SLOCK(p);
		}
	} else if (mode == SINGLE_BOUNDARY) {
		/*
		 * Wait until all suspended threads are removed from
		 * the processors.  The thread_suspend_check()
		 * increments p_boundary_count while it is still
		 * running, which makes it possible for the execve()
		 * to destroy vmspace while our other threads are
		 * still using the address space.
		 *
		 * We lock the thread, which is only allowed to
		 * succeed after context switch code finished using
		 * the address space.
		 */
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			KASSERT((td2->td_flags & TDF_BOUNDARY) != 0,
			    ("td %p not on boundary", td2));
			KASSERT(TD_IS_SUSPENDED(td2),
			    ("td %p is not suspended", td2));
			thread_unlock(td2);
		}
	}
	PROC_SUNLOCK(p);
	return (0);
}

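/*
 * Report whether the current thread must call thread_suspend_check():
 * either a stop is pending on the process or the debugger has
 * requested that this thread suspend.
 */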
bool
thread_suspend_check_needed(void)
{
	struct proc *p;
	struct thread *td;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	return (P_SHOULDSTOP(p) || ((p->p_flag & P_TRACED) != 0 &&
	    (td->td_dbgflags & TDB_SUSPEND) != 0));
}

/*
 * Called from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 *
 * The "return_instead" argument tells the function whether it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.  When it is nonzero, the thread must be able to accept
 * 0 (caller may continue) or an error code (caller must abort) as a
 * result.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          |   returns 0 or 1
 *               | when ST ends       |   immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       |   returns 1
 *               |                    |   immediately
 *
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (thread_suspend_check_needed()) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading.  Single threader need not stop.
			 * It is safe to access p->p_singlethread unlocked
			 * because it can only be set to our address by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
			return (EINTR);

		/* Should we goto user boundary if we didn't come from there? */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
			return (ERESTART);

		/*
		 * Ignore suspend requests if they are deferred.
		 */
		if ((td->td_flags & TDF_SBDRY) != 0) {
			KASSERT(return_instead,
			    ("TDF_SBDRY set for unsafe thread_suspend_check"));
			KASSERT((td->td_flags & (TDF_SEINTR | TDF_SERESTART)) !=
			    (TDF_SEINTR | TDF_SERESTART),
			    ("both TDF_SEINTR and TDF_SERESTART"));
			return (TD_SBDRY_INTR(td) ? TD_SBDRY_ERRNO(td) : 0);
		}

		/*
		 * If the process is waiting for us to exit,
		 * this thread should simply exit.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
			PROC_UNLOCK(p);

			/*
			 * Allow the Linux emulation layer to do some work
			 * before the thread exits.
			 */
			if (__predict_false(p->p_sysent->sv_thread_detach != NULL))
				(p->p_sysent->sv_thread_detach)(td);
			umtx_thread_exit(td);
			kern_thr_exit(td);
			panic("stopped thread did not exit");
		}

		PROC_SLOCK(p);
		thread_stopped(p);
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount + 1) {
				thread_lock(p->p_singlethread);
				wakeup_swapper = thread_unsuspend_one(
				    p->p_singlethread, p, false);
				thread_unlock(p->p_singlethread);
				if (wakeup_swapper)
					kick_proc0();
			}
		}
		PROC_UNLOCK(p);
		thread_lock(td);
		/*
		 * When a thread suspends, it just
		 * gets taken off all queues.
		 */
		thread_suspend_one(td);
		if (return_instead == 0) {
			p->p_boundary_count++;
			td->td_flags |= TDF_BOUNDARY;
		}
		PROC_SUNLOCK(p);
		mi_switch(SW_INVOL | SWT_SUSPEND, NULL);
		thread_unlock(td);
		PROC_LOCK(p);
	}
	return (0);
}

void
thread_suspend_switch(struct thread *td, struct proc *p)
{

	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	/*
	 * We implement thread_suspend_one in stages here to avoid
	 * dropping the proc lock while the thread lock is owned.
	 */
	if (p == td->td_proc) {
		thread_stopped(p);
		p->p_suspcount++;
	}
	PROC_UNLOCK(p);
	thread_lock(td);
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
	PROC_SUNLOCK(p);
	DROP_GIANT();
	mi_switch(SW_VOL | SWT_SUSPEND, NULL);
	thread_unlock(td);
	PICKUP_GIANT();
	PROC_LOCK(p);
	PROC_SLOCK(p);
}

void
thread_suspend_one(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
}

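/*
 * Clear a thread's suspension and make it runnable again, keeping the
 * suspend and boundary counts consistent.  The return value from
 * setrunnable() reports whether the swapper must be kicked.
 */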
static int
thread_unsuspend_one(struct thread *td, struct proc *p, bool boundary)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
	TD_CLR_SUSPENDED(td);
	td->td_flags &= ~TDF_ALLPROCSUSP;
	if (td->td_proc == p) {
		PROC_SLOCK_ASSERT(p, MA_OWNED);
		p->p_suspcount--;
		if (boundary && (td->td_flags & TDF_BOUNDARY) != 0) {
			td->td_flags &= ~TDF_BOUNDARY;
			p->p_boundary_count--;
		}
	}
	return (setrunnable(td));
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	wakeup_swapper = 0;
	if (!P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td, p,
				    true);
			}
			thread_unlock(td);
		}
	} else if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
	    p->p_numthreads == p->p_suspcount) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request.  Now we've downgraded to single-threaded,
		 * let it continue.
		 */
		if (p->p_singlethread->td_proc == p) {
			thread_lock(p->p_singlethread);
			wakeup_swapper = thread_unsuspend_one(
			    p->p_singlethread, p, false);
			thread_unlock(p->p_singlethread);
		}
	}
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * End the single-threading mode.
 */
void
thread_single_end(struct proc *p, int mode)
{
	struct thread *td;
	int wakeup_swapper;

	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
	    ("invalid mode %d", mode));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((mode == SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) != 0) ||
	    (mode != SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) == 0),
	    ("mode %d does not match P_TOTAL_STOP", mode));
	KASSERT(mode == SINGLE_ALLPROC || p->p_singlethread == curthread,
	    ("thread_single_end from other thread %p %p",
	    curthread, p->p_singlethread));
	KASSERT(mode != SINGLE_BOUNDARY ||
	    (p->p_flag & P_SINGLE_BOUNDARY) != 0,
	    ("mis-matched SINGLE_BOUNDARY flags %x", p->p_flag));
	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY |
	    P_TOTAL_STOP);
	PROC_SLOCK(p);
	p->p_singlethread = NULL;
	wakeup_swapper = 0;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process.  The single threader must be allowed
	 * to continue however as this is a bad place to stop.
	 */
	if (p->p_numthreads != remain_for_mode(mode) && !P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td, p,
				    mode == SINGLE_BOUNDARY);
			}
			thread_unlock(td);
		}
	}
	KASSERT(mode != SINGLE_BOUNDARY || p->p_boundary_count == 0,
	    ("inconsistent boundary count %d", p->p_boundary_count));
	PROC_SUNLOCK(p);
	if (wakeup_swapper)
		kick_proc0();
}

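/*
 * Locate a thread in a process by its thread ID.  The proc lock must
 * be held; NULL is returned if no thread has the given TID.
 */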
struct thread *
thread_find(struct proc *p, lwpid_t tid)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_tid == tid)
			break;
	}
	return (td);
}

/* Locate a thread by number; return with proc lock held. */
struct thread *
tdfind(lwpid_t tid, pid_t pid)
{
#define	RUN_THRESH	16
	struct thread *td;
	int run = 0;

	rw_rlock(&tidhash_lock);
	LIST_FOREACH(td, TIDHASH(tid), td_hash) {
		if (td->td_tid == tid) {
			if (pid != -1 && td->td_proc->p_pid != pid) {
				td = NULL;
				break;
			}
			PROC_LOCK(td->td_proc);
			if (td->td_proc->p_state == PRS_NEW) {
				PROC_UNLOCK(td->td_proc);
				td = NULL;
				break;
			}
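			/*
			 * After a long walk down the hash chain, move
			 * the entry to the front; this needs the write
			 * lock, so skip the optimization if the lock
			 * upgrade fails.
			 */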
			if (run > RUN_THRESH) {
				if (rw_try_upgrade(&tidhash_lock)) {
					LIST_REMOVE(td, td_hash);
					LIST_INSERT_HEAD(TIDHASH(td->td_tid),
					    td, td_hash);
					rw_wunlock(&tidhash_lock);
					return (td);
				}
			}
			break;
		}
		run++;
	}
	rw_runlock(&tidhash_lock);
	return (td);
}

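/*
 * Maintain the global TID hash table consulted by tdfind().
 */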
void
tidhash_add(struct thread *td)
{
	rw_wlock(&tidhash_lock);
	LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash);
	rw_wunlock(&tidhash_lock);
}

void
tidhash_remove(struct thread *td)
{
	rw_wlock(&tidhash_lock);
	LIST_REMOVE(td, td_hash);
	rw_wunlock(&tidhash_lock);
}