/*-
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_witness.h"
#include "opt_hwpmc_hooks.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rangelock.h>
#include <sys/resourcevar.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/selinfo.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/turnstile.h>
#include <sys/ktr.h>
#include <sys/rwlock.h>
#include <sys/umtx.h>
#include <sys/vmmeter.h>
#include <sys/cpuset.h>
#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <vm/vm_domain.h>
#include <sys/eventhandler.h>

/*
 * Asserts below verify the stability of struct thread and struct proc
 * layout, as exposed by KBI to modules.  On head, the KBI is allowed
 * to drift; changes to the structures must be accompanied by updates
 * to the asserts.
 *
 * On the stable branches after KBI freeze, the conditions must not be
 * violated.  Typically new fields are moved to the end of the
 * structures.
 */
#ifdef __amd64__
_Static_assert(offsetof(struct thread, td_flags) == 0xe4,
    "struct thread KBI td_flags");
_Static_assert(offsetof(struct thread, td_pflags) == 0xec,
    "struct thread KBI td_pflags");
_Static_assert(offsetof(struct thread, td_frame) == 0x418,
    "struct thread KBI td_frame");
_Static_assert(offsetof(struct thread, td_emuldata) == 0x4c0,
    "struct thread KBI td_emuldata");
_Static_assert(offsetof(struct proc, p_flag) == 0xb0,
    "struct proc KBI p_flag");
_Static_assert(offsetof(struct proc, p_pid) == 0xbc,
    "struct proc KBI p_pid");
_Static_assert(offsetof(struct proc, p_filemon) == 0x3c0,
    "struct proc KBI p_filemon");
_Static_assert(offsetof(struct proc, p_comm) == 0x3d0,
    "struct proc KBI p_comm");
_Static_assert(offsetof(struct proc, p_emuldata) == 0x4a0,
    "struct proc KBI p_emuldata");
#endif
#ifdef __i386__
_Static_assert(offsetof(struct thread, td_flags) == 0x8c,
    "struct thread KBI td_flags");
_Static_assert(offsetof(struct thread, td_pflags) == 0x94,
    "struct thread KBI td_pflags");
_Static_assert(offsetof(struct thread, td_frame) == 0x2c0,
    "struct thread KBI td_frame");
_Static_assert(offsetof(struct thread, td_emuldata) == 0x30c,
    "struct thread KBI td_emuldata");
_Static_assert(offsetof(struct proc, p_flag) == 0x68,
    "struct proc KBI p_flag");
_Static_assert(offsetof(struct proc, p_pid) == 0x74,
    "struct proc KBI p_pid");
_Static_assert(offsetof(struct proc, p_filemon) == 0x268,
    "struct proc KBI p_filemon");
_Static_assert(offsetof(struct proc, p_comm) == 0x274,
    "struct proc KBI p_comm");
_Static_assert(offsetof(struct proc, p_emuldata) == 0x2f4,
    "struct proc KBI p_emuldata");
#endif

SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE(proc, , , lwp__exit);

/*
 * thread related storage.
 */
static uma_zone_t thread_zone;

TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
static struct mtx zombie_lock;
MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN);

static void thread_zombie(struct thread *);
static int thread_unsuspend_one(struct thread *td, struct proc *p,
    bool boundary);

#define TID_BUFFER_SIZE	1024

struct mtx tid_lock;
static struct unrhdr *tid_unrhdr;
static lwpid_t tid_buffer[TID_BUFFER_SIZE];
static int tid_head, tid_tail;
static MALLOC_DEFINE(M_TIDHASH, "tidhash", "thread hash");

struct tidhashhead *tidhashtbl;
u_long tidhash;
struct rwlock tidhash_lock;

EVENTHANDLER_LIST_DEFINE(thread_ctor);
EVENTHANDLER_LIST_DEFINE(thread_dtor);
EVENTHANDLER_LIST_DEFINE(thread_init);
EVENTHANDLER_LIST_DEFINE(thread_fini);

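/*
 * TIDs are handed out by the unr(9) allocator; tid_alloc() falls back
 * to the ring buffer of recently freed TIDs (tid_buffer) only when
 * unr(9) is exhausted.  tid_free() parks a TID in the ring and evicts
 * the oldest entry back to unr(9) once the ring is full, so freed TIDs
 * are not reused immediately.
 */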
static lwpid_t
tid_alloc(void)
{
	lwpid_t	tid;

	tid = alloc_unr(tid_unrhdr);
	if (tid != -1)
		return (tid);
	mtx_lock(&tid_lock);
	if (tid_head == tid_tail) {
		mtx_unlock(&tid_lock);
		return (-1);
	}
	tid = tid_buffer[tid_head];
	tid_head = (tid_head + 1) % TID_BUFFER_SIZE;
	mtx_unlock(&tid_lock);
	return (tid);
}

static void
tid_free(lwpid_t tid)
{
	lwpid_t tmp_tid = -1;

	mtx_lock(&tid_lock);
	if ((tid_tail + 1) % TID_BUFFER_SIZE == tid_head) {
		tmp_tid = tid_buffer[tid_head];
		tid_head = (tid_head + 1) % TID_BUFFER_SIZE;
	}
	tid_buffer[tid_tail] = tid;
	tid_tail = (tid_tail + 1) % TID_BUFFER_SIZE;
	mtx_unlock(&tid_lock);
	if (tmp_tid != -1)
		free_unr(tid_unrhdr, tmp_tid);
}

/*
 * Prepare a thread for use.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_oncpu = NOCPU;

	td->td_tid = tid_alloc();

	/*
	 * Note that td_critnest begins life as 1 because the thread is not
	 * running and is thereby implicitly waiting to be on the receiving
	 * end of a context switch.
	 */
	td->td_critnest = 1;
	td->td_lend_user_pri = PRI_MAX;
	EVENTHANDLER_DIRECT_INVOKE(thread_ctor, td);
#ifdef AUDIT
	audit_thread_alloc(td);
#endif
	umtx_thread_alloc(td);
	return (0);
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
#ifdef AUDIT
	audit_thread_free(td);
#endif
	/* Free all OSD associated to this thread. */
	osd_thread_exit(td);
	td_softdep_cleanup(td);
	MPASS(td->td_su == NULL);

	EVENTHANDLER_DIRECT_INVOKE(thread_dtor, td);
	tid_free(td->td_tid);
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static int
thread_init(void *mem, int size, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;

	td->td_sleepqueue = sleepq_alloc();
	td->td_turnstile = turnstile_alloc();
	td->td_rlqe = NULL;
	EVENTHANDLER_DIRECT_INVOKE(thread_init, td);
	umtx_thread_init(td);
	td->td_kstack = 0;
	td->td_sel = NULL;
	return (0);
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	EVENTHANDLER_DIRECT_INVOKE(thread_fini, td);
	rlqentry_free(td->td_rlqe);
	turnstile_free(td->td_turnstile);
	sleepq_free(td->td_sleepqueue);
	umtx_thread_fini(td);
	seltdfini(td);
}

/*
 * For a newly created process, link up all the structures and its
 * initial threads etc.  Called from:
 *	{arch}/{arch}/machdep.c   {arch}_init(), init386() etc.
 *	proc_dtor() (should go away)
 *	proc_init()
 */
void
proc_linkup0(struct proc *p, struct thread *td)
{
	TAILQ_INIT(&p->p_threads);	     /* all threads in proc */
	proc_linkup(p, td);
}

void
proc_linkup(struct proc *p, struct thread *td)
{

	sigqueue_init(&p->p_sigqueue, p);
	p->p_ksi = ksiginfo_alloc(1);
	if (p->p_ksi != NULL) {
		/* XXX p_ksi may be null if ksiginfo zone is not ready */
		p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
	}
	LIST_INIT(&p->p_mqnotifier);
	p->p_numthreads = 0;
	thread_link(td, p);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

	mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);

	/*
	 * pid_max cannot be greater than PID_MAX, so TIDs handed out
	 * here never collide with PIDs.  Leave one number (PID_MAX + 1)
	 * for thread0.
	 */
	tid_unrhdr = new_unrhdr(PID_MAX + 2, INT_MAX, &tid_lock);

	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    32 - 1, UMA_ZONE_NOFREE);
	tidhashtbl = hashinit(maxproc / 2, M_TIDHASH, &tidhash);
	rw_init(&tidhash_lock, "tidhash");
}

/*
 * Place an unused thread on the zombie list.
 * Use the slpq as that must be unused by now.
 */
void
thread_zombie(struct thread *td)
{
	mtx_lock_spin(&zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq);
	mtx_unlock_spin(&zombie_lock);
}

/*
 * Release a thread that has exited after cpu_throw().
 */
void
thread_stash(struct thread *td)
{
	atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
	thread_zombie(td);
}

/*
 * Reap zombie resources.
 */
void
thread_reap(void)
{
	struct thread *td_first, *td_next;

	/*
	 * Don't even bother to lock if none at this instant;
	 * we really don't care about the next instant.
	 */
	if (!TAILQ_EMPTY(&zombie_threads)) {
		mtx_lock_spin(&zombie_lock);
		td_first = TAILQ_FIRST(&zombie_threads);
		if (td_first)
			TAILQ_INIT(&zombie_threads);
		mtx_unlock_spin(&zombie_lock);
		while (td_first) {
			td_next = TAILQ_NEXT(td_first, td_slpq);
			thread_cow_free(td_first);
			thread_free(td_first);
			td_first = td_next;
		}
	}
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(int pages)
{
	struct thread *td;

	thread_reap();	/* check if any zombies to get */

	td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK);
	KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
	if (!vm_thread_new(td, pages)) {
		uma_zfree(thread_zone, td);
		return (NULL);
	}
	cpu_thread_alloc(td);
	vm_domain_policy_init(&td->td_vm_dom_policy);
	return (td);
}

int
thread_alloc_stack(struct thread *td, int pages)
{

	KASSERT(td->td_kstack == 0,
	    ("thread_alloc_stack called on a thread with kstack"));
	if (!vm_thread_new(td, pages))
		return (0);
	cpu_thread_alloc(td);
	return (1);
}

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{

	lock_profile_thread_exit(td);
	if (td->td_cpuset)
		cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_free(td);
	if (td->td_kstack != 0)
		vm_thread_dispose(td);
	vm_domain_policy_cleanup(&td->td_vm_dom_policy);
	callout_drain(&td->td_slpcallout);
	uma_zfree(thread_zone, td);
}

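/*
 * Each thread keeps private references to the process credentials and
 * resource limits (td_ucred, td_limit), refreshed lazily: td_cowgen is
 * compared against p_cowgen, and thread_cow_update() re-snapshots the
 * pointers when the process-wide copies have changed.
 */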
void
thread_cow_get_proc(struct thread *newtd, struct proc *p)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	newtd->td_ucred = crhold(p->p_ucred);
	newtd->td_limit = lim_hold(p->p_limit);
	newtd->td_cowgen = p->p_cowgen;
}

void
thread_cow_get(struct thread *newtd, struct thread *td)
{

	newtd->td_ucred = crhold(td->td_ucred);
	newtd->td_limit = lim_hold(td->td_limit);
	newtd->td_cowgen = td->td_cowgen;
}

void
thread_cow_free(struct thread *td)
{

	if (td->td_ucred != NULL)
		crfree(td->td_ucred);
	if (td->td_limit != NULL)
		lim_free(td->td_limit);
}

void
thread_cow_update(struct thread *td)
{
	struct proc *p;
	struct ucred *oldcred;
	struct plimit *oldlimit;

	p = td->td_proc;
	oldcred = NULL;
	oldlimit = NULL;
	PROC_LOCK(p);
	if (td->td_ucred != p->p_ucred) {
		oldcred = td->td_ucred;
		td->td_ucred = crhold(p->p_ucred);
	}
	if (td->td_limit != p->p_limit) {
		oldlimit = td->td_limit;
		td->td_limit = lim_hold(p->p_limit);
	}
	td->td_cowgen = p->p_cowgen;
	PROC_UNLOCK(p);
	if (oldcred != NULL)
		crfree(oldcred);
	if (oldlimit != NULL)
		lim_free(oldlimit);
}

/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder.  This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 */
void
thread_exit(void)
{
	uint64_t runtime, new_switchtime;
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&Giant, MA_NOTOWNED);

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
	    (long)p->p_pid, td->td_name);
	SDT_PROBE0(proc, , , lwp__exit);
	KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));

	/*
	 * Drop FPU and debug register state storage, and any other
	 * architecture-specific resources that would not be present
	 * in a new, untouched process.
	 */
	cpu_thread_exit(td);

	/*
	 * The last thread is left attached to the process so that the
	 * whole bundle gets recycled.  Skip all this stuff if we never
	 * had threads.  exit1() clears all signs of other threads when
	 * it goes to single threading, so the last thread always takes
	 * the short path.
	 */
	if (p->p_flag & P_HADTHREADS) {
		if (p->p_numthreads > 1) {
			atomic_add_int(&td->td_proc->p_exitthreads, 1);
			thread_unlink(td);
			td2 = FIRST_THREAD_IN_PROC(p);
			sched_exit_thread(td2, td);

			/*
			 * The test below is NOT true if we are the
			 * sole exiting thread.  P_STOPPED_SINGLE is unset
			 * in exit1() after it is the only survivor.
			 */
			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
				if (p->p_numthreads == p->p_suspcount) {
					thread_lock(p->p_singlethread);
					wakeup_swapper = thread_unsuspend_one(
					    p->p_singlethread, p, false);
					thread_unlock(p->p_singlethread);
					if (wakeup_swapper)
						kick_proc0();
				}
			}

			PCPU_SET(deadthread, td);
		} else {
			/*
			 * The last thread is exiting... but not through exit().
			 */
			panic("thread_exit: Last thread exiting on its own");
		}
	}
#ifdef HWPMC_HOOKS
	/*
	 * If this thread is part of a process that is being tracked by
	 * hwpmc(4), inform the module of the thread's impending exit.
	 */
	if (PMC_PROC_IS_USING_PMCS(td->td_proc))
		PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
#endif
	PROC_UNLOCK(p);
	PROC_STATLOCK(p);
	thread_lock(td);
	PROC_SUNLOCK(p);

	/* Do the same timestamp bookkeeping that mi_switch() would do. */
	new_switchtime = cpu_ticks();
	runtime = new_switchtime - PCPU_GET(switchtime);
	td->td_runtime += runtime;
	td->td_incruntime += runtime;
	PCPU_SET(switchtime, new_switchtime);
	PCPU_SET(switchticks, ticks);
	PCPU_INC(cnt.v_swtch);

	/* Save our resource usage in our process. */
	td->td_ru.ru_nvcsw++;
	ruxagg(p, td);
	rucollect(&p->p_ru, &td->td_ru);
	PROC_STATUNLOCK(p);

	td->td_state = TDS_INACTIVE;
#ifdef WITNESS
	witness_thread_exit(td);
#endif
	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
	sched_throw(td);
	panic("I'm a teapot!");
	/* NOTREACHED */
}

/*
 * Do any thread-specific cleanups that may be needed in wait().
 * Called with Giant, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	mtx_assert(&Giant, MA_NOTOWNED);
	KASSERT(p->p_numthreads == 1, ("multiple threads in thread_wait()"));
	KASSERT(p->p_exitthreads == 0, ("p_exitthreads leaking"));
	td = FIRST_THREAD_IN_PROC(p);
	/* Lock the last thread so we spin until it exits cpu_throw(). */
	thread_lock(td);
	thread_unlock(td);
	lock_profile_thread_exit(td);
	cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_clean(td);
	thread_cow_free(td);
	callout_drain(&td->td_slpcallout);
	thread_reap();	/* check for zombie threads etc. */
}

/*
 * Link a thread to a process.  Set up anything that needs to be
 * initialized for it to be used by the process.
 */
void
thread_link(struct thread *td, struct proc *p)
{

	/*
	 * XXX This can't be enabled because it's called for proc0 before
	 * its lock has been created.
	 * PROC_LOCK_ASSERT(p, MA_OWNED);
	 */
	td->td_state = TDS_INACTIVE;
	td->td_proc = p;
	td->td_flags = TDF_INMEM;

	LIST_INIT(&td->td_contested);
	LIST_INIT(&td->td_lprof[0]);
	LIST_INIT(&td->td_lprof[1]);
	sigqueue_init(&td->td_sigqueue, p);
	callout_init(&td->td_slpcallout, 1);
	TAILQ_INSERT_TAIL(&p->p_threads, td, td_plist);
	p->p_numthreads++;
}

/*
 * Called from:
 *	thread_exit()
 */
void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	/* could clear a few other things here */
	/* Must NOT clear links to proc! */
}

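/*
 * Helpers for thread_single(): calc_remaining() computes how many
 * threads still stand in the way of single-threading for the given
 * mode, and remain_for_mode() gives the count at which we are done
 * (0 for SINGLE_ALLPROC, where the caller belongs to another process;
 * otherwise 1, the caller itself).
 */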
static int
calc_remaining(struct proc *p, int mode)
{
	int remaining;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	if (mode == SINGLE_EXIT)
		remaining = p->p_numthreads;
	else if (mode == SINGLE_BOUNDARY)
		remaining = p->p_numthreads - p->p_boundary_count;
	else if (mode == SINGLE_NO_EXIT || mode == SINGLE_ALLPROC)
		remaining = p->p_numthreads - p->p_suspcount;
	else
		panic("calc_remaining: wrong mode %d", mode);
	return (remaining);
}

static int
remain_for_mode(int mode)
{

	return (mode == SINGLE_ALLPROC ? 0 : 1);
}

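/*
 * Kick a thread out of whatever is inhibiting it (suspension or an
 * interruptible sleep) so that it can make progress toward the state
 * required by the single-threading mode.  Returns non-zero if the
 * swapper needs to be woken.
 */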
static int
weed_inhib(int mode, struct thread *td2, struct proc *p)
{
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td2, MA_OWNED);

	wakeup_swapper = 0;
	switch (mode) {
	case SINGLE_EXIT:
		if (TD_IS_SUSPENDED(td2))
			wakeup_swapper |= thread_unsuspend_one(td2, p, true);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
			wakeup_swapper |= sleepq_abort(td2, EINTR);
		break;
	case SINGLE_BOUNDARY:
	case SINGLE_NO_EXIT:
		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & TDF_BOUNDARY) == 0)
			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
			wakeup_swapper |= sleepq_abort(td2, ERESTART);
		break;
	case SINGLE_ALLPROC:
		/*
		 * ALLPROC suspend tries to avoid spurious EINTR for
		 * threads sleeping interruptibly, by suspending the
		 * thread directly, similarly to sig_suspend_threads().
		 * Since such sleep is not performed at the user
		 * boundary, TDF_BOUNDARY flag is not set, and TDF_ALLPROCSUSP
		 * is used to avoid immediate un-suspend.
		 */
		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & (TDF_BOUNDARY |
		    TDF_ALLPROCSUSP)) == 0)
			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0) {
			if ((td2->td_flags & TDF_SBDRY) == 0) {
				thread_suspend_one(td2);
				td2->td_flags |= TDF_ALLPROCSUSP;
			} else {
				wakeup_swapper |= sleepq_abort(td2, ERESTART);
			}
		}
		break;
	}
	return (wakeup_swapper);
}

/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar).  Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single-threaded in the suspend mode when
 * there are no threads in user mode.  Threads in the kernel must be
 * allowed to continue until they get to the user boundary.  They may
 * even copy out their return values and data before suspending.  They
 * may however be accelerated in reaching the user boundary as we will
 * wake up any sleeping threads that are interruptible (PCATCH).
 */
int
thread_single(struct proc *p, int mode)
{
	struct thread *td;
	struct thread *td2;
	int remaining, wakeup_swapper;

	td = curthread;
	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
	    ("invalid mode %d", mode));
	/*
	 * If allowing non-ALLPROC singlethreading for non-curproc
	 * callers, calc_remaining() and remain_for_mode() should be
	 * adjusted to also account for td->td_proc != p.  For now
	 * this is not implemented because it is not used.
	 */
	KASSERT((mode == SINGLE_ALLPROC && td->td_proc != p) ||
	    (mode != SINGLE_ALLPROC && td->td_proc == p),
	    ("mode %d proc %p curproc %p", mode, p, td->td_proc));
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);

	if ((p->p_flag & P_HADTHREADS) == 0 && mode != SINGLE_ALLPROC)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread != NULL && p->p_singlethread != td)
		return (1);

	if (mode == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
		p->p_flag &= ~P_SINGLE_BOUNDARY;
	} else {
		p->p_flag &= ~P_SINGLE_EXIT;
		if (mode == SINGLE_BOUNDARY)
			p->p_flag |= P_SINGLE_BOUNDARY;
		else
			p->p_flag &= ~P_SINGLE_BOUNDARY;
	}
	if (mode == SINGLE_ALLPROC)
		p->p_flag |= P_TOTAL_STOP;
	p->p_flag |= P_STOPPED_SINGLE;
	PROC_SLOCK(p);
	p->p_singlethread = td;
	remaining = calc_remaining(p, mode);
	while (remaining != remain_for_mode(mode)) {
		if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
			goto stopme;
		wakeup_swapper = 0;
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
			if (TD_IS_INHIBITED(td2)) {
				wakeup_swapper |= weed_inhib(mode, td2, p);
#ifdef SMP
			} else if (TD_IS_RUNNING(td2) && td != td2) {
				forward_signal(td2);
#endif
			}
			thread_unlock(td2);
		}
		if (wakeup_swapper)
			kick_proc0();
		remaining = calc_remaining(p, mode);

		/*
		 * Maybe we suspended some threads... was it enough?
		 */
		if (remaining == remain_for_mode(mode))
			break;

stopme:
		/*
		 * Wake us up when everyone else has suspended.
		 * In the meantime we suspend as well.
		 */
		thread_suspend_switch(td, p);
		remaining = calc_remaining(p, mode);
	}
	if (mode == SINGLE_EXIT) {
		/*
		 * Convert the process to an unthreaded process.  The
		 * SINGLE_EXIT is called by exit1() or execve(), in
		 * both cases other threads must be retired.
		 */
		KASSERT(p->p_numthreads == 1, ("Unthreading with >1 threads"));
		p->p_singlethread = NULL;
		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_HADTHREADS);

		/*
		 * Wait for any remaining threads to exit cpu_throw().
		 */
		while (p->p_exitthreads != 0) {
			PROC_SUNLOCK(p);
			PROC_UNLOCK(p);
			sched_relinquish(td);
			PROC_LOCK(p);
			PROC_SLOCK(p);
		}
	} else if (mode == SINGLE_BOUNDARY) {
		/*
		 * Wait until all suspended threads are removed from
		 * the processors.  The thread_suspend_check()
		 * increments p_boundary_count while it is still
		 * running, which makes it possible for the execve()
		 * to destroy vmspace while our other threads are
		 * still using the address space.
		 *
		 * We lock the thread, which is only allowed to
		 * succeed after context switch code finished using
		 * the address space.
		 */
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			KASSERT((td2->td_flags & TDF_BOUNDARY) != 0,
			    ("td %p not on boundary", td2));
			KASSERT(TD_IS_SUSPENDED(td2),
			    ("td %p is not suspended", td2));
			thread_unlock(td2);
		}
	}
	PROC_SUNLOCK(p);
	return (0);
}

bool
thread_suspend_check_needed(void)
{
	struct proc *p;
	struct thread *td;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	return (P_SHOULDSTOP(p) || ((p->p_flag & P_TRACED) != 0 &&
	    (td->td_dbgflags & TDB_SUSPEND) != 0));
}

/*
 * Called from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is non-zero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          |   returns 0 or 1
 *               | when ST ends       |   immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       |   returns 1
 *               |                    |   immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (thread_suspend_check_needed()) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading.  Single threader need not stop.
			 * It is safe to access p->p_singlethread unlocked
			 * because it can only be set to our address by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
			return (EINTR);

		/* Should we go to the user boundary if we didn't come from there? */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
			return (ERESTART);

		/*
		 * Ignore suspend requests if they are deferred.
		 */
		if ((td->td_flags & TDF_SBDRY) != 0) {
			KASSERT(return_instead,
			    ("TDF_SBDRY set for unsafe thread_suspend_check"));
			KASSERT((td->td_flags & (TDF_SEINTR | TDF_SERESTART)) !=
			    (TDF_SEINTR | TDF_SERESTART),
			    ("both TDF_SEINTR and TDF_SERESTART"));
			return (TD_SBDRY_INTR(td) ? TD_SBDRY_ERRNO(td) : 0);
		}

		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
			PROC_UNLOCK(p);

			/*
			 * Allow Linux emulation layer to do some work
			 * before thread suicide.
			 */
			if (__predict_false(p->p_sysent->sv_thread_detach != NULL))
				(p->p_sysent->sv_thread_detach)(td);
			umtx_thread_exit(td);
			kern_thr_exit(td);
			panic("stopped thread did not exit");
		}

		PROC_SLOCK(p);
		thread_stopped(p);
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount + 1) {
				thread_lock(p->p_singlethread);
				wakeup_swapper = thread_unsuspend_one(
				    p->p_singlethread, p, false);
				thread_unlock(p->p_singlethread);
				if (wakeup_swapper)
					kick_proc0();
			}
		}
		PROC_UNLOCK(p);
		thread_lock(td);
		/*
		 * When a thread suspends, it just
		 * gets taken off all queues.
		 */
		thread_suspend_one(td);
		if (return_instead == 0) {
			p->p_boundary_count++;
			td->td_flags |= TDF_BOUNDARY;
		}
		PROC_SUNLOCK(p);
		mi_switch(SW_INVOL | SWT_SUSPEND, NULL);
		thread_unlock(td);
		PROC_LOCK(p);
	}
	return (0);
}

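/*
 * Suspend the calling thread and switch away until it is resumed;
 * thread_single() uses this to wait for the remaining threads to
 * suspend.
 */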
void
thread_suspend_switch(struct thread *td, struct proc *p)
{

	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	/*
	 * We implement thread_suspend_one in stages here to avoid
	 * dropping the proc lock while the thread lock is owned.
	 */
	if (p == td->td_proc) {
		thread_stopped(p);
		p->p_suspcount++;
	}
	PROC_UNLOCK(p);
	thread_lock(td);
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
	PROC_SUNLOCK(p);
	DROP_GIANT();
	mi_switch(SW_VOL | SWT_SUSPEND, NULL);
	thread_unlock(td);
	PICKUP_GIANT();
	PROC_LOCK(p);
	PROC_SLOCK(p);
}

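/*
 * Mark a thread suspended without switching away; the proc spinlock
 * and the thread lock must be held, and the eventual context switch
 * is left to the caller.
 */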
void
thread_suspend_one(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
}

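/*
 * Clear a thread's suspension and make it runnable again.  Returns
 * non-zero if the swapper must be woken to bring the thread back in;
 * callers then invoke kick_proc0().
 */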
static int
thread_unsuspend_one(struct thread *td, struct proc *p, bool boundary)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
	TD_CLR_SUSPENDED(td);
	td->td_flags &= ~TDF_ALLPROCSUSP;
	if (td->td_proc == p) {
		PROC_SLOCK_ASSERT(p, MA_OWNED);
		p->p_suspcount--;
		if (boundary && (td->td_flags & TDF_BOUNDARY) != 0) {
			td->td_flags &= ~TDF_BOUNDARY;
			p->p_boundary_count--;
		}
	}
	return (setrunnable(td));
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	wakeup_swapper = 0;
	if (!P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td, p,
				    true);
			}
			thread_unlock(td);
		}
	} else if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
	    p->p_numthreads == p->p_suspcount) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request.  Now we've downgraded to single-threaded,
		 * let it continue.
		 */
		if (p->p_singlethread->td_proc == p) {
			thread_lock(p->p_singlethread);
			wakeup_swapper = thread_unsuspend_one(
			    p->p_singlethread, p, false);
			thread_unlock(p->p_singlethread);
		}
	}
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * End the single-threading mode.
 */
void
thread_single_end(struct proc *p, int mode)
{
	struct thread *td;
	int wakeup_swapper;

	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
	    ("invalid mode %d", mode));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((mode == SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) != 0) ||
	    (mode != SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) == 0),
	    ("mode %d does not match P_TOTAL_STOP", mode));
	KASSERT(mode == SINGLE_ALLPROC || p->p_singlethread == curthread,
	    ("thread_single_end from other thread %p %p",
	    curthread, p->p_singlethread));
	KASSERT(mode != SINGLE_BOUNDARY ||
	    (p->p_flag & P_SINGLE_BOUNDARY) != 0,
	    ("mis-matched SINGLE_BOUNDARY flags %x", p->p_flag));
	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY |
	    P_TOTAL_STOP);
	PROC_SLOCK(p);
	p->p_singlethread = NULL;
	wakeup_swapper = 0;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process.  The single threader must be allowed
	 * to continue however as this is a bad place to stop.
	 */
	if (p->p_numthreads != remain_for_mode(mode) && !P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td, p,
				    mode == SINGLE_BOUNDARY);
			}
			thread_unlock(td);
		}
	}
	KASSERT(mode != SINGLE_BOUNDARY || p->p_boundary_count == 0,
	    ("inconsistent boundary count %d", p->p_boundary_count));
	PROC_SUNLOCK(p);
	if (wakeup_swapper)
		kick_proc0();
}

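/*
 * Locate a thread within a process by its tid; the proc lock must be
 * held.  Returns NULL if no thread in p has that tid.
 */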
struct thread *
thread_find(struct proc *p, lwpid_t tid)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_tid == tid)
			break;
	}
	return (td);
}

/* Locate a thread by number; return with proc lock held. */
struct thread *
tdfind(lwpid_t tid, pid_t pid)
{
#define RUN_THRESH	16
	struct thread *td;
	int run = 0;

	rw_rlock(&tidhash_lock);
	LIST_FOREACH(td, TIDHASH(tid), td_hash) {
		if (td->td_tid == tid) {
			if (pid != -1 && td->td_proc->p_pid != pid) {
				td = NULL;
				break;
			}
			PROC_LOCK(td->td_proc);
			if (td->td_proc->p_state == PRS_NEW) {
				PROC_UNLOCK(td->td_proc);
				td = NULL;
				break;
			}
			if (run > RUN_THRESH) {
				if (rw_try_upgrade(&tidhash_lock)) {
					LIST_REMOVE(td, td_hash);
					LIST_INSERT_HEAD(TIDHASH(td->td_tid),
					    td, td_hash);
					rw_wunlock(&tidhash_lock);
					return (td);
				}
			}
			break;
		}
		run++;
	}
	rw_runlock(&tidhash_lock);
	return (td);
}

void
tidhash_add(struct thread *td)
{
	rw_wlock(&tidhash_lock);
	LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash);
	rw_wunlock(&tidhash_lock);
}

void
tidhash_remove(struct thread *td)
{
	rw_wlock(&tidhash_lock);
	LIST_REMOVE(td, td_hash);
	rw_wunlock(&tidhash_lock);
}