FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_thread.c


    1 /*-
    2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
    3  *
    4  * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
    5  *  All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice(s), this list of conditions and the following disclaimer as
   12  *    the first lines of this file unmodified other than the possible
   13  *    addition of one or more copyright notices.
   14  * 2. Redistributions in binary form must reproduce the above copyright
   15  *    notice(s), this list of conditions and the following disclaimer in the
   16  *    documentation and/or other materials provided with the distribution.
   17  *
   18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
   19  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
   20  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
   21  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
   22  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
   23  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
   24  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
   25  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
   28  * DAMAGE.
   29  */
   30 
   31 #include "opt_witness.h"
   32 #include "opt_hwpmc_hooks.h"
   33 
   34 #include <sys/cdefs.h>
   35 __FBSDID("$FreeBSD$");
   36 
   37 #include <sys/param.h>
   38 #include <sys/systm.h>
   39 #include <sys/kernel.h>
   40 #include <sys/lock.h>
   41 #include <sys/msan.h>
   42 #include <sys/mutex.h>
   43 #include <sys/proc.h>
   44 #include <sys/bitstring.h>
   45 #include <sys/epoch.h>
   46 #include <sys/rangelock.h>
   47 #include <sys/resourcevar.h>
   48 #include <sys/sdt.h>
   49 #include <sys/smp.h>
   50 #include <sys/sched.h>
   51 #include <sys/sleepqueue.h>
   52 #include <sys/selinfo.h>
   53 #include <sys/syscallsubr.h>
   54 #include <sys/dtrace_bsd.h>
   55 #include <sys/sysent.h>
   56 #include <sys/turnstile.h>
   57 #include <sys/taskqueue.h>
   58 #include <sys/ktr.h>
   59 #include <sys/rwlock.h>
   60 #include <sys/umtxvar.h>
   61 #include <sys/vmmeter.h>
   62 #include <sys/cpuset.h>
   63 #ifdef  HWPMC_HOOKS
   64 #include <sys/pmckern.h>
   65 #endif
   66 #include <sys/priv.h>
   67 
   68 #include <security/audit/audit.h>
   69 
   70 #include <vm/pmap.h>
   71 #include <vm/vm.h>
   72 #include <vm/vm_extern.h>
   73 #include <vm/uma.h>
   74 #include <vm/vm_phys.h>
   75 #include <sys/eventhandler.h>
   76 
   77 /*
   78  * Asserts below verify the stability of struct thread and struct proc
   79  * layout, as exposed by KBI to modules.  On head, the KBI is allowed
    80  * to drift; changes to the structures must be accompanied by the
    81  * corresponding assert updates.
   82  *
   83  * On the stable branches after KBI freeze, conditions must not be
   84  * violated.  Typically new fields are moved to the end of the
   85  * structures.
   86  */
   87 #ifdef __amd64__
   88 _Static_assert(offsetof(struct thread, td_flags) == 0x108,
   89     "struct thread KBI td_flags");
   90 _Static_assert(offsetof(struct thread, td_pflags) == 0x114,
   91     "struct thread KBI td_pflags");
   92 _Static_assert(offsetof(struct thread, td_frame) == 0x4b0,
   93     "struct thread KBI td_frame");
   94 _Static_assert(offsetof(struct thread, td_emuldata) == 0x6c0,
   95     "struct thread KBI td_emuldata");
   96 _Static_assert(offsetof(struct proc, p_flag) == 0xb8,
   97     "struct proc KBI p_flag");
   98 _Static_assert(offsetof(struct proc, p_pid) == 0xc4,
   99     "struct proc KBI p_pid");
  100 _Static_assert(offsetof(struct proc, p_filemon) == 0x3c8,
  101     "struct proc KBI p_filemon");
  102 _Static_assert(offsetof(struct proc, p_comm) == 0x3e0,
  103     "struct proc KBI p_comm");
  104 _Static_assert(offsetof(struct proc, p_emuldata) == 0x4c8,
  105     "struct proc KBI p_emuldata");
  106 #endif
  107 #ifdef __i386__
  108 _Static_assert(offsetof(struct thread, td_flags) == 0x9c,
  109     "struct thread KBI td_flags");
  110 _Static_assert(offsetof(struct thread, td_pflags) == 0xa8,
  111     "struct thread KBI td_pflags");
  112 _Static_assert(offsetof(struct thread, td_frame) == 0x30c,
  113     "struct thread KBI td_frame");
  114 _Static_assert(offsetof(struct thread, td_emuldata) == 0x350,
  115     "struct thread KBI td_emuldata");
  116 _Static_assert(offsetof(struct proc, p_flag) == 0x6c,
  117     "struct proc KBI p_flag");
  118 _Static_assert(offsetof(struct proc, p_pid) == 0x78,
  119     "struct proc KBI p_pid");
  120 _Static_assert(offsetof(struct proc, p_filemon) == 0x270,
  121     "struct proc KBI p_filemon");
  122 _Static_assert(offsetof(struct proc, p_comm) == 0x284,
  123     "struct proc KBI p_comm");
  124 _Static_assert(offsetof(struct proc, p_emuldata) == 0x310,
  125     "struct proc KBI p_emuldata");
  126 #endif
  127 
  128 SDT_PROVIDER_DECLARE(proc);
  129 SDT_PROBE_DEFINE(proc, , , lwp__exit);
  130 
   131 /*
   132  * Thread-related storage.
   133  */
  134 static uma_zone_t thread_zone;
  135 
  136 struct thread_domain_data {
  137         struct thread   *tdd_zombies;
  138         int             tdd_reapticks;
  139 } __aligned(CACHE_LINE_SIZE);
  140 
  141 static struct thread_domain_data thread_domain_data[MAXMEMDOM];
  142 
  143 static struct task      thread_reap_task;
  144 static struct callout   thread_reap_callout;
  145 
  146 static void thread_zombie(struct thread *);
  147 static void thread_reap(void);
  148 static void thread_reap_all(void);
  149 static void thread_reap_task_cb(void *, int);
  150 static void thread_reap_callout_cb(void *);
  151 static int thread_unsuspend_one(struct thread *td, struct proc *p,
  152     bool boundary);
  153 static void thread_free_batched(struct thread *td);
  154 
  155 static __exclusive_cache_line struct mtx tid_lock;
  156 static bitstr_t *tid_bitmap;
  157 
  158 static MALLOC_DEFINE(M_TIDHASH, "tidhash", "thread hash");
  159 
  160 static int maxthread;
  161 SYSCTL_INT(_kern, OID_AUTO, maxthread, CTLFLAG_RDTUN,
  162     &maxthread, 0, "Maximum number of threads");
  163 
  164 static __exclusive_cache_line int nthreads;
  165 
  166 static LIST_HEAD(tidhashhead, thread) *tidhashtbl;
  167 static u_long   tidhash;
  168 static u_long   tidhashlock;
  169 static struct   rwlock *tidhashtbl_lock;
  170 #define TIDHASH(tid)            (&tidhashtbl[(tid) & tidhash])
  171 #define TIDHASHLOCK(tid)        (&tidhashtbl_lock[(tid) & tidhashlock])
  172 
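/*
 * A minimal lookup sketch for the hash above (a tdfind()-style
 * consumer; the caller-held 'tid' is an assumption here): the tid
 * selects both a bucket and the rwlock covering it, so lookups
 * contend on a per-bucket lock rather than a global one.
 *
 *	struct thread *td;
 *
 *	rw_rlock(TIDHASHLOCK(tid));
 *	LIST_FOREACH(td, TIDHASH(tid), td_hash) {
 *		if (td->td_tid == tid)
 *			break;
 *	}
 *	rw_runlock(TIDHASHLOCK(tid));
 */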
  173 EVENTHANDLER_LIST_DEFINE(thread_ctor);
  174 EVENTHANDLER_LIST_DEFINE(thread_dtor);
  175 EVENTHANDLER_LIST_DEFINE(thread_init);
  176 EVENTHANDLER_LIST_DEFINE(thread_fini);
  177 
  178 static bool
  179 thread_count_inc_try(void)
  180 {
  181         int nthreads_new;
  182 
  183         nthreads_new = atomic_fetchadd_int(&nthreads, 1) + 1;
  184         if (nthreads_new >= maxthread - 100) {
  185                 if (priv_check_cred(curthread->td_ucred, PRIV_MAXPROC) != 0 ||
  186                     nthreads_new >= maxthread) {
  187                         atomic_subtract_int(&nthreads, 1);
  188                         return (false);
  189                 }
  190         }
  191         return (true);
  192 }
  193 
  194 static bool
  195 thread_count_inc(void)
  196 {
  197         static struct timeval lastfail;
  198         static int curfail;
  199 
  200         thread_reap();
  201         if (thread_count_inc_try()) {
  202                 return (true);
  203         }
  204 
  205         thread_reap_all();
  206         if (thread_count_inc_try()) {
  207                 return (true);
  208         }
  209 
  210         if (ppsratecheck(&lastfail, &curfail, 1)) {
  211                 printf("maxthread limit exceeded by uid %u "
  212                     "(pid %d); consider increasing kern.maxthread\n",
  213                     curthread->td_ucred->cr_ruid, curproc->p_pid);
  214         }
  215         return (false);
  216 }
  217 
  218 static void
  219 thread_count_sub(int n)
  220 {
  221 
  222         atomic_subtract_int(&nthreads, n);
  223 }
  224 
  225 static void
  226 thread_count_dec(void)
  227 {
  228 
  229         thread_count_sub(1);
  230 }
  231 
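/*
 * A usage sketch for the accounting above: allocation paths take a
 * reference up front and back it out on failure.  thread_alloc()
 * below follows exactly this pattern; the failing step here is only
 * illustrative.
 *
 *	if (!thread_count_inc())
 *		return (NULL);
 *	if (some_allocation_failed) {
 *		thread_count_dec();
 *		return (NULL);
 *	}
 */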
  232 static lwpid_t
  233 tid_alloc(void)
  234 {
  235         static lwpid_t trytid;
  236         lwpid_t tid;
  237 
  238         mtx_lock(&tid_lock);
  239         /*
  240          * It is an invariant that the bitmap is big enough to hold maxthread
   241          * IDs.  If we got to this point, there has to be at least one free.
  242          */
  243         if (trytid >= maxthread)
  244                 trytid = 0;
  245         bit_ffc_at(tid_bitmap, trytid, maxthread, &tid);
  246         if (tid == -1) {
  247                 KASSERT(trytid != 0, ("unexpectedly ran out of IDs"));
  248                 trytid = 0;
  249                 bit_ffc_at(tid_bitmap, trytid, maxthread, &tid);
  250                 KASSERT(tid != -1, ("unexpectedly ran out of IDs"));
  251         }
  252         bit_set(tid_bitmap, tid);
  253         trytid = tid + 1;
  254         mtx_unlock(&tid_lock);
  255         return (tid + NO_PID);
  256 }
  257 
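/*
 * A worked example of the wraparound search above, assuming
 * maxthread = 8: with bits {0, 1, 3, 5, 6, 7} set and trytid = 5,
 * the first bit_ffc_at() scan over [5, 8) finds no clear bit and
 * yields -1, so the search restarts at 0 and finds bit 2.  The
 * function then returns 2 + NO_PID and leaves trytid = 3 for the
 * next caller.
 */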
  258 static void
  259 tid_free_locked(lwpid_t rtid)
  260 {
  261         lwpid_t tid;
  262 
  263         mtx_assert(&tid_lock, MA_OWNED);
  264         KASSERT(rtid >= NO_PID,
  265             ("%s: invalid tid %d\n", __func__, rtid));
  266         tid = rtid - NO_PID;
  267         KASSERT(bit_test(tid_bitmap, tid) != 0,
  268             ("thread ID %d not allocated\n", rtid));
  269         bit_clear(tid_bitmap, tid);
  270 }
  271 
  272 static void
  273 tid_free(lwpid_t rtid)
  274 {
  275 
  276         mtx_lock(&tid_lock);
  277         tid_free_locked(rtid);
  278         mtx_unlock(&tid_lock);
  279 }
  280 
  281 static void
  282 tid_free_batch(lwpid_t *batch, int n)
  283 {
  284         int i;
  285 
  286         mtx_lock(&tid_lock);
  287         for (i = 0; i < n; i++) {
  288                 tid_free_locked(batch[i]);
  289         }
  290         mtx_unlock(&tid_lock);
  291 }
  292 
  293 /*
   294  * Batching for thread reaping.
  295  */
  296 struct tidbatch {
  297         lwpid_t tab[16];
  298         int n;
  299 };
  300 
  301 static void
  302 tidbatch_prep(struct tidbatch *tb)
  303 {
  304 
  305         tb->n = 0;
  306 }
  307 
  308 static void
  309 tidbatch_add(struct tidbatch *tb, struct thread *td)
  310 {
  311 
  312         KASSERT(tb->n < nitems(tb->tab),
  313             ("%s: count too high %d", __func__, tb->n));
  314         tb->tab[tb->n] = td->td_tid;
  315         tb->n++;
  316 }
  317 
  318 static void
  319 tidbatch_process(struct tidbatch *tb)
  320 {
  321 
  322         KASSERT(tb->n <= nitems(tb->tab),
  323             ("%s: count too high %d", __func__, tb->n));
  324         if (tb->n == nitems(tb->tab)) {
  325                 tid_free_batch(tb->tab, tb->n);
  326                 tb->n = 0;
  327         }
  328 }
  329 
  330 static void
  331 tidbatch_final(struct tidbatch *tb)
  332 {
  333 
  334         KASSERT(tb->n <= nitems(tb->tab),
  335             ("%s: count too high %d", __func__, tb->n));
  336         if (tb->n != 0) {
  337                 tid_free_batch(tb->tab, tb->n);
  338         }
  339 }
  340 
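/*
 * A minimal sketch of how a batch is driven (thread_reap_domain()
 * below is the real consumer): tidbatch_process() flushes only when
 * the batch fills, so tid_lock is taken once per nitems(tb->tab)
 * frees rather than once per thread.
 *
 *	struct tidbatch tb;
 *
 *	tidbatch_prep(&tb);
 *	while (td != NULL) {
 *		tidbatch_add(&tb, td);
 *		tidbatch_process(&tb);
 *		td = td->td_zombie;
 *	}
 *	tidbatch_final(&tb);
 */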
  341 /*
  342  * Prepare a thread for use.
  343  */
  344 static int
  345 thread_ctor(void *mem, int size, void *arg, int flags)
  346 {
  347         struct thread   *td;
  348 
  349         td = (struct thread *)mem;
  350         TD_SET_STATE(td, TDS_INACTIVE);
  351         td->td_lastcpu = td->td_oncpu = NOCPU;
  352 
  353         /*
  354          * Note that td_critnest begins life as 1 because the thread is not
  355          * running and is thereby implicitly waiting to be on the receiving
  356          * end of a context switch.
  357          */
  358         td->td_critnest = 1;
  359         td->td_lend_user_pri = PRI_MAX;
  360 #ifdef AUDIT
  361         audit_thread_alloc(td);
  362 #endif
  363 #ifdef KDTRACE_HOOKS
  364         kdtrace_thread_ctor(td);
  365 #endif
  366         umtx_thread_alloc(td);
  367         MPASS(td->td_sel == NULL);
  368         return (0);
  369 }
  370 
  371 /*
  372  * Reclaim a thread after use.
  373  */
  374 static void
  375 thread_dtor(void *mem, int size, void *arg)
  376 {
  377         struct thread *td;
  378 
  379         td = (struct thread *)mem;
  380 
  381 #ifdef INVARIANTS
  382         /* Verify that this thread is in a safe state to free. */
  383         switch (TD_GET_STATE(td)) {
  384         case TDS_INHIBITED:
  385         case TDS_RUNNING:
  386         case TDS_CAN_RUN:
  387         case TDS_RUNQ:
  388                 /*
  389                  * We must never unlink a thread that is in one of
  390                  * these states, because it is currently active.
  391                  */
  392                 panic("bad state for thread unlinking");
  393                 /* NOTREACHED */
  394         case TDS_INACTIVE:
  395                 break;
  396         default:
  397                 panic("bad thread state");
  398                 /* NOTREACHED */
  399         }
  400 #endif
  401 #ifdef AUDIT
  402         audit_thread_free(td);
  403 #endif
  404 #ifdef KDTRACE_HOOKS
  405         kdtrace_thread_dtor(td);
  406 #endif
   407         /* Free all OSD associated with this thread. */
  408         osd_thread_exit(td);
  409         ast_kclear(td);
  410         seltdfini(td);
  411 }
  412 
  413 /*
  414  * Initialize type-stable parts of a thread (when newly created).
  415  */
  416 static int
  417 thread_init(void *mem, int size, int flags)
  418 {
  419         struct thread *td;
  420 
  421         td = (struct thread *)mem;
  422 
  423         td->td_allocdomain = vm_phys_domain(vtophys(td));
  424         td->td_sleepqueue = sleepq_alloc();
  425         td->td_turnstile = turnstile_alloc();
  426         td->td_rlqe = NULL;
  427         EVENTHANDLER_DIRECT_INVOKE(thread_init, td);
  428         umtx_thread_init(td);
  429         td->td_kstack = 0;
  430         td->td_sel = NULL;
  431         return (0);
  432 }
  433 
  434 /*
  435  * Tear down type-stable parts of a thread (just before being discarded).
  436  */
  437 static void
  438 thread_fini(void *mem, int size)
  439 {
  440         struct thread *td;
  441 
  442         td = (struct thread *)mem;
  443         EVENTHANDLER_DIRECT_INVOKE(thread_fini, td);
  444         rlqentry_free(td->td_rlqe);
  445         turnstile_free(td->td_turnstile);
  446         sleepq_free(td->td_sleepqueue);
  447         umtx_thread_fini(td);
  448         MPASS(td->td_sel == NULL);
  449 }
  450 
   451 /*
   452  * For a newly created process, link up all the structures and its
   453  * initial thread.
   454  * Called from:
   455  *	{arch}/{arch}/machdep.c: {arch}_init(), init386(), etc.
   456  *	proc_dtor() (should go away)
   457  *	proc_init()
   458  */
  459 void
  460 proc_linkup0(struct proc *p, struct thread *td)
  461 {
  462         TAILQ_INIT(&p->p_threads);           /* all threads in proc */
  463         proc_linkup(p, td);
  464 }
  465 
  466 void
  467 proc_linkup(struct proc *p, struct thread *td)
  468 {
  469 
  470         sigqueue_init(&p->p_sigqueue, p);
  471         p->p_ksi = ksiginfo_alloc(M_WAITOK);
  472         if (p->p_ksi != NULL) {
  473                 /* XXX p_ksi may be null if ksiginfo zone is not ready */
  474                 p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
  475         }
  476         LIST_INIT(&p->p_mqnotifier);
  477         p->p_numthreads = 0;
  478         thread_link(td, p);
  479 }
  480 
  481 static void
  482 ast_suspend(struct thread *td, int tda __unused)
  483 {
  484         struct proc *p;
  485 
  486         p = td->td_proc;
  487         /*
   488          * We need to check whether we have to exit or wait due to a
   489          * single-threading requirement or some other STOP condition.
  490          */
  491         PROC_LOCK(p);
  492         thread_suspend_check(0);
  493         PROC_UNLOCK(p);
  494 }
  495 
  496 extern int max_threads_per_proc;
  497 
  498 /*
  499  * Initialize global thread allocation resources.
  500  */
  501 void
  502 threadinit(void)
  503 {
  504         u_long i;
  505         lwpid_t tid0;
  506 
  507         /*
  508          * Place an upper limit on threads which can be allocated.
  509          *
  510          * Note that other factors may make the de facto limit much lower.
  511          *
  512          * Platform limits are somewhat arbitrary but deemed "more than good
   513          * enough" for the foreseeable future.
  514          */
  515         if (maxthread == 0) {
  516 #ifdef _LP64
  517                 maxthread = MIN(maxproc * max_threads_per_proc, 1000000);
  518 #else
  519                 maxthread = MIN(maxproc * max_threads_per_proc, 100000);
  520 #endif
  521         }
  522 
  523         mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);
  524         tid_bitmap = bit_alloc(maxthread, M_TIDHASH, M_WAITOK);
  525         /*
  526          * Handle thread0.
  527          */
  528         thread_count_inc();
  529         tid0 = tid_alloc();
  530         if (tid0 != THREAD0_TID)
  531                 panic("tid0 %d != %d\n", tid0, THREAD0_TID);
  532 
  533         thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
  534             thread_ctor, thread_dtor, thread_init, thread_fini,
  535             32 - 1, UMA_ZONE_NOFREE);
  536         tidhashtbl = hashinit(maxproc / 2, M_TIDHASH, &tidhash);
  537         tidhashlock = (tidhash + 1) / 64;
  538         if (tidhashlock > 0)
  539                 tidhashlock--;
  540         tidhashtbl_lock = malloc(sizeof(*tidhashtbl_lock) * (tidhashlock + 1),
  541             M_TIDHASH, M_WAITOK | M_ZERO);
  542         for (i = 0; i < tidhashlock + 1; i++)
  543                 rw_init(&tidhashtbl_lock[i], "tidhash");
  544 
  545         TASK_INIT(&thread_reap_task, 0, thread_reap_task_cb, NULL);
  546         callout_init(&thread_reap_callout, 1);
  547         callout_reset(&thread_reap_callout, 5 * hz,
  548             thread_reap_callout_cb, NULL);
  549         ast_register(TDA_SUSPEND, ASTR_ASTF_REQUIRED, 0, ast_suspend);
  550 }
  551 
  552 /*
  553  * Place an unused thread on the zombie list.
  554  */
  555 void
  556 thread_zombie(struct thread *td)
  557 {
  558         struct thread_domain_data *tdd;
  559         struct thread *ztd;
  560 
  561         tdd = &thread_domain_data[td->td_allocdomain];
  562         ztd = atomic_load_ptr(&tdd->tdd_zombies);
  563         for (;;) {
  564                 td->td_zombie = ztd;
  565                 if (atomic_fcmpset_rel_ptr((uintptr_t *)&tdd->tdd_zombies,
  566                     (uintptr_t *)&ztd, (uintptr_t)td))
  567                         break;
  568                 continue;
  569         }
  570 }
  571 
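/*
 * The loop above is a lock-free LIFO push: td->td_zombie is pointed
 * at the observed head and atomic_fcmpset_rel_ptr() swings the head
 * to td only if it still holds that value; on failure the primitive
 * reloads the current head into ztd and the loop retries with it.
 * The release fence publishes the thread's fields before it becomes
 * reachable.  The matching consumer takes the whole list in a single
 * swap; a sketch of what thread_reap_domain() below does:
 *
 *	itd = (struct thread *)atomic_swap_ptr(
 *	    (uintptr_t *)&tdd->tdd_zombies, (uintptr_t)NULL);
 *	while (itd != NULL) {
 *		ntd = itd->td_zombie;
 *		...				(reclaim itd)
 *		itd = ntd;
 *	}
 */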
  572 /*
  573  * Release a thread that has exited after cpu_throw().
  574  */
  575 void
  576 thread_stash(struct thread *td)
  577 {
  578         atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
  579         thread_zombie(td);
  580 }
  581 
  582 /*
   583  * Reap zombies from the passed domain.
  584  */
  585 static void
  586 thread_reap_domain(struct thread_domain_data *tdd)
  587 {
  588         struct thread *itd, *ntd;
  589         struct tidbatch tidbatch;
  590         struct credbatch credbatch;
  591         int tdcount;
  592         struct plimit *lim;
  593         int limcount;
  594 
  595         /*
  596          * Reading upfront is pessimal if followed by concurrent atomic_swap,
  597          * but most of the time the list is empty.
  598          */
  599         if (tdd->tdd_zombies == NULL)
  600                 return;
  601 
  602         itd = (struct thread *)atomic_swap_ptr((uintptr_t *)&tdd->tdd_zombies,
  603             (uintptr_t)NULL);
  604         if (itd == NULL)
  605                 return;
  606 
  607         /*
   608          * Multiple CPUs can get here; the race is fine, as ticks is only
  609          * advisory.
  610          */
  611         tdd->tdd_reapticks = ticks;
  612 
  613         tidbatch_prep(&tidbatch);
  614         credbatch_prep(&credbatch);
  615         tdcount = 0;
  616         lim = NULL;
  617         limcount = 0;
  618 
  619         while (itd != NULL) {
  620                 ntd = itd->td_zombie;
  621                 EVENTHANDLER_DIRECT_INVOKE(thread_dtor, itd);
  622                 tidbatch_add(&tidbatch, itd);
  623                 credbatch_add(&credbatch, itd);
  624                 MPASS(itd->td_limit != NULL);
  625                 if (lim != itd->td_limit) {
  626                         if (limcount != 0) {
  627                                 lim_freen(lim, limcount);
  628                                 limcount = 0;
  629                         }
  630                 }
  631                 lim = itd->td_limit;
  632                 limcount++;
  633                 thread_free_batched(itd);
  634                 tidbatch_process(&tidbatch);
  635                 credbatch_process(&credbatch);
  636                 tdcount++;
  637                 if (tdcount == 32) {
  638                         thread_count_sub(tdcount);
  639                         tdcount = 0;
  640                 }
  641                 itd = ntd;
  642         }
  643 
  644         tidbatch_final(&tidbatch);
  645         credbatch_final(&credbatch);
  646         if (tdcount != 0) {
  647                 thread_count_sub(tdcount);
  648         }
  649         MPASS(limcount != 0);
  650         lim_freen(lim, limcount);
  651 }
  652 
  653 /*
  654  * Reap zombies from all domains.
  655  */
  656 static void
  657 thread_reap_all(void)
  658 {
  659         struct thread_domain_data *tdd;
  660         int i, domain;
  661 
  662         domain = PCPU_GET(domain);
  663         for (i = 0; i < vm_ndomains; i++) {
  664                 tdd = &thread_domain_data[(i + domain) % vm_ndomains];
  665                 thread_reap_domain(tdd);
  666         }
  667 }
  668 
  669 /*
  670  * Reap zombies from local domain.
  671  */
  672 static void
  673 thread_reap(void)
  674 {
  675         struct thread_domain_data *tdd;
  676         int domain;
  677 
  678         domain = PCPU_GET(domain);
  679         tdd = &thread_domain_data[domain];
  680 
  681         thread_reap_domain(tdd);
  682 }
  683 
  684 static void
  685 thread_reap_task_cb(void *arg __unused, int pending __unused)
  686 {
  687 
  688         thread_reap_all();
  689 }
  690 
  691 static void
  692 thread_reap_callout_cb(void *arg __unused)
  693 {
  694         struct thread_domain_data *tdd;
  695         int i, cticks, lticks;
  696         bool wantreap;
  697 
  698         wantreap = false;
  699         cticks = atomic_load_int(&ticks);
  700         for (i = 0; i < vm_ndomains; i++) {
  701                 tdd = &thread_domain_data[i];
  702                 lticks = tdd->tdd_reapticks;
  703                 if (tdd->tdd_zombies != NULL &&
  704                     (u_int)(cticks - lticks) > 5 * hz) {
  705                         wantreap = true;
  706                         break;
  707                 }
  708         }
  709 
  710         if (wantreap)
  711                 taskqueue_enqueue(taskqueue_thread, &thread_reap_task);
  712         callout_reset(&thread_reap_callout, 5 * hz,
  713             thread_reap_callout_cb, NULL);
  714 }
  715 
  716 /*
  717  * Calling this function guarantees that any thread that exited before
  718  * the call is reaped when the function returns.  By 'exited' we mean
  719  * a thread removed from the process linkage with thread_unlink().
   720  * Practically, the caller must lock/unlock the corresponding process
   721  * lock before the call, to synchronize with thread_exit().
  722  */
  723 void
  724 thread_reap_barrier(void)
  725 {
  726         struct task *t;
  727 
  728         /*
  729          * First do context switches to each CPU to ensure that all
   730          * PCPU pc_deadthreads are moved to the zombie list.
  731          */
  732         quiesce_all_cpus("", PDROP);
  733 
  734         /*
   735          * Second, fire the task in the same thread in which normal
   736          * thread_reap() runs, to serialize reaping.
  737          */
  738         t = malloc(sizeof(*t), M_TEMP, M_WAITOK);
  739         TASK_INIT(t, 0, thread_reap_task_cb, t);
  740         taskqueue_enqueue(taskqueue_thread, t);
  741         taskqueue_drain(taskqueue_thread, t);
  742         free(t, M_TEMP);
  743 }
  744 
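/*
 * A usage sketch (the caller is hypothetical): a subsystem that must
 * be sure no exited thread still references its state would, per the
 * comment above, do
 *
 *	PROC_LOCK(p);
 *	PROC_UNLOCK(p);
 *	thread_reap_barrier();
 *
 * after which every thread unlinked before the lock/unlock pair is
 * guaranteed to have been reaped.
 */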
  745 /*
  746  * Allocate a thread.
  747  */
  748 struct thread *
  749 thread_alloc(int pages)
  750 {
  751         struct thread *td;
  752         lwpid_t tid;
  753 
  754         if (!thread_count_inc()) {
  755                 return (NULL);
  756         }
  757 
  758         tid = tid_alloc();
  759         td = uma_zalloc(thread_zone, M_WAITOK);
  760         KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
  761         if (!vm_thread_new(td, pages)) {
  762                 uma_zfree(thread_zone, td);
  763                 tid_free(tid);
  764                 thread_count_dec();
  765                 return (NULL);
  766         }
  767         td->td_tid = tid;
  768         bzero(&td->td_sa.args, sizeof(td->td_sa.args));
  769         kmsan_thread_alloc(td);
  770         cpu_thread_alloc(td);
  771         EVENTHANDLER_DIRECT_INVOKE(thread_ctor, td);
  772         return (td);
  773 }
  774 
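/*
 * A usage sketch for the allocation path above (the errno choice is
 * illustrative):
 *
 *	struct thread *td;
 *
 *	td = thread_alloc(pages);
 *	if (td == NULL)
 *		return (ENOMEM);	(limit hit or no stack)
 *	...				(link into a process, schedule)
 *
 * The matching release path is thread_free() below.
 */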
  775 int
  776 thread_alloc_stack(struct thread *td, int pages)
  777 {
  778 
  779         KASSERT(td->td_kstack == 0,
  780             ("thread_alloc_stack called on a thread with kstack"));
  781         if (!vm_thread_new(td, pages))
  782                 return (0);
  783         cpu_thread_alloc(td);
  784         return (1);
  785 }
  786 
  787 /*
  788  * Deallocate a thread.
  789  */
  790 static void
  791 thread_free_batched(struct thread *td)
  792 {
  793 
  794         lock_profile_thread_exit(td);
  795         if (td->td_cpuset)
  796                 cpuset_rel(td->td_cpuset);
  797         td->td_cpuset = NULL;
  798         cpu_thread_free(td);
  799         if (td->td_kstack != 0)
  800                 vm_thread_dispose(td);
  801         callout_drain(&td->td_slpcallout);
  802         /*
   803          * Freeing of the tid is handled by the caller.
  804          */
  805         td->td_tid = -1;
  806         kmsan_thread_free(td);
  807         uma_zfree(thread_zone, td);
  808 }
  809 
  810 void
  811 thread_free(struct thread *td)
  812 {
  813         lwpid_t tid;
  814 
  815         EVENTHANDLER_DIRECT_INVOKE(thread_dtor, td);
  816         tid = td->td_tid;
  817         thread_free_batched(td);
  818         tid_free(tid);
  819         thread_count_dec();
  820 }
  821 
  822 void
  823 thread_cow_get_proc(struct thread *newtd, struct proc *p)
  824 {
  825 
  826         PROC_LOCK_ASSERT(p, MA_OWNED);
  827         newtd->td_realucred = crcowget(p->p_ucred);
  828         newtd->td_ucred = newtd->td_realucred;
  829         newtd->td_limit = lim_hold(p->p_limit);
  830         newtd->td_cowgen = p->p_cowgen;
  831 }
  832 
  833 void
  834 thread_cow_get(struct thread *newtd, struct thread *td)
  835 {
  836 
  837         MPASS(td->td_realucred == td->td_ucred);
  838         newtd->td_realucred = crcowget(td->td_realucred);
  839         newtd->td_ucred = newtd->td_realucred;
  840         newtd->td_limit = lim_hold(td->td_limit);
  841         newtd->td_cowgen = td->td_cowgen;
  842 }
  843 
  844 void
  845 thread_cow_free(struct thread *td)
  846 {
  847 
  848         if (td->td_realucred != NULL)
  849                 crcowfree(td);
  850         if (td->td_limit != NULL)
  851                 lim_free(td->td_limit);
  852 }
  853 
  854 void
  855 thread_cow_update(struct thread *td)
  856 {
  857         struct proc *p;
  858         struct ucred *oldcred;
  859         struct plimit *oldlimit;
  860 
  861         p = td->td_proc;
  862         PROC_LOCK(p);
  863         oldcred = crcowsync();
  864         oldlimit = lim_cowsync();
  865         td->td_cowgen = p->p_cowgen;
  866         PROC_UNLOCK(p);
  867         if (oldcred != NULL)
  868                 crfree(oldcred);
  869         if (oldlimit != NULL)
  870                 lim_free(oldlimit);
  871 }
  872 
  873 void
  874 thread_cow_synced(struct thread *td)
  875 {
  876         struct proc *p;
  877 
  878         p = td->td_proc;
  879         PROC_LOCK_ASSERT(p, MA_OWNED);
  880         MPASS(td->td_cowgen != p->p_cowgen);
  881         MPASS(td->td_ucred == p->p_ucred);
  882         MPASS(td->td_limit == p->p_limit);
  883         td->td_cowgen = p->p_cowgen;
  884 }
  885 
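/*
 * The td_cowgen machinery above is a lazy snapshot protocol: a
 * process-wide credential or limit change bumps p_cowgen, and each
 * thread notices the mismatch on its next kernel/user crossing.  A
 * sketch of the consumer-side check (a userret()-style caller is
 * assumed):
 *
 *	if (td->td_cowgen != p->p_cowgen)
 *		thread_cow_update(td);
 *
 * after which td_ucred and td_limit again match the process copies.
 */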
  886 /*
  887  * Discard the current thread and exit from its context.
  888  * Always called with scheduler locked.
  889  *
  890  * Because we can't free a thread while we're operating under its context,
  891  * push the current thread into our CPU's deadthread holder. This means
  892  * we needn't worry about someone else grabbing our context before we
  893  * do a cpu_throw().
  894  */
  895 void
  896 thread_exit(void)
  897 {
  898         uint64_t runtime, new_switchtime;
  899         struct thread *td;
  900         struct thread *td2;
  901         struct proc *p;
  902         int wakeup_swapper;
  903 
  904         td = curthread;
  905         p = td->td_proc;
  906 
  907         PROC_SLOCK_ASSERT(p, MA_OWNED);
  908         mtx_assert(&Giant, MA_NOTOWNED);
  909 
  910         PROC_LOCK_ASSERT(p, MA_OWNED);
  911         KASSERT(p != NULL, ("thread exiting without a process"));
  912         CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
  913             (long)p->p_pid, td->td_name);
  914         SDT_PROBE0(proc, , , lwp__exit);
  915         KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));
  916         MPASS(td->td_realucred == td->td_ucred);
  917 
  918         /*
   919          * Drop FPU & debug register state storage, or any other
   920          * architecture-specific resources that
   921          * would not be present in a new, untouched process.
  922          */
  923         cpu_thread_exit(td);
  924 
   925         /*
   926          * The last thread is left attached to the process so that the
   927          * whole bundle gets recycled.  Skip all this stuff if we never
   928          * had threads.
   929          * exit1() clears all signs of other threads when it goes to
   930          * single threading, so the last thread always takes the short
   931          * path.
   932          */
  933         if (p->p_flag & P_HADTHREADS) {
  934                 if (p->p_numthreads > 1) {
  935                         atomic_add_int(&td->td_proc->p_exitthreads, 1);
  936                         thread_unlink(td);
  937                         td2 = FIRST_THREAD_IN_PROC(p);
  938                         sched_exit_thread(td2, td);
  939 
  940                         /*
  941                          * The test below is NOT true if we are the
   942                          * sole exiting thread.  P_STOPPED_SINGLE is unset
   943                          * in exit1() once it is the only survivor.
  944                          */
  945                         if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
  946                                 if (p->p_numthreads == p->p_suspcount) {
  947                                         thread_lock(p->p_singlethread);
  948                                         wakeup_swapper = thread_unsuspend_one(
  949                                                 p->p_singlethread, p, false);
  950                                         if (wakeup_swapper)
  951                                                 kick_proc0();
  952                                 }
  953                         }
  954 
  955                         PCPU_SET(deadthread, td);
  956                 } else {
   957                         /*
   958                          * The last thread is exiting, but not through exit().
   959                          */
   960                         panic("thread_exit: Last thread exiting on its own");
  961                 }
  962         } 
  963 #ifdef  HWPMC_HOOKS
  964         /*
  965          * If this thread is part of a process that is being tracked by hwpmc(4),
  966          * inform the module of the thread's impending exit.
  967          */
  968         if (PMC_PROC_IS_USING_PMCS(td->td_proc)) {
  969                 PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
  970                 PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_THR_EXIT, NULL);
  971         } else if (PMC_SYSTEM_SAMPLING_ACTIVE())
  972                 PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_THR_EXIT_LOG, NULL);
  973 #endif
  974         PROC_UNLOCK(p);
  975         PROC_STATLOCK(p);
  976         thread_lock(td);
  977         PROC_SUNLOCK(p);
  978 
  979         /* Do the same timestamp bookkeeping that mi_switch() would do. */
  980         new_switchtime = cpu_ticks();
  981         runtime = new_switchtime - PCPU_GET(switchtime);
  982         td->td_runtime += runtime;
  983         td->td_incruntime += runtime;
  984         PCPU_SET(switchtime, new_switchtime);
  985         PCPU_SET(switchticks, ticks);
  986         VM_CNT_INC(v_swtch);
  987 
  988         /* Save our resource usage in our process. */
  989         td->td_ru.ru_nvcsw++;
  990         ruxagg_locked(p, td);
  991         rucollect(&p->p_ru, &td->td_ru);
  992         PROC_STATUNLOCK(p);
  993 
  994         TD_SET_STATE(td, TDS_INACTIVE);
  995 #ifdef WITNESS
  996         witness_thread_exit(td);
  997 #endif
  998         CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
  999         sched_throw(td);
 1000         panic("I'm a teapot!");
 1001         /* NOTREACHED */
 1002 }
 1003 
  1004 /*
  1005  * Do any thread-specific cleanup that may be needed in wait().
  1006  * Called with Giant, proc and schedlock not held.
  1007  */
 1008 void
 1009 thread_wait(struct proc *p)
 1010 {
 1011         struct thread *td;
 1012 
 1013         mtx_assert(&Giant, MA_NOTOWNED);
 1014         KASSERT(p->p_numthreads == 1, ("multiple threads in thread_wait()"));
 1015         KASSERT(p->p_exitthreads == 0, ("p_exitthreads leaking"));
 1016         td = FIRST_THREAD_IN_PROC(p);
 1017         /* Lock the last thread so we spin until it exits cpu_throw(). */
 1018         thread_lock(td);
 1019         thread_unlock(td);
 1020         lock_profile_thread_exit(td);
 1021         cpuset_rel(td->td_cpuset);
 1022         td->td_cpuset = NULL;
 1023         cpu_thread_clean(td);
 1024         thread_cow_free(td);
 1025         callout_drain(&td->td_slpcallout);
 1026         thread_reap();  /* check for zombie threads etc. */
 1027 }
 1028 
 1029 /*
 1030  * Link a thread to a process.
  1031  * Set up anything that needs to be initialized for it to
 1032  * be used by the process.
 1033  */
 1034 void
 1035 thread_link(struct thread *td, struct proc *p)
 1036 {
 1037 
 1038         /*
 1039          * XXX This can't be enabled because it's called for proc0 before
 1040          * its lock has been created.
 1041          * PROC_LOCK_ASSERT(p, MA_OWNED);
 1042          */
 1043         TD_SET_STATE(td, TDS_INACTIVE);
 1044         td->td_proc     = p;
 1045         td->td_flags    = TDF_INMEM;
 1046 
 1047         LIST_INIT(&td->td_contested);
 1048         LIST_INIT(&td->td_lprof[0]);
 1049         LIST_INIT(&td->td_lprof[1]);
 1050 #ifdef EPOCH_TRACE
 1051         SLIST_INIT(&td->td_epochs);
 1052 #endif
 1053         sigqueue_init(&td->td_sigqueue, p);
 1054         callout_init(&td->td_slpcallout, 1);
 1055         TAILQ_INSERT_TAIL(&p->p_threads, td, td_plist);
 1056         p->p_numthreads++;
 1057 }
 1058 
 1059 /*
 1060  * Called from:
 1061  *  thread_exit()
 1062  */
 1063 void
 1064 thread_unlink(struct thread *td)
 1065 {
 1066         struct proc *p = td->td_proc;
 1067 
 1068         PROC_LOCK_ASSERT(p, MA_OWNED);
 1069 #ifdef EPOCH_TRACE
 1070         MPASS(SLIST_EMPTY(&td->td_epochs));
 1071 #endif
 1072 
 1073         TAILQ_REMOVE(&p->p_threads, td, td_plist);
 1074         p->p_numthreads--;
  1075         /* Could clear a few other things here. */
  1076         /* Must NOT clear links to proc! */
 1077 }
 1078 
 1079 static int
 1080 calc_remaining(struct proc *p, int mode)
 1081 {
 1082         int remaining;
 1083 
 1084         PROC_LOCK_ASSERT(p, MA_OWNED);
 1085         PROC_SLOCK_ASSERT(p, MA_OWNED);
 1086         if (mode == SINGLE_EXIT)
 1087                 remaining = p->p_numthreads;
 1088         else if (mode == SINGLE_BOUNDARY)
 1089                 remaining = p->p_numthreads - p->p_boundary_count;
 1090         else if (mode == SINGLE_NO_EXIT || mode == SINGLE_ALLPROC)
 1091                 remaining = p->p_numthreads - p->p_suspcount;
 1092         else
 1093                 panic("calc_remaining: wrong mode %d", mode);
 1094         return (remaining);
 1095 }
 1096 
 1097 static int
 1098 remain_for_mode(int mode)
 1099 {
 1100 
 1101         return (mode == SINGLE_ALLPROC ? 0 : 1);
 1102 }
 1103 
 1104 static int
 1105 weed_inhib(int mode, struct thread *td2, struct proc *p)
 1106 {
 1107         int wakeup_swapper;
 1108 
 1109         PROC_LOCK_ASSERT(p, MA_OWNED);
 1110         PROC_SLOCK_ASSERT(p, MA_OWNED);
 1111         THREAD_LOCK_ASSERT(td2, MA_OWNED);
 1112 
 1113         wakeup_swapper = 0;
 1114 
 1115         /*
  1116          * Since the thread lock is dropped by the scheduler, we have
  1117          * to retry and re-check for races.
 1118          */
 1119 restart:
 1120         switch (mode) {
 1121         case SINGLE_EXIT:
 1122                 if (TD_IS_SUSPENDED(td2)) {
 1123                         wakeup_swapper |= thread_unsuspend_one(td2, p, true);
 1124                         thread_lock(td2);
 1125                         goto restart;
 1126                 }
 1127                 if (TD_CAN_ABORT(td2)) {
 1128                         wakeup_swapper |= sleepq_abort(td2, EINTR);
 1129                         return (wakeup_swapper);
 1130                 }
 1131                 break;
 1132         case SINGLE_BOUNDARY:
 1133         case SINGLE_NO_EXIT:
 1134                 if (TD_IS_SUSPENDED(td2) &&
 1135                     (td2->td_flags & TDF_BOUNDARY) == 0) {
 1136                         wakeup_swapper |= thread_unsuspend_one(td2, p, false);
 1137                         thread_lock(td2);
 1138                         goto restart;
 1139                 }
 1140                 if (TD_CAN_ABORT(td2)) {
 1141                         wakeup_swapper |= sleepq_abort(td2, ERESTART);
 1142                         return (wakeup_swapper);
 1143                 }
 1144                 break;
 1145         case SINGLE_ALLPROC:
  1146                 /*
  1147                  * ALLPROC suspend tries to avoid spurious EINTR for
  1148                  * threads sleeping interruptibly, by suspending the
  1149                  * thread directly, similarly to sig_suspend_threads().
  1150                  * Since such a sleep is not necessarily performed at
  1151                  * the user boundary, TDF_ALLPROCSUSP is used to avoid
  1152                  * an immediate un-suspend.
  1153                  */
 1154                 if (TD_IS_SUSPENDED(td2) && (td2->td_flags &
 1155                     TDF_ALLPROCSUSP) == 0) {
 1156                         wakeup_swapper |= thread_unsuspend_one(td2, p, false);
 1157                         thread_lock(td2);
 1158                         goto restart;
 1159                 }
 1160                 if (TD_CAN_ABORT(td2)) {
 1161                         td2->td_flags |= TDF_ALLPROCSUSP;
 1162                         wakeup_swapper |= sleepq_abort(td2, ERESTART);
 1163                         return (wakeup_swapper);
 1164                 }
 1165                 break;
 1166         default:
 1167                 break;
 1168         }
 1169         thread_unlock(td2);
 1170         return (wakeup_swapper);
 1171 }
 1172 
  1173 /*
  1174  * Enforce single-threading.
  1175  *
  1176  * Returns 1 if the caller must abort (another thread is waiting to
  1177  * exit the process or similar).  Process is locked!
  1178  * Returns 0 when you are successfully the only thread running.
  1179  * A process has successfully single threaded in the suspend mode when
  1180  * there are no threads in user mode.  Threads in the kernel must be
  1181  * allowed to continue until they get to the user boundary.  They may
  1182  * even copy out their return values and data before suspending.  They
  1183  * may, however, be accelerated in reaching the user boundary, as we
  1184  * will wake up any sleeping threads that are interruptible (PCATCH).
  1185  */
 1186 int
 1187 thread_single(struct proc *p, int mode)
 1188 {
 1189         struct thread *td;
 1190         struct thread *td2;
 1191         int remaining, wakeup_swapper;
 1192 
 1193         td = curthread;
 1194         KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
 1195             mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
 1196             ("invalid mode %d", mode));
 1197         /*
 1198          * If allowing non-ALLPROC singlethreading for non-curproc
 1199          * callers, calc_remaining() and remain_for_mode() should be
 1200          * adjusted to also account for td->td_proc != p.  For now
 1201          * this is not implemented because it is not used.
 1202          */
 1203         KASSERT((mode == SINGLE_ALLPROC && td->td_proc != p) ||
 1204             (mode != SINGLE_ALLPROC && td->td_proc == p),
 1205             ("mode %d proc %p curproc %p", mode, p, td->td_proc));
 1206         mtx_assert(&Giant, MA_NOTOWNED);
 1207         PROC_LOCK_ASSERT(p, MA_OWNED);
 1208 
 1209         /*
 1210          * Is someone already single threading?
  1211          * Or maybe single threading is not needed at all.
 1212          */
 1213         if (mode == SINGLE_ALLPROC) {
 1214                 while ((p->p_flag & P_STOPPED_SINGLE) != 0) {
 1215                         if ((p->p_flag2 & P2_WEXIT) != 0)
 1216                                 return (1);
 1217                         msleep(&p->p_flag, &p->p_mtx, PCATCH, "thrsgl", 0);
 1218                 }
 1219         } else if ((p->p_flag & P_HADTHREADS) == 0)
 1220                 return (0);
 1221         if (p->p_singlethread != NULL && p->p_singlethread != td)
 1222                 return (1);
 1223 
 1224         if (mode == SINGLE_EXIT) {
 1225                 p->p_flag |= P_SINGLE_EXIT;
 1226                 p->p_flag &= ~P_SINGLE_BOUNDARY;
 1227         } else {
 1228                 p->p_flag &= ~P_SINGLE_EXIT;
 1229                 if (mode == SINGLE_BOUNDARY)
 1230                         p->p_flag |= P_SINGLE_BOUNDARY;
 1231                 else
 1232                         p->p_flag &= ~P_SINGLE_BOUNDARY;
 1233         }
 1234         if (mode == SINGLE_ALLPROC)
 1235                 p->p_flag |= P_TOTAL_STOP;
 1236         p->p_flag |= P_STOPPED_SINGLE;
 1237         PROC_SLOCK(p);
 1238         p->p_singlethread = td;
 1239         remaining = calc_remaining(p, mode);
 1240         while (remaining != remain_for_mode(mode)) {
 1241                 if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
 1242                         goto stopme;
 1243                 wakeup_swapper = 0;
 1244                 FOREACH_THREAD_IN_PROC(p, td2) {
 1245                         if (td2 == td)
 1246                                 continue;
 1247                         thread_lock(td2);
 1248                         ast_sched_locked(td2, TDA_SUSPEND);
 1249                         if (TD_IS_INHIBITED(td2)) {
 1250                                 wakeup_swapper |= weed_inhib(mode, td2, p);
 1251 #ifdef SMP
 1252                         } else if (TD_IS_RUNNING(td2)) {
 1253                                 forward_signal(td2);
 1254                                 thread_unlock(td2);
 1255 #endif
 1256                         } else
 1257                                 thread_unlock(td2);
 1258                 }
 1259                 if (wakeup_swapper)
 1260                         kick_proc0();
 1261                 remaining = calc_remaining(p, mode);
 1262 
 1263                 /*
  1264                  * Maybe we suspended some threads; was it enough?
 1265                  */
 1266                 if (remaining == remain_for_mode(mode))
 1267                         break;
 1268 
 1269 stopme:
 1270                 /*
 1271                  * Wake us up when everyone else has suspended.
  1272                  * In the meantime we suspend as well.
 1273                  */
 1274                 thread_suspend_switch(td, p);
 1275                 remaining = calc_remaining(p, mode);
 1276         }
 1277         if (mode == SINGLE_EXIT) {
 1278                 /*
 1279                  * Convert the process to an unthreaded process.  The
  1280                  * SINGLE_EXIT mode is used by exit1() and execve(); in
  1281                  * both cases the other threads must be retired.
 1282                  */
 1283                 KASSERT(p->p_numthreads == 1, ("Unthreading with >1 threads"));
 1284                 p->p_singlethread = NULL;
 1285                 p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_HADTHREADS);
 1286 
 1287                 /*
 1288                  * Wait for any remaining threads to exit cpu_throw().
 1289                  */
 1290                 while (p->p_exitthreads != 0) {
 1291                         PROC_SUNLOCK(p);
 1292                         PROC_UNLOCK(p);
 1293                         sched_relinquish(td);
 1294                         PROC_LOCK(p);
 1295                         PROC_SLOCK(p);
 1296                 }
 1297         } else if (mode == SINGLE_BOUNDARY) {
 1298                 /*
 1299                  * Wait until all suspended threads are removed from
 1300                  * the processors.  The thread_suspend_check()
 1301                  * increments p_boundary_count while it is still
  1302                  * running, which would otherwise make it possible for
  1303                  * the execve() to destroy the vmspace while our other
  1304                  * threads are still using the address space.
 1305                  *
 1306                  * We lock the thread, which is only allowed to
  1307                  * succeed after the context switch code has finished
  1308                  * using the address space.
 1309                  */
 1310                 FOREACH_THREAD_IN_PROC(p, td2) {
 1311                         if (td2 == td)
 1312                                 continue;
 1313                         thread_lock(td2);
 1314                         KASSERT((td2->td_flags & TDF_BOUNDARY) != 0,
 1315                             ("td %p not on boundary", td2));
 1316                         KASSERT(TD_IS_SUSPENDED(td2),
 1317                             ("td %p is not suspended", td2));
 1318                         thread_unlock(td2);
 1319                 }
 1320         }
 1321         PROC_SUNLOCK(p);
 1322         return (0);
 1323 }
 1324 
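/*
 * A usage sketch following the execve() pattern described above (the
 * errno choice is illustrative):
 *
 *	PROC_LOCK(p);
 *	if (thread_single(p, SINGLE_BOUNDARY)) {
 *		PROC_UNLOCK(p);
 *		return (ERESTART);
 *	}
 *	...				(now the only thread running)
 *	thread_single_end(p, SINGLE_BOUNDARY);
 *	PROC_UNLOCK(p);
 */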
 1325 bool
 1326 thread_suspend_check_needed(void)
 1327 {
 1328         struct proc *p;
 1329         struct thread *td;
 1330 
 1331         td = curthread;
 1332         p = td->td_proc;
 1333         PROC_LOCK_ASSERT(p, MA_OWNED);
 1334         return (P_SHOULDSTOP(p) || ((p->p_flag & P_TRACED) != 0 &&
 1335             (td->td_dbgflags & TDB_SUSPEND) != 0));
 1336 }
 1337 
 1338 /*
  1339  * Called from locations that can safely check to see
 1340  * whether we have to suspend or at least throttle for a
 1341  * single-thread event (e.g. fork).
 1342  *
 1343  * Such locations include userret().
  1344  * If the "return_instead" argument is nonzero, the thread must be able to
  1345  * accept 0 (caller may continue) or 1 (caller must abort) as a result.
 1346  *
 1347  * The 'return_instead' argument tells the function if it may do a
 1348  * thread_exit() or suspend, or whether the caller must abort and back
 1349  * out instead.
 1350  *
 1351  * If the thread that set the single_threading request has set the
 1352  * P_SINGLE_EXIT bit in the process flags then this call will never return
 1353  * if 'return_instead' is false, but will exit.
 1354  *
 1355  * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 1356  *---------------+--------------------+---------------------
 1357  *       0       | returns 0          |   returns 0 or 1
 1358  *               | when ST ends       |   immediately
 1359  *---------------+--------------------+---------------------
 1360  *       1       | thread exits       |   returns 1
 1361  *               |                    |  immediately
 1362  * 0 = thread_exit() or suspension ok,
 1363  * other = return error instead of stopping the thread.
 1364  *
  1365  * While a full suspension is in effect, even a single-threading
 1366  * thread would be suspended if it made this call (but it shouldn't).
 1367  * This call should only be made from places where
 1368  * thread_exit() would be safe as that may be the outcome unless
 1369  * return_instead is set.
 1370  */
 1371 int
 1372 thread_suspend_check(int return_instead)
 1373 {
 1374         struct thread *td;
 1375         struct proc *p;
 1376         int wakeup_swapper;
 1377 
 1378         td = curthread;
 1379         p = td->td_proc;
 1380         mtx_assert(&Giant, MA_NOTOWNED);
 1381         PROC_LOCK_ASSERT(p, MA_OWNED);
 1382         while (thread_suspend_check_needed()) {
 1383                 if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
 1384                         KASSERT(p->p_singlethread != NULL,
 1385                             ("singlethread not set"));
  1386                         /*
  1387                          * The only suspension in action is single-threading.
  1388                          * The single threader need not stop.  It is safe
  1389                          * to access p->p_singlethread unlocked because it
  1390                          * can only be set to our address by us.
  1391                          */
 1392                         if (p->p_singlethread == td)
 1393                                 return (0);     /* Exempt from stopping. */
 1394                 }
 1395                 if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
 1396                         return (EINTR);
 1397 
  1398                 /* Should we go to the user boundary if we didn't come from there? */
 1399                 if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
 1400                     (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
 1401                         return (ERESTART);
 1402 
 1403                 /*
 1404                  * Ignore suspend requests if they are deferred.
 1405                  */
 1406                 if ((td->td_flags & TDF_SBDRY) != 0) {
 1407                         KASSERT(return_instead,
 1408                             ("TDF_SBDRY set for unsafe thread_suspend_check"));
 1409                         KASSERT((td->td_flags & (TDF_SEINTR | TDF_SERESTART)) !=
 1410                             (TDF_SEINTR | TDF_SERESTART),
 1411                             ("both TDF_SEINTR and TDF_SERESTART"));
 1412                         return (TD_SBDRY_INTR(td) ? TD_SBDRY_ERRNO(td) : 0);
 1413                 }
 1414 
 1415                 /*
 1416                  * If the process is waiting for us to exit,
  1417                  * this thread should just exit.
 1418                  * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
 1419                  */
 1420                 if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
 1421                         PROC_UNLOCK(p);
 1422 
 1423                         /*
  1424                          * Allow the Linux emulation layer to do some work
  1425                          * before the thread exits.
 1426                          */
 1427                         if (__predict_false(p->p_sysent->sv_thread_detach != NULL))
 1428                                 (p->p_sysent->sv_thread_detach)(td);
 1429                         umtx_thread_exit(td);
 1430                         kern_thr_exit(td);
 1431                         panic("stopped thread did not exit");
 1432                 }
 1433 
 1434                 PROC_SLOCK(p);
 1435                 thread_stopped(p);
 1436                 if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
 1437                         if (p->p_numthreads == p->p_suspcount + 1) {
 1438                                 thread_lock(p->p_singlethread);
 1439                                 wakeup_swapper = thread_unsuspend_one(
 1440                                     p->p_singlethread, p, false);
 1441                                 if (wakeup_swapper)
 1442                                         kick_proc0();
 1443                         }
 1444                 }
 1445                 PROC_UNLOCK(p);
 1446                 thread_lock(td);
 1447                 /*
 1448                  * When a thread suspends, it just
 1449                  * gets taken off all queues.
 1450                  */
 1451                 thread_suspend_one(td);
 1452                 if (return_instead == 0) {
 1453                         p->p_boundary_count++;
 1454                         td->td_flags |= TDF_BOUNDARY;
 1455                 }
 1456                 PROC_SUNLOCK(p);
 1457                 mi_switch(SW_INVOL | SWT_SUSPEND);
 1458                 PROC_LOCK(p);
 1459         }
 1460         return (0);
 1461 }
 1462 
 1463 /*
 1464  * Check for possible stops and suspensions while executing a
 1465  * casueword or similar transiently failing operation.
 1466  *
 1467  * The sleep argument controls whether the function can handle a stop
  1468  * request itself or whether it should return ERESTART and have the
  1469  * request processed at the kernel/user boundary in ast.
 1470  *
 1471  * Typically, when retrying due to casueword(9) failure (rv == 1), we
  1472  * should handle the stop requests there, with the exception of cases
  1473  * when the thread owns a kernel resource, for instance has busied the
  1474  * umtx key, or when functions return immediately if thread_check_susp()
  1475  * returned non-zero.  On the other hand, when retrying the whole lock
  1476  * operation, we had better not stop there but delegate the handling to
 1477  * ast.
 1478  *
  1479  * If the request is for thread termination, P_SINGLE_EXIT, we cannot
 1480  * handle it at all, and simply return EINTR.
 1481  */
 1482 int
 1483 thread_check_susp(struct thread *td, bool sleep)
 1484 {
 1485         struct proc *p;
 1486         int error;
 1487 
 1488         /*
 1489          * The check for TDA_SUSPEND is racy, but it is enough to
 1490          * eventually break the lockstep loop.
 1491          */
 1492         if (!td_ast_pending(td, TDA_SUSPEND))
 1493                 return (0);
 1494         error = 0;
 1495         p = td->td_proc;
 1496         PROC_LOCK(p);
 1497         if (p->p_flag & P_SINGLE_EXIT)
 1498                 error = EINTR;
 1499         else if (P_SHOULDSTOP(p) ||
 1500             ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_SUSPEND)))
 1501                 error = sleep ? thread_suspend_check(0) : ERESTART;
 1502         PROC_UNLOCK(p);
 1503         return (error);
 1504 }
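
/*
 * Editorial example (not part of kern_thread.c): the casueword(9) retry
 * loop described in the comment above, loosely modeled on the callers in
 * kern_umtx.c.  The umutex field and all local names are illustrative.
 */
#if 0   /* illustrative only */
        uint32_t owner, id;
        int rv, error;

        for (;;) {
                rv = casueword32(&m->m_owner, owner, &owner, id);
                if (rv == -1)
                        return (EFAULT);
                if (rv == 0)
                        break;          /* the CAS succeeded */
                /* Transient failure: honor any pending stop request. */
                error = thread_check_susp(td, true);
                if (error != 0)
                        return (error); /* EINTR or ERESTART */
        }
#endif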
 1505 
 1506 void
 1507 thread_suspend_switch(struct thread *td, struct proc *p)
 1508 {
 1509 
 1510         KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
 1511         PROC_LOCK_ASSERT(p, MA_OWNED);
 1512         PROC_SLOCK_ASSERT(p, MA_OWNED);
 1513         /*
 1514          * We implement thread_suspend_one in stages here to avoid
 1515          * dropping the proc lock while the thread lock is owned.
 1516          */
 1517         if (p == td->td_proc) {
 1518                 thread_stopped(p);
 1519                 p->p_suspcount++;
 1520         }
 1521         PROC_UNLOCK(p);
 1522         thread_lock(td);
 1523         ast_unsched_locked(td, TDA_SUSPEND);
 1524         TD_SET_SUSPENDED(td);
 1525         sched_sleep(td, 0);
 1526         PROC_SUNLOCK(p);
 1527         DROP_GIANT();
 1528         mi_switch(SW_VOL | SWT_SUSPEND);
 1529         PICKUP_GIANT();
 1530         PROC_LOCK(p);
 1531         PROC_SLOCK(p);
 1532 }
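
/*
 * Editorial note: thread_suspend_switch() is the self-suspend path used
 * by thread_single() to park the requesting thread until the remaining
 * threads have suspended.  The p == td->td_proc test above matters for
 * SINGLE_ALLPROC, where p may belong to a different process than the
 * suspending thread.
 */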
 1533 
 1534 void
 1535 thread_suspend_one(struct thread *td)
 1536 {
 1537         struct proc *p;
 1538 
 1539         p = td->td_proc;
 1540         PROC_SLOCK_ASSERT(p, MA_OWNED);
 1541         THREAD_LOCK_ASSERT(td, MA_OWNED);
 1542         KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
 1543         p->p_suspcount++;
 1544         ast_unsched_locked(td, TDA_SUSPEND);
 1545         TD_SET_SUSPENDED(td);
 1546         sched_sleep(td, 0);
 1547 }
 1548 
 1549 static int
 1550 thread_unsuspend_one(struct thread *td, struct proc *p, bool boundary)
 1551 {
 1552 
 1553         THREAD_LOCK_ASSERT(td, MA_OWNED);
 1554         KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
 1555         TD_CLR_SUSPENDED(td);
 1556         td->td_flags &= ~TDF_ALLPROCSUSP;
 1557         if (td->td_proc == p) {
 1558                 PROC_SLOCK_ASSERT(p, MA_OWNED);
 1559                 p->p_suspcount--;
 1560                 if (boundary && (td->td_flags & TDF_BOUNDARY) != 0) {
 1561                         td->td_flags &= ~TDF_BOUNDARY;
 1562                         p->p_boundary_count--;
 1563                 }
 1564         }
 1565         return (setrunnable(td, 0));
 1566 }
 1567 
 1568 void
 1569 thread_run_flash(struct thread *td)
 1570 {
 1571         struct proc *p;
 1572 
 1573         p = td->td_proc;
 1574         PROC_LOCK_ASSERT(p, MA_OWNED);
 1575 
 1576         if (TD_ON_SLEEPQ(td))
 1577                 sleepq_remove_nested(td);
 1578         else
 1579                 thread_lock(td);
 1580 
 1581         THREAD_LOCK_ASSERT(td, MA_OWNED);
 1582         KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
 1583 
 1584         TD_CLR_SUSPENDED(td);
 1585         PROC_SLOCK(p);
 1586         MPASS(p->p_suspcount > 0);
 1587         p->p_suspcount--;
 1588         PROC_SUNLOCK(p);
 1589         if (setrunnable(td, 0))
 1590                 kick_proc0();
 1591 }
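
/*
 * Editorial note: unlike thread_unsuspend_one(), thread_run_flash()
 * makes a single suspended thread runnable while the process as a whole
 * remains stopped: it takes the proc spinlock itself, and it copes with
 * the thread sitting on a sleep queue (via sleepq_remove_nested) as
 * well as being merely suspended.
 */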
 1592 
 1593 /*
 1594  * Allow all threads blocked by single threading to continue running.
 1595  */
 1596 void
 1597 thread_unsuspend(struct proc *p)
 1598 {
 1599         struct thread *td;
 1600         int wakeup_swapper;
 1601 
 1602         PROC_LOCK_ASSERT(p, MA_OWNED);
 1603         PROC_SLOCK_ASSERT(p, MA_OWNED);
 1604         wakeup_swapper = 0;
 1605         if (!P_SHOULDSTOP(p)) {
 1606                 FOREACH_THREAD_IN_PROC(p, td) {
 1607                         thread_lock(td);
 1608                         if (TD_IS_SUSPENDED(td))
 1609                                 wakeup_swapper |= thread_unsuspend_one(td, p,
 1610                                     true);
 1611                         else
 1612                                 thread_unlock(td);
 1613                 }
 1614         } else if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
 1615             p->p_numthreads == p->p_suspcount) {
 1616                 /*
 1617                  * Stopping everything also did the job for the single
 1618                  * threading request.  Now that we have downgraded to
 1619                  * single-threaded mode, let it continue.
 1620                  */
 1621                 if (p->p_singlethread->td_proc == p) {
 1622                         thread_lock(p->p_singlethread);
 1623                         wakeup_swapper = thread_unsuspend_one(
 1624                             p->p_singlethread, p, false);
 1625                 }
 1626         }
 1627         if (wakeup_swapper)
 1628                 kick_proc0();
 1629 }
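
/*
 * Editorial example (not part of kern_thread.c): a sketch of a
 * SIGCONT-style continue path under the locking regime that
 * thread_unsuspend() asserts.  The flag names are real; the surrounding
 * code is illustrative only.
 */
#if 0   /* illustrative only */
        PROC_LOCK(p);
        PROC_SLOCK(p);
        p->p_flag &= ~(P_STOPPED_SIG | P_STOPPED_TRACE);
        thread_unsuspend(p);            /* resume every suspended thread */
        PROC_SUNLOCK(p);
        PROC_UNLOCK(p);
#endif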
 1630 
 1631 /*
 1632  * End the single-threading mode.
 1633  */
 1634 void
 1635 thread_single_end(struct proc *p, int mode)
 1636 {
 1637         struct thread *td;
 1638         int wakeup_swapper;
 1639 
 1640         KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
 1641             mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
 1642             ("invalid mode %d", mode));
 1643         PROC_LOCK_ASSERT(p, MA_OWNED);
 1644         KASSERT((mode == SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) != 0) ||
 1645             (mode != SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) == 0),
 1646             ("mode %d does not match P_TOTAL_STOP", mode));
 1647         KASSERT(mode == SINGLE_ALLPROC || p->p_singlethread == curthread,
 1648             ("thread_single_end from other thread %p %p",
 1649             curthread, p->p_singlethread));
 1650         KASSERT(mode != SINGLE_BOUNDARY ||
 1651             (p->p_flag & P_SINGLE_BOUNDARY) != 0,
 1652             ("mis-matched SINGLE_BOUNDARY flags %x", p->p_flag));
 1653         p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY |
 1654             P_TOTAL_STOP);
 1655         PROC_SLOCK(p);
 1656         p->p_singlethread = NULL;
 1657         wakeup_swapper = 0;
 1658         /*
 1659          * If there are other threads, they may now run, unless of
 1660          * course there is a blanket 'stop order' on the process.
 1661          * The single threader, however, must be allowed to continue,
 1662          * as this is a bad place to stop.
 1663          */
 1664         if (p->p_numthreads != remain_for_mode(mode) && !P_SHOULDSTOP(p)) {
 1665                 FOREACH_THREAD_IN_PROC(p, td) {
 1666                         thread_lock(td);
 1667                         if (TD_IS_SUSPENDED(td)) {
 1668                                 wakeup_swapper |= thread_unsuspend_one(td, p,
 1669                                     true);
 1670                         } else
 1671                                 thread_unlock(td);
 1672                 }
 1673         }
 1674         KASSERT(mode != SINGLE_BOUNDARY || p->p_boundary_count == 0,
 1675             ("inconsistent boundary count %d", p->p_boundary_count));
 1676         PROC_SUNLOCK(p);
 1677         if (wakeup_swapper)
 1678                 kick_proc0();
 1679         wakeup(&p->p_flag);
 1680 }
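
/*
 * Editorial example (not part of kern_thread.c): the usual bracket around
 * a process-wide operation, in the style of execve(2)'s use of
 * SINGLE_BOUNDARY.  Error handling is illustrative.
 */
#if 0   /* illustrative only */
        PROC_LOCK(p);
        if (thread_single(p, SINGLE_BOUNDARY) != 0) {
                PROC_UNLOCK(p);
                return (ERESTART);
        }
        /* ... all other threads are now held at the boundary ... */
        thread_single_end(p, SINGLE_BOUNDARY);
        PROC_UNLOCK(p);
#endif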
 1681 
 1682 /*
 1683  * Locate a thread by number and return with proc lock held.
 1684  *
 1685  * Thread exit establishes proc -> tidhash lock ordering, but lookup
 1686  * takes tidhash first and needs to return a locked proc.
 1687  *
 1688  * The problem is worked around by relying on type-safety of both
 1689  * structures and doing the work in 2 steps:
 1690  * - tidhash-locked lookup which saves both thread and proc pointers
 1691  * - proc-locked verification that the found thread still matches
 1692  */
 1693 static bool
 1694 tdfind_hash(lwpid_t tid, pid_t pid, struct proc **pp, struct thread **tdp)
 1695 {
 1696 #define RUN_THRESH      16
 1697         struct proc *p;
 1698         struct thread *td;
 1699         int run;
 1700         bool locked;
 1701 
 1702         run = 0;
 1703         rw_rlock(TIDHASHLOCK(tid));
 1704         locked = true;
 1705         LIST_FOREACH(td, TIDHASH(tid), td_hash) {
 1706                 if (td->td_tid != tid) {
 1707                         run++;
 1708                         continue;
 1709                 }
 1710                 p = td->td_proc;
 1711                 if (pid != -1 && p->p_pid != pid) {
 1712                         td = NULL;
 1713                         break;
 1714                 }
 1715                 if (run > RUN_THRESH) {
 1716                         if (rw_try_upgrade(TIDHASHLOCK(tid))) {
 1717                                 LIST_REMOVE(td, td_hash);
 1718                                 LIST_INSERT_HEAD(TIDHASH(td->td_tid),
 1719                                         td, td_hash);
 1720                                 rw_wunlock(TIDHASHLOCK(tid));
 1721                                 locked = false;
 1722                                 break;
 1723                         }
 1724                 }
 1725                 break;
 1726         }
 1727         if (locked)
 1728                 rw_runlock(TIDHASHLOCK(tid));
 1729         if (td == NULL)
 1730                 return (false);
 1731         *pp = p;
 1732         *tdp = td;
 1733         return (true);
 1734 }
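
/*
 * Editorial note: the RUN_THRESH logic above is a move-to-front
 * heuristic.  Once a lookup has walked past more than RUN_THRESH (16)
 * non-matching entries, the code tries to upgrade to the write lock and
 * moves the found thread to the head of its hash chain, shortening
 * future lookups of a hot tid.  If the upgrade fails, the lookup still
 * succeeds; the reordering is purely opportunistic.
 */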
 1735 
 1736 struct thread *
 1737 tdfind(lwpid_t tid, pid_t pid)
 1738 {
 1739         struct proc *p;
 1740         struct thread *td;
 1741 
 1742         td = curthread;
 1743         if (td->td_tid == tid) {
 1744                 if (pid != -1 && td->td_proc->p_pid != pid)
 1745                         return (NULL);
 1746                 PROC_LOCK(td->td_proc);
 1747                 return (td);
 1748         }
 1749 
 1750         for (;;) {
 1751                 if (!tdfind_hash(tid, pid, &p, &td))
 1752                         return (NULL);
 1753                 PROC_LOCK(p);
 1754                 if (td->td_tid != tid) {
 1755                         PROC_UNLOCK(p);
 1756                         continue;
 1757                 }
 1758                 if (td->td_proc != p) {
 1759                         PROC_UNLOCK(p);
 1760                         continue;
 1761                 }
 1762                 if (p->p_state == PRS_NEW) {
 1763                         PROC_UNLOCK(p);
 1764                         return (NULL);
 1765                 }
 1766                 return (td);
 1767         }
 1768 }
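
/*
 * Editorial example (not part of kern_thread.c): typical tdfind() usage,
 * in the style of the thr_kill(2) family.  A pid of -1 matches any
 * process; on success the containing proc is returned locked and the
 * caller must drop that lock.
 */
#if 0   /* illustrative only */
        td = tdfind(tid, -1);
        if (td == NULL)
                return (ESRCH);
        /* ... act on td while td->td_proc stays locked ... */
        PROC_UNLOCK(td->td_proc);
#endif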
 1769 
 1770 void
 1771 tidhash_add(struct thread *td)
 1772 {
 1773         rw_wlock(TIDHASHLOCK(td->td_tid));
 1774         LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash);
 1775         rw_wunlock(TIDHASHLOCK(td->td_tid));
 1776 }
 1777 
 1778 void
 1779 tidhash_remove(struct thread *td)
 1780 {
 1781 
 1782         rw_wlock(TIDHASHLOCK(td->td_tid));
 1783         LIST_REMOVE(td, td_hash);
 1784         rw_wunlock(TIDHASHLOCK(td->td_tid));
 1785 }
