
FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_thread.c


    1 /*-
    2  * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
    3  *  All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice(s), this list of conditions and the following disclaimer as
   10  *    the first lines of this file unmodified other than the possible
   11  *    addition of one or more copyright notices.
   12  * 2. Redistributions in binary form must reproduce the above copyright
   13  *    notice(s), this list of conditions and the following disclaimer in the
   14  *    documentation and/or other materials provided with the distribution.
   15  *
   16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
   17  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
   18  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
   19  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
   20  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
   21  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
   22  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
   23  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
   26  * DAMAGE.
   27  */
   28 
   29 #include <sys/cdefs.h>
   30 __FBSDID("$FreeBSD: releng/5.4/sys/kern/kern_thread.c 145335 2005-04-20 19:11:07Z cvs2svn $");
   31 
   32 #include <sys/param.h>
   33 #include <sys/systm.h>
   34 #include <sys/kernel.h>
   35 #include <sys/lock.h>
   36 #include <sys/mutex.h>
   37 #include <sys/proc.h>
   38 #include <sys/smp.h>
   39 #include <sys/sysctl.h>
   40 #include <sys/sched.h>
   41 #include <sys/sleepqueue.h>
   42 #include <sys/turnstile.h>
   43 #include <sys/ktr.h>
   44 
   45 #include <vm/vm.h>
   46 #include <vm/vm_extern.h>
   47 #include <vm/uma.h>
   48 
   49 /*
   50  * KSEGRP related storage.
   51  */
   52 static uma_zone_t ksegrp_zone;
   53 static uma_zone_t thread_zone;
   54 
   55 /* DEBUG ONLY */
   56 SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");
   57 static int thread_debug = 0;
   58 SYSCTL_INT(_kern_threads, OID_AUTO, debug, CTLFLAG_RW,
   59         &thread_debug, 0, "thread debug");
   60 
   61 int max_threads_per_proc = 1500;
   62 SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
   63         &max_threads_per_proc, 0, "Limit on threads per proc");
   64 
   65 int max_groups_per_proc = 1500;
   66 SYSCTL_INT(_kern_threads, OID_AUTO, max_groups_per_proc, CTLFLAG_RW,
   67         &max_groups_per_proc, 0, "Limit on thread groups per proc");
   68 
   69 int max_threads_hits;
   70 SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
   71         &max_threads_hits, 0, "");
   72 
   73 int virtual_cpu;
   74 
   75 TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
   76 TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps);
   77 struct mtx kse_zombie_lock;
   78 MTX_SYSINIT(kse_zombie_lock, &kse_zombie_lock, "kse zombie lock", MTX_SPIN);
   79 
   80 static int
   81 sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
   82 {
   83         int error, new_val;
   84         int def_val;
   85 
   86         def_val = mp_ncpus;
   87         if (virtual_cpu == 0)
   88                 new_val = def_val;
   89         else
   90                 new_val = virtual_cpu;
   91         error = sysctl_handle_int(oidp, &new_val, 0, req);
   92         if (error != 0 || req->newptr == NULL)
   93                 return (error);
   94         if (new_val < 0)
   95                 return (EINVAL);
   96         virtual_cpu = new_val;
   97         return (0);
   98 }
   99 
  100 /* DEBUG ONLY */
  101 SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW,
  102         0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I",
  103         "debug virtual cpus");
  104 
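A brief usage note on the handler above: sysctl_handle_int() performs the copy to and from the requesting process, and the handler commits the new value only after validating it. Assuming the stock sysctl(8) utility, the knob is exported as kern.threads.virtual_cpu and can be read, or written as root (illustrative commands, not part of the source):

        $ sysctl kern.threads.virtual_cpu
        # sysctl kern.threads.virtual_cpu=4
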
  105 /*
  106  * Thread ID allocator. The allocator keeps track of assigned IDs by
  107  * using a bitmap. The bitmap is created in parts. The parts are linked
  108  * together.
  109  */
  110 typedef u_long tid_bitmap_word;
  111 
  112 #define TID_IDS_PER_PART        1024
  113 #define TID_IDS_PER_IDX         (sizeof(tid_bitmap_word) << 3)
  114 #define TID_BITMAP_SIZE         (TID_IDS_PER_PART / TID_IDS_PER_IDX)
  115 #define TID_MIN                 (PID_MAX + 1)
  116 
  117 struct tid_bitmap_part {
  118         STAILQ_ENTRY(tid_bitmap_part) bmp_next;
  119         tid_bitmap_word bmp_bitmap[TID_BITMAP_SIZE];
  120         lwpid_t         bmp_base;
  121         int             bmp_free;
  122 };
  123 
  124 static STAILQ_HEAD(, tid_bitmap_part) tid_bitmap =
  125     STAILQ_HEAD_INITIALIZER(tid_bitmap);
  126 static uma_zone_t tid_zone;
  127 
  128 struct mtx tid_lock;
  129 MTX_SYSINIT(tid_lock, &tid_lock, "TID lock", MTX_DEF);
  130 
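thread_init() and thread_fini() below allocate and release IDs against these parts: find a word with a set bit, claim the lowest one with ffsl(), and derive the ID from the part base plus the word and bit offsets. Here is a minimal, stand-alone user-space sketch of the same "1 = free" convention; the names are hypothetical and a single fixed part replaces the grown list (ffsl() is declared in <strings.h> on FreeBSD):

#include <assert.h>
#include <strings.h>                    /* ffsl() */

#define IDS_PER_WORD    (sizeof(unsigned long) << 3)
#define NWORDS          4               /* one fixed "part" of 4 words */

/* 1 = free, 0 = assigned, exactly as in the kernel bitmap above. */
static unsigned long id_bitmap[NWORDS] = { ~0UL, ~0UL, ~0UL, ~0UL };

/* Claim the lowest free ID, or -1 if the part is full. */
static long
id_alloc(void)
{
        int bit, idx;

        for (idx = 0; idx < NWORDS; idx++) {
                if (id_bitmap[idx] == 0UL)
                        continue;               /* word exhausted */
                bit = ffsl((long)id_bitmap[idx]) - 1;
                id_bitmap[idx] &= ~(1UL << bit);
                return (idx * (long)IDS_PER_WORD + bit);
        }
        return (-1);
}

/* Return an ID to the part, mirroring the math in thread_fini(). */
static void
id_free(long id)
{
        id_bitmap[id / (long)IDS_PER_WORD] |= 1UL << (id % (long)IDS_PER_WORD);
}

int
main(void)
{
        long a, b;

        a = id_alloc();
        b = id_alloc();
        assert(a == 0 && b == 1);
        id_free(a);
        assert(id_alloc() == a);        /* freed IDs are reused lowest-first */
        return (0);
}
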
  131 /*
  132  * Prepare a thread for use.
  133  */
  134 static int
  135 thread_ctor(void *mem, int size, void *arg, int flags)
  136 {
  137         struct thread   *td;
  138 
  139         td = (struct thread *)mem;
  140         td->td_state = TDS_INACTIVE;
  141         td->td_oncpu    = NOCPU;
  142 
  143         /*
  144          * Note that td_critnest begins life as 1 because the thread is not
  145          * running and is thereby implicitly waiting to be on the receiving
  146          * end of a context switch.  A context switch must occur inside a
  147          * critical section, and in fact, includes hand-off of the sched_lock.
  148          * After a context switch to a newly created thread, it will release
  149          * sched_lock for the first time, and its td_critnest will hit 0 for
  150          * the first time.  This happens on the far end of a context switch,
  151          * and when it context switches away from itself, it will in fact go
  152          * back into a critical section, and hand off the sched lock to the
  153          * next thread.
  154          */
  155         td->td_critnest = 1;
  156         return (0);
  157 }
  158 
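To make the td_critnest comment concrete: critical sections nest by counting, and only the outermost exit, when the count returns to zero, makes preemption possible again. A toy user-space model of that counting discipline (names are hypothetical; the real critical_enter()/critical_exit() also handle deferred switches):

#include <assert.h>

static int critnest = 1;        /* a new thread is born "inside" the switch */

static void
crit_enter(void)
{
        critnest++;
}

/* Returns 1 when the outermost section is left (preemption OK again). */
static int
crit_exit(void)
{
        assert(critnest > 0);
        return (--critnest == 0);
}

int
main(void)
{
        assert(crit_exit() == 1);       /* first exit after creation hits 0 */
        crit_enter();
        crit_enter();                   /* nested section */
        assert(crit_exit() == 0);       /* still inside the outer section */
        assert(crit_exit() == 1);
        return (0);
}
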
  159 /*
  160  * Reclaim a thread after use.
  161  */
  162 static void
  163 thread_dtor(void *mem, int size, void *arg)
  164 {
  165         struct thread *td;
  166 
  167         td = (struct thread *)mem;
  168 
  169 #ifdef INVARIANTS
  170         /* Verify that this thread is in a safe state to free. */
  171         switch (td->td_state) {
  172         case TDS_INHIBITED:
  173         case TDS_RUNNING:
  174         case TDS_CAN_RUN:
  175         case TDS_RUNQ:
  176                 /*
  177                  * We must never unlink a thread that is in one of
  178                  * these states, because it is currently active.
  179                  */
  180                 panic("bad state for thread unlinking");
  181                 /* NOTREACHED */
  182         case TDS_INACTIVE:
  183                 break;
  184         default:
  185                 panic("bad thread state");
  186                 /* NOTREACHED */
  187         }
  188 #endif
  189         sched_newthread(td);
  190 }
  191 
  192 /*
  193  * Initialize type-stable parts of a thread (when newly created).
  194  */
  195 static int
  196 thread_init(void *mem, int size, int flags)
  197 {
  198         struct thread *td;
  199         struct tid_bitmap_part *bmp, *new;
  200         int bit, idx;
  201 
  202         td = (struct thread *)mem;
  203 
  204         mtx_lock(&tid_lock);
  205         STAILQ_FOREACH(bmp, &tid_bitmap, bmp_next) {
  206                 if (bmp->bmp_free)
  207                         break;
  208         }
  209         /* Create a new bitmap if we run out of free bits. */
  210         if (bmp == NULL) {
  211                 mtx_unlock(&tid_lock);
  212                 new = uma_zalloc(tid_zone, M_WAITOK);
  213                 mtx_lock(&tid_lock);
  214                 bmp = STAILQ_LAST(&tid_bitmap, tid_bitmap_part, bmp_next);
  215                 if (bmp == NULL || bmp->bmp_free < TID_IDS_PER_PART/2) {
  216                         /* 1=free, 0=assigned. This way we can use ffsl(). */
  217                         memset(new->bmp_bitmap, ~0U, sizeof(new->bmp_bitmap));
  218                         new->bmp_base = (bmp == NULL) ? TID_MIN :
  219                             bmp->bmp_base + TID_IDS_PER_PART;
  220                         new->bmp_free = TID_IDS_PER_PART;
  221                         STAILQ_INSERT_TAIL(&tid_bitmap, new, bmp_next);
  222                         bmp = new;
  223                         new = NULL;
  224                 }
  225         } else
  226                 new = NULL;
  227         /* We have a bitmap with available IDs. */
  228         idx = 0;
  229         while (idx < TID_BITMAP_SIZE && bmp->bmp_bitmap[idx] == 0UL)
  230                 idx++;
  231         bit = ffsl(bmp->bmp_bitmap[idx]) - 1;
  232         td->td_tid = bmp->bmp_base + idx * TID_IDS_PER_IDX + bit;
  233         bmp->bmp_bitmap[idx] &= ~(1UL << bit);
  234         bmp->bmp_free--;
  235         mtx_unlock(&tid_lock);
  236         if (new != NULL)
  237                 uma_zfree(tid_zone, new);
  238 
  239         vm_thread_new(td, 0);
  240         cpu_thread_setup(td);
  241         td->td_sleepqueue = sleepq_alloc();
  242         td->td_turnstile = turnstile_alloc();
  243         td->td_sched = (struct td_sched *)&td[1];
  244         sched_newthread(td);
  245         return (0);
  246 }
  247 
  248 /*
  249  * Tear down type-stable parts of a thread (just before being discarded).
  250  */
  251 static void
  252 thread_fini(void *mem, int size)
  253 {
  254         struct thread *td;
  255         struct tid_bitmap_part *bmp;
  256         lwpid_t tid;
  257         int bit, idx;
  258 
  259         td = (struct thread *)mem;
  260         turnstile_free(td->td_turnstile);
  261         sleepq_free(td->td_sleepqueue);
  262         vm_thread_dispose(td);
  263 
  264         STAILQ_FOREACH(bmp, &tid_bitmap, bmp_next) {
  265                 if (td->td_tid >= bmp->bmp_base &&
  266                     td->td_tid < bmp->bmp_base + TID_IDS_PER_PART)
  267                         break;
  268         }
  269         KASSERT(bmp != NULL, ("No TID bitmap?"));
  270         mtx_lock(&tid_lock);
  271         tid = td->td_tid - bmp->bmp_base;
  272         idx = tid / TID_IDS_PER_IDX;
  273         bit = tid % TID_IDS_PER_IDX;    /* an index, so the shift is done in word width */
  274         bmp->bmp_bitmap[idx] |= (1UL << bit);
  275         bmp->bmp_free++;
  276         mtx_unlock(&tid_lock);
  277 }
  278 
  279 /*
  280  * Prepare a ksegrp for use (this ctor runs on every allocation).
  281  */
  282 static int
  283 ksegrp_ctor(void *mem, int size, void *arg, int flags)
  284 {
  285         struct ksegrp   *kg;
  286 
  287         kg = (struct ksegrp *)mem;
  288         bzero(mem, size);
  289         kg->kg_sched = (struct kg_sched *)&kg[1];
  290         return (0);
  291 }
  292 
  293 void
  294 ksegrp_link(struct ksegrp *kg, struct proc *p)
  295 {
  296 
  297         TAILQ_INIT(&kg->kg_threads);
  298         TAILQ_INIT(&kg->kg_runq);       /* links with td_runq */
  299         TAILQ_INIT(&kg->kg_slpq);       /* links with td_runq */
  300         TAILQ_INIT(&kg->kg_upcalls);    /* all upcall structure in ksegrp */
  301         kg->kg_proc = p;
  302         /*
  303          * The following counters are in the zeroed section
  304          * and may not need clearing.
  305          */
  306         kg->kg_numthreads = 0;
  307         kg->kg_runnable   = 0;
  308         kg->kg_numupcalls = 0;
  309         /* link it in now that it's consistent */
  310         p->p_numksegrps++;
  311         TAILQ_INSERT_HEAD(&p->p_ksegrps, kg, kg_ksegrp);
  312 }
  313 
  314 /*
  315  * Called from:
  316  *   thread_exit()
  317  */
  318 void
  319 ksegrp_unlink(struct ksegrp *kg)
  320 {
  321         struct proc *p;
  322 
  323         mtx_assert(&sched_lock, MA_OWNED);
  324         KASSERT((kg->kg_numthreads == 0), ("ksegrp_unlink: residual threads"));
  325         KASSERT((kg->kg_numupcalls == 0), ("ksegrp_unlink: residual upcalls"));
  326 
  327         p = kg->kg_proc;
  328         TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
  329         p->p_numksegrps--;
  330         /*
  331          * Aggregate stats from the KSE
  332          */
  333 }
  334 
  335 /*
  336  * For a newly created process,
  337  * link up all the structures and its initial threads etc.
  338  * called from:
  339  * {arch}/{arch}/machdep.c   ia64_init(), init386() etc.
  340  * proc_dtor() (should go away)
  341  * proc_init()
  342  */
  343 void
  344 proc_linkup(struct proc *p, struct ksegrp *kg, struct thread *td)
  345 {
  346 
  347         TAILQ_INIT(&p->p_ksegrps);           /* all ksegrps in proc */
  348         TAILQ_INIT(&p->p_threads);           /* all threads in proc */
  349         TAILQ_INIT(&p->p_suspended);         /* Threads suspended */
  350         p->p_numksegrps = 0;
  351         p->p_numthreads = 0;
  352 
  353         ksegrp_link(kg, p);
  354         thread_link(td, kg);
  355 }
  356 
  357 /*
  358  * Initialize global thread allocation resources.
  359  */
  360 void
  361 threadinit(void)
  362 {
  363 
  364         thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
  365             thread_ctor, thread_dtor, thread_init, thread_fini,
  366             UMA_ALIGN_CACHE, 0);
  367         tid_zone = uma_zcreate("TID", sizeof(struct tid_bitmap_part),
  368             NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
  369         ksegrp_zone = uma_zcreate("KSEGRP", sched_sizeof_ksegrp(),
  370             ksegrp_ctor, NULL, NULL, NULL,
  371             UMA_ALIGN_CACHE, 0);
  372         kseinit();      /* Set up KSE-specific state, e.g. the upcall zone. */
  373 }
  374 
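A note on the zone callbacks registered here: ctor/dtor run on every uma_zalloc()/uma_zfree(), while init/fini run only when an item enters or leaves the zone's backing store. That split is what keeps fields set up in thread_init(), such as the TID, "type-stable" across free/alloc cycles. A user-space mock of that ordering (a hypothetical one-slot zone, not the real UMA API):

#include <assert.h>

struct item {
        int tid;        /* "type-stable": set once by init, survives free/alloc */
        int state;      /* per-use: reset by ctor on every allocation */
};

static struct item slot;                /* one-slot "zone" backing store */
static int slot_initialized;

static struct item *
zone_alloc(void)
{
        if (!slot_initialized) {        /* init: once per backing item */
                slot.tid = 100;
                slot_initialized = 1;
        }
        slot.state = 0;                 /* ctor: on every allocation */
        return (&slot);
}

static void
zone_free(struct item *it)
{
        (void)it;       /* dtor would run here; init-set fields stay intact */
}

int
main(void)
{
        struct item *it = zone_alloc();

        it->state = 7;
        zone_free(it);
        it = zone_alloc();
        assert(it->tid == 100);         /* survived the free/alloc cycle */
        assert(it->state == 0);         /* per-use state was reconstructed */
        return (0);
}
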
  375 /*
  376  * Stash an embarrassingly extra thread into the zombie thread queue.
  377  */
  378 void
  379 thread_stash(struct thread *td)
  380 {
  381         mtx_lock_spin(&kse_zombie_lock);
  382         TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq);
  383         mtx_unlock_spin(&kse_zombie_lock);
  384 }
  385 
  386 /*
  387  * Stash an embarrassingly extra ksegrp into the zombie ksegrp queue.
  388  */
  389 void
  390 ksegrp_stash(struct ksegrp *kg)
  391 {
  392         mtx_lock_spin(&kse_zombie_lock);
  393         TAILQ_INSERT_HEAD(&zombie_ksegrps, kg, kg_ksegrp);
  394         mtx_unlock_spin(&kse_zombie_lock);
  395 }
  396 
  397 /*
  398  * Reap zombie KSE resources.
  399  */
  400 void
  401 thread_reap(void)
  402 {
  403         struct thread *td_first, *td_next;
  404         struct ksegrp *kg_first, *kg_next;
  405 
  406         /*
  407          * Don't even bother to lock if there are none at this instant;
  408          * we really don't care about the next instant.
  409          */
  410         if ((!TAILQ_EMPTY(&zombie_threads))
  411             || (!TAILQ_EMPTY(&zombie_ksegrps))) {
  412                 mtx_lock_spin(&kse_zombie_lock);
  413                 td_first = TAILQ_FIRST(&zombie_threads);
  414                 kg_first = TAILQ_FIRST(&zombie_ksegrps);
  415                 if (td_first)
  416                         TAILQ_INIT(&zombie_threads);
  417                 if (kg_first)
  418                         TAILQ_INIT(&zombie_ksegrps);
  419                 mtx_unlock_spin(&kse_zombie_lock);
  420                 while (td_first) {
  421                         td_next = TAILQ_NEXT(td_first, td_runq);
  422                         if (td_first->td_ucred)
  423                                 crfree(td_first->td_ucred);
  424                         thread_free(td_first);
  425                         td_first = td_next;
  426                 }
  427                 while (kg_first) {
  428                         kg_next = TAILQ_NEXT(kg_first, kg_ksegrp);
  429                         ksegrp_free(kg_first);
  430                         kg_first = kg_next;
  431                 }
  432                 /*
  433                  * There will always be a thread on the list if one of these
  434                  * is there.
  435                  */
  436                 kse_GC();
  437         }
  438 }
  439 
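thread_reap() uses a common two-step pattern: an unlocked peek at the list heads (a stale answer is harmless here), then one short lock hold to detach both lists wholesale, so the actual freeing happens without the lock. A user-space sketch of that detach-then-free step, with a plain singly-linked list standing in for the TAILQ (names are hypothetical):

#include <assert.h>
#include <pthread.h>
#include <stdlib.h>

struct node {
        struct node *next;
};

static pthread_mutex_t zlock = PTHREAD_MUTEX_INITIALIZER;
static struct node *zombies;            /* shared zombie list head */

static void
reap(void)
{
        struct node *n, *next;

        if (zombies == NULL)            /* unlocked peek; staleness is harmless */
                return;
        pthread_mutex_lock(&zlock);
        n = zombies;                    /* detach the whole list at once */
        zombies = NULL;
        pthread_mutex_unlock(&zlock);
        for (; n != NULL; n = next) {   /* free without holding the lock */
                next = n->next;
                free(n);
        }
}

int
main(void)
{
        struct node *n = malloc(sizeof(*n));

        assert(n != NULL);
        n->next = NULL;
        zombies = n;                    /* single-threaded demo: no lock needed */
        reap();
        assert(zombies == NULL);
        return (0);
}
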
  440 /*
  441  * Allocate a ksegrp.
  442  */
  443 struct ksegrp *
  444 ksegrp_alloc(void)
  445 {
  446         return (uma_zalloc(ksegrp_zone, M_WAITOK));
  447 }
  448 
  449 /*
  450  * Allocate a thread.
  451  */
  452 struct thread *
  453 thread_alloc(void)
  454 {
  455         thread_reap(); /* check if any zombies to get */
  456         return (uma_zalloc(thread_zone, M_WAITOK));
  457 }
  458 
  459 /*
  460  * Deallocate a ksegrp.
  461  */
  462 void
  463 ksegrp_free(struct ksegrp *td)
  464 {
  465         uma_zfree(ksegrp_zone, td);
  466 }
  467 
  468 /*
  469  * Deallocate a thread.
  470  */
  471 void
  472 thread_free(struct thread *td)
  473 {
  474 
  475         cpu_thread_clean(td);
  476         uma_zfree(thread_zone, td);
  477 }
  478 
  479 /*
  480  * Discard the current thread and exit from its context.
  481  * Always called with scheduler locked.
  482  *
  483  * Because we can't free a thread while we're operating under its context,
  484  * push the current thread into our CPU's deadthread holder. This means
  485  * we needn't worry about someone else grabbing our context before we
  486  * do a cpu_throw().  This may not be needed now as we are under schedlock.
  487  * Maybe we can just do a thread_stash() as thr_exit1 does.
  488  */
  489 /*  XXX
  490  * libthr expects its thread exit to return for the last
  491  * thread, meaning that the program is back to non-threaded
  492  * mode, I guess. Because we do this (cpu_throw) unconditionally
  493  * here, they have their own version of it (thr_exit1())
  494  * that doesn't do it all if this was the last thread.
  495  * It is also called from thread_suspend_check().
  496  * Of course in the end, they end up coming here through exit1()
  497  * anyhow.  After fixing 'thr' to play by the rules, we should be
  498  * able to merge these two functions together.
  499  *
  500  * called from:
  501  * exit1()
  502  * kse_exit()
  503  * thr_exit()
  504  * thread_user_enter()
  505  * thread_userret()
  506  * thread_suspend_check()
  507  */
  508 void
  509 thread_exit(void)
  510 {
  511         struct thread *td;
  512         struct proc *p;
  513         struct ksegrp   *kg;
  514 
  515         td = curthread;
  516         kg = td->td_ksegrp;
  517         p = td->td_proc;
  518 
  519         mtx_assert(&sched_lock, MA_OWNED);
  520         mtx_assert(&Giant, MA_NOTOWNED);
  521         PROC_LOCK_ASSERT(p, MA_OWNED);
  522         KASSERT(p != NULL, ("thread exiting without a process"));
  523         KASSERT(kg != NULL, ("thread exiting without a kse group"));
  524         CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
  525             (long)p->p_pid, p->p_comm);
  526 
  527         if (td->td_standin != NULL) {
  528                 /*
  529                  * Note that we don't need to free the cred here as it
  530                  * is done in thread_reap().
  531                  */
  532                 thread_stash(td->td_standin);
  533                 td->td_standin = NULL;
  534         }
  535 
  536         /*
  537          * Drop FPU and debug register state storage, or any other
  538          * architecture-specific resources that would not be present
  539          * in a new, untouched process.
  540          */
  541         cpu_thread_exit(td);    /* XXXSMP */
  542 
  543         /*
  544          * The thread is exiting; the scheduler can release its
  545          * state and collect stats, etc.
  546          */
  547         sched_thread_exit(td);
  548 
  549         /*
  550          * The last thread is left attached to the process
  551          * so that the whole bundle gets recycled. Skip
  552          * all this stuff if we never had threads.
  553          * exit1() clears all sign of other threads when
  554          * it goes to single-threading, so the last thread always
  555          * takes the short path.
  556          */
  557         if (p->p_flag & P_HADTHREADS) {
  558                 if (p->p_numthreads > 1) {
  559                         thread_unlink(td);
  560 
  561                         /* XXX first arg not used in 4BSD or ULE */
  562                         sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
  563 
  564                         /*
  565                          * As we are exiting, there is room for another
  566                          * thread to be created.
  567                          */
  568                         if (p->p_maxthrwaits)
  569                                 wakeup(&p->p_numthreads);
  570 
  571                         /*
  572                          * The test below is NOT true if we are the
  573                          * sole exiting thread. P_STOPPED_SINGLE is unset
  574                          * in exit1() after it is the only survivor.
  575                          */
  576                         if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
  577                                 if (p->p_numthreads == p->p_suspcount) {
  578                                         thread_unsuspend_one(p->p_singlethread);
  579                                 }
  580                         }
  581 
  582                         /*
  583                          * Because each upcall structure has an owner thread,
  584                          * and that thread exits only when the process is
  585                          * exiting, the upcall to userland is no longer
  586                          * needed and deleting the upcall structure is safe.
  587                          * So when all threads in a group have exited, all
  588                          * upcalls in the group should be freed automatically.
  589                          * XXXKSE This is a KSE thing and should be exported
  590                          * there somehow.
  591                          */
  592                         upcall_remove(td);
  593 
  594                         /*
  595                          * If the thread we unlinked above was the last one,
  596                          * then this ksegrp should go away too.
  597                          */
  598                         if (kg->kg_numthreads == 0) {
  599                                 /*
  600                                  * Let the scheduler know about this in case
  601                                  * it needs to recover stats or resources.
  602                                  * Theoretically we could let
  603                                  * sched_exit_ksegrp() do the equivalent of
  604                                  * setting the concurrency to 0,
  605                                  * but don't do it yet, to avoid changing
  606                                  * the existing scheduler code until we
  607                                  * are ready.
  608                                  * We supply an arbitrary other ksegrp
  609                                  * as the recipient of any built-up
  610                                  * CPU usage etc. (if the scheduler wants it).
  611                                  * XXXKSE
  612                                  * This is probably not fair, so think of
  613                                  * a better answer.
  614                                  */
  615                                 sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), td);
  616                                 sched_set_concurrency(kg, 0); /* XXX TEMP */
  617                                 ksegrp_unlink(kg);
  618                                 ksegrp_stash(kg);
  619                         }
  620                         PROC_UNLOCK(p);
  621                         td->td_ksegrp   = NULL;
  622                         PCPU_SET(deadthread, td);
  623                 } else {
  624                         /*
  625                          * The last thread is exiting, but not through exit().
  626                          * What should we do?
  627                          * Theoretically this can't happen:
  628                          * exit1() - clears threading flags before coming here
  629                          * kse_exit() - treats last thread specially
  630                          * thr_exit() - treats last thread specially
  631                          * thread_user_enter() - only if more exist
  632                          * thread_userret() - only if more exist
  633                          * thread_suspend_check() - only if more exist
  634                          */
  635                         panic("thread_exit: Last thread exiting on its own");
  636                 }
  637         } else {
  638                 /*
  639                  * A non-threaded process comes here.
  640                  * This includes an ex-threaded process that is coming
  641                  * here via exit1() (exit1() dethreads the proc first).
  642                  */
  643                 PROC_UNLOCK(p);
  644         }
  645         td->td_state = TDS_INACTIVE;
  646         CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
  647         cpu_throw(td, choosethread());
  648         panic("I'm a teapot!");
  649         /* NOTREACHED */
  650 }
  651 
  652 /*
  653  * Do any thread-specific cleanups that may be needed in wait().
  654  * Called with Giant, proc and sched locks not held.
  655  */
  656 void
  657 thread_wait(struct proc *p)
  658 {
  659         struct thread *td;
  660 
  661         mtx_assert(&Giant, MA_NOTOWNED);
  662         KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
  663         KASSERT((p->p_numksegrps == 1), ("Multiple ksegrps in wait1()"));
  664         FOREACH_THREAD_IN_PROC(p, td) {
  665                 if (td->td_standin != NULL) {
  666                         if (td->td_standin->td_ucred != NULL) {
  667                                 crfree(td->td_standin->td_ucred);
  668                                 td->td_standin->td_ucred = NULL;
  669                         }
  670                         thread_free(td->td_standin);
  671                         td->td_standin = NULL;
  672                 }
  673                 cpu_thread_clean(td);
  674                 crfree(td->td_ucred);
  675         }
  676         thread_reap();  /* check for zombie threads etc. */
  677 }
  678 
  679 /*
  680  * Link a thread to a process.
  681  * Set up anything that needs to be initialized for it to
  682  * be used by the process.
  683  *
  684  * Note that we do not link to the proc's ucred here.
  685  * The thread is linked as if running but no KSE assigned.
  686  * Called from:
  687  *  proc_linkup()
  688  *  thread_schedule_upcall()
  689  *  thr_create()
  690  */
  691 void
  692 thread_link(struct thread *td, struct ksegrp *kg)
  693 {
  694         struct proc *p;
  695 
  696         p = kg->kg_proc;
  697         td->td_state    = TDS_INACTIVE;
  698         td->td_proc     = p;
  699         td->td_ksegrp   = kg;
  700         td->td_flags    = 0;
  701         td->td_kflags   = 0;
  702 
  703         LIST_INIT(&td->td_contested);
  704         callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
  705         TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
  706         TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
  707         p->p_numthreads++;
  708         kg->kg_numthreads++;
  709 }
  710 
  711 /*
  712  * Convert a process with one thread to an unthreaded process.
  713  * Called from:
  714  *  thread_single(exit)  (called from execve and exit)
  715  *  kse_exit()          XXX may need cleaning up wrt KSE stuff
  716  */
  717 void
  718 thread_unthread(struct thread *td)
  719 {
  720         struct proc *p = td->td_proc;
  721 
  722         KASSERT((p->p_numthreads == 1), ("Unthreading with >1 threads"));
  723         upcall_remove(td);
  724         p->p_flag &= ~(P_SA|P_HADTHREADS);
  725         td->td_mailbox = NULL;
  726         td->td_pflags &= ~(TDP_SA | TDP_CAN_UNBIND);
  727         if (td->td_standin != NULL) {
  728                 thread_stash(td->td_standin);
  729                 td->td_standin = NULL;
  730         }
  731         sched_set_concurrency(td->td_ksegrp, 1);
  732 }
  733 
  734 /*
  735  * Called from:
  736  *  thread_exit()
  737  */
  738 void
  739 thread_unlink(struct thread *td)
  740 {
  741         struct proc *p = td->td_proc;
  742         struct ksegrp *kg = td->td_ksegrp;
  743 
  744         mtx_assert(&sched_lock, MA_OWNED);
  745         TAILQ_REMOVE(&p->p_threads, td, td_plist);
  746         p->p_numthreads--;
  747         TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
  748         kg->kg_numthreads--;
  749         /* We could clear a few other things here. */
  750         /* Must NOT clear the links to the proc and ksegrp! */
  751 }
  752 
  753 /*
  754  * Enforce single-threading.
  755  *
  756  * Returns 1 if the caller must abort (another thread is waiting to
  757  * exit the process or similar). Process is locked!
  758  * Returns 0 when you are successfully the only thread running.
  759  * A process has successfully single-threaded in the suspend mode when
  760  * there are no threads in user mode. Threads in the kernel must be
  761  * allowed to continue until they get to the user boundary. They may even
  762  * copy out their return values and data before suspending. They may,
  763  * however, be accelerated in reaching the user boundary as we will wake
  764  * up any sleeping threads that are interruptible (PCATCH).
  765  */
  766 int
  767 thread_single(int mode)
  768 {
  769         struct thread *td;
  770         struct thread *td2;
  771         struct proc *p;
  772         int remaining;
  773 
  774         td = curthread;
  775         p = td->td_proc;
  776         mtx_assert(&Giant, MA_NOTOWNED);
  777         PROC_LOCK_ASSERT(p, MA_OWNED);
  778         KASSERT((td != NULL), ("curthread is NULL"));
  779 
  780         if ((p->p_flag & P_HADTHREADS) == 0)
  781                 return (0);
  782 
  783         /* Is someone already single threading? */
  784         if (p->p_singlethread != NULL && p->p_singlethread != td)
  785                 return (1);
  786 
  787         if (mode == SINGLE_EXIT) {
  788                 p->p_flag |= P_SINGLE_EXIT;
  789                 p->p_flag &= ~P_SINGLE_BOUNDARY;
  790         } else {
  791                 p->p_flag &= ~P_SINGLE_EXIT;
  792                 if (mode == SINGLE_BOUNDARY)
  793                         p->p_flag |= P_SINGLE_BOUNDARY;
  794                 else
  795                         p->p_flag &= ~P_SINGLE_BOUNDARY;
  796         }
  797         p->p_flag |= P_STOPPED_SINGLE;
  798         mtx_lock_spin(&sched_lock);
  799         p->p_singlethread = td;
  800         if (mode == SINGLE_EXIT)
  801                 remaining = p->p_numthreads;
  802         else if (mode == SINGLE_BOUNDARY)
  803                 remaining = p->p_numthreads - p->p_boundary_count;
  804         else
  805                 remaining = p->p_numthreads - p->p_suspcount;
  806         while (remaining != 1) {
  807                 FOREACH_THREAD_IN_PROC(p, td2) {
  808                         if (td2 == td)
  809                                 continue;
  810                         td2->td_flags |= TDF_ASTPENDING;
  811                         if (TD_IS_INHIBITED(td2)) {
  812                                 switch (mode) {
  813                                 case SINGLE_EXIT:
  814                                         if (td->td_flags & TDF_DBSUSPEND)
  815                                                 td->td_flags &= ~TDF_DBSUSPEND;
  816                                         if (TD_IS_SUSPENDED(td2))
  817                                                 thread_unsuspend_one(td2);
  818                                         if (TD_ON_SLEEPQ(td2) &&
  819                                             (td2->td_flags & TDF_SINTR))
  820                                                 sleepq_abort(td2);
  821                                         break;
  822                                 case SINGLE_BOUNDARY:
  823                                         if (TD_IS_SUSPENDED(td2) &&
  824                                             !(td2->td_flags & TDF_BOUNDARY))
  825                                                 thread_unsuspend_one(td2);
  826                                         if (TD_ON_SLEEPQ(td2) &&
  827                                             (td2->td_flags & TDF_SINTR))
  828                                                 sleepq_abort(td2);
  829                                         break;
  830                                 default:        
  831                                         if (TD_IS_SUSPENDED(td2))
  832                                                 continue;
  833                                         /*
  834                                          * Maybe other inhibited states too?
  835                                          */
  836                                         if ((td2->td_flags & TDF_SINTR) &&
  837                                             (td2->td_inhibitors &
  838                                             (TDI_SLEEPING | TDI_SWAPPED)))
  839                                                 thread_suspend_one(td2);
  840                                         break;
  841                                 }
  842                         }
  843                 }
  844                 if (mode == SINGLE_EXIT)
  845                         remaining = p->p_numthreads;
  846                 else if (mode == SINGLE_BOUNDARY)
  847                         remaining = p->p_numthreads - p->p_boundary_count;
  848                 else
  849                         remaining = p->p_numthreads - p->p_suspcount;
  850 
  851                 /*
  852                  * Maybe we suspended some threads; was it enough?
  853                  */
  854                 if (remaining == 1)
  855                         break;
  856 
  857                 /*
  858                  * Wake us up when everyone else has suspended.
  859                  * In the meantime we suspend as well.
  860                  */
  861                 thread_suspend_one(td);
  862                 PROC_UNLOCK(p);
  863                 mi_switch(SW_VOL, NULL);
  864                 mtx_unlock_spin(&sched_lock);
  865                 PROC_LOCK(p);
  866                 mtx_lock_spin(&sched_lock);
  867                 if (mode == SINGLE_EXIT)
  868                         remaining = p->p_numthreads;
  869                 else if (mode == SINGLE_BOUNDARY)
  870                         remaining = p->p_numthreads - p->p_boundary_count;
  871                 else
  872                         remaining = p->p_numthreads - p->p_suspcount;
  873         }
  874         if (mode == SINGLE_EXIT) {
  875                 /*
  876                  * We have gotten rid of all the other threads and we
  877                  * are about to either exit or exec. In either case,
  878                  * we try our utmost to revert to being a non-threaded
  879                  * process.
  880                  */
  881                 p->p_singlethread = NULL;
  882                 p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT);
  883                 thread_unthread(td);
  884         }
  885         mtx_unlock_spin(&sched_lock);
  886         return (0);
  887 }
  888 
  889 /*
  890  * Called from locations that can safely check to see
  891  * whether we have to suspend or at least throttle for a
  892  * single-thread event (e.g. fork).
  893  *
  894  * Such locations include userret().
  895  * If the "return_instead" argument is non-zero, the thread must be able to
  896  * accept 0 (caller may continue), or 1 (caller must abort) as a result.
  897  *
  898  * The 'return_instead' argument tells the function whether it may do a
  899  * thread_exit() or suspend, or whether the caller must abort and back
  900  * out instead.
  901  *
  902  * If the thread that set the single-threading request has set the
  903  * P_SINGLE_EXIT bit in the process flags then this call will never return
  904  * if 'return_instead' is false, but will exit.
  905  *
  906  * P_SINGLE_EXIT | return_instead == 0 | return_instead != 0
  907  * --------------+---------------------+---------------------
  908  *       0       | returns 0           | returns 0 or 1
  909  *               | when ST ends        | immediately
  910  * --------------+---------------------+---------------------
  911  *       1       | thread exits        | returns 1
  912  *               |                     | immediately
  913  * 0 = thread_exit() or suspension ok,
  914  * other = return error instead of stopping the thread.
  915  *
  916  * While a full suspension is in effect, even a single-threading
  917  * thread would be suspended if it made this call (but it shouldn't).
  918  * This call should only be made from places where
  919  * thread_exit() would be safe as that may be the outcome unless
  920  * return_instead is set.
  921  */
  922 int
  923 thread_suspend_check(int return_instead)
  924 {
  925         struct thread *td;
  926         struct proc *p;
  927 
  928         td = curthread;
  929         p = td->td_proc;
  930         mtx_assert(&Giant, MA_NOTOWNED);
  931         PROC_LOCK_ASSERT(p, MA_OWNED);
  932         while (P_SHOULDSTOP(p) ||
  933               ((p->p_flag & P_TRACED) && (td->td_flags & TDF_DBSUSPEND))) {
  934                 if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
  935                         KASSERT(p->p_singlethread != NULL,
  936                             ("singlethread not set"));
  937                         /*
  938                          * The only suspension in action is
  939                          * single-threading. The single threader need not stop.
  940                          * XXX Should be safe to access unlocked
  941                          * as it can only be set to true by us.
  942                          */
  943                         if (p->p_singlethread == td)
  944                                 return (0);     /* Exempt from stopping. */
  945                 }
  946                 if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
  947                         return (1);
  948 
  949                 /* Should we go to the user boundary if we didn't come from there? */
  950                 if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
  951                     (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
  952                         return (1);
  953 
  954                 mtx_lock_spin(&sched_lock);
  955                 thread_stopped(p);
  956                 /*
  957                  * If the process is waiting for us to exit,
  958                  * this thread should just suicide.
  959                  * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
  960                  */
  961                 if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
  962                         thread_exit();
  963 
  964                 /*
  965                  * When a thread suspends, it just
  966                  * moves to the process's suspend queue
  967                  * and stays there.
  968                  */
  969                 thread_suspend_one(td);
  970                 if (return_instead == 0) {
  971                         p->p_boundary_count++;
  972                         td->td_flags |= TDF_BOUNDARY;
  973                 }
  974                 if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
  975                         if (p->p_numthreads == p->p_suspcount) 
  976                                 thread_unsuspend_one(p->p_singlethread);
  977                 }
  978                 PROC_UNLOCK(p);
  979                 mi_switch(SW_INVOL, NULL);
  980                 if (return_instead == 0) {
  981                         p->p_boundary_count--;
  982                         td->td_flags &= ~TDF_BOUNDARY;
  983                 }
  984                 mtx_unlock_spin(&sched_lock);
  985                 PROC_LOCK(p);
  986         }
  987         return (0);
  988 }
  989 
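The truth table in the comment above can be modeled directly. A tiny user-space rendering of the decision (a hypothetical helper; it collapses the "returns 0 or 1" cell to 0 and ignores the actual suspension machinery):

#include <assert.h>

#define MODEL_P_SINGLE_EXIT     0x1     /* stand-in for the real flag */

/*
 * -1 models "thread exits", 0 models "caller may continue" (possibly
 * after being suspended until single-threading ends), 1 models
 * "caller must abort".
 */
static int
suspend_check_model(int pflags, int return_instead)
{
        if (pflags & MODEL_P_SINGLE_EXIT)
                return (return_instead ? 1 : -1);
        return (0);
}

int
main(void)
{
        assert(suspend_check_model(0, 0) == 0);
        assert(suspend_check_model(0, 1) == 0);
        assert(suspend_check_model(MODEL_P_SINGLE_EXIT, 0) == -1);
        assert(suspend_check_model(MODEL_P_SINGLE_EXIT, 1) == 1);
        return (0);
}
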
  990 void
  991 thread_suspend_one(struct thread *td)
  992 {
  993         struct proc *p = td->td_proc;
  994 
  995         mtx_assert(&sched_lock, MA_OWNED);
  996         PROC_LOCK_ASSERT(p, MA_OWNED);
  997         KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
  998         p->p_suspcount++;
  999         TD_SET_SUSPENDED(td);
 1000         TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
 1001         /*
 1002          * Hack: If we are suspending but are on the sleep queue
 1003          * then we are in msleep or the cv equivalent. We
 1004          * want to look like we have two inhibitors.
 1005          * It may already be set; that doesn't matter.
 1006          */
 1007         if (TD_ON_SLEEPQ(td))
 1008                 TD_SET_SLEEPING(td);
 1009 }
 1010 
 1011 void
 1012 thread_unsuspend_one(struct thread *td)
 1013 {
 1014         struct proc *p = td->td_proc;
 1015 
 1016         mtx_assert(&sched_lock, MA_OWNED);
 1017         PROC_LOCK_ASSERT(p, MA_OWNED);
 1018         TAILQ_REMOVE(&p->p_suspended, td, td_runq);
 1019         TD_CLR_SUSPENDED(td);
 1020         p->p_suspcount--;
 1021         setrunnable(td);
 1022 }
 1023 
 1024 /*
 1025  * Allow all threads blocked by single threading to continue running.
 1026  */
 1027 void
 1028 thread_unsuspend(struct proc *p)
 1029 {
 1030         struct thread *td;
 1031 
 1032         mtx_assert(&sched_lock, MA_OWNED);
 1033         PROC_LOCK_ASSERT(p, MA_OWNED);
 1034         if (!P_SHOULDSTOP(p)) {
 1035                 while ((td = TAILQ_FIRST(&p->p_suspended))) {
 1036                         thread_unsuspend_one(td);
 1037                 }
 1038         } else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
 1039             (p->p_numthreads == p->p_suspcount)) {
 1040                 /*
 1041                  * Stopping everything also did the job for the single
 1042                  * threading request. Now we've downgraded to single-threaded,
 1043                  * let it continue.
 1044                  */
 1045                 thread_unsuspend_one(p->p_singlethread);
 1046         }
 1047 }
 1048 
 1049 /*
 1050  * End single-threading mode.
 1051  */
 1052 void
 1053 thread_single_end(void)
 1054 {
 1055         struct thread *td;
 1056         struct proc *p;
 1057 
 1058         td = curthread;
 1059         p = td->td_proc;
 1060         PROC_LOCK_ASSERT(p, MA_OWNED);
 1061         p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY);
 1062         mtx_lock_spin(&sched_lock);
 1063         p->p_singlethread = NULL;
 1064         /*
 1065          * If there are other threads they may now run,
 1066          * unless of course there is a blanket 'stop order'
 1067          * on the process. The single threader must be allowed
 1068          * to continue however as this is a bad place to stop.
 1069          */
 1070         if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
 1071                 while ((td = TAILQ_FIRST(&p->p_suspended))) {
 1072                         thread_unsuspend_one(td);
 1073                 }
 1074         }
 1075         mtx_unlock_spin(&sched_lock);
 1076 }
 1077 
 1078 /*
 1079  * Called before going into an interruptible sleep to see if we have been
 1080  * interrupted or requested to exit.
 1081  */
 1082 int
 1083 thread_sleep_check(struct thread *td)
 1084 {
 1085         struct proc *p;
 1086 
 1087         p = td->td_proc;
 1088         mtx_assert(&sched_lock, MA_OWNED);
 1089         if (p->p_flag & P_HADTHREADS) {
 1090                 if (p->p_singlethread != td) {
 1091                         if (p->p_flag & P_SINGLE_EXIT)
 1092                                 return (EINTR);
 1093                         if (p->p_flag & P_SINGLE_BOUNDARY)
 1094                                 return (ERESTART);
 1095                 }
 1096                 if (td->td_flags & TDF_INTERRUPT)
 1097                         return (td->td_intrval);
 1098         }
 1099         return (0);
 1100 }
