FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_thread.c


    1 /*-
    2  * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
    3  *  All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice(s), this list of conditions and the following disclaimer as
   10  *    the first lines of this file unmodified other than the possible
   11  *    addition of one or more copyright notices.
   12  * 2. Redistributions in binary form must reproduce the above copyright
   13  *    notice(s), this list of conditions and the following disclaimer in the
   14  *    documentation and/or other materials provided with the distribution.
   15  *
   16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
   17  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
   18  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
   19  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
   20  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
   21  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
   22  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
   23  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
   26  * DAMAGE.
   27  */
   28 
   29 #include <sys/cdefs.h>
   30 __FBSDID("$FreeBSD: releng/6.1/sys/kern/kern_thread.c 158179 2006-04-30 16:44:43Z cvs2svn $");
   31 
   32 #include <sys/param.h>
   33 #include <sys/systm.h>
   34 #include <sys/kernel.h>
   35 #include <sys/lock.h>
   36 #include <sys/mutex.h>
   37 #include <sys/proc.h>
   38 #include <sys/resourcevar.h>
   39 #include <sys/smp.h>
   40 #include <sys/sysctl.h>
   41 #include <sys/sched.h>
   42 #include <sys/sleepqueue.h>
   43 #include <sys/turnstile.h>
   44 #include <sys/ktr.h>
   45 #include <sys/umtx.h>
   46 
   47 #include <vm/vm.h>
   48 #include <vm/vm_extern.h>
   49 #include <vm/uma.h>
   50 
   51 /*
   52  * KSEGRP related storage.
   53  */
   54 static uma_zone_t ksegrp_zone;
   55 static uma_zone_t thread_zone;
   56 
   57 /* DEBUG ONLY */
   58 SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");
   59 static int thread_debug = 0;
   60 SYSCTL_INT(_kern_threads, OID_AUTO, debug, CTLFLAG_RW,
   61         &thread_debug, 0, "thread debug");
   62 
   63 int max_threads_per_proc = 1500;
   64 SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
   65         &max_threads_per_proc, 0, "Limit on threads per proc");
   66 
   67 int max_groups_per_proc = 1500;
   68 SYSCTL_INT(_kern_threads, OID_AUTO, max_groups_per_proc, CTLFLAG_RW,
   69         &max_groups_per_proc, 0, "Limit on thread groups per proc");
   70 
   71 int max_threads_hits;
   72 SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
   73         &max_threads_hits, 0, "");
   74 
   75 int virtual_cpu;
   76 
   77 TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
   78 TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps);
   79 struct mtx kse_zombie_lock;
   80 MTX_SYSINIT(kse_zombie_lock, &kse_zombie_lock, "kse zombie lock", MTX_SPIN);
   81 
   82 static int
   83 sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
   84 {
   85         int error, new_val;
   86         int def_val;
   87 
   88         def_val = mp_ncpus;
   89         if (virtual_cpu == 0)
   90                 new_val = def_val;
   91         else
   92                 new_val = virtual_cpu;
   93         error = sysctl_handle_int(oidp, &new_val, 0, req);
   94         if (error != 0 || req->newptr == NULL)
   95                 return (error);
   96         if (new_val < 0)
   97                 return (EINVAL);
   98         virtual_cpu = new_val;
   99         return (0);
  100 }
  101 
  102 /* DEBUG ONLY */
  103 SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW,
  104         0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I",
  105         "debug virtual cpus");
  106 
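      /*
       * Illustrative usage sketch (not part of the original file): the
       * declarations above attach these tunables to the kern.threads
       * sysctl node, so they can be inspected and, where marked
       * CTLFLAG_RW, adjusted from userland.  The OID names follow from
       * the SYSCTL_* macros above; the values shown are only examples.
       *
       *      $ sysctl kern.threads.max_threads_per_proc
       *      kern.threads.max_threads_per_proc: 1500
       *      # sysctl kern.threads.virtual_cpu=4
       */
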
  107 struct mtx tid_lock;
  108 static struct unrhdr *tid_unrhdr;
  109 
  110 /*
  111  * Prepare a thread for use.
  112  */
  113 static int
  114 thread_ctor(void *mem, int size, void *arg, int flags)
  115 {
  116         struct thread   *td;
  117 
  118         td = (struct thread *)mem;
  119         td->td_state = TDS_INACTIVE;
  120         td->td_oncpu = NOCPU;
  121 
  122         td->td_tid = alloc_unr(tid_unrhdr);
  123 
  124         /*
  125          * Note that td_critnest begins life as 1 because the thread is not
  126          * running and is thereby implicitly waiting to be on the receiving
  127          * end of a context switch.  A context switch must occur inside a
  128          * critical section, and in fact, includes hand-off of the sched_lock.
  129          * After a context switch to a newly created thread, it will release
  130          * sched_lock for the first time, and its td_critnest will hit 0 for
  131          * the first time.  This happens on the far end of a context switch,
  132          * and when it context switches away from itself, it will in fact go
  133          * back into a critical section, and hand off the sched lock to the
  134          * next thread.
  135          */
  136         td->td_critnest = 1;
  137         return (0);
  138 }
  139 
  140 /*
  141  * Reclaim a thread after use.
  142  */
  143 static void
  144 thread_dtor(void *mem, int size, void *arg)
  145 {
  146         struct thread *td;
  147 
  148         td = (struct thread *)mem;
  149 
  150 #ifdef INVARIANTS
  151         /* Verify that this thread is in a safe state to free. */
  152         switch (td->td_state) {
  153         case TDS_INHIBITED:
  154         case TDS_RUNNING:
  155         case TDS_CAN_RUN:
  156         case TDS_RUNQ:
  157                 /*
  158                  * We must never unlink a thread that is in one of
  159                  * these states, because it is currently active.
  160                  */
  161                 panic("bad state for thread unlinking");
  162                 /* NOTREACHED */
  163         case TDS_INACTIVE:
  164                 break;
  165         default:
  166                 panic("bad thread state");
  167                 /* NOTREACHED */
  168         }
  169 #endif
  170 
  171         free_unr(tid_unrhdr, td->td_tid);
  172         sched_newthread(td);
  173 }
  174 
  175 /*
  176  * Initialize type-stable parts of a thread (when newly created).
  177  */
  178 static int
  179 thread_init(void *mem, int size, int flags)
  180 {
  181         struct thread *td;
  182 
  183         td = (struct thread *)mem;
  184 
  185         vm_thread_new(td, 0);
  186         cpu_thread_setup(td);
  187         td->td_sleepqueue = sleepq_alloc();
  188         td->td_turnstile = turnstile_alloc();
  189         td->td_umtxq = umtxq_alloc();
  190         td->td_sched = (struct td_sched *)&td[1];
  191         sched_newthread(td);
  192         return (0);
  193 }
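
      /*
       * A note on the td_sched assignment above: thread_zone is created
       * in threadinit() below with an item size of sched_sizeof_thread(),
       * which is expected to cover struct thread plus the scheduler's
       * private per-thread data.  Taking &td[1] therefore yields a
       * pointer just past the struct thread, i.e. the start of that
       * scheduler-private area; ksegrp_ctor() uses the same layout trick
       * for kg_sched via &kg[1].
       */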
  194 
  195 /*
  196  * Tear down type-stable parts of a thread (just before being discarded).
  197  */
  198 static void
  199 thread_fini(void *mem, int size)
  200 {
  201         struct thread *td;
  202 
  203         td = (struct thread *)mem;
  204         turnstile_free(td->td_turnstile);
  205         sleepq_free(td->td_sleepqueue);
  206         umtxq_free(td->td_umtxq);
  207         vm_thread_dispose(td);
  208 }
  209 
  210 /*
  211  * Initialize type-stable parts of a ksegrp (when newly created).
  212  */
  213 static int
  214 ksegrp_ctor(void *mem, int size, void *arg, int flags)
  215 {
  216         struct ksegrp   *kg;
  217 
  218         kg = (struct ksegrp *)mem;
  219         bzero(mem, size);
  220         kg->kg_sched = (struct kg_sched *)&kg[1];
  221         return (0);
  222 }
  223 
  224 void
  225 ksegrp_link(struct ksegrp *kg, struct proc *p)
  226 {
  227 
  228         TAILQ_INIT(&kg->kg_threads);
  229         TAILQ_INIT(&kg->kg_runq);       /* links with td_runq */
  230         TAILQ_INIT(&kg->kg_upcalls);    /* all upcall structures in ksegrp */
  231         kg->kg_proc = p;
  232         /*
  233          * the following counters are in the -zero- section
  234          * and may not need clearing
  235          */
  236         kg->kg_numthreads = 0;
  237         kg->kg_numupcalls = 0;
  238         /* link it in now that it's consistent */
  239         p->p_numksegrps++;
  240         TAILQ_INSERT_HEAD(&p->p_ksegrps, kg, kg_ksegrp);
  241 }
  242 
  243 /*
  244  * Called from:
  245  *   thread-exit()
  246  */
  247 void
  248 ksegrp_unlink(struct ksegrp *kg)
  249 {
  250         struct proc *p;
  251 
  252         mtx_assert(&sched_lock, MA_OWNED);
  253         KASSERT((kg->kg_numthreads == 0), ("ksegrp_unlink: residual threads"));
  254         KASSERT((kg->kg_numupcalls == 0), ("ksegrp_unlink: residual upcalls"));
  255 
  256         p = kg->kg_proc;
  257         TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
  258         p->p_numksegrps--;
  259         /*
  260          * Aggregate stats from the KSE
  261          */
  262         if (p->p_procscopegrp == kg)
  263                 p->p_procscopegrp = NULL;
  264 }
  265 
  266 /*
  267  * For a newly created process,
  268  * link up all the structures and its initial threads etc.
  269  * called from:
  270  * {arch}/{arch}/machdep.c   ia64_init(), init386() etc.
  271  * proc_dtor() (should go away)
  272  * proc_init()
  273  */
  274 void
  275 proc_linkup(struct proc *p, struct ksegrp *kg, struct thread *td)
  276 {
  277 
  278         TAILQ_INIT(&p->p_ksegrps);           /* all ksegrps in proc */
  279         TAILQ_INIT(&p->p_threads);           /* all threads in proc */
  280         TAILQ_INIT(&p->p_suspended);         /* Threads suspended */
  281         p->p_numksegrps = 0;
  282         p->p_numthreads = 0;
  283 
  284         ksegrp_link(kg, p);
  285         thread_link(td, kg);
  286 }
  287 
  288 /*
  289  * Initialize global thread allocation resources.
  290  */
  291 void
  292 threadinit(void)
  293 {
  294 
  295         mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);
  296         tid_unrhdr = new_unrhdr(PID_MAX + 1, INT_MAX, &tid_lock);
  297 
  298         thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
  299             thread_ctor, thread_dtor, thread_init, thread_fini,
  300             UMA_ALIGN_CACHE, 0);
  301         ksegrp_zone = uma_zcreate("KSEGRP", sched_sizeof_ksegrp(),
  302             ksegrp_ctor, NULL, NULL, NULL,
  303             UMA_ALIGN_CACHE, 0);
  304         kseinit();      /* set up kse specific stuff  e.g. upcall zone*/
  305 }
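
      /*
       * How the UMA hooks registered above divide the work (standard
       * uma(9) behaviour, noted here for clarity): the ctor/dtor pair
       * runs on every uma_zalloc()/uma_zfree() of an item, while
       * init/fini run only when an item enters or leaves the zone's
       * backing store.  Expensive, type-stable state (kernel stack,
       * sleep queue, turnstile, umtx queue) is therefore set up once in
       * thread_init() and survives alloc/free cycles, while per-use
       * state (the TID, td_critnest) is handled in thread_ctor() and
       * thread_dtor().  thread_alloc() and thread_free() below are thin
       * wrappers around the zone operations.
       */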
  306 
  307 /*
  308  * Stash an embarrassingly extra thread into the zombie thread queue.
  309  */
  310 void
  311 thread_stash(struct thread *td)
  312 {
  313         mtx_lock_spin(&kse_zombie_lock);
  314         TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq);
  315         mtx_unlock_spin(&kse_zombie_lock);
  316 }
  317 
  318 /*
  319  * Stash an embarrassingly extra ksegrp into the zombie ksegrp queue.
  320  */
  321 void
  322 ksegrp_stash(struct ksegrp *kg)
  323 {
  324         mtx_lock_spin(&kse_zombie_lock);
  325         TAILQ_INSERT_HEAD(&zombie_ksegrps, kg, kg_ksegrp);
  326         mtx_unlock_spin(&kse_zombie_lock);
  327 }
  328 
  329 /*
  330  * Reap zombie kse resources.
  331  */
  332 void
  333 thread_reap(void)
  334 {
  335         struct thread *td_first, *td_next;
  336         struct ksegrp *kg_first, * kg_next;
  337 
  338         /*
  339          * Don't even bother to lock if none at this instant,
  340          * we really don't care about the next instant.
  341          */
  342         if ((!TAILQ_EMPTY(&zombie_threads))
  343             || (!TAILQ_EMPTY(&zombie_ksegrps))) {
  344                 mtx_lock_spin(&kse_zombie_lock);
  345                 td_first = TAILQ_FIRST(&zombie_threads);
  346                 kg_first = TAILQ_FIRST(&zombie_ksegrps);
  347                 if (td_first)
  348                         TAILQ_INIT(&zombie_threads);
  349                 if (kg_first)
  350                         TAILQ_INIT(&zombie_ksegrps);
  351                 mtx_unlock_spin(&kse_zombie_lock);
  352                 while (td_first) {
  353                         td_next = TAILQ_NEXT(td_first, td_runq);
  354                         if (td_first->td_ucred)
  355                                 crfree(td_first->td_ucred);
  356                         thread_free(td_first);
  357                         td_first = td_next;
  358                 }
  359                 while (kg_first) {
  360                         kg_next = TAILQ_NEXT(kg_first, kg_ksegrp);
  361                         ksegrp_free(kg_first);
  362                         kg_first = kg_next;
  363                 }
  364                 /*
  365                  * there will always be a thread on the list if one of these
  366                  * is there.
  367                  */
  368                 kse_GC();
  369         }
  370 }
  371 
  372 /*
  373  * Allocate a ksegrp.
  374  */
  375 struct ksegrp *
  376 ksegrp_alloc(void)
  377 {
  378         return (uma_zalloc(ksegrp_zone, M_WAITOK));
  379 }
  380 
  381 /*
  382  * Allocate a thread.
  383  */
  384 struct thread *
  385 thread_alloc(void)
  386 {
  387         thread_reap(); /* check if any zombies to get */
  388         return (uma_zalloc(thread_zone, M_WAITOK));
  389 }
  390 
  391 /*
  392  * Deallocate a ksegrp.
  393  */
  394 void
  395 ksegrp_free(struct ksegrp *td)
  396 {
  397         uma_zfree(ksegrp_zone, td);
  398 }
  399 
  400 /*
  401  * Deallocate a thread.
  402  */
  403 void
  404 thread_free(struct thread *td)
  405 {
  406 
  407         cpu_thread_clean(td);
  408         uma_zfree(thread_zone, td);
  409 }
  410 
  411 /*
  412  * Discard the current thread and exit from its context.
  413  * Always called with scheduler locked.
  414  *
  415  * Because we can't free a thread while we're operating under its context,
  416  * push the current thread into our CPU's deadthread holder. This means
  417  * we needn't worry about someone else grabbing our context before we
  418  * do a cpu_throw().  This may not be needed now as we are under schedlock.
  419  * Maybe we can just do a thread_stash() as thr_exit1 does.
  420  */
  421 /*  XXX
  422  * libthr expects its thread exit to return for the last
  423  * thread, meaning that the program is back to non-threaded
  424  * mode I guess. Because we do this (cpu_throw) unconditionally
  425  * here, they have their own version of it (thr_exit1())
  426  * that doesn't do it all if this was the last thread.
  427  * It is also called from thread_suspend_check().
  428  * Of course in the end, they end up coming here through exit1
  429  * anyhow..  After fixing 'thr' to play by the rules we should be able 
  430  * to merge these two functions together.
  431  *
  432  * called from:
  433  * exit1()
  434  * kse_exit()
  435  * thr_exit()
  436  * thread_user_enter()
  437  * thread_userret()
  438  * thread_suspend_check()
  439  */
  440 void
  441 thread_exit(void)
  442 {
  443         struct bintime new_switchtime;
  444         struct thread *td;
  445         struct proc *p;
  446         struct ksegrp   *kg;
  447 
  448         td = curthread;
  449         kg = td->td_ksegrp;
  450         p = td->td_proc;
  451 
  452         mtx_assert(&sched_lock, MA_OWNED);
  453         mtx_assert(&Giant, MA_NOTOWNED);
  454         PROC_LOCK_ASSERT(p, MA_OWNED);
  455         KASSERT(p != NULL, ("thread exiting without a process"));
  456         KASSERT(kg != NULL, ("thread exiting without a kse group"));
  457         CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
  458             (long)p->p_pid, p->p_comm);
  459 
  460         if (td->td_standin != NULL) {
  461                 /*
  462                  * Note that we don't need to free the cred here as it
  463                  * is done in thread_reap().
  464                  */
  465                 thread_stash(td->td_standin);
  466                 td->td_standin = NULL;
  467         }
  468 
  469         /*
  470          * drop FPU & debug register state storage, or any other
  471          * architecture specific resources that
  472          * would not be on a new untouched process.
  473          */
  474         cpu_thread_exit(td);    /* XXXSMP */
  475 
  476         /*
  477          * The thread is exiting; the scheduler can release its resources
  478          * and collect stats, etc.
  479          */
  480         sched_thread_exit(td);
  481 
  482         /* Do the same timestamp bookkeeping that mi_switch() would do. */
  483         binuptime(&new_switchtime);
  484         bintime_add(&p->p_rux.rux_runtime, &new_switchtime);
  485         bintime_sub(&p->p_rux.rux_runtime, PCPU_PTR(switchtime));
  486         PCPU_SET(switchtime, new_switchtime);
  487         PCPU_SET(switchticks, ticks);
  488         cnt.v_swtch++;
  489 
  490         /* Add our usage into the usage of all our children. */
  491         if (p->p_numthreads == 1)
  492                 ruadd(p->p_ru, &p->p_rux, &p->p_stats->p_cru, &p->p_crux);
  493 
  494         /*
  495          * The last thread is left attached to the process
  496          * so that the whole bundle gets recycled. Skip
  497          * all this stuff if we never had threads.
  498          * exit1() clears all signs of other threads when
  499          * it goes to single-threading, so the last thread always
  500          * takes the short path.
  501          */
  502         if (p->p_flag & P_HADTHREADS) {
  503                 if (p->p_numthreads > 1) {
  504                         thread_unlink(td);
  505 
  506                         /* XXX first arg not used in 4BSD or ULE */
  507                         sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
  508 
  509                         /*
  510                          * The test below is NOT true if we are the
  511                          * sole exiting thread. P_STOPPED_SINGLE is unset
  512                          * in exit1() once it is the only survivor.
  513                          */
  514                         if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
  515                                 if (p->p_numthreads == p->p_suspcount) {
  516                                         thread_unsuspend_one(p->p_singlethread);
  517                                 }
  518                         }
  519 
  520                         /*
  521                          * Because each upcall structure has an owner thread,
  522                          * and an owner thread exits only when the process is
  523                          * exiting, upcalls to userland are no longer needed
  524                          * and deleting the upcall structures is safe here.
  525                          * So when all threads in a group have exited, all
  526                          * upcalls in the group should be automatically freed.
  527                          *  XXXKSE This is a KSE thing and should be exported
  528                          * there somehow.
  529                          */
  530                         upcall_remove(td);
  531 
  532                         /*
  533                          * If the thread we unlinked above was the last one,
  534                          * then this ksegrp should go away too.
  535                          */
  536                         if (kg->kg_numthreads == 0) {
  537                                 /*
  538                                  * let the scheduler know about this in case
  539                                  * it needs to recover stats or resources.
  540                                  * Theoretically we could let
  541                                  * sched_exit_ksegrp()  do the equivalent of
  542                                  * setting the concurrency to 0
  543                                  * but don't do it yet to avoid changing
  544                                  * the existing scheduler code until we
  545                                  * are ready.
  546                                  * We supply a random other ksegrp
  547                                  * as the recipient of any built up
  548                                  * cpu usage etc. (If the scheduler wants it).
  549                                  * XXXKSE
  550                                  * This is probably not fair so think of
  551                                  * a better answer.
  552                                  */
  553                                 sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), td);
  554                                 sched_set_concurrency(kg, 0); /* XXX TEMP */
  555                                 ksegrp_unlink(kg);
  556                                 ksegrp_stash(kg);
  557                         }
  558                         PROC_UNLOCK(p);
  559                         td->td_ksegrp   = NULL;
  560                         PCPU_SET(deadthread, td);
  561                 } else {
  562                         /*
  563                          * The last thread is exiting, but not through exit();
  564                          * what should we do?
  565                          * Theoretically this can't happen:
  566                          * exit1() - clears threading flags before coming here
  567                          * kse_exit() - treats last thread specially
  568                          * thr_exit() - treats last thread specially
  569                          * thread_user_enter() - only if more exist
  570                          * thread_userret() - only if more exist
  571                          * thread_suspend_check() - only if more exist
  572                          */
  573                         panic ("thread_exit: Last thread exiting on its own");
  574                 }
  575         } else {
  576                 /*
  577                  * A non-threaded process comes here.
  578                  * This includes an ex-threaded process that is coming
  579                  * here via exit1() (exit1() dethreads the proc first).
  580                  */
  581                 PROC_UNLOCK(p);
  582         }
  583         td->td_state = TDS_INACTIVE;
  584         CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
  585         cpu_throw(td, choosethread());
  586         panic("I'm a teapot!");
  587         /* NOTREACHED */
  588 }
  589 
  590 /*
  591  * Do any thread-specific cleanups that may be needed in wait().
  592  * Called with Giant, proc and schedlock not held.
  593  */
  594 void
  595 thread_wait(struct proc *p)
  596 {
  597         struct thread *td;
  598 
  599         mtx_assert(&Giant, MA_NOTOWNED);
  600         KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
  601         KASSERT((p->p_numksegrps == 1), ("Multiple ksegrps in wait1()"));
  602         FOREACH_THREAD_IN_PROC(p, td) {
  603                 if (td->td_standin != NULL) {
  604                         if (td->td_standin->td_ucred != NULL) {
  605                                 crfree(td->td_standin->td_ucred);
  606                                 td->td_standin->td_ucred = NULL;
  607                         }
  608                         thread_free(td->td_standin);
  609                         td->td_standin = NULL;
  610                 }
  611                 cpu_thread_clean(td);
  612                 crfree(td->td_ucred);
  613         }
  614         thread_reap();  /* check for zombie threads etc. */
  615 }
  616 
  617 /*
  618  * Link a thread to a process.
  619  * Set up anything that needs to be initialized for it to
  620  * be used by the process.
  621  *
  622  * Note that we do not link to the proc's ucred here.
  623  * The thread is linked as if running but no KSE assigned.
  624  * Called from:
  625  *  proc_linkup()
  626  *  thread_schedule_upcall()
  627  *  thr_create()
  628  */
  629 void
  630 thread_link(struct thread *td, struct ksegrp *kg)
  631 {
  632         struct proc *p;
  633 
  634         p = kg->kg_proc;
  635         td->td_state    = TDS_INACTIVE;
  636         td->td_proc     = p;
  637         td->td_ksegrp   = kg;
  638         td->td_flags    = 0;
  639         td->td_kflags   = 0;
  640 
  641         LIST_INIT(&td->td_contested);
  642         callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
  643         TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
  644         TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
  645         p->p_numthreads++;
  646         kg->kg_numthreads++;
  647 }
  648 
  649 /*
  650  * Convert a process with one thread to an unthreaded process.
  651  * Called from:
  652  *  thread_single(exit)  (called from execve and exit)
  653  *  kse_exit()          XXX may need cleaning up wrt KSE stuff
  654  */
  655 void
  656 thread_unthread(struct thread *td)
  657 {
  658         struct proc *p = td->td_proc;
  659 
  660         KASSERT((p->p_numthreads == 1), ("Unthreading with >1 threads"));
  661         upcall_remove(td);
  662         p->p_flag &= ~(P_SA|P_HADTHREADS);
  663         td->td_mailbox = NULL;
  664         td->td_pflags &= ~(TDP_SA | TDP_CAN_UNBIND);
  665         if (td->td_standin != NULL) {
  666                 thread_stash(td->td_standin);
  667                 td->td_standin = NULL;
  668         }
  669         sched_set_concurrency(td->td_ksegrp, 1);
  670 }
  671 
  672 /*
  673  * Called from:
  674  *  thread_exit()
  675  */
  676 void
  677 thread_unlink(struct thread *td)
  678 {
  679         struct proc *p = td->td_proc;
  680         struct ksegrp *kg = td->td_ksegrp;
  681 
  682         mtx_assert(&sched_lock, MA_OWNED);
  683         TAILQ_REMOVE(&p->p_threads, td, td_plist);
  684         p->p_numthreads--;
  685         TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
  686         kg->kg_numthreads--;
  687         /* could clear a few other things here */
  688         /* Must  NOT clear links to proc and ksegrp! */
  689 }
  690 
  691 /*
  692  * Enforce single-threading.
  693  *
  694  * Returns 1 if the caller must abort (another thread is waiting to
  695  * exit the process or similar). Process is locked!
  696  * Returns 0 when you are successfully the only thread running.
  697  * A process has successfully single-threaded in suspend mode when
  698  * there are no threads in user mode. Threads in the kernel must be
  699  * allowed to continue until they get to the user boundary. They may even
  700  * copy out their return values and data before suspending. They may,
  701  * however, be accelerated in reaching the user boundary as we will wake up
  702  * any sleeping threads that are interruptible (PCATCH).
  703  */
  704 int
  705 thread_single(int mode)
  706 {
  707         struct thread *td;
  708         struct thread *td2;
  709         struct proc *p;
  710         int remaining;
  711 
  712         td = curthread;
  713         p = td->td_proc;
  714         mtx_assert(&Giant, MA_NOTOWNED);
  715         PROC_LOCK_ASSERT(p, MA_OWNED);
  716         KASSERT((td != NULL), ("curthread is NULL"));
  717 
  718         if ((p->p_flag & P_HADTHREADS) == 0)
  719                 return (0);
  720 
  721         /* Is someone already single threading? */
  722         if (p->p_singlethread != NULL && p->p_singlethread != td)
  723                 return (1);
  724 
  725         if (mode == SINGLE_EXIT) {
  726                 p->p_flag |= P_SINGLE_EXIT;
  727                 p->p_flag &= ~P_SINGLE_BOUNDARY;
  728         } else {
  729                 p->p_flag &= ~P_SINGLE_EXIT;
  730                 if (mode == SINGLE_BOUNDARY)
  731                         p->p_flag |= P_SINGLE_BOUNDARY;
  732                 else
  733                         p->p_flag &= ~P_SINGLE_BOUNDARY;
  734         }
  735         p->p_flag |= P_STOPPED_SINGLE;
  736         mtx_lock_spin(&sched_lock);
  737         p->p_singlethread = td;
  738         if (mode == SINGLE_EXIT)
  739                 remaining = p->p_numthreads;
  740         else if (mode == SINGLE_BOUNDARY)
  741                 remaining = p->p_numthreads - p->p_boundary_count;
  742         else
  743                 remaining = p->p_numthreads - p->p_suspcount;
  744         while (remaining != 1) {
  745                 if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
  746                         goto stopme;
  747                 FOREACH_THREAD_IN_PROC(p, td2) {
  748                         if (td2 == td)
  749                                 continue;
  750                         td2->td_flags |= TDF_ASTPENDING;
  751                         if (TD_IS_INHIBITED(td2)) {
  752                                 switch (mode) {
  753                                 case SINGLE_EXIT:
  754                                         if (td->td_flags & TDF_DBSUSPEND)
  755                                                 td->td_flags &= ~TDF_DBSUSPEND;
  756                                         if (TD_IS_SUSPENDED(td2))
  757                                                 thread_unsuspend_one(td2);
  758                                         if (TD_ON_SLEEPQ(td2) &&
  759                                             (td2->td_flags & TDF_SINTR))
  760                                                 sleepq_abort(td2, EINTR);
  761                                         break;
  762                                 case SINGLE_BOUNDARY:
  763                                         if (TD_IS_SUSPENDED(td2) &&
  764                                             !(td2->td_flags & TDF_BOUNDARY))
  765                                                 thread_unsuspend_one(td2);
  766                                         if (TD_ON_SLEEPQ(td2) &&
  767                                             (td2->td_flags & TDF_SINTR))
  768                                                 sleepq_abort(td2, ERESTART);
  769                                         break;
  770                                 default:        
  771                                         if (TD_IS_SUSPENDED(td2))
  772                                                 continue;
  773                                         /*
  774                                          * maybe other inhibited states too?
  775                                          */
  776                                         if ((td2->td_flags & TDF_SINTR) &&
  777                                             (td2->td_inhibitors &
  778                                             (TDI_SLEEPING | TDI_SWAPPED)))
  779                                                 thread_suspend_one(td2);
  780                                         break;
  781                                 }
  782                         }
  783 #ifdef SMP
  784                         else if (TD_IS_RUNNING(td2) && td != td2) {
  785                                 forward_signal(td2);
  786                         }
  787 #endif
  788                 }
  789                 if (mode == SINGLE_EXIT)
  790                         remaining = p->p_numthreads;
  791                 else if (mode == SINGLE_BOUNDARY)
  792                         remaining = p->p_numthreads - p->p_boundary_count;
  793                 else
  794                         remaining = p->p_numthreads - p->p_suspcount;
  795 
  796                 /*
  797                  * Maybe we suspended some threads.. was it enough?
  798                  */
  799                 if (remaining == 1)
  800                         break;
  801 
  802 stopme:
  803                 /*
  804                  * Wake us up when everyone else has suspended.
  805                  * In the meantime we suspend as well.
  806                  */
  807                 thread_stopped(p);
  808                 thread_suspend_one(td);
  809                 PROC_UNLOCK(p);
  810                 mi_switch(SW_VOL, NULL);
  811                 mtx_unlock_spin(&sched_lock);
  812                 PROC_LOCK(p);
  813                 mtx_lock_spin(&sched_lock);
  814                 if (mode == SINGLE_EXIT)
  815                         remaining = p->p_numthreads;
  816                 else if (mode == SINGLE_BOUNDARY)
  817                         remaining = p->p_numthreads - p->p_boundary_count;
  818                 else
  819                         remaining = p->p_numthreads - p->p_suspcount;
  820         }
  821         if (mode == SINGLE_EXIT) {
  822                 /*
  823                  * We have gotten rid of all the other threads and we
  824                  * are about to either exit or exec. In either case,
  825                  * we try our utmost  to revert to being a non-threaded
  826                  * process.
  827                  */
  828                 p->p_singlethread = NULL;
  829                 p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT);
  830                 thread_unthread(td);
  831         }
  832         mtx_unlock_spin(&sched_lock);
  833         return (0);
  834 }
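
      /*
       * Minimal caller sketch (hypothetical, not taken from the kernel
       * sources): a thread forcing single-threading on the exit path
       * would hold the process lock, and not Giant, as the assertions
       * above require:
       *
       *      PROC_LOCK(p);
       *      if (p->p_flag & P_HADTHREADS)
       *              (void)thread_single(SINGLE_EXIT);
       *      ...
       *      PROC_UNLOCK(p);
       *
       * In SINGLE_EXIT mode the other threads are driven out and, on
       * success, thread_unthread() reverts the process to non-threaded
       * mode before the call returns.
       */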
  835 
  836 /*
  837  * Called from locations that can safely check to see
  838  * whether we have to suspend or at least throttle for a
  839  * single-thread event (e.g. fork).
  840  *
  841  * Such locations include userret().
  842  * If the "return_instead" argument is nonzero, the thread must be able to
  843  * accept 0 (caller may continue), or 1 (caller must abort) as a result.
  844  *
  845  * The 'return_instead' argument tells the function if it may do a
  846  * thread_exit() or suspend, or whether the caller must abort and back
  847  * out instead.
  848  *
  849  * If the thread that set the single_threading request has set the
  850  * P_SINGLE_EXIT bit in the process flags then this call will never return
  851  * if 'return_instead' is false, but will exit.
  852  *
  853  * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
  854  *---------------+--------------------+---------------------
  855  *       0       | returns 0          |   returns 0 or 1
  856  *               | when ST ends       |   immediately
  857  *---------------+--------------------+---------------------
  858  *       1       | thread exits       |   returns 1
  859  *               |                    |   immediately
  860  * 0 = thread_exit() or suspension ok,
  861  * other = return error instead of stopping the thread.
  862  *
  863  * While a full suspension is in effect, even a single-threading
  864  * thread would be suspended if it made this call (but it shouldn't).
  865  * This call should only be made from places where
  866  * thread_exit() would be safe as that may be the outcome unless
  867  * return_instead is set.
  868  */
  869 int
  870 thread_suspend_check(int return_instead)
  871 {
  872         struct thread *td;
  873         struct proc *p;
  874 
  875         td = curthread;
  876         p = td->td_proc;
  877         mtx_assert(&Giant, MA_NOTOWNED);
  878         PROC_LOCK_ASSERT(p, MA_OWNED);
  879         while (P_SHOULDSTOP(p) ||
  880               ((p->p_flag & P_TRACED) && (td->td_flags & TDF_DBSUSPEND))) {
  881                 if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
  882                         KASSERT(p->p_singlethread != NULL,
  883                             ("singlethread not set"));
  884                         /*
  885                          * The only suspension in action is a
  886                          * single-threading. The single threader need not stop.
  887                          * XXX Should be safe to access unlocked
  888                          * as it can only be set to be true by us.
  889                          */
  890                         if (p->p_singlethread == td)
  891                                 return (0);     /* Exempt from stopping. */
  892                 }
  893                 if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
  894                         return (EINTR);
  895 
  896                 /* Should we goto user boundary if we didn't come from there? */
  897                 if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
  898                     (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
  899                         return (ERESTART);
  900 
  901                 mtx_lock_spin(&sched_lock);
  902                 thread_stopped(p);
  903                 /*
  904                  * If the process is waiting for us to exit,
  905                  * this thread should just suicide.
  906                  * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
  907                  */
  908                 if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
  909                         thread_exit();
  910 
  911                 /*
  912                  * When a thread suspends, it just
  913                  * moves to the process's suspend queue
  914                  * and stays there.
  915                  */
  916                 thread_suspend_one(td);
  917                 if (return_instead == 0) {
  918                         p->p_boundary_count++;
  919                         td->td_flags |= TDF_BOUNDARY;
  920                 }
  921                 if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
  922                         if (p->p_numthreads == p->p_suspcount) 
  923                                 thread_unsuspend_one(p->p_singlethread);
  924                 }
  925                 PROC_UNLOCK(p);
  926                 mi_switch(SW_INVOL, NULL);
  927                 if (return_instead == 0) {
  928                         p->p_boundary_count--;
  929                         td->td_flags &= ~TDF_BOUNDARY;
  930                 }
  931                 mtx_unlock_spin(&sched_lock);
  932                 PROC_LOCK(p);
  933         }
  934         return (0);
  935 }
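
      /*
       * Minimal caller sketch (hypothetical): from a kernel path that
       * can back out, the non-blocking form described above would look
       * roughly like:
       *
       *      PROC_LOCK(p);
       *      error = thread_suspend_check(1);
       *      PROC_UNLOCK(p);
       *      if (error != 0)
       *              return (error);         (EINTR or ERESTART)
       *
       * With return_instead == 0, e.g. at the userret() boundary, the
       * call may instead suspend here, or never return at all when the
       * process is single-threading in order to exit.
       */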
  936 
  937 void
  938 thread_suspend_one(struct thread *td)
  939 {
  940         struct proc *p = td->td_proc;
  941 
  942         mtx_assert(&sched_lock, MA_OWNED);
  943         PROC_LOCK_ASSERT(p, MA_OWNED);
  944         KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
  945         p->p_suspcount++;
  946         TD_SET_SUSPENDED(td);
  947         TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
  948 }
  949 
  950 void
  951 thread_unsuspend_one(struct thread *td)
  952 {
  953         struct proc *p = td->td_proc;
  954 
  955         mtx_assert(&sched_lock, MA_OWNED);
  956         PROC_LOCK_ASSERT(p, MA_OWNED);
  957         TAILQ_REMOVE(&p->p_suspended, td, td_runq);
  958         TD_CLR_SUSPENDED(td);
  959         p->p_suspcount--;
  960         setrunnable(td);
  961 }
  962 
  963 /*
  964  * Allow all threads blocked by single threading to continue running.
  965  */
  966 void
  967 thread_unsuspend(struct proc *p)
  968 {
  969         struct thread *td;
  970 
  971         mtx_assert(&sched_lock, MA_OWNED);
  972         PROC_LOCK_ASSERT(p, MA_OWNED);
  973         if (!P_SHOULDSTOP(p)) {
  974                 while ((td = TAILQ_FIRST(&p->p_suspended))) {
  975                         thread_unsuspend_one(td);
  976                 }
  977         } else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
  978             (p->p_numthreads == p->p_suspcount)) {
  979                 /*
  980                  * Stopping everything also did the job for the single
  981                  * threading request. Now we've downgraded to single-threaded,
  982                  * let it continue.
  983                  */
  984                 thread_unsuspend_one(p->p_singlethread);
  985         }
  986 }
  987 
  988 /*
  989  * End the single-threading mode.
  990  */
  991 void
  992 thread_single_end(void)
  993 {
  994         struct thread *td;
  995         struct proc *p;
  996 
  997         td = curthread;
  998         p = td->td_proc;
  999         PROC_LOCK_ASSERT(p, MA_OWNED);
 1000         p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY);
 1001         mtx_lock_spin(&sched_lock);
 1002         p->p_singlethread = NULL;
 1003         p->p_procscopegrp = NULL;
 1004         /*
 1005          * If there are other threads they may now run,
 1006          * unless of course there is a blanket 'stop order'
 1007          * on the process. The single threader must be allowed
 1008          * to continue however as this is a bad place to stop.
 1009          */
 1010         if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
 1011                 while ((td = TAILQ_FIRST(&p->p_suspended))) {
 1012                         thread_unsuspend_one(td);
 1013                 }
 1014         }
 1015         mtx_unlock_spin(&sched_lock);
 1016 }
 1017 
 1018 /*
 1019  * Called before going into an interruptible sleep to see if we have been
 1020  * interrupted or requested to exit.
 1021  */
 1022 int
 1023 thread_sleep_check(struct thread *td)
 1024 {
 1025         struct proc *p;
 1026 
 1027         p = td->td_proc;
 1028         mtx_assert(&sched_lock, MA_OWNED);
 1029         if (p->p_flag & P_HADTHREADS) {
 1030                 if (p->p_singlethread != td) {
 1031                         if (p->p_flag & P_SINGLE_EXIT)
 1032                                 return (EINTR);
 1033                         if (p->p_flag & P_SINGLE_BOUNDARY)
 1034                                 return (ERESTART);
 1035                 }
 1036                 if (td->td_flags & TDF_INTERRUPT)
 1037                         return (td->td_intrval);
 1038         }
 1039         return (0);
 1040 }
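
      /*
       * Minimal caller sketch (hypothetical): code about to start an
       * interruptible sleep could consult this with sched_lock held, as
       * asserted above, and abort the sleep with the returned error:
       *
       *      mtx_lock_spin(&sched_lock);
       *      if ((error = thread_sleep_check(td)) != 0) {
       *              mtx_unlock_spin(&sched_lock);
       *              return (error);
       *      }
       */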

This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.