FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_lwp.c


/*      $NetBSD: kern_lwp.c,v 1.26 2004/03/05 11:17:41 junyoung Exp $   */

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.26 2004/03/05 11:17:41 junyoung Exp $");

#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/types.h>
#include <sys/ucontext.h>
#include <sys/resourcevar.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

struct lwplist alllwp;

#define LWP_DEBUG

#ifdef LWP_DEBUG
int lwp_debug = 0;
#define DPRINTF(x) if (lwp_debug) printf x
#else
#define DPRINTF(x)
#endif
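
/*
 * Create a new LWP in the calling process from a user-supplied
 * ucontext.  Unless LWP_SUSPENDED is requested, the new LWP is made
 * runnable immediately, and its LWP ID is copied out to the caller.
 */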
/* ARGSUSED */
int
sys__lwp_create(struct lwp *l, void *v, register_t *retval)
{
        struct sys__lwp_create_args /* {
                syscallarg(const ucontext_t *) ucp;
                syscallarg(u_long) flags;
                syscallarg(lwpid_t *) new_lwp;
        } */ *uap = v;
        struct proc *p = l->l_proc;
        struct lwp *l2;
        vaddr_t uaddr;
        boolean_t inmem;
        ucontext_t *newuc;
        int s, error;

        newuc = pool_get(&lwp_uc_pool, PR_WAITOK);

        error = copyin(SCARG(uap, ucp), newuc, sizeof(*newuc));
        if (error) {
                /* Don't leak the ucontext buffer on the error path. */
                pool_put(&lwp_uc_pool, newuc);
                return (error);
        }

        /* XXX check against resource limits */

        inmem = uvm_uarea_alloc(&uaddr);
        if (__predict_false(uaddr == 0)) {
                pool_put(&lwp_uc_pool, newuc);
                return (ENOMEM);
        }

        /* XXX flags:
         * __LWP_ASLWP is probably needed for Solaris compatibility.
         */

        newlwp(l, p, uaddr, inmem,
            SCARG(uap, flags) & LWP_DETACHED,
            NULL, 0, startlwp, newuc, &l2);

        if ((SCARG(uap, flags) & LWP_SUSPENDED) == 0) {
                SCHED_LOCK(s);
                l2->l_stat = LSRUN;
                setrunqueue(l2);
                SCHED_UNLOCK(s);
                simple_lock(&p->p_lock);
                p->p_nrlwps++;
                simple_unlock(&p->p_lock);
        } else {
                l2->l_stat = LSSUSPENDED;
        }

        error = copyout(&l2->l_lid, SCARG(uap, new_lwp),
            sizeof(l2->l_lid));
        if (error)
                return (error);

        return (0);
}
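
/*
 * Userland sketch (not part of this file): the ucontext handed to
 * _lwp_create(2) is normally prepared with _lwp_makecontext(3).  The
 * start routine, argument, and stack below are illustrative
 * assumptions.
 *
 *      ucontext_t uc;
 *      lwpid_t lid;
 *
 *      _lwp_makecontext(&uc, start_fn, arg, NULL, stack, stack_size);
 *      if (_lwp_create(&uc, 0, &lid) == -1)
 *              err(1, "_lwp_create");
 */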


int
sys__lwp_exit(struct lwp *l, void *v, register_t *retval)
{

        lwp_exit(l);
        /* NOTREACHED */
        return (0);
}


int
sys__lwp_self(struct lwp *l, void *v, register_t *retval)
{

        *retval = l->l_lid;

        return (0);
}


int
sys__lwp_getprivate(struct lwp *l, void *v, register_t *retval)
{

        *retval = (uintptr_t) l->l_private;

        return (0);
}


int
sys__lwp_setprivate(struct lwp *l, void *v, register_t *retval)
{
        struct sys__lwp_setprivate_args /* {
                syscallarg(void *) ptr;
        } */ *uap = v;

        l->l_private = SCARG(uap, ptr);

        return (0);
}
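
/*
 * Usage sketch (not part of this file): l_private is a per-LWP
 * pointer slot, typically used by a thread library to stash its
 * per-thread descriptor.  "struct tls_blob" and allocate_tls_blob()
 * below are hypothetical, purely for illustration.
 *
 *      struct tls_blob *mine = allocate_tls_blob();
 *      _lwp_setprivate(mine);
 *      ...
 *      struct tls_blob *cur = _lwp_getprivate();
 */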


int
sys__lwp_suspend(struct lwp *l, void *v, register_t *retval)
{
        struct sys__lwp_suspend_args /* {
                syscallarg(lwpid_t) target;
        } */ *uap = v;
        int target_lid;
        struct proc *p = l->l_proc;
        struct lwp *t;
        struct lwp *t2;

        target_lid = SCARG(uap, target);

        LIST_FOREACH(t, &p->p_lwps, l_sibling)
                if (t->l_lid == target_lid)
                        break;

        if (t == NULL)
                return (ESRCH);

        if (t == l) {
                /*
                 * Check for deadlock, which is only possible
                 * when we're suspending ourselves.
                 */
                LIST_FOREACH(t2, &p->p_lwps, l_sibling) {
                        if ((t2 != l) && (t2->l_stat != LSSUSPENDED))
                                break;
                }

                if (t2 == NULL) /* All other LWPs are suspended */
                        return (EDEADLK);
        }

        return lwp_suspend(l, t);
}

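/*
 * Suspend the target LWP t.  Suspending ourselves switches away
 * immediately; suspending another LWP removes it from the run queue
 * or marks a sleeping LWP suspended.  Idempotent on an LWP that is
 * already suspended.
 */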
inline int
lwp_suspend(struct lwp *l, struct lwp *t)
{
        struct proc *p = t->l_proc;
        int s;

        if (t == l) {
                SCHED_LOCK(s);
                l->l_stat = LSSUSPENDED;
                /* XXX NJWLWP check if this makes sense here: */
                p->p_stats->p_ru.ru_nvcsw++;
                mi_switch(l, NULL);
                SCHED_ASSERT_UNLOCKED();
                splx(s);
        } else {
                switch (t->l_stat) {
                case LSSUSPENDED:
                        return (0); /* _lwp_suspend() is idempotent */
                case LSRUN:
                        SCHED_LOCK(s);
                        remrunqueue(t);
                        t->l_stat = LSSUSPENDED;
                        SCHED_UNLOCK(s);
                        simple_lock(&p->p_lock);
                        p->p_nrlwps--;
                        simple_unlock(&p->p_lock);
                        break;
                case LSSLEEP:
                        t->l_stat = LSSUSPENDED;
                        break;
                case LSIDL:
                case LSZOMB:
                        return (EINTR); /* It's what Solaris does..... */
                case LSSTOP:
                        panic("_lwp_suspend: Stopped LWP in running process!");
                        break;
                case LSONPROC:
                        panic("XXX multiprocessor LWPs? Implement me!");
                        break;
                }
        }

        return (0);
}
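
/*
 * Userland sketch (not part of this file): pausing and resuming a
 * peer pairs the two syscalls; "peer" is an illustrative LWP ID.
 *
 *      if (_lwp_suspend(peer) == -1)
 *              err(1, "_lwp_suspend");
 *      ...
 *      if (_lwp_continue(peer) == -1)
 *              err(1, "_lwp_continue");
 */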


int
sys__lwp_continue(struct lwp *l, void *v, register_t *retval)
{
        struct sys__lwp_continue_args /* {
                syscallarg(lwpid_t) target;
        } */ *uap = v;
        int s, target_lid;
        struct proc *p = l->l_proc;
        struct lwp *t;

        target_lid = SCARG(uap, target);

        LIST_FOREACH(t, &p->p_lwps, l_sibling)
                if (t->l_lid == target_lid)
                        break;

        if (t == NULL)
                return (ESRCH);

        SCHED_LOCK(s);
        lwp_continue(t);
        SCHED_UNLOCK(s);

        return (0);
}

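/*
 * Resume a suspended LWP: put it back on the run queue if it was
 * runnable when suspended, or back to sleep if it was sleeping.
 * Called with the scheduler lock held.
 */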
void
lwp_continue(struct lwp *l)
{

        DPRINTF(("lwp_continue of %d.%d (%s), state %d, wchan %p\n",
            l->l_proc->p_pid, l->l_lid, l->l_proc->p_comm, l->l_stat,
            l->l_wchan));

        if (l->l_stat != LSSUSPENDED)
                return;

        if (l->l_wchan == 0) {
                /* LWP was runnable before being suspended. */
                setrunnable(l);
        } else {
                /* LWP was sleeping before being suspended. */
                l->l_stat = LSSLEEP;
        }
}

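/*
 * Wake the target LWP out of an interruptible sleep: set L_CANCELLED
 * so ltsleep() returns early, and make the LWP runnable.  Fails with
 * ENODEV if the target is not sleeping, and EBUSY if its sleep is not
 * interruptible.
 */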
int
sys__lwp_wakeup(struct lwp *l, void *v, register_t *retval)
{
        struct sys__lwp_wakeup_args /* {
                syscallarg(lwpid_t) target;
        } */ *uap = v;
        lwpid_t target_lid;
        struct lwp *t;
        struct proc *p;
        int error;
        int s;

        p = l->l_proc;
        target_lid = SCARG(uap, target);

        SCHED_LOCK(s);

        LIST_FOREACH(t, &p->p_lwps, l_sibling)
                if (t->l_lid == target_lid)
                        break;

        if (t == NULL) {
                error = ESRCH;
                goto exit;
        }

        if (t->l_stat != LSSLEEP) {
                error = ENODEV;
                goto exit;
        }

        if ((t->l_flag & L_SINTR) == 0) {
                error = EBUSY;
                goto exit;
        }
        /*
         * Tell ltsleep to wake up.
         */
        t->l_flag |= L_CANCELLED;

        setrunnable(t);
        error = 0;
exit:
        SCHED_UNLOCK(s);

        return error;
}

int
sys__lwp_wait(struct lwp *l, void *v, register_t *retval)
{
        struct sys__lwp_wait_args /* {
                syscallarg(lwpid_t) wait_for;
                syscallarg(lwpid_t *) departed;
        } */ *uap = v;
        int error;
        lwpid_t dep;

        error = lwp_wait1(l, SCARG(uap, wait_for), &dep, 0);
        if (error)
                return (error);

        if (SCARG(uap, departed)) {
                error = copyout(&dep, SCARG(uap, departed),
                    sizeof(dep));
                if (error)
                        return (error);
        }

        return (0);
}
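
/*
 * Userland sketch (not part of this file): reaping an exited LWP; a
 * first argument of 0 waits for any undetached LWP in the process.
 *
 *      lwpid_t departed;
 *
 *      if (_lwp_wait(lid, &departed) == -1)
 *              err(1, "_lwp_wait");
 */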


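/*
 * Wait for an LWP in the current process to exit.  A lid of 0 waits
 * for any undetached LWP.  Zombies are reaped immediately; otherwise
 * we sleep on &p->p_nlwps until an exiting LWP wakes us, after first
 * checking that the wait cannot deadlock the process.
 */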
int
lwp_wait1(struct lwp *l, lwpid_t lid, lwpid_t *departed, int flags)
{
        struct proc *p = l->l_proc;
        struct lwp *l2, *l3;
        int nfound, error, wpri;
        static const char waitstr1[] = "lwpwait";
        static const char waitstr2[] = "lwpwait2";

        DPRINTF(("lwp_wait1: %d.%d waiting for %d.\n",
            p->p_pid, l->l_lid, lid));

        if (lid == l->l_lid)
                return (EDEADLK); /* Waiting for ourselves makes no sense. */

        wpri = PWAIT |
            ((flags & LWPWAIT_EXITCONTROL) ? PNOEXITERR : PCATCH);
 loop:
        nfound = 0;
        LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
                if ((l2 == l) || (l2->l_flag & L_DETACHED) ||
                    ((lid != 0) && (lid != l2->l_lid)))
                        continue;

                nfound++;
                if (l2->l_stat == LSZOMB) {
                        if (departed)
                                *departed = l2->l_lid;

                        simple_lock(&p->p_lock);
                        LIST_REMOVE(l2, l_sibling);
                        p->p_nlwps--;
                        p->p_nzlwps--;
                        simple_unlock(&p->p_lock);
                        /* XXX decrement limits */

                        pool_put(&lwp_pool, l2);

                        return (0);
                } else if (l2->l_stat == LSSLEEP ||
                           l2->l_stat == LSSUSPENDED) {
                        /* Deadlock checks.
                         * 1. If all other LWPs are waiting for exits
                         *    or suspended, we would deadlock.
                         */

                        LIST_FOREACH(l3, &p->p_lwps, l_sibling) {
                                if (l3 != l && (l3->l_stat != LSSUSPENDED) &&
                                    !(l3->l_stat == LSSLEEP &&
                                        l3->l_wchan == (caddr_t) &p->p_nlwps))
                                        break;
                        }
                        if (l3 == NULL) /* Everyone else is waiting. */
                                return (EDEADLK);

                        /* XXX we'd like to check for a cycle of waiting
                         * LWPs (specific LID waits, not any-LWP waits)
                         * and detect that sort of deadlock, but we don't
                         * have a good place to store the LWP that is
                         * being waited for. wchan is already filled with
                         * &p->p_nlwps, and putting the LWP address in
                         * there for deadlock tracing would require
                         * exiting LWPs to call wakeup on both their
                         * own address and &p->p_nlwps, to get threads
                         * sleeping on any LWP exiting.
                         *
                         * Revisit later. Maybe another auxiliary
                         * storage location associated with sleeping
                         * is in order.
                         */
                }
        }

        if (nfound == 0)
                return (ESRCH);

        if ((error = tsleep((caddr_t) &p->p_nlwps, wpri,
            (lid != 0) ? waitstr1 : waitstr2, 0)) != 0)
                return (error);

        goto loop;
}


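/*
 * Create a new LWP in process p2, zeroing the startzero region and
 * inheriting the startcopy region from the parent LWP l1.  The new
 * LWP is left in LSIDL; the caller makes it runnable.  LWP IDs come
 * from the monotonically increasing p_nlwpid.
 */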
int
newlwp(struct lwp *l1, struct proc *p2, vaddr_t uaddr, boolean_t inmem,
    int flags, void *stack, size_t stacksize,
    void (*func)(void *), void *arg, struct lwp **rnewlwpp)
{
        struct lwp *l2;
        int s;

        l2 = pool_get(&lwp_pool, PR_WAITOK);

        l2->l_stat = LSIDL;
        l2->l_forw = l2->l_back = NULL;
        l2->l_proc = p2;

        memset(&l2->l_startzero, 0,
               (unsigned) ((caddr_t)&l2->l_endzero -
                           (caddr_t)&l2->l_startzero));
        memcpy(&l2->l_startcopy, &l1->l_startcopy,
               (unsigned) ((caddr_t)&l2->l_endcopy -
                           (caddr_t)&l2->l_startcopy));

#if !defined(MULTIPROCESSOR)
        /*
         * In the single-processor case, all processes will always run
         * on the same CPU.  So, initialize the child's CPU to the parent's
         * now.  In the multiprocessor case, the child's CPU will be
         * initialized in the low-level context switch code when the
         * process runs.
         */
        KASSERT(l1->l_cpu != NULL);
        l2->l_cpu = l1->l_cpu;
#else
        /*
         * Zero the child's CPU pointer so we don't get trash.
         */
        l2->l_cpu = NULL;
#endif /* ! MULTIPROCESSOR */

        l2->l_flag = inmem ? L_INMEM : 0;
        l2->l_flag |= (flags & LWP_DETACHED) ? L_DETACHED : 0;

        callout_init(&l2->l_tsleep_ch);

        if (rnewlwpp != NULL)
                *rnewlwpp = l2;

        l2->l_addr = (struct user *)uaddr;
        uvm_lwp_fork(l1, l2, stack, stacksize, func,
            (arg != NULL) ? arg : l2);

        simple_lock(&p2->p_lock);
        l2->l_lid = ++p2->p_nlwpid;
        LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
        p2->p_nlwps++;
        simple_unlock(&p2->p_lock);

        /* XXX should be locked differently... */
        s = proclist_lock_write();
        LIST_INSERT_HEAD(&alllwp, l2, l_list);
        proclist_unlock_write(s);

        if (p2->p_emul->e_lwp_fork)
                (*p2->p_emul->e_lwp_fork)(l1, l2);

        return (0);
}


/*
 * Exit the calling LWP and, if it is the last live LWP in the
 * process, the whole process.  This will call cpu_exit, which will
 * call cpu_switch, so it can only be used meaningfully if you're
 * willing to switch away.  Calling with l != curlwp would be weird.
 */
void
lwp_exit(struct lwp *l)
{
        struct proc *p = l->l_proc;
        int s;

        DPRINTF(("lwp_exit: %d.%d exiting.\n", p->p_pid, l->l_lid));
        DPRINTF((" nlwps: %d nrlwps %d nzlwps: %d\n",
            p->p_nlwps, p->p_nrlwps, p->p_nzlwps));

        if (p->p_emul->e_lwp_exit)
                (*p->p_emul->e_lwp_exit)(l);

        /*
         * If we are the last live LWP in a process, we need to exit
         * the entire process (if that's not already going on). We do
         * so with an exit status of zero, because it's a "controlled"
         * exit, and because that's what Solaris does.
         */
        if (((p->p_nlwps - p->p_nzlwps) == 1) && ((p->p_flag & P_WEXIT) == 0)) {
                DPRINTF(("lwp_exit: %d.%d calling exit1()\n",
                    p->p_pid, l->l_lid));
                exit1(l, 0);
                /* NOTREACHED */
        }

        s = proclist_lock_write();
        LIST_REMOVE(l, l_list);
        proclist_unlock_write(s);

        /* Free MD LWP resources */
#ifndef __NO_CPU_LWP_FREE
        cpu_lwp_free(l, 0);
#endif

        simple_lock(&p->p_lock);
        p->p_nrlwps--;
        simple_unlock(&p->p_lock);

        l->l_stat = LSDEAD;

        /* This LWP no longer needs to hold the kernel lock. */
        KERNEL_PROC_UNLOCK(l);

        pmap_deactivate(l);

        /* cpu_exit() will not return */
        cpu_exit(l);
}

/*
 * We are called from cpu_exit() once it is safe to schedule the
 * dead LWP's resources to be freed (i.e., once we've switched to
 * the idle PCB for the current CPU).
 *
 * NOTE: One must be careful with locking in this routine.  It's
 * called from a critical section in machine-dependent code, so
 * we should refrain from changing any interrupt state.
 */
void
lwp_exit2(struct lwp *l)
{
        struct proc *p;

        KERNEL_LOCK(LK_EXCLUSIVE);
        /*
         * Free the VM resources we're still holding on to.
         */
        uvm_lwp_exit(l);

        if (l->l_flag & L_DETACHED) {
                /* Nobody waits for detached LWPs. */

                if ((l->l_flag & L_PROCEXIT) == 0) {
                        LIST_REMOVE(l, l_sibling);
                        p = l->l_proc;
                        p->p_nlwps--;
                }

                pool_put(&lwp_pool, l);
                KERNEL_UNLOCK();
        } else {
                l->l_stat = LSZOMB;
                p = l->l_proc;
                p->p_nzlwps++;
                KERNEL_UNLOCK();
                wakeup(&p->p_nlwps);
        }
}

/*
 * Pick an LWP to represent the process for those operations which
 * want information about a "process" that is actually associated
 * with an LWP.
 */
struct lwp *
proc_representative_lwp(struct proc *p)
{
        struct lwp *l, *onproc, *running, *sleeping, *stopped, *suspended;

        /* Trivial case: only one LWP */
        if (p->p_nlwps == 1)
                return (LIST_FIRST(&p->p_lwps));

        switch (p->p_stat) {
        case SSTOP:
        case SACTIVE:
                /* Pick the most live LWP */
                onproc = running = sleeping = stopped = suspended = NULL;
                LIST_FOREACH(l, &p->p_lwps, l_sibling) {
                        switch (l->l_stat) {
                        case LSONPROC:
                                onproc = l;
                                break;
                        case LSRUN:
                                running = l;
                                break;
                        case LSSLEEP:
                                sleeping = l;
                                break;
                        case LSSTOP:
                                stopped = l;
                                break;
                        case LSSUSPENDED:
                                suspended = l;
                                break;
                        }
                }
                if (onproc)
                        return onproc;
                if (running)
                        return running;
                if (sleeping)
                        return sleeping;
                if (stopped)
                        return stopped;
                if (suspended)
                        return suspended;
                break;
        case SZOMB:
                /* Doesn't really matter... */
                return (LIST_FIRST(&p->p_lwps));
#ifdef DIAGNOSTIC
        case SIDL:
                /* We have more than one LWP and we're in SIDL?
                 * How'd that happen?
                 */
                panic("Too many LWPs (%d) in SIDL process %d (%s)",
                    p->p_nrlwps, p->p_pid, p->p_comm);
        default:
                panic("Process %d (%s) in unknown state %d",
                    p->p_pid, p->p_comm, p->p_stat);
#endif
        }

        panic("proc_representative_lwp: couldn't find an LWP for process"
                " %d (%s)", p->p_pid, p->p_comm);
        /* NOTREACHED */
        return NULL;
}
