FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_synch.c


    1 /*      $NetBSD: kern_synch.c,v 1.173.2.1 2008/09/16 18:49:34 bouyer Exp $      */
    2 
    3 /*-
    4  * Copyright (c) 1999, 2000, 2004 The NetBSD Foundation, Inc.
    5  * All rights reserved.
    6  *
    7  * This code is derived from software contributed to The NetBSD Foundation
    8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
    9  * NASA Ames Research Center.
   10  * This code is derived from software contributed to The NetBSD Foundation
   11  * by Charles M. Hannum.
   12  *
   13  * Redistribution and use in source and binary forms, with or without
   14  * modification, are permitted provided that the following conditions
   15  * are met:
   16  * 1. Redistributions of source code must retain the above copyright
   17  *    notice, this list of conditions and the following disclaimer.
   18  * 2. Redistributions in binary form must reproduce the above copyright
   19  *    notice, this list of conditions and the following disclaimer in the
   20  *    documentation and/or other materials provided with the distribution.
   21  * 3. All advertising materials mentioning features or use of this software
   22  *    must display the following acknowledgement:
   23  *      This product includes software developed by the NetBSD
   24  *      Foundation, Inc. and its contributors.
   25  * 4. Neither the name of The NetBSD Foundation nor the names of its
   26  *    contributors may be used to endorse or promote products derived
   27  *    from this software without specific prior written permission.
   28  *
   29  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
   30  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   31  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   32  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
   33  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   34  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   35  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   36  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   37  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   38  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   39  * POSSIBILITY OF SUCH DAMAGE.
   40  */
   41 
   42 /*-
   43  * Copyright (c) 1982, 1986, 1990, 1991, 1993
   44  *      The Regents of the University of California.  All rights reserved.
   45  * (c) UNIX System Laboratories, Inc.
   46  * All or some portions of this file are derived from material licensed
   47  * to the University of California by American Telephone and Telegraph
   48  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
   49  * the permission of UNIX System Laboratories, Inc.
   50  *
   51  * Redistribution and use in source and binary forms, with or without
   52  * modification, are permitted provided that the following conditions
   53  * are met:
   54  * 1. Redistributions of source code must retain the above copyright
   55  *    notice, this list of conditions and the following disclaimer.
   56  * 2. Redistributions in binary form must reproduce the above copyright
   57  *    notice, this list of conditions and the following disclaimer in the
   58  *    documentation and/or other materials provided with the distribution.
   59  * 3. Neither the name of the University nor the names of its contributors
   60  *    may be used to endorse or promote products derived from this software
   61  *    without specific prior written permission.
   62  *
   63  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   64  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   65  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   66  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   67  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   68  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   69  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   70  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   71  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   72  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   73  * SUCH DAMAGE.
   74  *
   75  *      @(#)kern_synch.c        8.9 (Berkeley) 5/19/95
   76  */
   77 
   78 #include <sys/cdefs.h>
   79 __KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.173.2.1 2008/09/16 18:49:34 bouyer Exp $");
   80 
   81 #include "opt_ddb.h"
   82 #include "opt_ktrace.h"
   83 #include "opt_kstack.h"
   84 #include "opt_lockdebug.h"
   85 #include "opt_multiprocessor.h"
   86 #include "opt_perfctrs.h"
   87 
   88 #include <sys/param.h>
   89 #include <sys/systm.h>
   90 #include <sys/callout.h>
   91 #include <sys/proc.h>
   92 #include <sys/kernel.h>
   93 #include <sys/buf.h>
   94 #if defined(PERFCTRS)
   95 #include <sys/pmc.h>
   96 #endif
   97 #include <sys/signalvar.h>
   98 #include <sys/resourcevar.h>
   99 #include <sys/sched.h>
  100 #include <sys/sa.h>
  101 #include <sys/savar.h>
  102 #include <sys/kauth.h>
  103 
  104 #include <uvm/uvm_extern.h>
  105 
  106 #ifdef KTRACE
  107 #include <sys/ktrace.h>
  108 #endif
  109 
  110 #include <machine/cpu.h>
  111 
  112 int     lbolt;                  /* once a second sleep address */
  113 int     rrticks;                /* number of hardclock ticks per roundrobin() */
  114 
  115 #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
  116 #define XXX_SCHED_LOCK          simple_lock(&sched_lock)
  117 #define XXX_SCHED_UNLOCK        simple_unlock(&sched_lock)
  118 #else
  119 #define XXX_SCHED_LOCK          /* nothing */
  120 #define XXX_SCHED_UNLOCK        /* nothing */
  121 #endif
  122 
  123 /*
  124  * Sleep queues.
  125  *
  126  * We're only looking at 7 bits of the address; everything is
  127  * aligned to 4, lots of things are aligned to greater powers
  128  * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
  129  */
  130 #define SLPQUE_TABLESIZE        128
  131 #define SLPQUE_LOOKUP(x)        (((u_long)(x) >> 8) & (SLPQUE_TABLESIZE - 1))
  132 
  133 #define SLPQUE(ident)   (&sched_slpque[SLPQUE_LOOKUP(ident)])
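
/*
 * [Editor's illustration -- not part of kern_synch.c]  The bucket
 * calculation above can be exercised on its own.  A minimal userland
 * sketch that prints which of the 128 sleep queues an arbitrary wait
 * channel address would hash to:
 */

#include <stdio.h>

#define EX_SLPQUE_TABLESIZE     128
#define EX_SLPQUE_LOOKUP(x) \
        (((unsigned long)(x) >> 8) & (EX_SLPQUE_TABLESIZE - 1))

int
main(void)
{
        static int lbolt;       /* stand-in for a kernel wait channel */

        /* Drop the low 8 bits, keep the next 7: a value in [0, 127]. */
        printf("&lbolt = %p hashes to sleep queue %lu\n",
            (void *)&lbolt, EX_SLPQUE_LOOKUP(&lbolt));
        return 0;
}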
  134 
  135 /*
  136  * The global scheduler state.
  137  */
  138 struct prochd sched_qs[RUNQUE_NQS];     /* run queues */
  139 volatile uint32_t sched_whichqs;        /* bitmap of non-empty queues */
  140 struct slpque sched_slpque[SLPQUE_TABLESIZE]; /* sleep queues */
  141 
  142 struct simplelock sched_lock = SIMPLELOCK_INITIALIZER;
  143 
  144 void schedcpu(void *);
  145 void updatepri(struct lwp *);
  146 void endtsleep(void *);
  147 
  148 inline void sa_awaken(struct lwp *);
  149 inline void awaken(struct lwp *);
  150 
  151 struct callout schedcpu_ch = CALLOUT_INITIALIZER_SETFUNC(schedcpu, NULL);
  152 static unsigned int schedcpu_ticks;
  153 
  154 
  155 /*
  156  * Force switch among equal priority processes every 100ms.
  157  * Called from hardclock every hz/10 == rrticks hardclock ticks.
  158  */
  159 /* ARGSUSED */
  160 void
  161 roundrobin(struct cpu_info *ci)
  162 {
  163         struct schedstate_percpu *spc = &ci->ci_schedstate;
  164 
  165         spc->spc_rrticks = rrticks;
  166 
  167         if (curlwp != NULL) {
  168                 if (spc->spc_flags & SPCF_SEENRR) {
  169                         /*
  170                          * The process has already been through a roundrobin
  171                          * without switching and may be hogging the CPU.
  172                          * Indicate that the process should yield.
  173                          */
  174                         spc->spc_flags |= SPCF_SHOULDYIELD;
  175                 } else
  176                         spc->spc_flags |= SPCF_SEENRR;
  177         }
  178         need_resched(curcpu());
  179 }
  180 
  181 #define PPQ     (128 / RUNQUE_NQS)      /* priorities per queue */
  182 #define NICE_WEIGHT 2                   /* priorities per nice level */
  183 
  184 #define ESTCPU_SHIFT    11
  185 #define ESTCPU_MAX      ((NICE_WEIGHT * PRIO_MAX - PPQ) << ESTCPU_SHIFT)
  186 #define ESTCPULIM(e)    min((e), ESTCPU_MAX)
  187 
  188 /*
  189  * Constants for digital decay and forget:
  190  *      90% of (p_estcpu) usage in 5 * loadav time
  191  *      95% of (p_pctcpu) usage in 60 seconds (load insensitive)
  192  *          Note that, as ps(1) mentions, this can let percentages
  193  *          total over 100% (I've seen 137.9% for 3 processes).
  194  *
  195  * Note that hardclock updates p_estcpu and p_cpticks independently.
  196  *
  197  * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
  198  * That is, the system wants to compute a value of decay such
  199  * that the following for loop:
  200  *      for (i = 0; i < (5 * loadavg); i++)
  201  *              p_estcpu *= decay;
  202  * will compute
  203  *      p_estcpu *= 0.1;
  204  * for all values of loadavg:
  205  *
  206  * Mathematically this loop can be expressed by saying:
  207  *      decay ** (5 * loadavg) ~= .1
  208  *
  209  * The system computes decay as:
  210  *      decay = (2 * loadavg) / (2 * loadavg + 1)
  211  *
  212  * We wish to prove that the system's computation of decay
  213  * will always fulfill the equation:
  214  *      decay ** (5 * loadavg) ~= .1
  215  *
  216  * If we compute b as:
  217  *      b = 2 * loadavg
  218  * then
  219  *      decay = b / (b + 1)
  220  *
  221  * We now need to prove two things:
  222  *      1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
  223  *      2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
  224  *
  225  * Facts:
  226  *         For x close to zero, exp(x) =~ 1 + x, since
  227  *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
  228  *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
  229  *         For x close to zero, ln(1+x) =~ x, since
  230  *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
  231  *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
  232  *         ln(.1) =~ -2.30
  233  *
  234  * Proof of (1):
  235  *    Solve (factor)**(power) =~ .1 given power (5*loadav):
  236  *      solving for factor,
  237  *      ln(factor) =~ (-2.30/5*loadav), or
  238  *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
  239  *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
  240  *
  241  * Proof of (2):
  242  *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
  243  *      solving for power,
  244  *      power*ln(b/(b+1)) =~ -2.30, or
  245  *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
  246  *
  247  * Actual power values for the implemented algorithm are as follows:
  248  *      loadav: 1       2       3       4
  249  *      power:  5.68    10.32   14.94   19.55
  250  */
  251 
  252 /* calculations for digital decay to forget 90% of usage in 5*loadav sec */
  253 #define loadfactor(loadav)      (2 * (loadav))
  254 
  255 static fixpt_t
  256 decay_cpu(fixpt_t loadfac, fixpt_t estcpu)
  257 {
  258 
  259         if (estcpu == 0) {
  260                 return 0;
  261         }
  262 
  263 #if !defined(_LP64)
   264         /* avoid 64-bit arithmetic. */
  265 #define FIXPT_MAX ((fixpt_t)((UINTMAX_C(1) << sizeof(fixpt_t) * CHAR_BIT) - 1))
  266         if (__predict_true(loadfac <= FIXPT_MAX / ESTCPU_MAX)) {
  267                 return estcpu * loadfac / (loadfac + FSCALE);
  268         }
  269 #endif /* !defined(_LP64) */
  270 
  271         return (uint64_t)estcpu * loadfac / (loadfac + FSCALE);
  272 }
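
/*
 * [Editor's illustration -- not part of kern_synch.c]  The derivation
 * above can be spot-checked numerically: applying
 * decay = (2 * loadav) / (2 * loadav + 1) once per second for
 * 5 * loadav seconds leaves roughly 0.13, 0.11, 0.10 and 0.09 of the
 * original estcpu for load averages 1 through 4, i.e. close to the 10%
 * target, which matches the "actual power" table above.
 */

#include <stdio.h>

int
main(void)
{
        int loadav, i;

        for (loadav = 1; loadav <= 4; loadav++) {
                double decay = (2.0 * loadav) / (2.0 * loadav + 1.0);
                double estcpu = 1.0;

                /* Decay once per second for 5 * loadav seconds. */
                for (i = 0; i < 5 * loadav; i++)
                        estcpu *= decay;
                printf("loadav %d: remaining fraction %.3f\n",
                    loadav, estcpu);
        }
        return 0;
}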
  273 
  274 /*
  275  * For all load averages >= 1 and max p_estcpu of (255 << ESTCPU_SHIFT),
  276  * sleeping for at least seven times the loadfactor will decay p_estcpu to
  277  * less than (1 << ESTCPU_SHIFT).
  278  *
  279  * note that our ESTCPU_MAX is actually much smaller than (255 << ESTCPU_SHIFT).
  280  */
  281 static fixpt_t
  282 decay_cpu_batch(fixpt_t loadfac, fixpt_t estcpu, unsigned int n)
  283 {
  284 
  285         if ((n << FSHIFT) >= 7 * loadfac) {
  286                 return 0;
  287         }
  288 
  289         while (estcpu != 0 && n > 1) {
  290                 estcpu = decay_cpu(loadfac, estcpu);
  291                 n--;
  292         }
  293 
  294         return estcpu;
  295 }
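
/*
 * [Editor's illustration -- not part of kern_synch.c]  A floating-point
 * stand-in for the fixed-point code that checks the claim above: starting
 * from the largest possible p_estcpu (255 << ESTCPU_SHIFT), sleeping for
 * 7 * loadfactor seconds decays it below (1 << ESTCPU_SHIFT) == 2048.
 * The tightest case is loadav 1, which ends up around 1790.
 * (Link with -lm.)
 */

#include <math.h>
#include <stdio.h>

int
main(void)
{
        int loadav;

        for (loadav = 1; loadav <= 4; loadav++) {
                double decay = (2.0 * loadav) / (2.0 * loadav + 1.0);
                int secs = 7 * 2 * loadav;              /* 7 * loadfactor */
                double estcpu = 255.0 * 2048.0;         /* 255 << 11 */

                estcpu *= pow(decay, secs);
                printf("loadav %d: estcpu %.0f after %d s (%s 2048)\n",
                    loadav, estcpu, secs, estcpu < 2048.0 ? "<" : ">=");
        }
        return 0;
}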
  296 
  297 /* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
  298 fixpt_t ccpu = 0.95122942450071400909 * FSCALE;         /* exp(-1/20) */
  299 
  300 /*
  301  * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
  302  * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
  303  * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
  304  *
  305  * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
  306  *      1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
  307  *
   308  * If you don't want to bother with the faster/more-accurate formula, you
   309  * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
   310  * (more general) method of calculating the percentage of CPU used by a process.
  311  */
  312 #define CCPU_SHIFT      11
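
/*
 * [Editor's illustration -- not part of kern_synch.c]  The choice of
 * ccpu == exp(-1/20) gives the "decay 95% of p_pctcpu in 60 seconds"
 * behaviour noted above: multiplying by ccpu once per second for 60
 * seconds leaves exp(-3), i.e. about 5% of the original value.
 * (Link with -lm.)
 */

#include <math.h>
#include <stdio.h>

int
main(void)
{
        double ccpu = exp(-1.0 / 20.0);         /* 0.95122942... */
        double pctcpu = 1.0;
        int i;

        for (i = 0; i < 60; i++)
                pctcpu *= ccpu;
        printf("fraction left after 60 s: %.4f\n", pctcpu);     /* ~0.0498 */
        return 0;
}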
  313 
  314 /*
  315  * Recompute process priorities, every hz ticks.
  316  */
  317 /* ARGSUSED */
  318 void
  319 schedcpu(void *arg)
  320 {
  321         fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
  322         struct lwp *l;
  323         struct proc *p;
  324         int s, minslp;
  325         int clkhz;
  326 
  327         schedcpu_ticks++;
  328 
  329         proclist_lock_read();
  330         PROCLIST_FOREACH(p, &allproc) {
  331                 /*
  332                  * Increment time in/out of memory and sleep time
  333                  * (if sleeping).  We ignore overflow; with 16-bit int's
  334                  * (remember them?) overflow takes 45 days.
  335                  */
  336                 minslp = 2;
  337                 LIST_FOREACH(l, &p->p_lwps, l_sibling) {
  338                         l->l_swtime++;
  339                         if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
  340                             l->l_stat == LSSUSPENDED) {
  341                                 l->l_slptime++;
  342                                 minslp = min(minslp, l->l_slptime);
  343                         } else
  344                                 minslp = 0;
  345                 }
  346                 p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
  347                 /*
  348                  * If the process has slept the entire second,
  349                  * stop recalculating its priority until it wakes up.
  350                  */
  351                 if (minslp > 1)
  352                         continue;
  353                 s = splstatclock();     /* prevent state changes */
  354                 /*
  355                  * p_pctcpu is only for ps.
  356                  */
  357                 clkhz = stathz != 0 ? stathz : hz;
  358 #if     (FSHIFT >= CCPU_SHIFT)
  359                 p->p_pctcpu += (clkhz == 100)?
  360                         ((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT):
  361                         100 * (((fixpt_t) p->p_cpticks)
  362                                 << (FSHIFT - CCPU_SHIFT)) / clkhz;
  363 #else
  364                 p->p_pctcpu += ((FSCALE - ccpu) *
  365                         (p->p_cpticks * FSCALE / clkhz)) >> FSHIFT;
  366 #endif
  367                 p->p_cpticks = 0;
  368                 p->p_estcpu = decay_cpu(loadfac, p->p_estcpu);
  369                 splx(s);        /* Done with the process CPU ticks update */
  370                 SCHED_LOCK(s);
  371                 LIST_FOREACH(l, &p->p_lwps, l_sibling) {
  372                         if (l->l_slptime > 1)
  373                                 continue;
  374                         resetpriority(l);
  375                         if (l->l_priority >= PUSER) {
  376                                 if (l->l_stat == LSRUN &&
  377                                     (l->l_flag & L_INMEM) &&
  378                                     (l->l_priority / PPQ) != (l->l_usrpri / PPQ)) {
  379                                         remrunqueue(l);
  380                                         l->l_priority = l->l_usrpri;
  381                                         setrunqueue(l);
  382                                 } else
  383                                         l->l_priority = l->l_usrpri;
  384                         }
  385                 }
  386                 SCHED_UNLOCK(s);
  387         }
  388         proclist_unlock_read();
  389         uvm_meter();
  390         wakeup((caddr_t)&lbolt);
  391         callout_schedule(&schedcpu_ch, hz);
  392 }
  393 
  394 /*
  395  * Recalculate the priority of a process after it has slept for a while.
  396  */
  397 void
  398 updatepri(struct lwp *l)
  399 {
  400         struct proc *p = l->l_proc;
  401         fixpt_t loadfac;
  402 
  403         SCHED_ASSERT_LOCKED();
  404         KASSERT(l->l_slptime > 1);
  405 
  406         loadfac = loadfactor(averunnable.ldavg[0]);
  407 
  408         l->l_slptime--; /* the first time was done in schedcpu */
  409         /* XXX NJWLWP */
  410         p->p_estcpu = decay_cpu_batch(loadfac, p->p_estcpu, l->l_slptime);
  411         resetpriority(l);
  412 }
  413 
  414 /*
  415  * During autoconfiguration or after a panic, a sleep will simply
  416  * lower the priority briefly to allow interrupts, then return.
  417  * The priority to be used (safepri) is machine-dependent, thus this
  418  * value is initialized and maintained in the machine-dependent layers.
  419  * This priority will typically be 0, or the lowest priority
  420  * that is safe for use on the interrupt stack; it can be made
  421  * higher to block network software interrupts after panics.
  422  */
  423 int safepri;
  424 
  425 /*
  426  * General sleep call.  Suspends the current process until a wakeup is
  427  * performed on the specified identifier.  The process will then be made
  428  * runnable with the specified priority.  Sleeps at most timo/hz seconds
  429  * (0 means no timeout).  If pri includes PCATCH flag, signals are checked
  430  * before and after sleeping, else signals are not checked.  Returns 0 if
   431  * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
   432  * signal needs to be delivered, ERESTART is returned if the current system
   433  * call should be restarted if possible, and EINTR is returned if the system
   434  * call should be interrupted by the signal.
  435  *
  436  * The interlock is held until the scheduler_slock is acquired.  The
  437  * interlock will be locked before returning back to the caller
  438  * unless the PNORELOCK flag is specified, in which case the
  439  * interlock will always be unlocked upon return.
  440  */
  441 int
  442 ltsleep(volatile const void *ident, int priority, const char *wmesg, int timo,
  443     volatile struct simplelock *interlock)
  444 {
  445         struct lwp *l = curlwp;
  446         struct proc *p = l ? l->l_proc : NULL;
  447         struct slpque *qp;
  448         int sig, s;
  449         int catch = priority & PCATCH;
  450         int relock = (priority & PNORELOCK) == 0;
  451         int exiterr = (priority & PNOEXITERR) == 0;
  452 
  453         /*
  454          * XXXSMP
  455          * This is probably bogus.  Figure out what the right
  456          * thing to do here really is.
  457          * Note that not sleeping if ltsleep is called with curlwp == NULL
  458          * in the shutdown case is disgusting but partly necessary given
  459          * how shutdown (barely) works.
  460          */
  461         if (cold || (doing_shutdown && (panicstr || (l == NULL)))) {
  462                 /*
  463                  * After a panic, or during autoconfiguration,
  464                  * just give interrupts a chance, then just return;
  465                  * don't run any other procs or panic below,
  466                  * in case this is the idle process and already asleep.
  467                  */
  468                 s = splhigh();
  469                 splx(safepri);
  470                 splx(s);
  471                 if (interlock != NULL && relock == 0)
  472                         simple_unlock(interlock);
  473                 return (0);
  474         }
  475 
  476         KASSERT(p != NULL);
  477         LOCK_ASSERT(interlock == NULL || simple_lock_held(interlock));
  478 
  479 #ifdef KTRACE
  480         if (KTRPOINT(p, KTR_CSW))
  481                 ktrcsw(l, 1, 0);
  482 #endif
  483 
  484         SCHED_LOCK(s);
  485 
  486 #ifdef DIAGNOSTIC
  487         if (ident == NULL)
  488                 panic("ltsleep: ident == NULL");
  489         if (l->l_stat != LSONPROC)
  490                 panic("ltsleep: l_stat %d != LSONPROC", l->l_stat);
  491         if (l->l_back != NULL)
   492                 panic("ltsleep: l_back != NULL");
  493 #endif
  494 
  495         l->l_wchan = ident;
  496         l->l_wmesg = wmesg;
  497         l->l_slptime = 0;
  498         l->l_priority = priority & PRIMASK;
  499 
  500         qp = SLPQUE(ident);
  501         if (qp->sq_head == 0)
  502                 qp->sq_head = l;
  503         else {
  504                 *qp->sq_tailp = l;
  505         }
  506         *(qp->sq_tailp = &l->l_forw) = 0;
  507 
  508         if (timo)
  509                 callout_reset(&l->l_tsleep_ch, timo, endtsleep, l);
  510 
  511         /*
  512          * We can now release the interlock; the scheduler_slock
  513          * is held, so a thread can't get in to do wakeup() before
  514          * we do the switch.
  515          *
  516          * XXX We leave the code block here, after inserting ourselves
  517          * on the sleep queue, because we might want a more clever
  518          * data structure for the sleep queues at some point.
  519          */
  520         if (interlock != NULL)
  521                 simple_unlock(interlock);
  522 
  523         /*
  524          * We put ourselves on the sleep queue and start our timeout
  525          * before calling CURSIG, as we could stop there, and a wakeup
  526          * or a SIGCONT (or both) could occur while we were stopped.
  527          * A SIGCONT would cause us to be marked as SSLEEP
  528          * without resuming us, thus we must be ready for sleep
  529          * when CURSIG is called.  If the wakeup happens while we're
  530          * stopped, p->p_wchan will be 0 upon return from CURSIG.
  531          */
  532         if (catch) {
  533                 XXX_SCHED_UNLOCK;
  534                 l->l_flag |= L_SINTR;
  535                 if (((sig = CURSIG(l)) != 0) ||
  536                     ((p->p_flag & P_WEXIT) && p->p_nlwps > 1)) {
  537                         XXX_SCHED_LOCK;
  538                         if (l->l_wchan != NULL)
  539                                 unsleep(l);
  540                         l->l_stat = LSONPROC;
  541                         SCHED_UNLOCK(s);
  542                         goto resume;
  543                 }
  544                 XXX_SCHED_LOCK;
  545                 if (l->l_wchan == NULL) {
  546                         SCHED_UNLOCK(s);
  547                         catch = 0;
  548                         goto resume;
  549                 }
  550         } else
  551                 sig = 0;
  552         l->l_stat = LSSLEEP;
  553         p->p_nrlwps--;
  554         p->p_stats->p_ru.ru_nvcsw++;
  555         SCHED_ASSERT_LOCKED();
  556         if (l->l_flag & L_SA)
  557                 sa_switch(l, SA_UPCALL_BLOCKED);
  558         else
  559                 mi_switch(l, NULL);
  560 
  561 #ifdef KERN_SYNCH_BPENDTSLEEP_LABEL
  562         /*
  563          * XXX
  564          * gcc4 optimizer will duplicate this asm statement on some arch
  565          * and it will cause a multiple symbol definition error in gas.
  566          * the kernel Makefile is setup to use -fno-reorder-blocks if
  567          * this option is set.
  568          */
  569         /* handy breakpoint location after process "wakes" */
  570         __asm(".globl bpendtsleep\nbpendtsleep:");
  571 #endif
  572         /*
  573          * p->p_nrlwps is incremented by whoever made us runnable again,
  574          * either setrunnable() or awaken().
  575          */
  576 
  577         SCHED_ASSERT_UNLOCKED();
  578         splx(s);
  579 
  580  resume:
  581         KDASSERT(l->l_cpu != NULL);
  582         KDASSERT(l->l_cpu == curcpu());
  583         l->l_cpu->ci_schedstate.spc_curpriority = l->l_usrpri;
  584 
  585         l->l_flag &= ~L_SINTR;
  586         if (l->l_flag & L_TIMEOUT) {
  587                 l->l_flag &= ~(L_TIMEOUT|L_CANCELLED);
  588                 if (sig == 0) {
  589 #ifdef KTRACE
  590                         if (KTRPOINT(p, KTR_CSW))
  591                                 ktrcsw(l, 0, 0);
  592 #endif
  593                         if (relock && interlock != NULL)
  594                                 simple_lock(interlock);
  595                         return (EWOULDBLOCK);
  596                 }
  597         } else if (timo)
  598                 callout_stop(&l->l_tsleep_ch);
  599 
  600         if (catch) {
  601                 const int cancelled = l->l_flag & L_CANCELLED;
  602                 l->l_flag &= ~L_CANCELLED;
  603                 if (sig != 0 || (sig = CURSIG(l)) != 0 || cancelled) {
  604 #ifdef KTRACE
  605                         if (KTRPOINT(p, KTR_CSW))
  606                                 ktrcsw(l, 0, 0);
  607 #endif
  608                         if (relock && interlock != NULL)
  609                                 simple_lock(interlock);
  610                         /*
  611                          * If this sleep was canceled, don't let the syscall
  612                          * restart.
  613                          */
  614                         if (cancelled ||
  615                             (SIGACTION(p, sig).sa_flags & SA_RESTART) == 0)
  616                                 return (EINTR);
  617                         return (ERESTART);
  618                 }
  619         }
  620 
  621 #ifdef KTRACE
  622         if (KTRPOINT(p, KTR_CSW))
  623                 ktrcsw(l, 0, 0);
  624 #endif
  625         if (relock && interlock != NULL)
  626                 simple_lock(interlock);
  627 
  628         /* XXXNJW this is very much a kluge.
  629          * revisit. a better way of preventing looping/hanging syscalls like
  630          * wait4() and _lwp_wait() from wedging an exiting process
  631          * would be preferred.
  632          */
  633         if (catch && ((p->p_flag & P_WEXIT) && p->p_nlwps > 1 && exiterr))
  634                 return (EINTR);
  635         return (0);
  636 }
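
/*
 * [Editor's illustration -- not part of kern_synch.c]  A hedged sketch of
 * the interlock protocol described above; example_lock, example_ready and
 * both functions are hypothetical.  The condition is tested under the
 * simplelock, ltsleep() drops that lock only after the LWP is on the sleep
 * queue (so the wakeup cannot be lost) and, since PNORELOCK is not passed,
 * retakes it before returning.
 */

static struct simplelock example_lock = SIMPLELOCK_INITIALIZER;
static int example_ready;

static int
example_wait_for_data(void)
{
        int error = 0;

        simple_lock(&example_lock);
        while (!example_ready && error == 0) {
                /* Sleep on &example_ready, catching signals, 1 s timeout. */
                error = ltsleep(&example_ready, PZERO | PCATCH, "exwait",
                    hz, &example_lock);
        }
        simple_unlock(&example_lock);
        return error;           /* 0, EWOULDBLOCK, EINTR or ERESTART */
}

static void
example_post_data(void)
{
        simple_lock(&example_lock);
        example_ready = 1;
        simple_unlock(&example_lock);
        wakeup(&example_ready);
}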
  637 
  638 /*
  639  * Implement timeout for tsleep.
  640  * If process hasn't been awakened (wchan non-zero),
  641  * set timeout flag and undo the sleep.  If LWP
  642  * is stopped, just unsleep so it will remain stopped.
  643  */
  644 void
  645 endtsleep(void *arg)
  646 {
  647         struct lwp *l;
  648         int s;
  649 
  650         l = (struct lwp *)arg;
  651         SCHED_LOCK(s);
  652         if (l->l_wchan) {
  653                 if (l->l_stat == LSSLEEP)
  654                         setrunnable(l);
  655                 else
  656                         unsleep(l);
  657                 l->l_flag |= L_TIMEOUT;
  658         }
  659         SCHED_UNLOCK(s);
  660 }
  661 
  662 /*
  663  * Remove a process from its wait queue
  664  */
  665 void
  666 unsleep(struct lwp *l)
  667 {
  668         struct slpque *qp;
  669         struct lwp **hp;
  670 
  671         SCHED_ASSERT_LOCKED();
  672 
  673         if (l->l_wchan) {
  674                 hp = &(qp = SLPQUE(l->l_wchan))->sq_head;
  675                 while (*hp != l)
  676                         hp = &(*hp)->l_forw;
  677                 *hp = l->l_forw;
  678                 if (qp->sq_tailp == &l->l_forw)
  679                         qp->sq_tailp = hp;
  680                 l->l_wchan = 0;
  681         }
  682 }
  683 
  684 inline void
  685 sa_awaken(struct lwp *l)
  686 {
  687 
  688         SCHED_ASSERT_LOCKED();
  689 
  690         if (l == l->l_savp->savp_lwp && l->l_flag & L_SA_YIELD)
  691                 l->l_flag &= ~L_SA_IDLE;
  692 }
  693 
  694 /*
  695  * Optimized-for-wakeup() version of setrunnable().
  696  */
  697 inline void
  698 awaken(struct lwp *l)
  699 {
  700 
  701         SCHED_ASSERT_LOCKED();
  702 
  703         if (l->l_proc->p_sa)
  704                 sa_awaken(l);
  705 
  706         if (l->l_slptime > 1)
  707                 updatepri(l);
  708         l->l_slptime = 0;
  709         l->l_stat = LSRUN;
  710         l->l_proc->p_nrlwps++;
  711         /*
  712          * Since curpriority is a user priority, p->p_priority
  713          * is always better than curpriority on the last CPU on
  714          * which it ran.
  715          *
  716          * XXXSMP See affinity comment in resched_proc().
  717          */
  718         if (l->l_flag & L_INMEM) {
  719                 setrunqueue(l);
  720                 KASSERT(l->l_cpu != NULL);
  721                 need_resched(l->l_cpu);
  722         } else
  723                 sched_wakeup(&proc0);
  724 }
  725 
  726 #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
  727 void
  728 sched_unlock_idle(void)
  729 {
  730 
  731         simple_unlock(&sched_lock);
  732 }
  733 
  734 void
  735 sched_lock_idle(void)
  736 {
  737 
  738         simple_lock(&sched_lock);
  739 }
  740 #endif /* MULTIPROCESSOR || LOCKDEBUG */
  741 
  742 /*
  743  * Make all processes sleeping on the specified identifier runnable.
  744  */
  745 
  746 void
  747 wakeup(volatile const void *ident)
  748 {
  749         int s;
  750 
  751         SCHED_ASSERT_UNLOCKED();
  752 
  753         SCHED_LOCK(s);
  754         sched_wakeup(ident);
  755         SCHED_UNLOCK(s);
  756 }
  757 
  758 void
  759 sched_wakeup(volatile const void *ident)
  760 {
  761         struct slpque *qp;
  762         struct lwp *l, **q;
  763 
  764         SCHED_ASSERT_LOCKED();
  765 
  766         qp = SLPQUE(ident);
  767  restart:
  768         for (q = &qp->sq_head; (l = *q) != NULL; ) {
  769 #ifdef DIAGNOSTIC
  770                 if (l->l_back || (l->l_stat != LSSLEEP &&
  771                     l->l_stat != LSSTOP && l->l_stat != LSSUSPENDED))
  772                         panic("wakeup");
  773 #endif
  774                 if (l->l_wchan == ident) {
  775                         l->l_wchan = 0;
  776                         *q = l->l_forw;
  777                         if (qp->sq_tailp == &l->l_forw)
  778                                 qp->sq_tailp = q;
  779                         if (l->l_stat == LSSLEEP) {
  780                                 awaken(l);
  781                                 goto restart;
  782                         }
  783                 } else
  784                         q = &l->l_forw;
  785         }
  786 }
  787 
  788 /*
  789  * Make the highest priority process first in line on the specified
  790  * identifier runnable.
  791  */
  792 void
  793 wakeup_one(volatile const void *ident)
  794 {
  795         struct slpque *qp;
  796         struct lwp *l, **q;
  797         struct lwp *best_sleepp, **best_sleepq;
  798         struct lwp *best_stopp, **best_stopq;
  799         int s;
  800 
  801         best_sleepp = best_stopp = NULL;
  802         best_sleepq = best_stopq = NULL;
  803 
  804         SCHED_LOCK(s);
  805 
  806         qp = SLPQUE(ident);
  807 
  808         for (q = &qp->sq_head; (l = *q) != NULL; q = &l->l_forw) {
  809 #ifdef DIAGNOSTIC
  810                 if (l->l_back || (l->l_stat != LSSLEEP &&
  811                     l->l_stat != LSSTOP && l->l_stat != LSSUSPENDED))
  812                         panic("wakeup_one");
  813 #endif
  814                 if (l->l_wchan == ident) {
  815                         if (l->l_stat == LSSLEEP) {
  816                                 if (best_sleepp == NULL ||
  817                                     l->l_priority < best_sleepp->l_priority) {
  818                                         best_sleepp = l;
  819                                         best_sleepq = q;
  820                                 }
  821                         } else {
  822                                 if (best_stopp == NULL ||
  823                                     l->l_priority < best_stopp->l_priority) {
  824                                         best_stopp = l;
  825                                         best_stopq = q;
  826                                 }
  827                         }
  828                 }
  829         }
  830 
  831         /*
   832          * Consider any SSLEEP process to be higher priority than the
   833          * highest priority SSTOP process.
  834          */
  835         if (best_sleepp != NULL) {
  836                 l = best_sleepp;
  837                 q = best_sleepq;
  838         } else {
  839                 l = best_stopp;
  840                 q = best_stopq;
  841         }
  842 
  843         if (l != NULL) {
  844                 l->l_wchan = NULL;
  845                 *q = l->l_forw;
  846                 if (qp->sq_tailp == &l->l_forw)
  847                         qp->sq_tailp = q;
  848                 if (l->l_stat == LSSLEEP)
  849                         awaken(l);
  850         }
  851         SCHED_UNLOCK(s);
  852 }
  853 
  854 /*
  855  * General yield call.  Puts the current process back on its run queue and
  856  * performs a voluntary context switch.  Should only be called when the
   857  * current process explicitly requests it (e.g. sched_yield(2) in compat code).
  858  */
  859 void
  860 yield(void)
  861 {
  862         struct lwp *l = curlwp;
  863         int s;
  864 
  865         SCHED_LOCK(s);
  866         l->l_priority = l->l_usrpri;
  867         l->l_stat = LSRUN;
  868         setrunqueue(l);
  869         l->l_proc->p_stats->p_ru.ru_nvcsw++;
  870         mi_switch(l, NULL);
  871         SCHED_ASSERT_UNLOCKED();
  872         splx(s);
  873 }
  874 
  875 /*
  876  * General preemption call.  Puts the current LWP back on its run queue
  877  * and performs an involuntary context switch.
   878  * The 'more' ("more work to do") argument is boolean.  preempt() calls made
   879  * when returning to userspace pass 0; "voluntary" preemptions in e.g.
   880  * uiomove() pass 1.  This is used to indicate to the SA subsystem that the
   881  * LWP is not yet finished in the kernel.
  882  */
  883 
  884 void
  885 preempt(int more)
  886 {
  887         struct lwp *l = curlwp;
  888         int r, s;
  889 
  890         SCHED_LOCK(s);
  891         l->l_priority = l->l_usrpri;
  892         l->l_stat = LSRUN;
  893         setrunqueue(l);
  894         l->l_proc->p_stats->p_ru.ru_nivcsw++;
  895         r = mi_switch(l, NULL);
  896         SCHED_ASSERT_UNLOCKED();
  897         splx(s);
  898         if ((l->l_flag & L_SA) != 0 && r != 0 && more == 0)
  899                 sa_preempt(l);
  900 }
  901 
  902 /*
  903  * The machine independent parts of context switch.
  904  * Must be called at splsched() (no higher!) and with
  905  * the sched_lock held.
  906  * Switch to "new" if non-NULL, otherwise let cpu_switch choose
  907  * the next lwp.
  908  *
  909  * Returns 1 if another LWP was actually run.
  910  */
  911 int
  912 mi_switch(struct lwp *l, struct lwp *newl)
  913 {
  914         struct schedstate_percpu *spc;
  915         struct rlimit *rlim;
  916         long s, u;
  917         struct timeval tv;
  918         int hold_count;
  919         struct proc *p = l->l_proc;
  920         int retval;
  921 
  922         SCHED_ASSERT_LOCKED();
  923 
  924         /*
  925          * Release the kernel_lock, as we are about to yield the CPU.
  926          * The scheduler lock is still held until cpu_switch()
  927          * selects a new process and removes it from the run queue.
  928          */
  929         hold_count = KERNEL_LOCK_RELEASE_ALL();
  930 
  931         KDASSERT(l->l_cpu != NULL);
  932         KDASSERT(l->l_cpu == curcpu());
  933 
  934         spc = &l->l_cpu->ci_schedstate;
  935 
  936 #ifdef LOCKDEBUG
  937         spinlock_switchcheck();
  938         simple_lock_switchcheck();
  939 #endif
  940 
  941         /*
  942          * Compute the amount of time during which the current
  943          * process was running.
  944          */
  945         microtime(&tv);
  946         u = p->p_rtime.tv_usec +
  947             (tv.tv_usec - spc->spc_runtime.tv_usec);
  948         s = p->p_rtime.tv_sec + (tv.tv_sec - spc->spc_runtime.tv_sec);
  949         if (u < 0) {
  950                 u += 1000000;
  951                 s--;
  952         } else if (u >= 1000000) {
  953                 u -= 1000000;
  954                 s++;
  955         }
  956         p->p_rtime.tv_usec = u;
  957         p->p_rtime.tv_sec = s;
  958 
  959         /*
  960          * Process is about to yield the CPU; clear the appropriate
  961          * scheduling flags.
  962          */
  963         spc->spc_flags &= ~SPCF_SWITCHCLEAR;
  964 
  965 #ifdef KSTACK_CHECK_MAGIC
  966         kstack_check_magic(l);
  967 #endif
  968 
  969         /*
  970          * If we are using h/w performance counters, save context.
  971          */
  972 #if PERFCTRS
  973         if (PMC_ENABLED(p)) {
  974                 pmc_save_context(p);
  975         }
  976 #endif
  977 
  978         /*
  979          * Switch to the new current process.  When we
  980          * run again, we'll return back here.
  981          */
  982         uvmexp.swtch++;
  983         if (newl == NULL) {
  984                 retval = cpu_switch(l, NULL);
  985         } else {
  986                 remrunqueue(newl);
  987                 cpu_switchto(l, newl);
  988                 retval = 0;
  989         }
  990 
  991         /*
  992          * If we are using h/w performance counters, restore context.
  993          */
  994 #if PERFCTRS
  995         if (PMC_ENABLED(p)) {
  996                 pmc_restore_context(p);
  997         }
  998 #endif
  999 
 1000         /*
 1001          * Make sure that MD code released the scheduler lock before
 1002          * resuming us.
 1003          */
 1004         SCHED_ASSERT_UNLOCKED();
 1005 
 1006         /*
 1007          * We're running again; record our new start time.  We might
  1008          * be running on a new CPU now, so don't use the cached
 1009          * schedstate_percpu pointer.
 1010          */
 1011         KDASSERT(l->l_cpu != NULL);
 1012         KDASSERT(l->l_cpu == curcpu());
 1013         microtime(&l->l_cpu->ci_schedstate.spc_runtime);
 1014 
 1015         /*
 1016          * Reacquire the kernel_lock now.  We do this after we've
 1017          * released the scheduler lock to avoid deadlock, and before
 1018          * we reacquire the interlock.
 1019          */
 1020         KERNEL_LOCK_ACQUIRE_COUNT(hold_count);
 1021 
 1022         /*
 1023          * Check if the process exceeds its CPU resource allocation.
 1024          * If over max, kill it.  In any case, if it has run for more
 1025          * than 10 minutes, reduce priority to give others a chance.
 1026          */
 1027         rlim = &p->p_rlimit[RLIMIT_CPU];
 1028         if (s >= rlim->rlim_cur) {
 1029                 if (s >= rlim->rlim_max) {
 1030                         psignal(p, SIGKILL);
 1031                 } else {
 1032                         psignal(p, SIGXCPU);
 1033                         if (rlim->rlim_cur < rlim->rlim_max)
 1034                                 rlim->rlim_cur += 5;
 1035                 }
 1036         }
 1037         if (autonicetime && s > autonicetime &&
 1038             kauth_cred_geteuid(p->p_cred) && p->p_nice == NZERO) {
 1039                 SCHED_LOCK(s);
 1040                 p->p_nice = autoniceval + NZERO;
 1041                 resetpriority(l);
 1042                 SCHED_UNLOCK(s);
 1043         }
 1044 
 1045         return retval;
 1046 }
 1047 
 1048 /*
 1049  * Initialize the (doubly-linked) run queues
 1050  * to be empty.
 1051  */
 1052 void
 1053 rqinit()
 1054 {
 1055         int i;
 1056 
 1057         for (i = 0; i < RUNQUE_NQS; i++)
 1058                 sched_qs[i].ph_link = sched_qs[i].ph_rlink =
 1059                     (struct lwp *)&sched_qs[i];
 1060 }
 1061 
 1062 static inline void
 1063 resched_proc(struct lwp *l, u_char pri)
 1064 {
 1065         struct cpu_info *ci;
 1066 
 1067         /*
 1068          * XXXSMP
 1069          * Since l->l_cpu persists across a context switch,
 1070          * this gives us *very weak* processor affinity, in
 1071          * that we notify the CPU on which the process last
 1072          * ran that it should try to switch.
 1073          *
 1074          * This does not guarantee that the process will run on
 1075          * that processor next, because another processor might
 1076          * grab it the next time it performs a context switch.
 1077          *
 1078          * This also does not handle the case where its last
 1079          * CPU is running a higher-priority process, but every
 1080          * other CPU is running a lower-priority process.  There
 1081          * are ways to handle this situation, but they're not
 1082          * currently very pretty, and we also need to weigh the
 1083          * cost of moving a process from one CPU to another.
 1084          *
 1085          * XXXSMP
 1086          * There is also the issue of locking the other CPU's
 1087          * sched state, which we currently do not do.
 1088          */
 1089         ci = (l->l_cpu != NULL) ? l->l_cpu : curcpu();
 1090         if (pri < ci->ci_schedstate.spc_curpriority)
 1091                 need_resched(ci);
 1092 }
 1093 
 1094 /*
 1095  * Change process state to be runnable,
 1096  * placing it on the run queue if it is in memory,
 1097  * and awakening the swapper if it isn't in memory.
 1098  */
 1099 void
 1100 setrunnable(struct lwp *l)
 1101 {
 1102         struct proc *p = l->l_proc;
 1103 
 1104         SCHED_ASSERT_LOCKED();
 1105 
 1106         switch (l->l_stat) {
 1107         case 0:
 1108         case LSRUN:
 1109         case LSONPROC:
 1110         case LSZOMB:
 1111         case LSDEAD:
 1112         default:
 1113                 panic("setrunnable: lwp %p state was %d", l, l->l_stat);
 1114         case LSSTOP:
 1115                 /*
 1116                  * If we're being traced (possibly because someone attached us
 1117                  * while we were stopped), check for a signal from the debugger.
 1118                  */
 1119                 if ((p->p_flag & P_TRACED) != 0 && p->p_xstat != 0) {
 1120                         sigaddset(&p->p_sigctx.ps_siglist, p->p_xstat);
 1121                         CHECKSIGS(p);
 1122                 }
 1123         case LSSLEEP:
 1124                 unsleep(l);             /* e.g. when sending signals */
 1125                 break;
 1126 
 1127         case LSIDL:
 1128                 break;
 1129         case LSSUSPENDED:
 1130                 break;
 1131         }
 1132 
 1133         if (l->l_proc->p_sa)
 1134                 sa_awaken(l);
 1135 
 1136         l->l_stat = LSRUN;
 1137         p->p_nrlwps++;
 1138 
 1139         if (l->l_flag & L_INMEM)
 1140                 setrunqueue(l);
 1141 
 1142         if (l->l_slptime > 1)
 1143                 updatepri(l);
 1144         l->l_slptime = 0;
 1145         if ((l->l_flag & L_INMEM) == 0)
 1146                 sched_wakeup((caddr_t)&proc0);
 1147         else
 1148                 resched_proc(l, l->l_priority);
 1149 }
 1150 
 1151 /*
 1152  * Compute the priority of a process when running in user mode.
 1153  * Arrange to reschedule if the resulting priority is better
 1154  * than that of the current process.
 1155  */
 1156 void
 1157 resetpriority(struct lwp *l)
 1158 {
 1159         unsigned int newpriority;
 1160         struct proc *p = l->l_proc;
 1161 
 1162         SCHED_ASSERT_LOCKED();
 1163 
 1164         newpriority = PUSER + (p->p_estcpu >> ESTCPU_SHIFT) +
 1165                         NICE_WEIGHT * (p->p_nice - NZERO);
 1166         newpriority = min(newpriority, MAXPRI);
 1167         l->l_usrpri = newpriority;
 1168         resched_proc(l, l->l_usrpri);
 1169 }
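
/*
 * [Editor's illustration -- not part of kern_synch.c]  A stand-alone
 * sketch of the resetpriority() formula.  PUSER, NZERO and MAXPRI are not
 * defined in this file; the traditional values 50, 20 and 127 are assumed
 * here purely for illustration.
 */

#include <stdio.h>

#define EX_ESTCPU_SHIFT 11
#define EX_NICE_WEIGHT  2
#define EX_PUSER        50      /* assumed */
#define EX_NZERO        20      /* assumed */
#define EX_MAXPRI       127     /* assumed */

static unsigned int
user_priority(unsigned int estcpu, int p_nice)
{
        unsigned int pri;

        pri = EX_PUSER + (estcpu >> EX_ESTCPU_SHIFT) +
            EX_NICE_WEIGHT * (p_nice - EX_NZERO);
        return pri < EX_MAXPRI ? pri : EX_MAXPRI;
}

int
main(void)
{
        /* nice 0 (p_nice == NZERO), estcpu of 16 in scaled form: 50+16 = 66 */
        printf("%u\n", user_priority(16 << EX_ESTCPU_SHIFT, EX_NZERO));
        /* same CPU history at nice +10: 20 slots worse, i.e. 86 */
        printf("%u\n", user_priority(16 << EX_ESTCPU_SHIFT, EX_NZERO + 10));
        return 0;
}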
 1170 
 1171 /*
 1172  * Recompute priority for all LWPs in a process.
 1173  */
 1174 void
 1175 resetprocpriority(struct proc *p)
 1176 {
 1177         struct lwp *l;
 1178 
 1179         LIST_FOREACH(l, &p->p_lwps, l_sibling)
 1180             resetpriority(l);
 1181 }
 1182 
 1183 /*
 1184  * We adjust the priority of the current process.  The priority of a process
 1185  * gets worse as it accumulates CPU time.  The CPU usage estimator (p_estcpu)
 1186  * is increased here.  The formula for computing priorities (in kern_synch.c)
 1187  * will compute a different value each time p_estcpu increases. This can
 1188  * cause a switch, but unless the priority crosses a PPQ boundary the actual
 1189  * queue will not change.  The CPU usage estimator ramps up quite quickly
 1190  * when the process is running (linearly), and decays away exponentially, at
 1191  * a rate which is proportionally slower when the system is busy.  The basic
  1192  * principle is that the system will forget 90% of the CPU time a process
  1193  * used within 5 * loadav seconds.  This causes the system to favor
 1194  * processes which haven't run much recently, and to round-robin among other
 1195  * processes.
 1196  */
 1197 
 1198 void
 1199 schedclock(struct lwp *l)
 1200 {
 1201         struct proc *p = l->l_proc;
 1202         int s;
 1203 
 1204         p->p_estcpu = ESTCPULIM(p->p_estcpu + (1 << ESTCPU_SHIFT));
 1205         SCHED_LOCK(s);
 1206         resetpriority(l);
 1207         SCHED_UNLOCK(s);
 1208 
 1209         if (l->l_priority >= PUSER)
 1210                 l->l_priority = l->l_usrpri;
 1211 }
 1212 
 1213 void
 1214 suspendsched()
 1215 {
 1216         struct lwp *l;
 1217         int s;
 1218 
 1219         /*
 1220          * Convert all non-P_SYSTEM LSSLEEP or LSRUN processes to
 1221          * LSSUSPENDED.
 1222          */
 1223         proclist_lock_read();
 1224         SCHED_LOCK(s);
 1225         LIST_FOREACH(l, &alllwp, l_list) {
 1226                 if ((l->l_proc->p_flag & P_SYSTEM) != 0)
 1227                         continue;
 1228 
 1229                 switch (l->l_stat) {
 1230                 case LSRUN:
 1231                         l->l_proc->p_nrlwps--;
 1232                         if ((l->l_flag & L_INMEM) != 0)
 1233                                 remrunqueue(l);
 1234                         /* FALLTHROUGH */
 1235                 case LSSLEEP:
 1236                         l->l_stat = LSSUSPENDED;
 1237                         break;
 1238                 case LSONPROC:
 1239                         /*
 1240                          * XXX SMP: we need to deal with processes on
  1241                          * other CPUs!
 1242                          */
 1243                         break;
 1244                 default:
 1245                         break;
 1246                 }
 1247         }
 1248         SCHED_UNLOCK(s);
 1249         proclist_unlock_read();
 1250 }
 1251 
 1252 /*
 1253  * scheduler_fork_hook:
 1254  *
 1255  *      Inherit the parent's scheduler history.
 1256  */
 1257 void
 1258 scheduler_fork_hook(struct proc *parent, struct proc *child)
 1259 {
 1260 
 1261         child->p_estcpu = child->p_estcpu_inherited = parent->p_estcpu;
 1262         child->p_forktime = schedcpu_ticks;
 1263 }
 1264 
 1265 /*
 1266  * scheduler_wait_hook:
 1267  *
 1268  *      Chargeback parents for the sins of their children.
 1269  */
 1270 void
 1271 scheduler_wait_hook(struct proc *parent, struct proc *child)
 1272 {
 1273         fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
 1274         fixpt_t estcpu;
 1275 
 1276         /* XXX Only if parent != init?? */
 1277 
 1278         estcpu = decay_cpu_batch(loadfac, child->p_estcpu_inherited,
 1279             schedcpu_ticks - child->p_forktime);
 1280         if (child->p_estcpu > estcpu) {
 1281                 parent->p_estcpu =
 1282                     ESTCPULIM(parent->p_estcpu + child->p_estcpu - estcpu);
 1283         }
 1284 }
 1285 
 1286 /*
 1287  * Low-level routines to access the run queue.  Optimised assembler
 1288  * routines can override these.
 1289  */
 1290 
 1291 #ifndef __HAVE_MD_RUNQUEUE
 1292 
 1293 /*
  1294  * On some architectures, it's faster to use an MSB ordering for the priorities
 1295  * than the traditional LSB ordering.
 1296  */
 1297 #ifdef __HAVE_BIGENDIAN_BITOPS
 1298 #define RQMASK(n) (0x80000000 >> (n))
 1299 #else
 1300 #define RQMASK(n) (0x00000001 << (n))
 1301 #endif
 1302 
 1303 /*
 1304  * The primitives that manipulate the run queues.  whichqs tells which
 1305  * of the 32 queues qs have processes in them.  Setrunqueue puts processes
 1306  * into queues, remrunqueue removes them from queues.  The running process is
  1307  * on no queue; other processes are on the queue indexed by p->p_priority
  1308  * divided by 4 (PPQ), which shrinks the 0-127 range of priorities into the
  1309  * 32 available queues.
 1310  */
 1311 
 1312 #ifdef RQDEBUG
 1313 static void
 1314 checkrunqueue(int whichq, struct lwp *l)
 1315 {
 1316         const struct prochd * const rq = &sched_qs[whichq];
 1317         struct lwp *l2;
 1318         int found = 0;
 1319         int die = 0;
 1320         int empty = 1;
 1321         for (l2 = rq->ph_link; l2 != (const void*) rq; l2 = l2->l_forw) {
 1322                 if (l2->l_stat != LSRUN) {
 1323                         printf("checkrunqueue[%d]: lwp %p state (%d) "
 1324                             " != LSRUN\n", whichq, l2, l2->l_stat);
 1325                 }
 1326                 if (l2->l_back->l_forw != l2) {
 1327                         printf("checkrunqueue[%d]: lwp %p back-qptr (%p) "
 1328                             "corrupt %p\n", whichq, l2, l2->l_back,
 1329                             l2->l_back->l_forw);
 1330                         die = 1;
 1331                 }
 1332                 if (l2->l_forw->l_back != l2) {
 1333                         printf("checkrunqueue[%d]: lwp %p forw-qptr (%p) "
 1334                             "corrupt %p\n", whichq, l2, l2->l_forw,
 1335                             l2->l_forw->l_back);
 1336                         die = 1;
 1337                 }
 1338                 if (l2 == l)
 1339                         found = 1;
 1340                 empty = 0;
 1341         }
 1342         if (empty && (sched_whichqs & RQMASK(whichq)) != 0) {
 1343                 printf("checkrunqueue[%d]: bit set for empty run-queue %p\n",
 1344                     whichq, rq);
 1345                 die = 1;
 1346         } else if (!empty && (sched_whichqs & RQMASK(whichq)) == 0) {
 1347                 printf("checkrunqueue[%d]: bit clear for non-empty "
 1348                     "run-queue %p\n", whichq, rq);
 1349                 die = 1;
 1350         }
 1351         if (l != NULL && (sched_whichqs & RQMASK(whichq)) == 0) {
 1352                 printf("checkrunqueue[%d]: bit clear for active lwp %p\n",
 1353                     whichq, l);
 1354                 die = 1;
 1355         }
 1356         if (l != NULL && empty) {
 1357                 printf("checkrunqueue[%d]: empty run-queue %p with "
 1358                     "active lwp %p\n", whichq, rq, l);
 1359                 die = 1;
 1360         }
 1361         if (l != NULL && !found) {
 1362                 printf("checkrunqueue[%d]: lwp %p not in runqueue %p!",
 1363                     whichq, l, rq);
 1364                 die = 1;
 1365         }
 1366         if (die)
 1367                 panic("checkrunqueue: inconsistency found");
 1368 }
 1369 #endif /* RQDEBUG */
 1370 
 1371 void
 1372 setrunqueue(struct lwp *l)
 1373 {
 1374         struct prochd *rq;
 1375         struct lwp *prev;
 1376         const int whichq = l->l_priority / PPQ;
 1377 
 1378 #ifdef RQDEBUG
 1379         checkrunqueue(whichq, NULL);
 1380 #endif
 1381 #ifdef DIAGNOSTIC
 1382         if (l->l_back != NULL || l->l_wchan != NULL || l->l_stat != LSRUN)
 1383                 panic("setrunqueue");
 1384 #endif
 1385         sched_whichqs |= RQMASK(whichq);
 1386         rq = &sched_qs[whichq];
 1387         prev = rq->ph_rlink;
 1388         l->l_forw = (struct lwp *)rq;
 1389         rq->ph_rlink = l;
 1390         prev->l_forw = l;
 1391         l->l_back = prev;
 1392 #ifdef RQDEBUG
 1393         checkrunqueue(whichq, l);
 1394 #endif
 1395 }
 1396 
 1397 void
 1398 remrunqueue(struct lwp *l)
 1399 {
 1400         struct lwp *prev, *next;
 1401         const int whichq = l->l_priority / PPQ;
 1402 #ifdef RQDEBUG
 1403         checkrunqueue(whichq, l);
 1404 #endif
 1405 #ifdef DIAGNOSTIC
 1406         if (((sched_whichqs & RQMASK(whichq)) == 0))
 1407                 panic("remrunqueue: bit %d not set", whichq);
 1408 #endif
 1409         prev = l->l_back;
 1410         l->l_back = NULL;
 1411         next = l->l_forw;
 1412         prev->l_forw = next;
 1413         next->l_back = prev;
 1414         if (prev == next)
 1415                 sched_whichqs &= ~RQMASK(whichq);
 1416 #ifdef RQDEBUG
 1417         checkrunqueue(whichq, NULL);
 1418 #endif
 1419 }
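
/*
 * [Editor's illustration -- not part of kern_synch.c]  The next LWP is
 * actually chosen in machine-dependent cpu_switch(); as a rough sketch
 * only, picking the best non-empty run queue from the whichqs bitmap with
 * the LSB ordering amounts to a find-first-set:
 */

#include <stdint.h>
#include <stdio.h>
#include <strings.h>            /* ffs() */

/* Return the index of the best (lowest) non-empty run queue, or -1. */
static int
pick_next_queue(uint32_t whichqs)
{
        if (whichqs == 0)
                return -1;              /* nothing runnable: go idle */
        return ffs((int)whichqs) - 1;   /* lowest set bit == best priority */
}

int
main(void)
{
        /* Queues 3 and 17 non-empty: queue 3 (better priority) is chosen. */
        printf("%d\n", pick_next_queue((1u << 3) | (1u << 17)));
        return 0;
}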
 1420 
 1421 #undef RQMASK
 1422 #endif /* !defined(__HAVE_MD_RUNQUEUE) */
