FreeBSD/Linux Kernel Cross Reference
sys/kern/sched_4bsd.c


    1 /*-
    2  * Copyright (c) 1982, 1986, 1990, 1991, 1993
    3  *      The Regents of the University of California.  All rights reserved.
    4  * (c) UNIX System Laboratories, Inc.
    5  * All or some portions of this file are derived from material licensed
    6  * to the University of California by American Telephone and Telegraph
    7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
    8  * the permission of UNIX System Laboratories, Inc.
    9  *
   10  * Redistribution and use in source and binary forms, with or without
   11  * modification, are permitted provided that the following conditions
   12  * are met:
   13  * 1. Redistributions of source code must retain the above copyright
   14  *    notice, this list of conditions and the following disclaimer.
   15  * 2. Redistributions in binary form must reproduce the above copyright
   16  *    notice, this list of conditions and the following disclaimer in the
   17  *    documentation and/or other materials provided with the distribution.
   18  * 4. Neither the name of the University nor the names of its contributors
   19  *    may be used to endorse or promote products derived from this software
   20  *    without specific prior written permission.
   21  *
   22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   32  * SUCH DAMAGE.
   33  */
   34 
   35 #include <sys/cdefs.h>
   36 __FBSDID("$FreeBSD: releng/9.0/sys/kern/sched_4bsd.c 225199 2011-08-26 18:00:07Z delphij $");
   37 
   38 #include "opt_hwpmc_hooks.h"
   39 #include "opt_sched.h"
   40 #include "opt_kdtrace.h"
   41 
   42 #include <sys/param.h>
   43 #include <sys/systm.h>
   44 #include <sys/cpuset.h>
   45 #include <sys/kernel.h>
   46 #include <sys/ktr.h>
   47 #include <sys/lock.h>
   48 #include <sys/kthread.h>
   49 #include <sys/mutex.h>
   50 #include <sys/proc.h>
   51 #include <sys/resourcevar.h>
   52 #include <sys/sched.h>
   53 #include <sys/smp.h>
   54 #include <sys/sysctl.h>
   55 #include <sys/sx.h>
   56 #include <sys/turnstile.h>
   57 #include <sys/umtx.h>
   58 #include <machine/pcb.h>
   59 #include <machine/smp.h>
   60 
   61 #ifdef HWPMC_HOOKS
   62 #include <sys/pmckern.h>
   63 #endif
   64 
   65 #ifdef KDTRACE_HOOKS
   66 #include <sys/dtrace_bsd.h>
   67 int                             dtrace_vtime_active;
   68 dtrace_vtime_switch_func_t      dtrace_vtime_switch_func;
   69 #endif
   70 
   71 /*
   72  * INVERSE_ESTCPU_WEIGHT is only suitable for statclock() frequencies in
   73  * the range 100-256 Hz (approximately).
   74  */
   75 #define ESTCPULIM(e) \
   76     min((e), INVERSE_ESTCPU_WEIGHT * (NICE_WEIGHT * (PRIO_MAX - PRIO_MIN) - \
   77     RQ_PPQ) + INVERSE_ESTCPU_WEIGHT - 1)
   78 #ifdef SMP
   79 #define INVERSE_ESTCPU_WEIGHT   (8 * smp_cpus)
   80 #else
   81 #define INVERSE_ESTCPU_WEIGHT   8       /* 1 / (priorities per estcpu level). */
   82 #endif
   83 #define NICE_WEIGHT             1       /* Priorities per nice level. */
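
       /*
        * Rough worked expansion of the limit above, assuming the stock
        * uniprocessor values PRIO_MIN = -20, PRIO_MAX = 20 (sys/resource.h)
        * and RQ_PPQ = 4 (sys/runq.h):
        *
        *      ESTCPULIM(e) = min(e, 8 * (1 * 40 - 4) + 8 - 1) = min(e, 295)
        *
        * so td_estcpu saturates at 295 instead of growing without bound.
        */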
   84 
   85 #define TS_NAME_LEN (MAXCOMLEN + sizeof(" td ") + sizeof(__XSTRING(UINT_MAX)))
   86 
   87 /*
   88  * The schedulable entity that runs a context.
    89  * This is an extension to the thread structure and is tailored to
    90  * the requirements of this scheduler.
   91  */
   92 struct td_sched {
   93         fixpt_t         ts_pctcpu;      /* (j) %cpu during p_swtime. */
   94         int             ts_cpticks;     /* (j) Ticks of cpu time. */
   95         int             ts_slptime;     /* (j) Seconds !RUNNING. */
   96         int             ts_flags;
   97         struct runq     *ts_runq;       /* runq the thread is currently on */
   98 #ifdef KTR
   99         char            ts_name[TS_NAME_LEN];
  100 #endif
  101 };
  102 
  103 /* flags kept in td_flags */
  104 #define TDF_DIDRUN      TDF_SCHED0      /* thread actually ran. */
  105 #define TDF_BOUND       TDF_SCHED1      /* Bound to one CPU. */
  106 
  107 /* flags kept in ts_flags */
  108 #define TSF_AFFINITY    0x0001          /* Has a non-"full" CPU set. */
  109 
  110 #define SKE_RUNQ_PCPU(ts)                                               \
  111     ((ts)->ts_runq != 0 && (ts)->ts_runq != &runq)
  112 
  113 #define THREAD_CAN_SCHED(td, cpu)       \
  114     CPU_ISSET((cpu), &(td)->td_cpuset->cs_mask)
  115 
  116 static struct td_sched td_sched0;
  117 struct mtx sched_lock;
  118 
  119 static int      sched_tdcnt;    /* Total runnable threads in the system. */
  120 static int      sched_quantum;  /* Roundrobin scheduling quantum in ticks. */
  121 #define SCHED_QUANTUM   (hz / 10)       /* Default sched quantum */
  122 
  123 static void     setup_runqs(void);
  124 static void     schedcpu(void);
  125 static void     schedcpu_thread(void);
  126 static void     sched_priority(struct thread *td, u_char prio);
  127 static void     sched_setup(void *dummy);
  128 static void     maybe_resched(struct thread *td);
  129 static void     updatepri(struct thread *td);
  130 static void     resetpriority(struct thread *td);
  131 static void     resetpriority_thread(struct thread *td);
  132 #ifdef SMP
  133 static int      sched_pickcpu(struct thread *td);
  134 static int      forward_wakeup(int cpunum);
  135 static void     kick_other_cpu(int pri, int cpuid);
  136 #endif
  137 
  138 static struct kproc_desc sched_kp = {
  139         "schedcpu",
  140         schedcpu_thread,
  141         NULL
  142 };
  143 SYSINIT(schedcpu, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, kproc_start,
  144     &sched_kp);
  145 SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL);
  146 
  147 /*
  148  * Global run queue.
  149  */
  150 static struct runq runq;
  151 
  152 #ifdef SMP
  153 /*
  154  * Per-CPU run queues
  155  */
  156 static struct runq runq_pcpu[MAXCPU];
  157 long runq_length[MAXCPU];
  158 
  159 static cpuset_t idle_cpus_mask;
  160 #endif
  161 
  162 struct pcpuidlestat {
  163         u_int idlecalls;
  164         u_int oldidlecalls;
  165 };
  166 static DPCPU_DEFINE(struct pcpuidlestat, idlestat);
  167 
  168 static void
  169 setup_runqs(void)
  170 {
  171 #ifdef SMP
  172         int i;
  173 
  174         for (i = 0; i < MAXCPU; ++i)
  175                 runq_init(&runq_pcpu[i]);
  176 #endif
  177 
  178         runq_init(&runq);
  179 }
  180 
  181 static int
  182 sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
  183 {
  184         int error, new_val;
  185 
  186         new_val = sched_quantum * tick;
  187         error = sysctl_handle_int(oidp, &new_val, 0, req);
  188         if (error != 0 || req->newptr == NULL)
  189                 return (error);
  190         if (new_val < tick)
  191                 return (EINVAL);
  192         sched_quantum = new_val / tick;
  193         hogticks = 2 * sched_quantum;
  194         return (0);
  195 }
  196 
  197 SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RD, 0, "Scheduler");
  198 
  199 SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "4BSD", 0,
  200     "Scheduler name");
  201 
  202 SYSCTL_PROC(_kern_sched, OID_AUTO, quantum, CTLTYPE_INT | CTLFLAG_RW,
  203     0, sizeof sched_quantum, sysctl_kern_quantum, "I",
  204     "Roundrobin scheduling quantum in microseconds");
  205 
  206 #ifdef SMP
  207 /* Enable forwarding of wakeups to all other cpus */
  208 SYSCTL_NODE(_kern_sched, OID_AUTO, ipiwakeup, CTLFLAG_RD, NULL, "Kernel SMP");
  209 
  210 static int runq_fuzz = 1;
  211 SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");
  212 
  213 static int forward_wakeup_enabled = 1;
  214 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, enabled, CTLFLAG_RW,
  215            &forward_wakeup_enabled, 0,
  216            "Forwarding of wakeup to idle CPUs");
  217 
  218 static int forward_wakeups_requested = 0;
  219 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, requested, CTLFLAG_RD,
  220            &forward_wakeups_requested, 0,
  221            "Requests for Forwarding of wakeup to idle CPUs");
  222 
  223 static int forward_wakeups_delivered = 0;
  224 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, delivered, CTLFLAG_RD,
  225            &forward_wakeups_delivered, 0,
  226            "Completed Forwarding of wakeup to idle CPUs");
  227 
  228 static int forward_wakeup_use_mask = 1;
  229 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, usemask, CTLFLAG_RW,
  230            &forward_wakeup_use_mask, 0,
  231            "Use the mask of idle cpus");
  232 
  233 static int forward_wakeup_use_loop = 0;
  234 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, useloop, CTLFLAG_RW,
  235            &forward_wakeup_use_loop, 0,
  236            "Use a loop to find idle cpus");
  237 
  238 #endif
  239 #if 0
  240 static int sched_followon = 0;
  241 SYSCTL_INT(_kern_sched, OID_AUTO, followon, CTLFLAG_RW,
  242            &sched_followon, 0,
  243            "allow threads to share a quantum");
  244 #endif
  245 
  246 static __inline void
  247 sched_load_add(void)
  248 {
  249 
  250         sched_tdcnt++;
  251         KTR_COUNTER0(KTR_SCHED, "load", "global load", sched_tdcnt);
  252 }
  253 
  254 static __inline void
  255 sched_load_rem(void)
  256 {
  257 
  258         sched_tdcnt--;
  259         KTR_COUNTER0(KTR_SCHED, "load", "global load", sched_tdcnt);
  260 }
  261 /*
  262  * Arrange to reschedule if necessary, taking the priorities and
  263  * schedulers into account.
  264  */
  265 static void
  266 maybe_resched(struct thread *td)
  267 {
  268 
  269         THREAD_LOCK_ASSERT(td, MA_OWNED);
  270         if (td->td_priority < curthread->td_priority)
  271                 curthread->td_flags |= TDF_NEEDRESCHED;
  272 }
  273 
  274 /*
  275  * This function is called when a thread is about to be put on run queue
  276  * because it has been made runnable or its priority has been adjusted.  It
  277  * determines if the new thread should be immediately preempted to.  If so,
  278  * it switches to it and eventually returns true.  If not, it returns false
  279  * so that the caller may place the thread on an appropriate run queue.
  280  */
  281 int
  282 maybe_preempt(struct thread *td)
  283 {
  284 #ifdef PREEMPTION
  285         struct thread *ctd;
  286         int cpri, pri;
  287 
  288         /*
  289          * The new thread should not preempt the current thread if any of the
  290          * following conditions are true:
  291          *
  292          *  - The kernel is in the throes of crashing (panicstr).
  293          *  - The current thread has a higher (numerically lower) or
  294          *    equivalent priority.  Note that this prevents curthread from
  295          *    trying to preempt to itself.
  296          *  - It is too early in the boot for context switches (cold is set).
  297          *  - The current thread has an inhibitor set or is in the process of
  298          *    exiting.  In this case, the current thread is about to switch
   299  *    out anyway, so there's no point in preempting.  If we did,
  300          *    the current thread would not be properly resumed as well, so
  301          *    just avoid that whole landmine.
  302          *  - If the new thread's priority is not a realtime priority and
  303          *    the current thread's priority is not an idle priority and
  304          *    FULL_PREEMPTION is disabled.
  305          *
  306          * If all of these conditions are false, but the current thread is in
  307          * a nested critical section, then we have to defer the preemption
  308          * until we exit the critical section.  Otherwise, switch immediately
  309          * to the new thread.
  310          */
  311         ctd = curthread;
  312         THREAD_LOCK_ASSERT(td, MA_OWNED);
  313         KASSERT((td->td_inhibitors == 0),
  314                         ("maybe_preempt: trying to run inhibited thread"));
  315         pri = td->td_priority;
  316         cpri = ctd->td_priority;
  317         if (panicstr != NULL || pri >= cpri || cold /* || dumping */ ||
  318             TD_IS_INHIBITED(ctd))
  319                 return (0);
  320 #ifndef FULL_PREEMPTION
  321         if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE)
  322                 return (0);
  323 #endif
  324 
  325         if (ctd->td_critnest > 1) {
  326                 CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
  327                     ctd->td_critnest);
  328                 ctd->td_owepreempt = 1;
  329                 return (0);
  330         }
  331         /*
  332          * Thread is runnable but not yet put on system run queue.
  333          */
  334         MPASS(ctd->td_lock == td->td_lock);
  335         MPASS(TD_ON_RUNQ(td));
  336         TD_SET_RUNNING(td);
  337         CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
  338             td->td_proc->p_pid, td->td_name);
  339         mi_switch(SW_INVOL | SW_PREEMPT | SWT_PREEMPT, td);
  340         /*
  341          * td's lock pointer may have changed.  We have to return with it
  342          * locked.
  343          */
  344         spinlock_enter();
  345         thread_unlock(ctd);
  346         thread_lock(td);
  347         spinlock_exit();
  348         return (1);
  349 #else
  350         return (0);
  351 #endif
  352 }
  353 
  354 /*
  355  * Constants for digital decay and forget:
  356  *      90% of (td_estcpu) usage in 5 * loadav time
  357  *      95% of (ts_pctcpu) usage in 60 seconds (load insensitive)
  358  *          Note that, as ps(1) mentions, this can let percentages
  359  *          total over 100% (I've seen 137.9% for 3 processes).
  360  *
  361  * Note that schedclock() updates td_estcpu and p_cpticks asynchronously.
  362  *
  363  * We wish to decay away 90% of td_estcpu in (5 * loadavg) seconds.
  364  * That is, the system wants to compute a value of decay such
  365  * that the following for loop:
  366  *      for (i = 0; i < (5 * loadavg); i++)
  367  *              td_estcpu *= decay;
  368  * will compute
  369  *      td_estcpu *= 0.1;
  370  * for all values of loadavg:
  371  *
  372  * Mathematically this loop can be expressed by saying:
  373  *      decay ** (5 * loadavg) ~= .1
  374  *
  375  * The system computes decay as:
  376  *      decay = (2 * loadavg) / (2 * loadavg + 1)
  377  *
  378  * We wish to prove that the system's computation of decay
  379  * will always fulfill the equation:
  380  *      decay ** (5 * loadavg) ~= .1
  381  *
  382  * If we compute b as:
  383  *      b = 2 * loadavg
  384  * then
  385  *      decay = b / (b + 1)
  386  *
  387  * We now need to prove two things:
  388  *      1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
  389  *      2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
  390  *
  391  * Facts:
  392  *         For x close to zero, exp(x) =~ 1 + x, since
  393  *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
  394  *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
  395  *         For x close to zero, ln(1+x) =~ x, since
  396  *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
  397  *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
  398  *         ln(.1) =~ -2.30
  399  *
  400  * Proof of (1):
  401  *    Solve (factor)**(power) =~ .1 given power (5*loadav):
  402  *      solving for factor,
  403  *      ln(factor) =~ (-2.30/5*loadav), or
  404  *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
  405  *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
  406  *
  407  * Proof of (2):
  408  *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
  409  *      solving for power,
  410  *      power*ln(b/(b+1)) =~ -2.30, or
  411  *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
  412  *
  413  * Actual power values for the implemented algorithm are as follows:
  414  *      loadav: 1       2       3       4
  415  *      power:  5.68    10.32   14.94   19.55
  416  */
  417 
  418 /* calculations for digital decay to forget 90% of usage in 5*loadav sec */
  419 #define loadfactor(loadav)      (2 * (loadav))
  420 #define decay_cpu(loadfac, cpu) (((loadfac) * (cpu)) / ((loadfac) + FSCALE))
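
       /*
        * Sanity check against the table above: for a load average of 1.0
        * (ldavg stored as 1 * FSCALE), loadfactor() yields 2 * FSCALE and
        * decay_cpu() therefore multiplies td_estcpu by
        *
        *      (2 * FSCALE) / (2 * FSCALE + FSCALE) = 2/3
        *
        * each second; (2/3) ** 5.68 ~= 0.1, matching the power listed for
        * loadav 1.
        */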
  421 
  422 /* decay 95% of `ts_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
  423 static fixpt_t  ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
  424 SYSCTL_UINT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
  425 
  426 /*
  427  * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
  428  * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
  429  * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
  430  *
  431  * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
  432  *      1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
  433  *
  434  * If you don't want to bother with the faster/more-accurate formula, you
  435  * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
  436  * (more general) method of calculating the %age of CPU used by a process.
  437  */
  438 #define CCPU_SHIFT      11
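
       /*
        * Equivalently: schedcpu() multiplies ts_pctcpu by ccpu / FSCALE =
        * exp(-1/20) once per second, so after 60 seconds an old sample has
        * decayed to exp(-3) ~= 5% of its value, which is the "95% in 60
        * seconds" figure quoted above.
        */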
  439 
  440 /*
   441  * Recompute process priorities every hz ticks (once per second).
  442  * MP-safe, called without the Giant mutex.
  443  */
  444 /* ARGSUSED */
  445 static void
  446 schedcpu(void)
  447 {
  448         register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
  449         struct thread *td;
  450         struct proc *p;
  451         struct td_sched *ts;
  452         int awake, realstathz;
  453 
  454         realstathz = stathz ? stathz : hz;
  455         sx_slock(&allproc_lock);
  456         FOREACH_PROC_IN_SYSTEM(p) {
  457                 PROC_LOCK(p);
  458                 if (p->p_state == PRS_NEW) {
  459                         PROC_UNLOCK(p);
  460                         continue;
  461                 }
  462                 FOREACH_THREAD_IN_PROC(p, td) {
  463                         awake = 0;
  464                         thread_lock(td);
  465                         ts = td->td_sched;
  466                         /*
  467                          * Increment sleep time (if sleeping).  We
  468                          * ignore overflow, as above.
  469                          */
  470                         /*
  471                          * The td_sched slptimes are not touched in wakeup
  472                          * because the thread may not HAVE everything in
  473                          * memory? XXX I think this is out of date.
  474                          */
  475                         if (TD_ON_RUNQ(td)) {
  476                                 awake = 1;
  477                                 td->td_flags &= ~TDF_DIDRUN;
  478                         } else if (TD_IS_RUNNING(td)) {
  479                                 awake = 1;
  480                                 /* Do not clear TDF_DIDRUN */
  481                         } else if (td->td_flags & TDF_DIDRUN) {
  482                                 awake = 1;
  483                                 td->td_flags &= ~TDF_DIDRUN;
  484                         }
  485 
  486                         /*
  487                          * ts_pctcpu is only for ps and ttyinfo().
  488                          */
  489                         ts->ts_pctcpu = (ts->ts_pctcpu * ccpu) >> FSHIFT;
  490                         /*
  491                          * If the td_sched has been idle the entire second,
  492                          * stop recalculating its priority until
  493                          * it wakes up.
  494                          */
  495                         if (ts->ts_cpticks != 0) {
  496 #if     (FSHIFT >= CCPU_SHIFT)
  497                                 ts->ts_pctcpu += (realstathz == 100)
  498                                     ? ((fixpt_t) ts->ts_cpticks) <<
  499                                     (FSHIFT - CCPU_SHIFT) :
  500                                     100 * (((fixpt_t) ts->ts_cpticks)
  501                                     << (FSHIFT - CCPU_SHIFT)) / realstathz;
  502 #else
  503                                 ts->ts_pctcpu += ((FSCALE - ccpu) *
  504                                     (ts->ts_cpticks *
  505                                     FSCALE / realstathz)) >> FSHIFT;
  506 #endif
  507                                 ts->ts_cpticks = 0;
  508                         }
  509                         /*
  510                          * If there are ANY running threads in this process,
  511                          * then don't count it as sleeping.
  512                          * XXX: this is broken.
  513                          */
  514                         if (awake) {
  515                                 if (ts->ts_slptime > 1) {
  516                                         /*
  517                                          * In an ideal world, this should not
  518                                          * happen, because whoever woke us
  519                                          * up from the long sleep should have
  520                                          * unwound the slptime and reset our
  521                                          * priority before we run at the stale
  522                                          * priority.  Should KASSERT at some
  523                                          * point when all the cases are fixed.
  524                                          */
  525                                         updatepri(td);
  526                                 }
  527                                 ts->ts_slptime = 0;
  528                         } else
  529                                 ts->ts_slptime++;
  530                         if (ts->ts_slptime > 1) {
  531                                 thread_unlock(td);
  532                                 continue;
  533                         }
  534                         td->td_estcpu = decay_cpu(loadfac, td->td_estcpu);
  535                         resetpriority(td);
  536                         resetpriority_thread(td);
  537                         thread_unlock(td);
  538                 }
  539                 PROC_UNLOCK(p);
  540         }
  541         sx_sunlock(&allproc_lock);
  542 }
  543 
  544 /*
  545  * Main loop for a kthread that executes schedcpu once a second.
  546  */
  547 static void
  548 schedcpu_thread(void)
  549 {
  550 
  551         for (;;) {
  552                 schedcpu();
  553                 pause("-", hz);
  554         }
  555 }
  556 
  557 /*
  558  * Recalculate the priority of a process after it has slept for a while.
  559  * For all load averages >= 1 and max td_estcpu of 255, sleeping for at
  560  * least six times the loadfactor will decay td_estcpu to zero.
  561  */
  562 static void
  563 updatepri(struct thread *td)
  564 {
  565         struct td_sched *ts;
  566         fixpt_t loadfac;
  567         unsigned int newcpu;
  568 
  569         ts = td->td_sched;
  570         loadfac = loadfactor(averunnable.ldavg[0]);
  571         if (ts->ts_slptime > 5 * loadfac)
  572                 td->td_estcpu = 0;
  573         else {
  574                 newcpu = td->td_estcpu;
  575                 ts->ts_slptime--;       /* was incremented in schedcpu() */
  576                 while (newcpu && --ts->ts_slptime)
  577                         newcpu = decay_cpu(loadfac, newcpu);
  578                 td->td_estcpu = newcpu;
  579         }
  580 }
  581 
  582 /*
  583  * Compute the priority of a process when running in user mode.
  584  * Arrange to reschedule if the resulting priority is better
  585  * than that of the current process.
  586  */
  587 static void
  588 resetpriority(struct thread *td)
  589 {
  590         register unsigned int newpriority;
  591 
  592         if (td->td_pri_class == PRI_TIMESHARE) {
  593                 newpriority = PUSER + td->td_estcpu / INVERSE_ESTCPU_WEIGHT +
  594                     NICE_WEIGHT * (td->td_proc->p_nice - PRIO_MIN);
  595                 newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
  596                     PRI_MAX_TIMESHARE);
  597                 sched_user_prio(td, newpriority);
  598         }
  599 }
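
       /*
        * Rough worked example, assuming PUSER == PRI_MIN_TIMESHARE == 128
        * and the uniprocessor INVERSE_ESTCPU_WEIGHT of 8: a thread with
        * td_estcpu = 80 and nice = 0 is assigned
        *
        *      128 + 80 / 8 + 1 * (0 - (-20)) = 158,
        *
        * which already lies inside [PRI_MIN_TIMESHARE, PRI_MAX_TIMESHARE]
        * and is handed unclamped to sched_user_prio().
        */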
  600 
  601 /*
  602  * Update the thread's priority when the associated process's user
  603  * priority changes.
  604  */
  605 static void
  606 resetpriority_thread(struct thread *td)
  607 {
  608 
  609         /* Only change threads with a time sharing user priority. */
  610         if (td->td_priority < PRI_MIN_TIMESHARE ||
  611             td->td_priority > PRI_MAX_TIMESHARE)
  612                 return;
  613 
  614         /* XXX the whole needresched thing is broken, but not silly. */
  615         maybe_resched(td);
  616 
  617         sched_prio(td, td->td_user_pri);
  618 }
  619 
  620 /* ARGSUSED */
  621 static void
  622 sched_setup(void *dummy)
  623 {
  624         setup_runqs();
  625 
  626         if (sched_quantum == 0)
  627                 sched_quantum = SCHED_QUANTUM;
  628         hogticks = 2 * sched_quantum;
  629 
  630         /* Account for thread0. */
  631         sched_load_add();
  632 }
  633 
  634 /* External interfaces start here */
  635 
  636 /*
   637  * Very early in the boot, some setup of scheduler-specific
  638  * parts of proc0 and of some scheduler resources needs to be done.
  639  * Called from:
  640  *  proc0_init()
  641  */
  642 void
  643 schedinit(void)
  644 {
  645         /*
  646          * Set up the scheduler specific parts of proc0.
  647          */
  648         proc0.p_sched = NULL; /* XXX */
  649         thread0.td_sched = &td_sched0;
  650         thread0.td_lock = &sched_lock;
  651         mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
  652 }
  653 
  654 int
  655 sched_runnable(void)
  656 {
  657 #ifdef SMP
  658         return runq_check(&runq) + runq_check(&runq_pcpu[PCPU_GET(cpuid)]);
  659 #else
  660         return runq_check(&runq);
  661 #endif
  662 }
  663 
  664 int
  665 sched_rr_interval(void)
  666 {
  667         if (sched_quantum == 0)
  668                 sched_quantum = SCHED_QUANTUM;
  669         return (sched_quantum);
  670 }
  671 
  672 /*
  673  * We adjust the priority of the current process.  The priority of
  674  * a process gets worse as it accumulates CPU time.  The cpu usage
  675  * estimator (td_estcpu) is increased here.  resetpriority() will
  676  * compute a different priority each time td_estcpu increases by
  677  * INVERSE_ESTCPU_WEIGHT
  678  * (until MAXPRI is reached).  The cpu usage estimator ramps up
  679  * quite quickly when the process is running (linearly), and decays
  680  * away exponentially, at a rate which is proportionally slower when
  681  * the system is busy.  The basic principle is that the system will
  682  * 90% forget that the process used a lot of CPU time in 5 * loadav
  683  * seconds.  This causes the system to favor processes which haven't
  684  * run much recently, and to round-robin among other processes.
  685  */
  686 void
  687 sched_clock(struct thread *td)
  688 {
  689         struct pcpuidlestat *stat;
  690         struct td_sched *ts;
  691 
  692         THREAD_LOCK_ASSERT(td, MA_OWNED);
  693         ts = td->td_sched;
  694 
  695         ts->ts_cpticks++;
  696         td->td_estcpu = ESTCPULIM(td->td_estcpu + 1);
  697         if ((td->td_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
  698                 resetpriority(td);
  699                 resetpriority_thread(td);
  700         }
  701 
  702         /*
  703          * Force a context switch if the current thread has used up a full
  704          * quantum (default quantum is 100ms).
  705          */
  706         if (!TD_IS_IDLETHREAD(td) &&
  707             ticks - PCPU_GET(switchticks) >= sched_quantum)
  708                 td->td_flags |= TDF_NEEDRESCHED;
  709 
  710         stat = DPCPU_PTR(idlestat);
  711         stat->oldidlecalls = stat->idlecalls;
  712         stat->idlecalls = 0;
  713 }
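
       /*
        * In concrete terms, assuming stathz ~= 128 and the uniprocessor
        * INVERSE_ESTCPU_WEIGHT of 8: a thread that stays on the CPU gains
        * one unit of td_estcpu per statclock tick, so resetpriority() runs
        * every 8 ticks, i.e. its timeshare priority drifts one step worse
        * roughly 16 times a second until ESTCPULIM caps td_estcpu.
        */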
  714 
  715 /*
  716  * Charge child's scheduling CPU usage to parent.
  717  */
  718 void
  719 sched_exit(struct proc *p, struct thread *td)
  720 {
  721 
  722         KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "proc exit",
  723             "prio:%d", td->td_priority);
  724 
  725         PROC_LOCK_ASSERT(p, MA_OWNED);
  726         sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
  727 }
  728 
  729 void
  730 sched_exit_thread(struct thread *td, struct thread *child)
  731 {
  732 
  733         KTR_STATE1(KTR_SCHED, "thread", sched_tdname(child), "exit",
  734             "prio:%d", child->td_priority);
  735         thread_lock(td);
  736         td->td_estcpu = ESTCPULIM(td->td_estcpu + child->td_estcpu);
  737         thread_unlock(td);
  738         thread_lock(child);
  739         if ((child->td_flags & TDF_NOLOAD) == 0)
  740                 sched_load_rem();
  741         thread_unlock(child);
  742 }
  743 
  744 void
  745 sched_fork(struct thread *td, struct thread *childtd)
  746 {
  747         sched_fork_thread(td, childtd);
  748 }
  749 
  750 void
  751 sched_fork_thread(struct thread *td, struct thread *childtd)
  752 {
  753         struct td_sched *ts;
  754 
  755         childtd->td_estcpu = td->td_estcpu;
  756         childtd->td_lock = &sched_lock;
  757         childtd->td_cpuset = cpuset_ref(td->td_cpuset);
  758         childtd->td_priority = childtd->td_base_pri;
  759         ts = childtd->td_sched;
  760         bzero(ts, sizeof(*ts));
  761         ts->ts_flags |= (td->td_sched->ts_flags & TSF_AFFINITY);
  762 }
  763 
  764 void
  765 sched_nice(struct proc *p, int nice)
  766 {
  767         struct thread *td;
  768 
  769         PROC_LOCK_ASSERT(p, MA_OWNED);
  770         p->p_nice = nice;
  771         FOREACH_THREAD_IN_PROC(p, td) {
  772                 thread_lock(td);
  773                 resetpriority(td);
  774                 resetpriority_thread(td);
  775                 thread_unlock(td);
  776         }
  777 }
  778 
  779 void
  780 sched_class(struct thread *td, int class)
  781 {
  782         THREAD_LOCK_ASSERT(td, MA_OWNED);
  783         td->td_pri_class = class;
  784 }
  785 
  786 /*
  787  * Adjust the priority of a thread.
  788  */
  789 static void
  790 sched_priority(struct thread *td, u_char prio)
  791 {
  792 
  793 
  794         KTR_POINT3(KTR_SCHED, "thread", sched_tdname(td), "priority change",
  795             "prio:%d", td->td_priority, "new prio:%d", prio, KTR_ATTR_LINKED,
  796             sched_tdname(curthread));
  797         if (td != curthread && prio > td->td_priority) {
  798                 KTR_POINT3(KTR_SCHED, "thread", sched_tdname(curthread),
  799                     "lend prio", "prio:%d", td->td_priority, "new prio:%d",
  800                     prio, KTR_ATTR_LINKED, sched_tdname(td));
  801         }
  802         THREAD_LOCK_ASSERT(td, MA_OWNED);
  803         if (td->td_priority == prio)
  804                 return;
  805         td->td_priority = prio;
  806         if (TD_ON_RUNQ(td) && td->td_rqindex != (prio / RQ_PPQ)) {
  807                 sched_rem(td);
  808                 sched_add(td, SRQ_BORING);
  809         }
  810 }
  811 
  812 /*
  813  * Update a thread's priority when it is lent another thread's
  814  * priority.
  815  */
  816 void
  817 sched_lend_prio(struct thread *td, u_char prio)
  818 {
  819 
  820         td->td_flags |= TDF_BORROWING;
  821         sched_priority(td, prio);
  822 }
  823 
  824 /*
  825  * Restore a thread's priority when priority propagation is
  826  * over.  The prio argument is the minimum priority the thread
  827  * needs to have to satisfy other possible priority lending
   828  * requests.  If the thread's regular priority is less
   829  * important than prio, the thread will keep a priority boost
  830  * of prio.
  831  */
  832 void
  833 sched_unlend_prio(struct thread *td, u_char prio)
  834 {
  835         u_char base_pri;
  836 
  837         if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
  838             td->td_base_pri <= PRI_MAX_TIMESHARE)
  839                 base_pri = td->td_user_pri;
  840         else
  841                 base_pri = td->td_base_pri;
  842         if (prio >= base_pri) {
  843                 td->td_flags &= ~TDF_BORROWING;
  844                 sched_prio(td, base_pri);
  845         } else
  846                 sched_lend_prio(td, prio);
  847 }
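
       /*
        * Example: a timesharing thread whose own td_user_pri is 160 and
        * whose remaining lenders only require priority 180 (a numerically
        * larger, i.e. less important, value) drops the borrowed priority
        * and reverts to 160; had the lenders required 140, it would keep
        * running at the boosted 140.
        */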
  848 
  849 void
  850 sched_prio(struct thread *td, u_char prio)
  851 {
  852         u_char oldprio;
  853 
  854         /* First, update the base priority. */
  855         td->td_base_pri = prio;
  856 
  857         /*
  858          * If the thread is borrowing another thread's priority, don't ever
  859          * lower the priority.
  860          */
  861         if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
  862                 return;
  863 
  864         /* Change the real priority. */
  865         oldprio = td->td_priority;
  866         sched_priority(td, prio);
  867 
  868         /*
  869          * If the thread is on a turnstile, then let the turnstile update
  870          * its state.
  871          */
  872         if (TD_ON_LOCK(td) && oldprio != prio)
  873                 turnstile_adjust(td, oldprio);
  874 }
  875 
  876 void
  877 sched_user_prio(struct thread *td, u_char prio)
  878 {
  879 
  880         THREAD_LOCK_ASSERT(td, MA_OWNED);
  881         td->td_base_user_pri = prio;
  882         if (td->td_lend_user_pri <= prio)
  883                 return;
  884         td->td_user_pri = prio;
  885 }
  886 
  887 void
  888 sched_lend_user_prio(struct thread *td, u_char prio)
  889 {
  890 
  891         THREAD_LOCK_ASSERT(td, MA_OWNED);
  892         td->td_lend_user_pri = prio;
  893         td->td_user_pri = min(prio, td->td_base_user_pri);
  894         if (td->td_priority > td->td_user_pri)
  895                 sched_prio(td, td->td_user_pri);
  896         else if (td->td_priority != td->td_user_pri)
  897                 td->td_flags |= TDF_NEEDRESCHED;
  898 }
  899 
  900 void
  901 sched_sleep(struct thread *td, int pri)
  902 {
  903 
  904         THREAD_LOCK_ASSERT(td, MA_OWNED);
  905         td->td_slptick = ticks;
  906         td->td_sched->ts_slptime = 0;
  907         if (pri != 0 && PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
  908                 sched_prio(td, pri);
  909         if (TD_IS_SUSPENDED(td) || pri >= PSOCK)
  910                 td->td_flags |= TDF_CANSWAP;
  911 }
  912 
  913 void
  914 sched_switch(struct thread *td, struct thread *newtd, int flags)
  915 {
  916         struct mtx *tmtx;
  917         struct td_sched *ts;
  918         struct proc *p;
  919 
  920         tmtx = NULL;
  921         ts = td->td_sched;
  922         p = td->td_proc;
  923 
  924         THREAD_LOCK_ASSERT(td, MA_OWNED);
  925 
  926         /* 
  927          * Switch to the sched lock to fix things up and pick
  928          * a new thread.
  929          * Block the td_lock in order to avoid breaking the critical path.
  930          */
  931         if (td->td_lock != &sched_lock) {
  932                 mtx_lock_spin(&sched_lock);
  933                 tmtx = thread_lock_block(td);
  934         }
  935 
  936         if ((td->td_flags & TDF_NOLOAD) == 0)
  937                 sched_load_rem();
  938 
  939         td->td_lastcpu = td->td_oncpu;
  940         if (!(flags & SW_PREEMPT))
  941                 td->td_flags &= ~TDF_NEEDRESCHED;
  942         td->td_owepreempt = 0;
  943         td->td_oncpu = NOCPU;
  944 
  945         /*
  946          * At the last moment, if this thread is still marked RUNNING,
  947          * then put it back on the run queue as it has not been suspended
   948  *      or stopped or anything else similar.  We never put the idle
  949          * threads on the run queue, however.
  950          */
  951         if (td->td_flags & TDF_IDLETD) {
  952                 TD_SET_CAN_RUN(td);
  953 #ifdef SMP
  954                 CPU_CLR(PCPU_GET(cpuid), &idle_cpus_mask);
  955 #endif
  956         } else {
  957                 if (TD_IS_RUNNING(td)) {
  958                         /* Put us back on the run queue. */
  959                         sched_add(td, (flags & SW_PREEMPT) ?
  960                             SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
  961                             SRQ_OURSELF|SRQ_YIELDING);
  962                 }
  963         }
  964         if (newtd) {
  965                 /*
  966                  * The thread we are about to run needs to be counted
  967                  * as if it had been added to the run queue and selected.
  968                  * It came from:
  969                  * * A preemption
  970                  * * An upcall
  971                  * * A followon
  972                  */
  973                 KASSERT((newtd->td_inhibitors == 0),
  974                         ("trying to run inhibited thread"));
  975                 newtd->td_flags |= TDF_DIDRUN;
  976                 TD_SET_RUNNING(newtd);
  977                 if ((newtd->td_flags & TDF_NOLOAD) == 0)
  978                         sched_load_add();
  979         } else {
  980                 newtd = choosethread();
  981                 MPASS(newtd->td_lock == &sched_lock);
  982         }
  983 
  984         if (td != newtd) {
  985 #ifdef  HWPMC_HOOKS
  986                 if (PMC_PROC_IS_USING_PMCS(td->td_proc))
  987                         PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
  988 #endif
  989                 /* I feel sleepy */
  990                 lock_profile_release_lock(&sched_lock.lock_object);
  991 #ifdef KDTRACE_HOOKS
  992                 /*
  993                  * If DTrace has set the active vtime enum to anything
  994                  * other than INACTIVE (0), then it should have set the
  995                  * function to call.
  996                  */
  997                 if (dtrace_vtime_active)
  998                         (*dtrace_vtime_switch_func)(newtd);
  999 #endif
 1000 
 1001                 cpu_switch(td, newtd, tmtx != NULL ? tmtx : td->td_lock);
 1002                 lock_profile_obtain_lock_success(&sched_lock.lock_object,
 1003                     0, 0, __FILE__, __LINE__);
 1004                 /*
 1005                  * Where am I?  What year is it?
 1006                  * We are in the same thread that went to sleep above,
 1007                  * but any amount of time may have passed. All our context
 1008                  * will still be available as will local variables.
 1009                  * PCPU values however may have changed as we may have
 1010                  * changed CPU so don't trust cached values of them.
 1011                  * New threads will go to fork_exit() instead of here
 1012                  * so if you change things here you may need to change
 1013                  * things there too.
 1014                  *
 1015                  * If the thread above was exiting it will never wake
 1016                  * up again here, so either it has saved everything it
 1017                  * needed to, or the thread_wait() or wait() will
 1018                  * need to reap it.
 1019                  */
 1020 #ifdef  HWPMC_HOOKS
 1021                 if (PMC_PROC_IS_USING_PMCS(td->td_proc))
 1022                         PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
 1023 #endif
 1024         }
 1025 
 1026 #ifdef SMP
 1027         if (td->td_flags & TDF_IDLETD)
 1028                 CPU_SET(PCPU_GET(cpuid), &idle_cpus_mask);
 1029 #endif
 1030         sched_lock.mtx_lock = (uintptr_t)td;
 1031         td->td_oncpu = PCPU_GET(cpuid);
 1032         MPASS(td->td_lock == &sched_lock);
 1033 }
 1034 
 1035 void
 1036 sched_wakeup(struct thread *td)
 1037 {
 1038         struct td_sched *ts;
 1039 
 1040         THREAD_LOCK_ASSERT(td, MA_OWNED);
 1041         ts = td->td_sched;
 1042         td->td_flags &= ~TDF_CANSWAP;
 1043         if (ts->ts_slptime > 1) {
 1044                 updatepri(td);
 1045                 resetpriority(td);
 1046         }
 1047         td->td_slptick = 0;
 1048         ts->ts_slptime = 0;
 1049         sched_add(td, SRQ_BORING);
 1050 }
 1051 
 1052 #ifdef SMP
 1053 static int
 1054 forward_wakeup(int cpunum)
 1055 {
 1056         struct pcpu *pc;
 1057         cpuset_t dontuse, map, map2;
 1058         u_int id, me;
 1059         int iscpuset;
 1060 
 1061         mtx_assert(&sched_lock, MA_OWNED);
 1062 
 1063         CTR0(KTR_RUNQ, "forward_wakeup()");
 1064 
 1065         if ((!forward_wakeup_enabled) ||
 1066              (forward_wakeup_use_mask == 0 && forward_wakeup_use_loop == 0))
 1067                 return (0);
 1068         if (!smp_started || cold || panicstr)
 1069                 return (0);
 1070 
 1071         forward_wakeups_requested++;
 1072 
 1073         /*
 1074          * Check the idle mask we received against what we calculated
 1075          * before in the old version.
 1076          */
 1077         me = PCPU_GET(cpuid);
 1078 
  1079         /* Don't bother if we should be doing it ourselves. */
 1080         if (CPU_ISSET(me, &idle_cpus_mask) &&
 1081             (cpunum == NOCPU || me == cpunum))
 1082                 return (0);
 1083 
 1084         CPU_SETOF(me, &dontuse);
 1085         CPU_OR(&dontuse, &stopped_cpus);
 1086         CPU_OR(&dontuse, &hlt_cpus_mask);
 1087         CPU_ZERO(&map2);
 1088         if (forward_wakeup_use_loop) {
 1089                 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
 1090                         id = pc->pc_cpuid;
 1091                         if (!CPU_ISSET(id, &dontuse) &&
 1092                             pc->pc_curthread == pc->pc_idlethread) {
 1093                                 CPU_SET(id, &map2);
 1094                         }
 1095                 }
 1096         }
 1097 
 1098         if (forward_wakeup_use_mask) {
 1099                 map = idle_cpus_mask;
 1100                 CPU_NAND(&map, &dontuse);
 1101 
 1102                 /* If they are both on, compare and use loop if different. */
 1103                 if (forward_wakeup_use_loop) {
 1104                         if (CPU_CMP(&map, &map2)) {
 1105                                 printf("map != map2, loop method preferred\n");
 1106                                 map = map2;
 1107                         }
 1108                 }
 1109         } else {
 1110                 map = map2;
 1111         }
 1112 
 1113         /* If we only allow a specific CPU, then mask off all the others. */
 1114         if (cpunum != NOCPU) {
 1115                 KASSERT((cpunum <= mp_maxcpus),("forward_wakeup: bad cpunum."));
 1116                 iscpuset = CPU_ISSET(cpunum, &map);
 1117                 if (iscpuset == 0)
 1118                         CPU_ZERO(&map);
 1119                 else
 1120                         CPU_SETOF(cpunum, &map);
 1121         }
 1122         if (!CPU_EMPTY(&map)) {
 1123                 forward_wakeups_delivered++;
 1124                 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
 1125                         id = pc->pc_cpuid;
 1126                         if (!CPU_ISSET(id, &map))
 1127                                 continue;
 1128                         if (cpu_idle_wakeup(pc->pc_cpuid))
 1129                                 CPU_CLR(id, &map);
 1130                 }
 1131                 if (!CPU_EMPTY(&map))
 1132                         ipi_selected(map, IPI_AST);
 1133                 return (1);
 1134         }
 1135         if (cpunum == NOCPU)
 1136                 printf("forward_wakeup: Idle processor not found\n");
 1137         return (0);
 1138 }
 1139 
 1140 static void
 1141 kick_other_cpu(int pri, int cpuid)
 1142 {
 1143         struct pcpu *pcpu;
 1144         int cpri;
 1145 
 1146         pcpu = pcpu_find(cpuid);
 1147         if (CPU_ISSET(cpuid, &idle_cpus_mask)) {
 1148                 forward_wakeups_delivered++;
 1149                 if (!cpu_idle_wakeup(cpuid))
 1150                         ipi_cpu(cpuid, IPI_AST);
 1151                 return;
 1152         }
 1153 
 1154         cpri = pcpu->pc_curthread->td_priority;
 1155         if (pri >= cpri)
 1156                 return;
 1157 
 1158 #if defined(IPI_PREEMPTION) && defined(PREEMPTION)
 1159 #if !defined(FULL_PREEMPTION)
 1160         if (pri <= PRI_MAX_ITHD)
 1161 #endif /* ! FULL_PREEMPTION */
 1162         {
 1163                 ipi_cpu(cpuid, IPI_PREEMPT);
 1164                 return;
 1165         }
 1166 #endif /* defined(IPI_PREEMPTION) && defined(PREEMPTION) */
 1167 
 1168         pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED;
 1169         ipi_cpu(cpuid, IPI_AST);
 1170         return;
 1171 }
 1172 #endif /* SMP */
 1173 
 1174 #ifdef SMP
 1175 static int
 1176 sched_pickcpu(struct thread *td)
 1177 {
 1178         int best, cpu;
 1179 
 1180         mtx_assert(&sched_lock, MA_OWNED);
 1181 
 1182         if (THREAD_CAN_SCHED(td, td->td_lastcpu))
 1183                 best = td->td_lastcpu;
 1184         else
 1185                 best = NOCPU;
 1186         CPU_FOREACH(cpu) {
 1187                 if (!THREAD_CAN_SCHED(td, cpu))
 1188                         continue;
 1189         
 1190                 if (best == NOCPU)
 1191                         best = cpu;
 1192                 else if (runq_length[cpu] < runq_length[best])
 1193                         best = cpu;
 1194         }
 1195         KASSERT(best != NOCPU, ("no valid CPUs"));
 1196 
 1197         return (best);
 1198 }
 1199 #endif
 1200 
 1201 void
 1202 sched_add(struct thread *td, int flags)
 1203 #ifdef SMP
 1204 {
 1205         cpuset_t tidlemsk;
 1206         struct td_sched *ts;
 1207         u_int cpu, cpuid;
 1208         int forwarded = 0;
 1209         int single_cpu = 0;
 1210 
 1211         ts = td->td_sched;
 1212         THREAD_LOCK_ASSERT(td, MA_OWNED);
 1213         KASSERT((td->td_inhibitors == 0),
 1214             ("sched_add: trying to run inhibited thread"));
 1215         KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
 1216             ("sched_add: bad thread state"));
 1217         KASSERT(td->td_flags & TDF_INMEM,
 1218             ("sched_add: thread swapped out"));
 1219 
 1220         KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add",
 1221             "prio:%d", td->td_priority, KTR_ATTR_LINKED,
 1222             sched_tdname(curthread));
 1223         KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup",
 1224             KTR_ATTR_LINKED, sched_tdname(td));
 1225 
 1226 
 1227         /*
 1228          * Now that the thread is moving to the run-queue, set the lock
 1229          * to the scheduler's lock.
 1230          */
 1231         if (td->td_lock != &sched_lock) {
 1232                 mtx_lock_spin(&sched_lock);
 1233                 thread_lock_set(td, &sched_lock);
 1234         }
 1235         TD_SET_RUNQ(td);
 1236 
 1237         /*
 1238          * If SMP is started and the thread is pinned or otherwise limited to
 1239          * a specific set of CPUs, queue the thread to a per-CPU run queue.
 1240          * Otherwise, queue the thread to the global run queue.
 1241          *
 1242          * If SMP has not yet been started we must use the global run queue
 1243          * as per-CPU state may not be initialized yet and we may crash if we
 1244          * try to access the per-CPU run queues.
 1245          */
 1246         if (smp_started && (td->td_pinned != 0 || td->td_flags & TDF_BOUND ||
 1247             ts->ts_flags & TSF_AFFINITY)) {
 1248                 if (td->td_pinned != 0)
 1249                         cpu = td->td_lastcpu;
 1250                 else if (td->td_flags & TDF_BOUND) {
 1251                         /* Find CPU from bound runq. */
 1252                         KASSERT(SKE_RUNQ_PCPU(ts),
 1253                             ("sched_add: bound td_sched not on cpu runq"));
 1254                         cpu = ts->ts_runq - &runq_pcpu[0];
 1255                 } else
 1256                         /* Find a valid CPU for our cpuset */
 1257                         cpu = sched_pickcpu(td);
 1258                 ts->ts_runq = &runq_pcpu[cpu];
 1259                 single_cpu = 1;
 1260                 CTR3(KTR_RUNQ,
 1261                     "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td,
 1262                     cpu);
 1263         } else {
 1264                 CTR2(KTR_RUNQ,
 1265                     "sched_add: adding td_sched:%p (td:%p) to gbl runq", ts,
 1266                     td);
 1267                 cpu = NOCPU;
 1268                 ts->ts_runq = &runq;
 1269         }
 1270 
 1271         cpuid = PCPU_GET(cpuid);
 1272         if (single_cpu && cpu != cpuid) {
 1273                 kick_other_cpu(td->td_priority, cpu);
 1274         } else {
 1275                 if (!single_cpu) {
 1276                         tidlemsk = idle_cpus_mask;
 1277                         CPU_NAND(&tidlemsk, &hlt_cpus_mask);
 1278                         CPU_CLR(cpuid, &tidlemsk);
 1279 
 1280                         if (!CPU_ISSET(cpuid, &idle_cpus_mask) &&
 1281                             ((flags & SRQ_INTR) == 0) &&
 1282                             !CPU_EMPTY(&tidlemsk))
 1283                                 forwarded = forward_wakeup(cpu);
 1284                 }
 1285 
 1286                 if (!forwarded) {
 1287                         if ((flags & SRQ_YIELDING) == 0 && maybe_preempt(td))
 1288                                 return;
 1289                         else
 1290                                 maybe_resched(td);
 1291                 }
 1292         }
 1293 
 1294         if ((td->td_flags & TDF_NOLOAD) == 0)
 1295                 sched_load_add();
 1296         runq_add(ts->ts_runq, td, flags);
 1297         if (cpu != NOCPU)
 1298                 runq_length[cpu]++;
 1299 }
 1300 #else /* SMP */
 1301 {
 1302         struct td_sched *ts;
 1303 
 1304         ts = td->td_sched;
 1305         THREAD_LOCK_ASSERT(td, MA_OWNED);
 1306         KASSERT((td->td_inhibitors == 0),
 1307             ("sched_add: trying to run inhibited thread"));
 1308         KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
 1309             ("sched_add: bad thread state"));
 1310         KASSERT(td->td_flags & TDF_INMEM,
 1311             ("sched_add: thread swapped out"));
 1312         KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add",
 1313             "prio:%d", td->td_priority, KTR_ATTR_LINKED,
 1314             sched_tdname(curthread));
 1315         KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup",
 1316             KTR_ATTR_LINKED, sched_tdname(td));
 1317 
 1318         /*
 1319          * Now that the thread is moving to the run-queue, set the lock
 1320          * to the scheduler's lock.
 1321          */
 1322         if (td->td_lock != &sched_lock) {
 1323                 mtx_lock_spin(&sched_lock);
 1324                 thread_lock_set(td, &sched_lock);
 1325         }
 1326         TD_SET_RUNQ(td);
 1327         CTR2(KTR_RUNQ, "sched_add: adding td_sched:%p (td:%p) to runq", ts, td);
 1328         ts->ts_runq = &runq;
 1329 
 1330         /*
 1331          * If we are yielding (on the way out anyhow) or the thread
  1332          * being saved is US, then don't try to be smart about preemption
 1333          * or kicking off another CPU as it won't help and may hinder.
  1334          * In the YIELDING case, we are about to run whoever is being
 1335          * put in the queue anyhow, and in the OURSELF case, we are
  1336          * putting ourselves on the run queue which also only happens
 1337          * when we are about to yield.
 1338          */
 1339         if ((flags & SRQ_YIELDING) == 0) {
 1340                 if (maybe_preempt(td))
 1341                         return;
 1342         }
 1343         if ((td->td_flags & TDF_NOLOAD) == 0)
 1344                 sched_load_add();
 1345         runq_add(ts->ts_runq, td, flags);
 1346         maybe_resched(td);
 1347 }
 1348 #endif /* SMP */
 1349 
 1350 void
 1351 sched_rem(struct thread *td)
 1352 {
 1353         struct td_sched *ts;
 1354 
 1355         ts = td->td_sched;
 1356         KASSERT(td->td_flags & TDF_INMEM,
 1357             ("sched_rem: thread swapped out"));
 1358         KASSERT(TD_ON_RUNQ(td),
 1359             ("sched_rem: thread not on run queue"));
 1360         mtx_assert(&sched_lock, MA_OWNED);
 1361         KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq rem",
 1362             "prio:%d", td->td_priority, KTR_ATTR_LINKED,
 1363             sched_tdname(curthread));
 1364 
 1365         if ((td->td_flags & TDF_NOLOAD) == 0)
 1366                 sched_load_rem();
 1367 #ifdef SMP
 1368         if (ts->ts_runq != &runq)
 1369                 runq_length[ts->ts_runq - runq_pcpu]--;
 1370 #endif
 1371         runq_remove(ts->ts_runq, td);
 1372         TD_SET_CAN_RUN(td);
 1373 }
 1374 
 1375 /*
 1376  * Select threads to run.  Note that running threads still consume a
 1377  * slot.
 1378  */
 1379 struct thread *
 1380 sched_choose(void)
 1381 {
 1382         struct thread *td;
 1383         struct runq *rq;
 1384 
 1385         mtx_assert(&sched_lock,  MA_OWNED);
 1386 #ifdef SMP
 1387         struct thread *tdcpu;
 1388 
 1389         rq = &runq;
 1390         td = runq_choose_fuzz(&runq, runq_fuzz);
 1391         tdcpu = runq_choose(&runq_pcpu[PCPU_GET(cpuid)]);
 1392 
 1393         if (td == NULL ||
 1394             (tdcpu != NULL &&
 1395              tdcpu->td_priority < td->td_priority)) {
 1396                 CTR2(KTR_RUNQ, "choosing td %p from pcpu runq %d", tdcpu,
 1397                      PCPU_GET(cpuid));
 1398                 td = tdcpu;
 1399                 rq = &runq_pcpu[PCPU_GET(cpuid)];
 1400         } else {
 1401                 CTR1(KTR_RUNQ, "choosing td_sched %p from main runq", td);
 1402         }
 1403 
 1404 #else
 1405         rq = &runq;
 1406         td = runq_choose(&runq);
 1407 #endif
 1408 
 1409         if (td) {
 1410 #ifdef SMP
 1411                 if (td == tdcpu)
 1412                         runq_length[PCPU_GET(cpuid)]--;
 1413 #endif
 1414                 runq_remove(rq, td);
 1415                 td->td_flags |= TDF_DIDRUN;
 1416 
 1417                 KASSERT(td->td_flags & TDF_INMEM,
 1418                     ("sched_choose: thread swapped out"));
 1419                 return (td);
 1420         }
 1421         return (PCPU_GET(idlethread));
 1422 }
 1423 
 1424 void
 1425 sched_preempt(struct thread *td)
 1426 {
 1427         thread_lock(td);
 1428         if (td->td_critnest > 1)
 1429                 td->td_owepreempt = 1;
 1430         else
 1431                 mi_switch(SW_INVOL | SW_PREEMPT | SWT_PREEMPT, NULL);
 1432         thread_unlock(td);
 1433 }
 1434 
 1435 void
 1436 sched_userret(struct thread *td)
 1437 {
 1438         /*
 1439          * XXX we cheat slightly on the locking here to avoid locking in
 1440          * the usual case.  Setting td_priority here is essentially an
 1441          * incomplete workaround for not setting it properly elsewhere.
 1442          * Now that some interrupt handlers are threads, not setting it
 1443          * properly elsewhere can clobber it in the window between setting
 1444          * it here and returning to user mode, so don't waste time setting
 1445          * it perfectly here.
 1446          */
 1447         KASSERT((td->td_flags & TDF_BORROWING) == 0,
 1448             ("thread with borrowed priority returning to userland"));
 1449         if (td->td_priority != td->td_user_pri) {
 1450                 thread_lock(td);
 1451                 td->td_priority = td->td_user_pri;
 1452                 td->td_base_pri = td->td_user_pri;
 1453                 thread_unlock(td);
 1454         }
 1455 }
 1456 
 1457 void
 1458 sched_bind(struct thread *td, int cpu)
 1459 {
 1460         struct td_sched *ts;
 1461 
 1462         THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED);
 1463         KASSERT(td == curthread, ("sched_bind: can only bind curthread"));
 1464 
 1465         ts = td->td_sched;
 1466 
 1467         td->td_flags |= TDF_BOUND;
 1468 #ifdef SMP
 1469         ts->ts_runq = &runq_pcpu[cpu];
 1470         if (PCPU_GET(cpuid) == cpu)
 1471                 return;
 1472 
 1473         mi_switch(SW_VOL, NULL);
 1474 #endif
 1475 }
 1476 
 1477 void
 1478 sched_unbind(struct thread* td)
 1479 {
 1480         THREAD_LOCK_ASSERT(td, MA_OWNED);
 1481         KASSERT(td == curthread, ("sched_unbind: can only unbind curthread"));
 1482         td->td_flags &= ~TDF_BOUND;
 1483 }
 1484 
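sched_bind() and sched_unbind() above are normally used as a pair by kernel code that must briefly execute on one particular CPU. A hedged sketch of that idiom for kernel code, assuming the usual headers are available; run_on_cpu_example() and do_percpu_work() are invented names, not part of this file:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>

/*
 * Hypothetical helper: run do_percpu_work() with curthread pinned to 'cpu'.
 * Only curthread may be bound, and the thread lock must be held across
 * each of sched_bind() and sched_unbind().
 */
static void
run_on_cpu_example(int cpu, void (*do_percpu_work)(void))
{

        thread_lock(curthread);
        sched_bind(curthread, cpu);     /* may context-switch over to 'cpu' */
        thread_unlock(curthread);

        do_percpu_work();               /* now executing on 'cpu' */

        thread_lock(curthread);
        sched_unbind(curthread);
        thread_unlock(curthread);
}
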
 1485 int
 1486 sched_is_bound(struct thread *td)
 1487 {
 1488         THREAD_LOCK_ASSERT(td, MA_OWNED);
 1489         return (td->td_flags & TDF_BOUND);
 1490 }
 1491 
 1492 void
 1493 sched_relinquish(struct thread *td)
 1494 {
 1495         thread_lock(td);
 1496         mi_switch(SW_VOL | SWT_RELINQUISH, NULL);
 1497         thread_unlock(td);
 1498 }
 1499 
 1500 int
 1501 sched_load(void)
 1502 {
 1503         return (sched_tdcnt);
 1504 }
 1505 
 1506 int
 1507 sched_sizeof_proc(void)
 1508 {
 1509         return (sizeof(struct proc));
 1510 }
 1511 
 1512 int
 1513 sched_sizeof_thread(void)
 1514 {
 1515         return (sizeof(struct thread) + sizeof(struct td_sched));
 1516 }
 1517 
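sched_sizeof_thread() above reports sizeof(struct thread) plus the scheduler-private td_sched because the two are allocated as a single block. A hedged sketch of how such a layout is typically wired up, assuming the struct td_sched definition is visible; attach_td_sched_example() is an invented name and not this file's own allocation code:

/*
 * Illustrative only: with one allocation of sched_sizeof_thread() bytes,
 * the scheduler-private state can live immediately after the struct
 * thread it belongs to.
 */
static void
attach_td_sched_example(struct thread *td)
{

        td->td_sched = (struct td_sched *)(td + 1);
        bzero(td->td_sched, sizeof(struct td_sched));
}
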
 1518 fixpt_t
 1519 sched_pctcpu(struct thread *td)
 1520 {
 1521         struct td_sched *ts;
 1522 
 1523         THREAD_LOCK_ASSERT(td, MA_OWNED);
 1524         ts = td->td_sched;
 1525         return (ts->ts_pctcpu);
 1526 }
 1527 
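The value sched_pctcpu() returns is a binary fixed-point fraction of one CPU, scaled by FSCALE from <sys/param.h>. A small hedged sketch of the conventional conversion to a whole percentage; pctcpu_to_percent() is an invented helper, not a kernel interface:

#include <sys/param.h>
#include <sys/types.h>

/*
 * Illustrative only: scale a fixpt_t %CPU value (0 .. FSCALE for
 * 0% .. 100%) to an integer percentage, rounding to nearest.
 */
static u_int
pctcpu_to_percent(fixpt_t pct)
{

        return ((u_int)((pct * 100 + FSCALE / 2) >> FSHIFT));
}
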
 1528 void
 1529 sched_tick(int cnt)
 1530 {
 1531 }
 1532 
 1533 /*
 1534  * The actual idle process.
 1535  */
 1536 void
 1537 sched_idletd(void *dummy)
 1538 {
 1539         struct pcpuidlestat *stat;
 1540 
 1541         stat = DPCPU_PTR(idlestat);
 1542         for (;;) {
 1543                 mtx_assert(&Giant, MA_NOTOWNED);
 1544 
 1545                 while (sched_runnable() == 0) {
 1546                         cpu_idle(stat->idlecalls + stat->oldidlecalls > 64);
 1547                         stat->idlecalls++;
 1548                 }
 1549 
 1550                 mtx_lock_spin(&sched_lock);
 1551                 mi_switch(SW_VOL | SWT_IDLE, NULL);
 1552                 mtx_unlock_spin(&sched_lock);
 1553         }
 1554 }
 1555 
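The idle loop above spins in sched_runnable() and, on each pass, hands cpu_idle() a hint derived from the per-CPU idle-call counters. A hypothetical userland model of just that hint computation; struct idle_stats and idle_hint() are invented, and what the machine-dependent cpu_idle() does with the hint is not shown here:

#include <stdio.h>

/* Hypothetical stand-in for the per-CPU idle statistics. */
struct idle_stats {
        unsigned int    idlecalls;
        unsigned int    oldidlecalls;
};

/*
 * The hint passed to cpu_idle() in sched_idletd(): nonzero once the
 * recent and previous idle-call counts together exceed 64.
 */
static int
idle_hint(const struct idle_stats *st)
{

        return (st->idlecalls + st->oldidlecalls > 64);
}

int
main(void)
{
        struct idle_stats st = { 40, 30 };

        printf("hint: %d\n", idle_hint(&st));
        return (0);
}
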
 1556 /*
 1557  * A CPU is entering for the first time or a thread is exiting.
 1558  */
 1559 void
 1560 sched_throw(struct thread *td)
 1561 {
 1562         /*
 1563          * Correct spinlock nesting.  The idle thread context that we are
 1564          * borrowing was created so that it would start out with a single
 1565          * spin lock (sched_lock) held in fork_trampoline().  Since we've
 1566          * explicitly acquired locks in this function, the nesting count
 1567          * is now 2 rather than 1.  Since we are nested, calling
 1568          * spinlock_exit() will simply adjust the counts without allowing
 1569          * spin lock using code to interrupt us.
 1570          */
 1571         if (td == NULL) {
 1572                 mtx_lock_spin(&sched_lock);
 1573                 spinlock_exit();
 1574         } else {
 1575                 lock_profile_release_lock(&sched_lock.lock_object);
 1576                 MPASS(td->td_lock == &sched_lock);
 1577         }
 1578         mtx_assert(&sched_lock, MA_OWNED);
 1579         KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
 1580         PCPU_SET(switchtime, cpu_ticks());
 1581         PCPU_SET(switchticks, ticks);
 1582         cpu_throw(td, choosethread());  /* doesn't return */
 1583 }
 1584 
 1585 void
 1586 sched_fork_exit(struct thread *td)
 1587 {
 1588 
 1589         /*
 1590          * Finish setting up thread glue so that it begins execution in a
 1591          * non-nested critical section with sched_lock held but not recursed.
 1592          */
 1593         td->td_oncpu = PCPU_GET(cpuid);
 1594         sched_lock.mtx_lock = (uintptr_t)td;
 1595         lock_profile_obtain_lock_success(&sched_lock.lock_object,
 1596             0, 0, __FILE__, __LINE__);
 1597         THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
 1598 }
 1599 
 1600 char *
 1601 sched_tdname(struct thread *td)
 1602 {
 1603 #ifdef KTR
 1604         struct td_sched *ts;
 1605 
 1606         ts = td->td_sched;
 1607         if (ts->ts_name[0] == '\0')
 1608                 snprintf(ts->ts_name, sizeof(ts->ts_name),
 1609                     "%s tid %d", td->td_name, td->td_tid);
 1610         return (ts->ts_name);
 1611 #else   
 1612         return (td->td_name);
 1613 #endif
 1614 }
 1615 
 1616 void
 1617 sched_affinity(struct thread *td)
 1618 {
 1619 #ifdef SMP
 1620         struct td_sched *ts;
 1621         int cpu;
 1622 
 1623         THREAD_LOCK_ASSERT(td, MA_OWNED);       
 1624 
 1625         /*
 1626          * Set the TSF_AFFINITY flag if there is at least one CPU this
 1627          * thread can't run on.
 1628          */
 1629         ts = td->td_sched;
 1630         ts->ts_flags &= ~TSF_AFFINITY;
 1631         CPU_FOREACH(cpu) {
 1632                 if (!THREAD_CAN_SCHED(td, cpu)) {
 1633                         ts->ts_flags |= TSF_AFFINITY;
 1634                         break;
 1635                 }
 1636         }
 1637 
 1638         /*
 1639          * If this thread can run on all CPUs, nothing else to do.
 1640          */
 1641         if (!(ts->ts_flags & TSF_AFFINITY))
 1642                 return;
 1643 
 1644         /* Pinned threads and bound threads should be left alone. */
 1645         if (td->td_pinned != 0 || td->td_flags & TDF_BOUND)
 1646                 return;
 1647 
 1648         switch (td->td_state) {
 1649         case TDS_RUNQ:
 1650                 /*
 1651                  * If we are on a per-CPU runqueue that is in the set,
 1652                  * then nothing needs to be done.
 1653                  */
 1654                 if (ts->ts_runq != &runq &&
 1655                     THREAD_CAN_SCHED(td, ts->ts_runq - runq_pcpu))
 1656                         return;
 1657 
 1658                 /* Put this thread on a valid per-CPU runqueue. */
 1659                 sched_rem(td);
 1660                 sched_add(td, SRQ_BORING);
 1661                 break;
 1662         case TDS_RUNNING:
 1663                 /*
 1664                  * See if our current CPU is in the set.  If not, force a
 1665                  * context switch.
 1666                  */
 1667                 if (THREAD_CAN_SCHED(td, td->td_oncpu))
 1668                         return;
 1669 
 1670                 td->td_flags |= TDF_NEEDRESCHED;
 1671                 if (td != curthread)
 1672                         ipi_cpu(cpu, IPI_AST);
 1673                 break;
 1674         default:
 1675                 break;
 1676         }
 1677 #endif
 1678 }
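
sched_affinity() above first decides whether the thread is restricted at all: TSF_AFFINITY is set as soon as one CPU in the system is missing from the thread's allowed set. A hypothetical userland model of that scan; NCPU_EXAMPLE, has_affinity(), and the sample mask are invented for illustration:

#include <stdbool.h>
#include <stdio.h>

#define NCPU_EXAMPLE    8

/*
 * Mirror of the CPU_FOREACH() scan in sched_affinity(): the thread is
 * considered restricted if any CPU is missing from its allowed set.
 */
static bool
has_affinity(const bool allowed[NCPU_EXAMPLE])
{
        int cpu;

        for (cpu = 0; cpu < NCPU_EXAMPLE; cpu++)
                if (!allowed[cpu])
                        return (true);
        return (false);
}

int
main(void)
{
        bool mask[NCPU_EXAMPLE] =
            { true, true, true, false, true, true, true, true };

        printf("restricted: %s\n", has_affinity(mask) ? "yes" : "no");
        return (0);
}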
