FreeBSD/Linux Kernel Cross Reference
sys/kern/sched_4bsd.c


    1 /*-
    2  * Copyright (c) 1982, 1986, 1990, 1991, 1993
    3  *      The Regents of the University of California.  All rights reserved.
    4  * (c) UNIX System Laboratories, Inc.
    5  * All or some portions of this file are derived from material licensed
    6  * to the University of California by American Telephone and Telegraph
    7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
    8  * the permission of UNIX System Laboratories, Inc.
    9  *
   10  * Redistribution and use in source and binary forms, with or without
   11  * modification, are permitted provided that the following conditions
   12  * are met:
   13  * 1. Redistributions of source code must retain the above copyright
   14  *    notice, this list of conditions and the following disclaimer.
   15  * 2. Redistributions in binary form must reproduce the above copyright
   16  *    notice, this list of conditions and the following disclaimer in the
   17  *    documentation and/or other materials provided with the distribution.
   18  * 4. Neither the name of the University nor the names of its contributors
   19  *    may be used to endorse or promote products derived from this software
   20  *    without specific prior written permission.
   21  *
   22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   32  * SUCH DAMAGE.
   33  */
   34 
   35 #include <sys/cdefs.h>
   36 __FBSDID("$FreeBSD: releng/8.1/sys/kern/sched_4bsd.c 203662 2010-02-08 14:08:52Z attilio $");
   37 
   38 #include "opt_hwpmc_hooks.h"
   39 #include "opt_sched.h"
   40 #include "opt_kdtrace.h"
   41 
   42 #include <sys/param.h>
   43 #include <sys/systm.h>
   44 #include <sys/cpuset.h>
   45 #include <sys/kernel.h>
   46 #include <sys/ktr.h>
   47 #include <sys/lock.h>
   48 #include <sys/kthread.h>
   49 #include <sys/mutex.h>
   50 #include <sys/proc.h>
   51 #include <sys/resourcevar.h>
   52 #include <sys/sched.h>
   53 #include <sys/smp.h>
   54 #include <sys/sysctl.h>
   55 #include <sys/sx.h>
   56 #include <sys/turnstile.h>
   57 #include <sys/umtx.h>
   58 #include <machine/pcb.h>
   59 #include <machine/smp.h>
   60 
   61 #ifdef HWPMC_HOOKS
   62 #include <sys/pmckern.h>
   63 #endif
   64 
   65 #ifdef KDTRACE_HOOKS
   66 #include <sys/dtrace_bsd.h>
   67 int                             dtrace_vtime_active;
   68 dtrace_vtime_switch_func_t      dtrace_vtime_switch_func;
   69 #endif
   70 
   71 /*
   72  * INVERSE_ESTCPU_WEIGHT is only suitable for statclock() frequencies in
   73  * the range 100-256 Hz (approximately).
   74  */
   75 #define ESTCPULIM(e) \
   76     min((e), INVERSE_ESTCPU_WEIGHT * (NICE_WEIGHT * (PRIO_MAX - PRIO_MIN) - \
   77     RQ_PPQ) + INVERSE_ESTCPU_WEIGHT - 1)
   78 #ifdef SMP
   79 #define INVERSE_ESTCPU_WEIGHT   (8 * smp_cpus)
   80 #else
   81 #define INVERSE_ESTCPU_WEIGHT   8       /* 1 / (priorities per estcpu level). */
   82 #endif
   83 #define NICE_WEIGHT             1       /* Priorities per nice level. */
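       /*
        * Editor's note (illustrative, not part of the original source): with the
        * stock uniprocessor values above (INVERSE_ESTCPU_WEIGHT = 8, NICE_WEIGHT = 1)
        * and assuming the usual header values PRIO_MIN = -20, PRIO_MAX = 20 and
        * RQ_PPQ = 4, ESTCPULIM() clamps td_estcpu to 8 * (1 * 40 - 4) + 8 - 1 = 295,
        * so the estcpu term added in resetpriority() can contribute at most
        * NICE_WEIGHT * (PRIO_MAX - PRIO_MIN) - RQ_PPQ = 36 priority levels.
        */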
   84 
   85 #define TS_NAME_LEN (MAXCOMLEN + sizeof(" td ") + sizeof(__XSTRING(UINT_MAX)))
   86 
   87 /*
   88  * The schedulable entity that runs a context.
    89  * This is an extension to the thread structure and is tailored to
    90  * the requirements of this scheduler.
   91  */
   92 struct td_sched {
   93         fixpt_t         ts_pctcpu;      /* (j) %cpu during p_swtime. */
   94         int             ts_cpticks;     /* (j) Ticks of cpu time. */
   95         int             ts_slptime;     /* (j) Seconds !RUNNING. */
   96         int             ts_flags;
   97         struct runq     *ts_runq;       /* runq the thread is currently on */
   98 #ifdef KTR
   99         char            ts_name[TS_NAME_LEN];
  100 #endif
  101 };
  102 
  103 /* flags kept in td_flags */
  104 #define TDF_DIDRUN      TDF_SCHED0      /* thread actually ran. */
  105 #define TDF_BOUND       TDF_SCHED1      /* Bound to one CPU. */
  106 
  107 /* flags kept in ts_flags */
  108 #define TSF_AFFINITY    0x0001          /* Has a non-"full" CPU set. */
  109 
  110 #define SKE_RUNQ_PCPU(ts)                                               \
  111     ((ts)->ts_runq != 0 && (ts)->ts_runq != &runq)
  112 
  113 #define THREAD_CAN_SCHED(td, cpu)       \
  114     CPU_ISSET((cpu), &(td)->td_cpuset->cs_mask)
  115 
  116 static struct td_sched td_sched0;
  117 struct mtx sched_lock;
  118 
  119 static int      sched_tdcnt;    /* Total runnable threads in the system. */
  120 static int      sched_quantum;  /* Roundrobin scheduling quantum in ticks. */
  121 #define SCHED_QUANTUM   (hz / 10)       /* Default sched quantum */
  122 
  123 static void     setup_runqs(void);
  124 static void     schedcpu(void);
  125 static void     schedcpu_thread(void);
  126 static void     sched_priority(struct thread *td, u_char prio);
  127 static void     sched_setup(void *dummy);
  128 static void     maybe_resched(struct thread *td);
  129 static void     updatepri(struct thread *td);
  130 static void     resetpriority(struct thread *td);
  131 static void     resetpriority_thread(struct thread *td);
  132 #ifdef SMP
  133 static int      sched_pickcpu(struct thread *td);
  134 static int      forward_wakeup(int cpunum);
  135 static void     kick_other_cpu(int pri, int cpuid);
  136 #endif
  137 
  138 static struct kproc_desc sched_kp = {
  139         "schedcpu",
  140         schedcpu_thread,
  141         NULL
  142 };
  143 SYSINIT(schedcpu, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, kproc_start,
  144     &sched_kp);
  145 SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL);
  146 
  147 /*
  148  * Global run queue.
  149  */
  150 static struct runq runq;
  151 
  152 #ifdef SMP
  153 /*
  154  * Per-CPU run queues
  155  */
  156 static struct runq runq_pcpu[MAXCPU];
  157 long runq_length[MAXCPU];
  158 #endif
  159 
  160 static void
  161 setup_runqs(void)
  162 {
  163 #ifdef SMP
  164         int i;
  165 
  166         for (i = 0; i < MAXCPU; ++i)
  167                 runq_init(&runq_pcpu[i]);
  168 #endif
  169 
  170         runq_init(&runq);
  171 }
  172 
  173 static int
  174 sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
  175 {
  176         int error, new_val;
  177 
  178         new_val = sched_quantum * tick;
  179         error = sysctl_handle_int(oidp, &new_val, 0, req);
  180         if (error != 0 || req->newptr == NULL)
  181                 return (error);
  182         if (new_val < tick)
  183                 return (EINVAL);
  184         sched_quantum = new_val / tick;
  185         hogticks = 2 * sched_quantum;
  186         return (0);
  187 }
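       /*
        * Editor's note (illustrative, not from the original source): the sysctl is
        * expressed in microseconds while sched_quantum is kept in ticks.  Assuming
        * hz = 1000, so that tick = 1000000 / hz = 1000 us, the default quantum of
        * hz / 10 = 100 ticks reads back as 100000 us; writing 50000 would set
        * sched_quantum to 50 ticks and hogticks to 100.
        */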
  188 
  189 SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RD, 0, "Scheduler");
  190 
  191 SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "4BSD", 0,
  192     "Scheduler name");
  193 
  194 SYSCTL_PROC(_kern_sched, OID_AUTO, quantum, CTLTYPE_INT | CTLFLAG_RW,
  195     0, sizeof sched_quantum, sysctl_kern_quantum, "I",
  196     "Roundrobin scheduling quantum in microseconds");
  197 
  198 #ifdef SMP
  199 /* Enable forwarding of wakeups to all other cpus */
  200 SYSCTL_NODE(_kern_sched, OID_AUTO, ipiwakeup, CTLFLAG_RD, NULL, "Kernel SMP");
  201 
  202 static int runq_fuzz = 1;
  203 SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");
  204 
  205 static int forward_wakeup_enabled = 1;
  206 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, enabled, CTLFLAG_RW,
  207            &forward_wakeup_enabled, 0,
  208            "Forwarding of wakeup to idle CPUs");
  209 
  210 static int forward_wakeups_requested = 0;
  211 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, requested, CTLFLAG_RD,
  212            &forward_wakeups_requested, 0,
  213            "Requests for Forwarding of wakeup to idle CPUs");
  214 
  215 static int forward_wakeups_delivered = 0;
  216 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, delivered, CTLFLAG_RD,
  217            &forward_wakeups_delivered, 0,
  218            "Completed Forwarding of wakeup to idle CPUs");
  219 
  220 static int forward_wakeup_use_mask = 1;
  221 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, usemask, CTLFLAG_RW,
  222            &forward_wakeup_use_mask, 0,
  223            "Use the mask of idle cpus");
  224 
  225 static int forward_wakeup_use_loop = 0;
  226 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, useloop, CTLFLAG_RW,
  227            &forward_wakeup_use_loop, 0,
  228            "Use a loop to find idle cpus");
  229 
  230 static int forward_wakeup_use_single = 0;
  231 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, onecpu, CTLFLAG_RW,
  232            &forward_wakeup_use_single, 0,
  233            "Only signal one idle cpu");
  234 
  235 static int forward_wakeup_use_htt = 0;
  236 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, htt2, CTLFLAG_RW,
  237            &forward_wakeup_use_htt, 0,
  238            "account for htt");
  239 
  240 #endif
  241 #if 0
  242 static int sched_followon = 0;
  243 SYSCTL_INT(_kern_sched, OID_AUTO, followon, CTLFLAG_RW,
  244            &sched_followon, 0,
  245            "allow threads to share a quantum");
  246 #endif
  247 
  248 static __inline void
  249 sched_load_add(void)
  250 {
  251 
  252         sched_tdcnt++;
  253         KTR_COUNTER0(KTR_SCHED, "load", "global load", sched_tdcnt);
  254 }
  255 
  256 static __inline void
  257 sched_load_rem(void)
  258 {
  259 
  260         sched_tdcnt--;
  261         KTR_COUNTER0(KTR_SCHED, "load", "global load", sched_tdcnt);
  262 }
  263 /*
  264  * Arrange to reschedule if necessary, taking the priorities and
  265  * schedulers into account.
  266  */
  267 static void
  268 maybe_resched(struct thread *td)
  269 {
  270 
  271         THREAD_LOCK_ASSERT(td, MA_OWNED);
  272         if (td->td_priority < curthread->td_priority)
  273                 curthread->td_flags |= TDF_NEEDRESCHED;
  274 }
  275 
  276 /*
  277  * This function is called when a thread is about to be put on run queue
  278  * because it has been made runnable or its priority has been adjusted.  It
   279  * determines whether to preempt the current thread in favor of the new
   280  * thread.  If so, it switches to it and eventually returns true; if not,
   281  * it returns false so that the caller may place the thread on a run queue.
  282  */
  283 int
  284 maybe_preempt(struct thread *td)
  285 {
  286 #ifdef PREEMPTION
  287         struct thread *ctd;
  288         int cpri, pri;
  289 
  290         /*
  291          * The new thread should not preempt the current thread if any of the
  292          * following conditions are true:
  293          *
  294          *  - The kernel is in the throes of crashing (panicstr).
  295          *  - The current thread has a higher (numerically lower) or
  296          *    equivalent priority.  Note that this prevents curthread from
  297          *    trying to preempt to itself.
  298          *  - It is too early in the boot for context switches (cold is set).
  299          *  - The current thread has an inhibitor set or is in the process of
  300          *    exiting.  In this case, the current thread is about to switch
   301  *    out anyway, so there's no point in preempting.  If we did,
   302  *    the current thread would not be properly resumed either, so
  303          *    just avoid that whole landmine.
  304          *  - If the new thread's priority is not a realtime priority and
  305          *    the current thread's priority is not an idle priority and
  306          *    FULL_PREEMPTION is disabled.
  307          *
  308          * If all of these conditions are false, but the current thread is in
  309          * a nested critical section, then we have to defer the preemption
  310          * until we exit the critical section.  Otherwise, switch immediately
  311          * to the new thread.
  312          */
  313         ctd = curthread;
  314         THREAD_LOCK_ASSERT(td, MA_OWNED);
  315         KASSERT((td->td_inhibitors == 0),
  316                         ("maybe_preempt: trying to run inhibited thread"));
  317         pri = td->td_priority;
  318         cpri = ctd->td_priority;
  319         if (panicstr != NULL || pri >= cpri || cold /* || dumping */ ||
  320             TD_IS_INHIBITED(ctd))
  321                 return (0);
  322 #ifndef FULL_PREEMPTION
  323         if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE)
  324                 return (0);
  325 #endif
  326 
  327         if (ctd->td_critnest > 1) {
  328                 CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
  329                     ctd->td_critnest);
  330                 ctd->td_owepreempt = 1;
  331                 return (0);
  332         }
  333         /*
  334          * Thread is runnable but not yet put on system run queue.
  335          */
  336         MPASS(ctd->td_lock == td->td_lock);
  337         MPASS(TD_ON_RUNQ(td));
  338         TD_SET_RUNNING(td);
  339         CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
  340             td->td_proc->p_pid, td->td_name);
  341         mi_switch(SW_INVOL | SW_PREEMPT | SWT_PREEMPT, td);
  342         /*
  343          * td's lock pointer may have changed.  We have to return with it
  344          * locked.
  345          */
  346         spinlock_enter();
  347         thread_unlock(ctd);
  348         thread_lock(td);
  349         spinlock_exit();
  350         return (1);
  351 #else
  352         return (0);
  353 #endif
  354 }
  355 
  356 /*
  357  * Constants for digital decay and forget:
  358  *      90% of (td_estcpu) usage in 5 * loadav time
  359  *      95% of (ts_pctcpu) usage in 60 seconds (load insensitive)
  360  *          Note that, as ps(1) mentions, this can let percentages
  361  *          total over 100% (I've seen 137.9% for 3 processes).
  362  *
  363  * Note that schedclock() updates td_estcpu and p_cpticks asynchronously.
  364  *
  365  * We wish to decay away 90% of td_estcpu in (5 * loadavg) seconds.
  366  * That is, the system wants to compute a value of decay such
  367  * that the following for loop:
  368  *      for (i = 0; i < (5 * loadavg); i++)
  369  *              td_estcpu *= decay;
  370  * will compute
  371  *      td_estcpu *= 0.1;
  372  * for all values of loadavg:
  373  *
  374  * Mathematically this loop can be expressed by saying:
  375  *      decay ** (5 * loadavg) ~= .1
  376  *
  377  * The system computes decay as:
  378  *      decay = (2 * loadavg) / (2 * loadavg + 1)
  379  *
  380  * We wish to prove that the system's computation of decay
  381  * will always fulfill the equation:
  382  *      decay ** (5 * loadavg) ~= .1
  383  *
  384  * If we compute b as:
  385  *      b = 2 * loadavg
  386  * then
  387  *      decay = b / (b + 1)
  388  *
  389  * We now need to prove two things:
  390  *      1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
  391  *      2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
  392  *
  393  * Facts:
  394  *         For x close to zero, exp(x) =~ 1 + x, since
   395  *              exp(x) = x**0/0! + x**1/1! + x**2/2! + ... .
  396  *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
  397  *         For x close to zero, ln(1+x) =~ x, since
  398  *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
  399  *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
  400  *         ln(.1) =~ -2.30
  401  *
  402  * Proof of (1):
  403  *    Solve (factor)**(power) =~ .1 given power (5*loadav):
  404  *      solving for factor,
  405  *      ln(factor) =~ (-2.30/5*loadav), or
  406  *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
  407  *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
  408  *
  409  * Proof of (2):
  410  *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
  411  *      solving for power,
  412  *      power*ln(b/(b+1)) =~ -2.30, or
  413  *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
  414  *
  415  * Actual power values for the implemented algorithm are as follows:
  416  *      loadav: 1       2       3       4
  417  *      power:  5.68    10.32   14.94   19.55
  418  */
  419 
  420 /* calculations for digital decay to forget 90% of usage in 5*loadav sec */
  421 #define loadfactor(loadav)      (2 * (loadav))
  422 #define decay_cpu(loadfac, cpu) (((loadfac) * (cpu)) / ((loadfac) + FSCALE))
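       /*
        * Editor's note (worked example, not part of the original source):
        * averunnable.ldavg[] is fixed point scaled by FSCALE, so a load average of
        * 1.0 gives loadfac = 2 * FSCALE and decay_cpu() multiplies by
        * 2 * FSCALE / (2 * FSCALE + FSCALE) = 2/3 on each once-a-second schedcpu()
        * pass; (2/3)**5.68 ~= 0.1, matching the loadav = 1 row of the table above.
        */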
  423 
  424 /* decay 95% of `ts_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
  425 static fixpt_t  ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
  426 SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
  427 
  428 /*
  429  * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
  430  * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
  431  * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
  432  *
  433  * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
  434  *      1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
  435  *
  436  * If you don't want to bother with the faster/more-accurate formula, you
  437  * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
   438  * (more general) method of calculating the percentage of CPU used by a process.
  439  */
  440 #define CCPU_SHIFT      11
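       /*
        * Editor's note (illustrative, not from the original source): with
        * ccpu = exp(-1/20) * FSCALE, sixty once-a-second schedcpu() passes multiply
        * an idle thread's ts_pctcpu by exp(-60/20) = exp(-3) ~= 0.05, which is the
        * "decay 95% in 60 seconds" figure cited above.  The stock FSHIFT of 11
        * satisfies FSHIFT >= CCPU_SHIFT, so the faster fixed-point path is normally
        * the one compiled in.
        */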
  441 
  442 /*
  443  * Recompute process priorities, every hz ticks.
  444  * MP-safe, called without the Giant mutex.
  445  */
  446 /* ARGSUSED */
  447 static void
  448 schedcpu(void)
  449 {
  450         register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
  451         struct thread *td;
  452         struct proc *p;
  453         struct td_sched *ts;
  454         int awake, realstathz;
  455 
  456         realstathz = stathz ? stathz : hz;
  457         sx_slock(&allproc_lock);
  458         FOREACH_PROC_IN_SYSTEM(p) {
  459                 PROC_LOCK(p);
  460                 FOREACH_THREAD_IN_PROC(p, td) {
  461                         awake = 0;
  462                         thread_lock(td);
  463                         ts = td->td_sched;
  464                         /*
  465                          * Increment sleep time (if sleeping).  We
  466                          * ignore overflow, as above.
  467                          */
  468                         /*
  469                          * The td_sched slptimes are not touched in wakeup
  470                          * because the thread may not HAVE everything in
  471                          * memory? XXX I think this is out of date.
  472                          */
  473                         if (TD_ON_RUNQ(td)) {
  474                                 awake = 1;
  475                                 td->td_flags &= ~TDF_DIDRUN;
  476                         } else if (TD_IS_RUNNING(td)) {
  477                                 awake = 1;
  478                                 /* Do not clear TDF_DIDRUN */
  479                         } else if (td->td_flags & TDF_DIDRUN) {
  480                                 awake = 1;
  481                                 td->td_flags &= ~TDF_DIDRUN;
  482                         }
  483 
  484                         /*
  485                          * ts_pctcpu is only for ps and ttyinfo().
  486                          */
  487                         ts->ts_pctcpu = (ts->ts_pctcpu * ccpu) >> FSHIFT;
  488                         /*
  489                          * If the td_sched has been idle the entire second,
  490                          * stop recalculating its priority until
  491                          * it wakes up.
  492                          */
  493                         if (ts->ts_cpticks != 0) {
  494 #if     (FSHIFT >= CCPU_SHIFT)
  495                                 ts->ts_pctcpu += (realstathz == 100)
  496                                     ? ((fixpt_t) ts->ts_cpticks) <<
  497                                     (FSHIFT - CCPU_SHIFT) :
  498                                     100 * (((fixpt_t) ts->ts_cpticks)
  499                                     << (FSHIFT - CCPU_SHIFT)) / realstathz;
  500 #else
  501                                 ts->ts_pctcpu += ((FSCALE - ccpu) *
  502                                     (ts->ts_cpticks *
  503                                     FSCALE / realstathz)) >> FSHIFT;
  504 #endif
  505                                 ts->ts_cpticks = 0;
  506                         }
  507                         /*
  508                          * If there are ANY running threads in this process,
  509                          * then don't count it as sleeping.
  510                          * XXX: this is broken.
  511                          */
  512                         if (awake) {
  513                                 if (ts->ts_slptime > 1) {
  514                                         /*
  515                                          * In an ideal world, this should not
  516                                          * happen, because whoever woke us
  517                                          * up from the long sleep should have
  518                                          * unwound the slptime and reset our
  519                                          * priority before we run at the stale
  520                                          * priority.  Should KASSERT at some
  521                                          * point when all the cases are fixed.
  522                                          */
  523                                         updatepri(td);
  524                                 }
  525                                 ts->ts_slptime = 0;
  526                         } else
  527                                 ts->ts_slptime++;
  528                         if (ts->ts_slptime > 1) {
  529                                 thread_unlock(td);
  530                                 continue;
  531                         }
  532                         td->td_estcpu = decay_cpu(loadfac, td->td_estcpu);
  533                         resetpriority(td);
  534                         resetpriority_thread(td);
  535                         thread_unlock(td);
  536                 }
  537                 PROC_UNLOCK(p);
  538         }
  539         sx_sunlock(&allproc_lock);
  540 }
  541 
  542 /*
  543  * Main loop for a kthread that executes schedcpu once a second.
  544  */
  545 static void
  546 schedcpu_thread(void)
  547 {
  548 
  549         for (;;) {
  550                 schedcpu();
  551                 pause("-", hz);
  552         }
  553 }
  554 
  555 /*
  556  * Recalculate the priority of a process after it has slept for a while.
  557  * For all load averages >= 1 and max td_estcpu of 255, sleeping for at
  558  * least six times the loadfactor will decay td_estcpu to zero.
  559  */
  560 static void
  561 updatepri(struct thread *td)
  562 {
  563         struct td_sched *ts;
  564         fixpt_t loadfac;
  565         unsigned int newcpu;
  566 
  567         ts = td->td_sched;
  568         loadfac = loadfactor(averunnable.ldavg[0]);
  569         if (ts->ts_slptime > 5 * loadfac)
  570                 td->td_estcpu = 0;
  571         else {
  572                 newcpu = td->td_estcpu;
  573                 ts->ts_slptime--;       /* was incremented in schedcpu() */
  574                 while (newcpu && --ts->ts_slptime)
  575                         newcpu = decay_cpu(loadfac, newcpu);
  576                 td->td_estcpu = newcpu;
  577         }
  578 }
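       /*
        * Editor's note (illustrative, not from the original source): the loop above
        * replays roughly one decay_cpu() step per whole second spent asleep, i.e.
        * the decay that schedcpu() would have applied had the thread stayed
        * runnable, so a long sleeper wakes with a correspondingly lower td_estcpu
        * and hence a better user priority once resetpriority() runs.
        */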
  579 
  580 /*
  581  * Compute the priority of a process when running in user mode.
  582  * Arrange to reschedule if the resulting priority is better
  583  * than that of the current process.
  584  */
  585 static void
  586 resetpriority(struct thread *td)
  587 {
  588         register unsigned int newpriority;
  589 
  590         if (td->td_pri_class == PRI_TIMESHARE) {
  591                 newpriority = PUSER + td->td_estcpu / INVERSE_ESTCPU_WEIGHT +
  592                     NICE_WEIGHT * (td->td_proc->p_nice - PRIO_MIN);
  593                 newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
  594                     PRI_MAX_TIMESHARE);
  595                 sched_user_prio(td, newpriority);
  596         }
  597 }
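       /*
        * Editor's note (worked example, not part of the original source): assuming
        * the stock priority layout with PUSER = PRI_MIN_TIMESHARE = 160 and
        * PRI_MAX_TIMESHARE = 223, a nice 0 thread with td_estcpu = 80 gets
        * newpriority = 160 + 80 / 8 + 1 * (0 - (-20)) = 190, which the min/max pair
        * then clamps into the 160..223 timeshare band.
        */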
  598 
  599 /*
  600  * Update the thread's priority when the associated process's user
  601  * priority changes.
  602  */
  603 static void
  604 resetpriority_thread(struct thread *td)
  605 {
  606 
  607         /* Only change threads with a time sharing user priority. */
  608         if (td->td_priority < PRI_MIN_TIMESHARE ||
  609             td->td_priority > PRI_MAX_TIMESHARE)
  610                 return;
  611 
  612         /* XXX the whole needresched thing is broken, but not silly. */
  613         maybe_resched(td);
  614 
  615         sched_prio(td, td->td_user_pri);
  616 }
  617 
  618 /* ARGSUSED */
  619 static void
  620 sched_setup(void *dummy)
  621 {
  622         setup_runqs();
  623 
  624         if (sched_quantum == 0)
  625                 sched_quantum = SCHED_QUANTUM;
  626         hogticks = 2 * sched_quantum;
  627 
  628         /* Account for thread0. */
  629         sched_load_add();
  630 }
  631 
  632 /* External interfaces start here */
  633 
  634 /*
  635  * Very early in the boot some setup of scheduler-specific
  636  * parts of proc0 and of some scheduler resources needs to be done.
  637  * Called from:
  638  *  proc0_init()
  639  */
  640 void
  641 schedinit(void)
  642 {
  643         /*
  644          * Set up the scheduler specific parts of proc0.
  645          */
  646         proc0.p_sched = NULL; /* XXX */
  647         thread0.td_sched = &td_sched0;
  648         thread0.td_lock = &sched_lock;
  649         mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
  650 }
  651 
  652 int
  653 sched_runnable(void)
  654 {
  655 #ifdef SMP
  656         return runq_check(&runq) + runq_check(&runq_pcpu[PCPU_GET(cpuid)]);
  657 #else
  658         return runq_check(&runq);
  659 #endif
  660 }
  661 
  662 int
  663 sched_rr_interval(void)
  664 {
  665         if (sched_quantum == 0)
  666                 sched_quantum = SCHED_QUANTUM;
  667         return (sched_quantum);
  668 }
  669 
  670 /*
  671  * We adjust the priority of the current process.  The priority of
  672  * a process gets worse as it accumulates CPU time.  The cpu usage
  673  * estimator (td_estcpu) is increased here.  resetpriority() will
  674  * compute a different priority each time td_estcpu increases by
  675  * INVERSE_ESTCPU_WEIGHT
  676  * (until MAXPRI is reached).  The cpu usage estimator ramps up
  677  * quite quickly when the process is running (linearly), and decays
  678  * away exponentially, at a rate which is proportionally slower when
  679  * the system is busy.  The basic principle is that the system will
  680  * 90% forget that the process used a lot of CPU time in 5 * loadav
  681  * seconds.  This causes the system to favor processes which haven't
  682  * run much recently, and to round-robin among other processes.
  683  */
  684 void
  685 sched_clock(struct thread *td)
  686 {
  687         struct td_sched *ts;
  688 
  689         THREAD_LOCK_ASSERT(td, MA_OWNED);
  690         ts = td->td_sched;
  691 
  692         ts->ts_cpticks++;
  693         td->td_estcpu = ESTCPULIM(td->td_estcpu + 1);
  694         if ((td->td_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
  695                 resetpriority(td);
  696                 resetpriority_thread(td);
  697         }
  698 
  699         /*
  700          * Force a context switch if the current thread has used up a full
  701          * quantum (default quantum is 100ms).
  702          */
  703         if (!TD_IS_IDLETHREAD(td) &&
  704             ticks - PCPU_GET(switchticks) >= sched_quantum)
  705                 td->td_flags |= TDF_NEEDRESCHED;
  706 }
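       /*
        * Editor's note (illustrative, not from the original source): sched_clock()
        * is driven by statclock(), so td_estcpu grows by one per stat tick and the
        * priority is recomputed after every INVERSE_ESTCPU_WEIGHT (8) increments.
        * The quantum check uses hz-based ticks: with hz = 1000 and the default
        * sched_quantum of hz / 10, a thread that keeps the CPU is flagged
        * TDF_NEEDRESCHED after roughly 100 ms.
        */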
  707 
  708 /*
  709  * Charge child's scheduling CPU usage to parent.
  710  */
  711 void
  712 sched_exit(struct proc *p, struct thread *td)
  713 {
  714 
  715         KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "proc exit",
  716             "prio:td", td->td_priority);
  717 
  718         PROC_LOCK_ASSERT(p, MA_OWNED);
  719         sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
  720 }
  721 
  722 void
  723 sched_exit_thread(struct thread *td, struct thread *child)
  724 {
  725 
  726         KTR_STATE1(KTR_SCHED, "thread", sched_tdname(child), "exit",
  727             "prio:td", child->td_priority);
  728         thread_lock(td);
  729         td->td_estcpu = ESTCPULIM(td->td_estcpu + child->td_estcpu);
  730         thread_unlock(td);
  731         mtx_lock_spin(&sched_lock);
  732         if ((child->td_proc->p_flag & P_NOLOAD) == 0)
  733                 sched_load_rem();
  734         mtx_unlock_spin(&sched_lock);
  735 }
  736 
  737 void
  738 sched_fork(struct thread *td, struct thread *childtd)
  739 {
  740         sched_fork_thread(td, childtd);
  741 }
  742 
  743 void
  744 sched_fork_thread(struct thread *td, struct thread *childtd)
  745 {
  746         struct td_sched *ts;
  747 
  748         childtd->td_estcpu = td->td_estcpu;
  749         childtd->td_lock = &sched_lock;
  750         childtd->td_cpuset = cpuset_ref(td->td_cpuset);
  751         ts = childtd->td_sched;
  752         bzero(ts, sizeof(*ts));
  753         ts->ts_flags |= (td->td_sched->ts_flags & TSF_AFFINITY);
  754 }
  755 
  756 void
  757 sched_nice(struct proc *p, int nice)
  758 {
  759         struct thread *td;
  760 
  761         PROC_LOCK_ASSERT(p, MA_OWNED);
  762         p->p_nice = nice;
  763         FOREACH_THREAD_IN_PROC(p, td) {
  764                 thread_lock(td);
  765                 resetpriority(td);
  766                 resetpriority_thread(td);
  767                 thread_unlock(td);
  768         }
  769 }
  770 
  771 void
  772 sched_class(struct thread *td, int class)
  773 {
  774         THREAD_LOCK_ASSERT(td, MA_OWNED);
  775         td->td_pri_class = class;
  776 }
  777 
  778 /*
  779  * Adjust the priority of a thread.
  780  */
  781 static void
  782 sched_priority(struct thread *td, u_char prio)
  783 {
  784 
  785 
  786         KTR_POINT3(KTR_SCHED, "thread", sched_tdname(td), "priority change",
  787             "prio:%d", td->td_priority, "new prio:%d", prio, KTR_ATTR_LINKED,
  788             sched_tdname(curthread));
  789         if (td != curthread && prio > td->td_priority) {
  790                 KTR_POINT3(KTR_SCHED, "thread", sched_tdname(curthread),
  791                     "lend prio", "prio:%d", td->td_priority, "new prio:%d",
  792                     prio, KTR_ATTR_LINKED, sched_tdname(td));
  793         }
  794         THREAD_LOCK_ASSERT(td, MA_OWNED);
  795         if (td->td_priority == prio)
  796                 return;
  797         td->td_priority = prio;
  798         if (TD_ON_RUNQ(td) && td->td_rqindex != (prio / RQ_PPQ)) {
  799                 sched_rem(td);
  800                 sched_add(td, SRQ_BORING);
  801         }
  802 }
  803 
  804 /*
  805  * Update a thread's priority when it is lent another thread's
  806  * priority.
  807  */
  808 void
  809 sched_lend_prio(struct thread *td, u_char prio)
  810 {
  811 
  812         td->td_flags |= TDF_BORROWING;
  813         sched_priority(td, prio);
  814 }
  815 
  816 /*
  817  * Restore a thread's priority when priority propagation is
  818  * over.  The prio argument is the minimum priority the thread
  819  * needs to have to satisfy other possible priority lending
   820  * requests.  If the thread's regular priority is less
   821  * important than prio, the thread will keep a priority boost
  822  * of prio.
  823  */
  824 void
  825 sched_unlend_prio(struct thread *td, u_char prio)
  826 {
  827         u_char base_pri;
  828 
  829         if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
  830             td->td_base_pri <= PRI_MAX_TIMESHARE)
  831                 base_pri = td->td_user_pri;
  832         else
  833                 base_pri = td->td_base_pri;
  834         if (prio >= base_pri) {
  835                 td->td_flags &= ~TDF_BORROWING;
  836                 sched_prio(td, base_pri);
  837         } else
  838                 sched_lend_prio(td, prio);
  839 }
  840 
  841 void
  842 sched_prio(struct thread *td, u_char prio)
  843 {
  844         u_char oldprio;
  845 
  846         /* First, update the base priority. */
  847         td->td_base_pri = prio;
  848 
  849         /*
  850          * If the thread is borrowing another thread's priority, don't ever
  851          * lower the priority.
  852          */
  853         if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
  854                 return;
  855 
  856         /* Change the real priority. */
  857         oldprio = td->td_priority;
  858         sched_priority(td, prio);
  859 
  860         /*
  861          * If the thread is on a turnstile, then let the turnstile update
  862          * its state.
  863          */
  864         if (TD_ON_LOCK(td) && oldprio != prio)
  865                 turnstile_adjust(td, oldprio);
  866 }
  867 
  868 void
  869 sched_user_prio(struct thread *td, u_char prio)
  870 {
  871         u_char oldprio;
  872 
  873         THREAD_LOCK_ASSERT(td, MA_OWNED);
  874         td->td_base_user_pri = prio;
  875         if (td->td_flags & TDF_UBORROWING && td->td_user_pri <= prio)
  876                 return;
  877         oldprio = td->td_user_pri;
  878         td->td_user_pri = prio;
  879 }
  880 
  881 void
  882 sched_lend_user_prio(struct thread *td, u_char prio)
  883 {
  884         u_char oldprio;
  885 
  886         THREAD_LOCK_ASSERT(td, MA_OWNED);
  887         td->td_flags |= TDF_UBORROWING;
  888         oldprio = td->td_user_pri;
  889         td->td_user_pri = prio;
  890 }
  891 
  892 void
  893 sched_unlend_user_prio(struct thread *td, u_char prio)
  894 {
  895         u_char base_pri;
  896 
  897         THREAD_LOCK_ASSERT(td, MA_OWNED);
  898         base_pri = td->td_base_user_pri;
  899         if (prio >= base_pri) {
  900                 td->td_flags &= ~TDF_UBORROWING;
  901                 sched_user_prio(td, base_pri);
  902         } else {
  903                 sched_lend_user_prio(td, prio);
  904         }
  905 }
  906 
  907 void
  908 sched_sleep(struct thread *td, int pri)
  909 {
  910 
  911         THREAD_LOCK_ASSERT(td, MA_OWNED);
  912         td->td_slptick = ticks;
  913         td->td_sched->ts_slptime = 0;
  914         if (pri)
  915                 sched_prio(td, pri);
  916         if (TD_IS_SUSPENDED(td) || pri >= PSOCK)
  917                 td->td_flags |= TDF_CANSWAP;
  918 }
  919 
  920 void
  921 sched_switch(struct thread *td, struct thread *newtd, int flags)
  922 {
  923         struct mtx *tmtx;
  924         struct td_sched *ts;
  925         struct proc *p;
  926 
  927         tmtx = NULL;
  928         ts = td->td_sched;
  929         p = td->td_proc;
  930 
  931         THREAD_LOCK_ASSERT(td, MA_OWNED);
  932 
  933         /* 
  934          * Switch to the sched lock to fix things up and pick
  935          * a new thread.
  936          * Block the td_lock in order to avoid breaking the critical path.
  937          */
  938         if (td->td_lock != &sched_lock) {
  939                 mtx_lock_spin(&sched_lock);
  940                 tmtx = thread_lock_block(td);
  941         }
  942 
  943         if ((p->p_flag & P_NOLOAD) == 0)
  944                 sched_load_rem();
  945 
  946         if (newtd) {
  947                 MPASS(newtd->td_lock == &sched_lock);
  948                 newtd->td_flags |= (td->td_flags & TDF_NEEDRESCHED);
  949         }
  950 
  951         td->td_lastcpu = td->td_oncpu;
  952         td->td_flags &= ~TDF_NEEDRESCHED;
  953         td->td_owepreempt = 0;
  954         td->td_oncpu = NOCPU;
  955 
  956         /*
  957          * At the last moment, if this thread is still marked RUNNING,
  958          * then put it back on the run queue as it has not been suspended
  959          * or stopped or any thing else similar.  We never put the idle
  960          * threads on the run queue, however.
  961          */
  962         if (td->td_flags & TDF_IDLETD) {
  963                 TD_SET_CAN_RUN(td);
  964 #ifdef SMP
  965                 idle_cpus_mask &= ~PCPU_GET(cpumask);
  966 #endif
  967         } else {
  968                 if (TD_IS_RUNNING(td)) {
  969                         /* Put us back on the run queue. */
  970                         sched_add(td, (flags & SW_PREEMPT) ?
  971                             SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
  972                             SRQ_OURSELF|SRQ_YIELDING);
  973                 }
  974         }
  975         if (newtd) {
  976                 /*
  977                  * The thread we are about to run needs to be counted
  978                  * as if it had been added to the run queue and selected.
  979                  * It came from:
  980                  * * A preemption
  981                  * * An upcall
  982                  * * A followon
  983                  */
  984                 KASSERT((newtd->td_inhibitors == 0),
  985                         ("trying to run inhibited thread"));
  986                 newtd->td_flags |= TDF_DIDRUN;
  987                 TD_SET_RUNNING(newtd);
  988                 if ((newtd->td_proc->p_flag & P_NOLOAD) == 0)
  989                         sched_load_add();
  990         } else {
  991                 newtd = choosethread();
  992                 MPASS(newtd->td_lock == &sched_lock);
  993         }
  994 
  995         if (td != newtd) {
  996 #ifdef  HWPMC_HOOKS
  997                 if (PMC_PROC_IS_USING_PMCS(td->td_proc))
  998                         PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
  999 #endif
 1000                 /* I feel sleepy */
 1001                 lock_profile_release_lock(&sched_lock.lock_object);
 1002 #ifdef KDTRACE_HOOKS
 1003                 /*
 1004                  * If DTrace has set the active vtime enum to anything
 1005                  * other than INACTIVE (0), then it should have set the
 1006                  * function to call.
 1007                  */
 1008                 if (dtrace_vtime_active)
 1009                         (*dtrace_vtime_switch_func)(newtd);
 1010 #endif
 1011 
 1012                 cpu_switch(td, newtd, tmtx != NULL ? tmtx : td->td_lock);
 1013                 lock_profile_obtain_lock_success(&sched_lock.lock_object,
 1014                     0, 0, __FILE__, __LINE__);
 1015                 /*
 1016                  * Where am I?  What year is it?
 1017                  * We are in the same thread that went to sleep above,
 1018                  * but any amount of time may have passed. All our context
 1019                  * will still be available as will local variables.
 1020                  * PCPU values however may have changed as we may have
 1021                  * changed CPU so don't trust cached values of them.
 1022                  * New threads will go to fork_exit() instead of here
 1023                  * so if you change things here you may need to change
 1024                  * things there too.
 1025                  *
 1026                  * If the thread above was exiting it will never wake
 1027                  * up again here, so either it has saved everything it
 1028                  * needed to, or the thread_wait() or wait() will
 1029                  * need to reap it.
 1030                  */
 1031 #ifdef  HWPMC_HOOKS
 1032                 if (PMC_PROC_IS_USING_PMCS(td->td_proc))
 1033                         PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
 1034 #endif
 1035         }
 1036 
 1037 #ifdef SMP
 1038         if (td->td_flags & TDF_IDLETD)
 1039                 idle_cpus_mask |= PCPU_GET(cpumask);
 1040 #endif
 1041         sched_lock.mtx_lock = (uintptr_t)td;
 1042         td->td_oncpu = PCPU_GET(cpuid);
 1043         MPASS(td->td_lock == &sched_lock);
 1044 }
 1045 
 1046 void
 1047 sched_wakeup(struct thread *td)
 1048 {
 1049         struct td_sched *ts;
 1050 
 1051         THREAD_LOCK_ASSERT(td, MA_OWNED);
 1052         ts = td->td_sched;
 1053         td->td_flags &= ~TDF_CANSWAP;
 1054         if (ts->ts_slptime > 1) {
 1055                 updatepri(td);
 1056                 resetpriority(td);
 1057         }
 1058         td->td_slptick = 0;
 1059         ts->ts_slptime = 0;
 1060         sched_add(td, SRQ_BORING);
 1061 }
 1062 
 1063 #ifdef SMP
 1064 static int
 1065 forward_wakeup(int cpunum)
 1066 {
 1067         struct pcpu *pc;
 1068         cpumask_t dontuse, id, map, map2, map3, me;
 1069 
 1070         mtx_assert(&sched_lock, MA_OWNED);
 1071 
 1072         CTR0(KTR_RUNQ, "forward_wakeup()");
 1073 
 1074         if ((!forward_wakeup_enabled) ||
 1075              (forward_wakeup_use_mask == 0 && forward_wakeup_use_loop == 0))
 1076                 return (0);
 1077         if (!smp_started || cold || panicstr)
 1078                 return (0);
 1079 
 1080         forward_wakeups_requested++;
 1081 
 1082         /*
 1083          * Check the idle mask we received against what we calculated
 1084          * before in the old version.
 1085          */
 1086         me = PCPU_GET(cpumask);
 1087 
 1088         /* Don't bother if we should be doing it ourself. */
 1089         if ((me & idle_cpus_mask) && (cpunum == NOCPU || me == (1 << cpunum)))
 1090                 return (0);
 1091 
 1092         dontuse = me | stopped_cpus | hlt_cpus_mask;
 1093         map3 = 0;
 1094         if (forward_wakeup_use_loop) {
 1095                 SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
 1096                         id = pc->pc_cpumask;
 1097                         if ((id & dontuse) == 0 &&
 1098                             pc->pc_curthread == pc->pc_idlethread) {
 1099                                 map3 |= id;
 1100                         }
 1101                 }
 1102         }
 1103 
 1104         if (forward_wakeup_use_mask) {
 1105                 map = 0;
 1106                 map = idle_cpus_mask & ~dontuse;
 1107 
 1108                 /* If they are both on, compare and use loop if different. */
 1109                 if (forward_wakeup_use_loop) {
 1110                         if (map != map3) {
 1111                                 printf("map (%02X) != map3 (%02X)\n", map,
 1112                                     map3);
 1113                                 map = map3;
 1114                         }
 1115                 }
 1116         } else {
 1117                 map = map3;
 1118         }
 1119 
 1120         /* If we only allow a specific CPU, then mask off all the others. */
 1121         if (cpunum != NOCPU) {
 1122                 KASSERT((cpunum <= mp_maxcpus),("forward_wakeup: bad cpunum."));
 1123                 map &= (1 << cpunum);
 1124         } else {
  1125                 /* Try to choose an idle die. */
 1126                 if (forward_wakeup_use_htt) {
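                               /*
                                * Editor's note: (map & (map >> 1)) & 0x5555 keeps
                                * bit i only when CPUs i and i + 1 are both idle
                                * (for even i), i.e. it assumes adjacent cpuids are
                                * HTT siblings and prefers packages whose logical
                                * CPUs are all idle.
                                */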
 1127                         map2 =  (map & (map >> 1)) & 0x5555;
 1128                         if (map2) {
 1129                                 map = map2;
 1130                         }
 1131                 }
 1132 
 1133                 /* Set only one bit. */
 1134                 if (forward_wakeup_use_single) {
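                               /*
                                * Editor's note: map & (~map + 1) is the usual
                                * two's-complement trick for isolating the lowest
                                * set bit, so at most one idle CPU is signalled.
                                */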
 1135                         map = map & ((~map) + 1);
 1136                 }
 1137         }
 1138         if (map) {
 1139                 forward_wakeups_delivered++;
 1140                 ipi_selected(map, IPI_AST);
 1141                 return (1);
 1142         }
 1143         if (cpunum == NOCPU)
 1144                 printf("forward_wakeup: Idle processor not found\n");
 1145         return (0);
 1146 }
 1147 
 1148 static void
 1149 kick_other_cpu(int pri, int cpuid)
 1150 {
 1151         struct pcpu *pcpu;
 1152         int cpri;
 1153 
 1154         pcpu = pcpu_find(cpuid);
 1155         if (idle_cpus_mask & pcpu->pc_cpumask) {
 1156                 forward_wakeups_delivered++;
 1157                 ipi_selected(pcpu->pc_cpumask, IPI_AST);
 1158                 return;
 1159         }
 1160 
 1161         cpri = pcpu->pc_curthread->td_priority;
 1162         if (pri >= cpri)
 1163                 return;
 1164 
 1165 #if defined(IPI_PREEMPTION) && defined(PREEMPTION)
 1166 #if !defined(FULL_PREEMPTION)
 1167         if (pri <= PRI_MAX_ITHD)
 1168 #endif /* ! FULL_PREEMPTION */
 1169         {
 1170                 ipi_selected(pcpu->pc_cpumask, IPI_PREEMPT);
 1171                 return;
 1172         }
 1173 #endif /* defined(IPI_PREEMPTION) && defined(PREEMPTION) */
 1174 
 1175         pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED;
 1176         ipi_selected(pcpu->pc_cpumask, IPI_AST);
 1177         return;
 1178 }
 1179 #endif /* SMP */
 1180 
 1181 #ifdef SMP
 1182 static int
 1183 sched_pickcpu(struct thread *td)
 1184 {
 1185         int best, cpu;
 1186 
 1187         mtx_assert(&sched_lock, MA_OWNED);
 1188 
 1189         if (THREAD_CAN_SCHED(td, td->td_lastcpu))
 1190                 best = td->td_lastcpu;
 1191         else
 1192                 best = NOCPU;
 1193         for (cpu = 0; cpu <= mp_maxid; cpu++) {
 1194                 if (CPU_ABSENT(cpu))
 1195                         continue;
 1196                 if (!THREAD_CAN_SCHED(td, cpu))
 1197                         continue;
 1198         
 1199                 if (best == NOCPU)
 1200                         best = cpu;
 1201                 else if (runq_length[cpu] < runq_length[best])
 1202                         best = cpu;
 1203         }
 1204         KASSERT(best != NOCPU, ("no valid CPUs"));
 1205 
 1206         return (best);
 1207 }
 1208 #endif
 1209 
 1210 void
 1211 sched_add(struct thread *td, int flags)
 1212 #ifdef SMP
 1213 {
 1214         struct td_sched *ts;
 1215         int forwarded = 0;
 1216         int cpu;
 1217         int single_cpu = 0;
 1218 
 1219         ts = td->td_sched;
 1220         THREAD_LOCK_ASSERT(td, MA_OWNED);
 1221         KASSERT((td->td_inhibitors == 0),
 1222             ("sched_add: trying to run inhibited thread"));
 1223         KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
 1224             ("sched_add: bad thread state"));
 1225         KASSERT(td->td_flags & TDF_INMEM,
 1226             ("sched_add: thread swapped out"));
 1227 
 1228         KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add",
 1229             "prio:%d", td->td_priority, KTR_ATTR_LINKED,
 1230             sched_tdname(curthread));
 1231         KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup",
 1232             KTR_ATTR_LINKED, sched_tdname(td));
 1233 
 1234 
 1235         /*
 1236          * Now that the thread is moving to the run-queue, set the lock
 1237          * to the scheduler's lock.
 1238          */
 1239         if (td->td_lock != &sched_lock) {
 1240                 mtx_lock_spin(&sched_lock);
 1241                 thread_lock_set(td, &sched_lock);
 1242         }
 1243         TD_SET_RUNQ(td);
 1244 
 1245         if (td->td_pinned != 0) {
 1246                 cpu = td->td_lastcpu;
 1247                 ts->ts_runq = &runq_pcpu[cpu];
 1248                 single_cpu = 1;
 1249                 CTR3(KTR_RUNQ,
 1250                     "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td,
 1251                     cpu);
 1252         } else if (td->td_flags & TDF_BOUND) {
 1253                 /* Find CPU from bound runq. */
 1254                 KASSERT(SKE_RUNQ_PCPU(ts),
 1255                     ("sched_add: bound td_sched not on cpu runq"));
 1256                 cpu = ts->ts_runq - &runq_pcpu[0];
 1257                 single_cpu = 1;
 1258                 CTR3(KTR_RUNQ,
 1259                     "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td,
 1260                     cpu);
 1261         } else if (ts->ts_flags & TSF_AFFINITY) {
 1262                 /* Find a valid CPU for our cpuset */
 1263                 cpu = sched_pickcpu(td);
 1264                 ts->ts_runq = &runq_pcpu[cpu];
 1265                 single_cpu = 1;
 1266                 CTR3(KTR_RUNQ,
 1267                     "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td,
 1268                     cpu);
 1269         } else {
 1270                 CTR2(KTR_RUNQ,
 1271                     "sched_add: adding td_sched:%p (td:%p) to gbl runq", ts,
 1272                     td);
 1273                 cpu = NOCPU;
 1274                 ts->ts_runq = &runq;
 1275         }
 1276 
 1277         if (single_cpu && (cpu != PCPU_GET(cpuid))) {
 1278                 kick_other_cpu(td->td_priority, cpu);
 1279         } else {
 1280                 if (!single_cpu) {
 1281                         cpumask_t me = PCPU_GET(cpumask);
 1282                         cpumask_t idle = idle_cpus_mask & me;
 1283 
 1284                         if (!idle && ((flags & SRQ_INTR) == 0) &&
 1285                             (idle_cpus_mask & ~(hlt_cpus_mask | me)))
 1286                                 forwarded = forward_wakeup(cpu);
 1287                 }
 1288 
 1289                 if (!forwarded) {
 1290                         if ((flags & SRQ_YIELDING) == 0 && maybe_preempt(td))
 1291                                 return;
 1292                         else
 1293                                 maybe_resched(td);
 1294                 }
 1295         }
 1296 
 1297         if ((td->td_proc->p_flag & P_NOLOAD) == 0)
 1298                 sched_load_add();
 1299         runq_add(ts->ts_runq, td, flags);
 1300         if (cpu != NOCPU)
 1301                 runq_length[cpu]++;
 1302 }
 1303 #else /* SMP */
 1304 {
 1305         struct td_sched *ts;
 1306 
 1307         ts = td->td_sched;
 1308         THREAD_LOCK_ASSERT(td, MA_OWNED);
 1309         KASSERT((td->td_inhibitors == 0),
 1310             ("sched_add: trying to run inhibited thread"));
 1311         KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
 1312             ("sched_add: bad thread state"));
 1313         KASSERT(td->td_flags & TDF_INMEM,
 1314             ("sched_add: thread swapped out"));
 1315         KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add",
 1316             "prio:%d", td->td_priority, KTR_ATTR_LINKED,
 1317             sched_tdname(curthread));
 1318         KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup",
 1319             KTR_ATTR_LINKED, sched_tdname(td));
 1320 
 1321         /*
 1322          * Now that the thread is moving to the run-queue, set the lock
 1323          * to the scheduler's lock.
 1324          */
 1325         if (td->td_lock != &sched_lock) {
 1326                 mtx_lock_spin(&sched_lock);
 1327                 thread_lock_set(td, &sched_lock);
 1328         }
 1329         TD_SET_RUNQ(td);
 1330         CTR2(KTR_RUNQ, "sched_add: adding td_sched:%p (td:%p) to runq", ts, td);
 1331         ts->ts_runq = &runq;
 1332 
 1333         /*
 1334          * If we are yielding (on the way out anyhow) or the thread
  1335          * being saved is US, then don't try to be smart about preemption
  1336          * or kicking off another CPU as it won't help and may hinder.
  1337          * In the YIELDING case, we are about to run whoever is being
  1338          * put in the queue anyhow, and in the OURSELF case, we are
  1339          * putting ourselves on the run queue, which also only happens
 1340          * when we are about to yield.
 1341          */
 1342         if ((flags & SRQ_YIELDING) == 0) {
 1343                 if (maybe_preempt(td))
 1344                         return;
 1345         }
 1346         if ((td->td_proc->p_flag & P_NOLOAD) == 0)
 1347                 sched_load_add();
 1348         runq_add(ts->ts_runq, td, flags);
 1349         maybe_resched(td);
 1350 }
 1351 #endif /* SMP */
 1352 
 1353 void
 1354 sched_rem(struct thread *td)
 1355 {
 1356         struct td_sched *ts;
 1357 
 1358         ts = td->td_sched;
 1359         KASSERT(td->td_flags & TDF_INMEM,
 1360             ("sched_rem: thread swapped out"));
 1361         KASSERT(TD_ON_RUNQ(td),
 1362             ("sched_rem: thread not on run queue"));
 1363         mtx_assert(&sched_lock, MA_OWNED);
 1364         KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq rem",
 1365             "prio:%d", td->td_priority, KTR_ATTR_LINKED,
 1366             sched_tdname(curthread));
 1367 
 1368         if ((td->td_proc->p_flag & P_NOLOAD) == 0)
 1369                 sched_load_rem();
 1370 #ifdef SMP
 1371         if (ts->ts_runq != &runq)
 1372                 runq_length[ts->ts_runq - runq_pcpu]--;
 1373 #endif
 1374         runq_remove(ts->ts_runq, td);
 1375         TD_SET_CAN_RUN(td);
 1376 }
 1377 
 1378 /*
 1379  * Select threads to run.  Note that running threads still consume a
 1380  * slot.
 1381  */
 1382 struct thread *
 1383 sched_choose(void)
 1384 {
 1385         struct thread *td;
 1386         struct runq *rq;
 1387 
 1388         mtx_assert(&sched_lock,  MA_OWNED);
 1389 #ifdef SMP
 1390         struct thread *tdcpu;
 1391 
 1392         rq = &runq;
 1393         td = runq_choose_fuzz(&runq, runq_fuzz);
 1394         tdcpu = runq_choose(&runq_pcpu[PCPU_GET(cpuid)]);
 1395 
 1396         if (td == NULL ||
 1397             (tdcpu != NULL &&
 1398              tdcpu->td_priority < td->td_priority)) {
 1399                 CTR2(KTR_RUNQ, "choosing td %p from pcpu runq %d", tdcpu,
 1400                      PCPU_GET(cpuid));
 1401                 td = tdcpu;
 1402                 rq = &runq_pcpu[PCPU_GET(cpuid)];
 1403         } else {
 1404                 CTR1(KTR_RUNQ, "choosing td_sched %p from main runq", td);
 1405         }
 1406 
 1407 #else
 1408         rq = &runq;
 1409         td = runq_choose(&runq);
 1410 #endif
 1411 
 1412         if (td) {
 1413 #ifdef SMP
 1414                 if (td == tdcpu)
 1415                         runq_length[PCPU_GET(cpuid)]--;
 1416 #endif
 1417                 runq_remove(rq, td);
 1418                 td->td_flags |= TDF_DIDRUN;
 1419 
 1420                 KASSERT(td->td_flags & TDF_INMEM,
 1421                     ("sched_choose: thread swapped out"));
 1422                 return (td);
 1423         }
 1424         return (PCPU_GET(idlethread));
 1425 }
 1426 
 1427 void
 1428 sched_preempt(struct thread *td)
 1429 {
 1430         thread_lock(td);
 1431         if (td->td_critnest > 1)
 1432                 td->td_owepreempt = 1;
 1433         else
 1434                 mi_switch(SW_INVOL | SW_PREEMPT | SWT_PREEMPT, NULL);
 1435         thread_unlock(td);
 1436 }
 1437 
 1438 void
 1439 sched_userret(struct thread *td)
 1440 {
 1441         /*
 1442          * XXX we cheat slightly on the locking here to avoid locking in
 1443          * the usual case.  Setting td_priority here is essentially an
 1444          * incomplete workaround for not setting it properly elsewhere.
 1445          * Now that some interrupt handlers are threads, not setting it
 1446          * properly elsewhere can clobber it in the window between setting
 1447          * it here and returning to user mode, so don't waste time setting
 1448          * it perfectly here.
 1449          */
 1450         KASSERT((td->td_flags & TDF_BORROWING) == 0,
 1451             ("thread with borrowed priority returning to userland"));
 1452         if (td->td_priority != td->td_user_pri) {
 1453                 thread_lock(td);
 1454                 td->td_priority = td->td_user_pri;
 1455                 td->td_base_pri = td->td_user_pri;
 1456                 thread_unlock(td);
 1457         }
 1458 }
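/*
 * Annotation (not part of the original file): TDF_BORROWING marks a thread
 * running at a priority lent to it through priority propagation; the
 * assertion insists any such loan has been repaid before the thread returns
 * to user mode.  The body then discards whatever kernel-level priority the
 * thread picked up while in the kernel by resetting both td_priority and
 * td_base_pri to the user-mode priority td_user_pri.
 */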
 1459 
 1460 void
 1461 sched_bind(struct thread *td, int cpu)
 1462 {
 1463         struct td_sched *ts;
 1464 
 1465         THREAD_LOCK_ASSERT(td, MA_OWNED);
 1466         KASSERT(TD_IS_RUNNING(td),
 1467             ("sched_bind: cannot bind non-running thread"));
 1468 
 1469         ts = td->td_sched;
 1470 
 1471         td->td_flags |= TDF_BOUND;
 1472 #ifdef SMP
 1473         ts->ts_runq = &runq_pcpu[cpu];
 1474         if (PCPU_GET(cpuid) == cpu)
 1475                 return;
 1476 
 1477         mi_switch(SW_VOL, NULL);
 1478 #endif
 1479 }
 1480 
 1481 void
 1482 sched_unbind(struct thread* td)
 1483 {
 1484         THREAD_LOCK_ASSERT(td, MA_OWNED);
 1485         td->td_flags &= ~TDF_BOUND;
 1486 }
 1487 
 1488 int
 1489 sched_is_bound(struct thread *td)
 1490 {
 1491         THREAD_LOCK_ASSERT(td, MA_OWNED);
 1492         return (td->td_flags & TDF_BOUND);
 1493 }
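/*
 * Annotation (not part of the original file): a hypothetical usage sketch
 * of the binding interface.  Kernel code that must execute on a particular
 * CPU typically brackets the work like this (everything other than the
 * sched_*() and thread_*() calls is illustrative only):
 *
 *     thread_lock(curthread);
 *     sched_bind(curthread, cpu);       -- may switch us onto 'cpu'
 *     thread_unlock(curthread);
 *
 *     ...do the per-CPU work...
 *
 *     thread_lock(curthread);
 *     sched_unbind(curthread);
 *     thread_unlock(curthread);
 *
 * sched_bind() leaves TDF_BOUND set and, on SMP, forces a context switch
 * if the caller is not already running on the target CPU.
 */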
 1494 
 1495 void
 1496 sched_relinquish(struct thread *td)
 1497 {
 1498         thread_lock(td);
 1499         mi_switch(SW_VOL | SWT_RELINQUISH, NULL);
 1500         thread_unlock(td);
 1501 }
 1502 
 1503 int
 1504 sched_load(void)
 1505 {
 1506         return (sched_tdcnt);
 1507 }
 1508 
 1509 int
 1510 sched_sizeof_proc(void)
 1511 {
 1512         return (sizeof(struct proc));
 1513 }
 1514 
 1515 int
 1516 sched_sizeof_thread(void)
 1517 {
 1518         return (sizeof(struct thread) + sizeof(struct td_sched));
 1519 }
 1520 
 1521 fixpt_t
 1522 sched_pctcpu(struct thread *td)
 1523 {
 1524         struct td_sched *ts;
 1525 
 1526         ts = td->td_sched;
 1527         return (ts->ts_pctcpu);
 1528 }
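/*
 * Annotation (not part of the original file): ts_pctcpu is a fixed-point
 * fraction of one CPU, scaled by FSCALE (1 << FSHIFT, from <sys/param.h>).
 * A consumer wanting an integer percentage would do roughly
 *
 *     fixpt_t pct = sched_pctcpu(td);
 *     u_int percent = ((u_int64_t)pct * 100 + FSCALE / 2) >> FSHIFT;
 *
 * which is essentially the conversion utilities such as ps(1) and top(1)
 * apply to the value exported through the process-info sysctls.
 */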
 1529 
 1530 void
 1531 sched_tick(void)
 1532 {
 1533 }
 1534 
 1535 /*
 1536  * The actual idle process.
 1537  */
 1538 void
 1539 sched_idletd(void *dummy)
 1540 {
 1541 
 1542         for (;;) {
 1543                 mtx_assert(&Giant, MA_NOTOWNED);
 1544 
 1545                 while (sched_runnable() == 0)
 1546                         cpu_idle(0);
 1547 
 1548                 mtx_lock_spin(&sched_lock);
 1549                 mi_switch(SW_VOL | SWT_IDLE, NULL);
 1550                 mtx_unlock_spin(&sched_lock);
 1551         }
 1552 }
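/*
 * Annotation (not part of the original file): the idle thread polls
 * sched_runnable() and lets the machine-dependent cpu_idle() halt or
 * otherwise quiesce the CPU while nothing is runnable (the argument is a
 * "busy" hint whose exact interpretation is machine-dependent).  Once work
 * appears it takes sched_lock and performs a voluntary switch so that
 * sched_choose() can pick the newly runnable thread.
 */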
 1553 
 1554 /*
 1555  * A CPU is entering for the first time or a thread is exiting.
 1556  */
 1557 void
 1558 sched_throw(struct thread *td)
 1559 {
 1560         /*
 1561          * Correct spinlock nesting.  The idle thread context that we are
 1562          * borrowing was created so that it would start out with a single
 1563          * spin lock (sched_lock) held in fork_trampoline().  Since we've
 1564          * explicitly acquired locks in this function, the nesting count
 1565          * is now 2 rather than 1.  Since we are nested, calling
 1566          * spinlock_exit() will simply adjust the counts without allowing
 1567          * spin lock using code to interrupt us.
 1568          */
 1569         if (td == NULL) {
 1570                 mtx_lock_spin(&sched_lock);
 1571                 spinlock_exit();
 1572         } else {
 1573                 lock_profile_release_lock(&sched_lock.lock_object);
 1574                 MPASS(td->td_lock == &sched_lock);
 1575         }
 1576         mtx_assert(&sched_lock, MA_OWNED);
 1577         KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
 1578         PCPU_SET(switchtime, cpu_ticks());
 1579         PCPU_SET(switchticks, ticks);
 1580         cpu_throw(td, choosethread());  /* doesn't return */
 1581 }
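/*
 * Annotation (not part of the original file): the td == NULL branch is the
 * "CPU entering for the first time" case from the comment above -- there is
 * no old thread, so sched_lock is acquired here and the extra spin-lock
 * nesting inherited from the borrowed idle context is dropped.  The
 * td != NULL branch is an exiting thread that already holds its thread
 * lock, which the MPASS confirms is sched_lock itself under this scheduler.
 */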
 1582 
 1583 void
 1584 sched_fork_exit(struct thread *td)
 1585 {
 1586 
 1587         /*
 1588          * Finish setting up thread glue so that it begins execution in a
 1589          * non-nested critical section with sched_lock held but not recursed.
 1590          */
 1591         td->td_oncpu = PCPU_GET(cpuid);
 1592         sched_lock.mtx_lock = (uintptr_t)td;
 1593         lock_profile_obtain_lock_success(&sched_lock.lock_object,
 1594             0, 0, __FILE__, __LINE__);
 1595         THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
 1596 }
 1597 
 1598 char *
 1599 sched_tdname(struct thread *td)
 1600 {
 1601 #ifdef KTR
 1602         struct td_sched *ts;
 1603 
 1604         ts = td->td_sched;
 1605         if (ts->ts_name[0] == '\0')
 1606                 snprintf(ts->ts_name, sizeof(ts->ts_name),
 1607                     "%s tid %d", td->td_name, td->td_tid);
 1608         return (ts->ts_name);
 1609 #else   
 1610         return (td->td_name);
 1611 #endif
 1612 }
 1613 
 1614 void
 1615 sched_affinity(struct thread *td)
 1616 {
 1617 #ifdef SMP
 1618         struct td_sched *ts;
 1619         int cpu;
 1620 
 1621         THREAD_LOCK_ASSERT(td, MA_OWNED);       
 1622 
 1623         /*
 1624          * Set the TSF_AFFINITY flag if there is at least one CPU this
 1625          * thread can't run on.
 1626          */
 1627         ts = td->td_sched;
 1628         ts->ts_flags &= ~TSF_AFFINITY;
 1629         for (cpu = 0; cpu <= mp_maxid; cpu++) {
 1630                 if (CPU_ABSENT(cpu))
 1631                         continue;
 1632                 if (!THREAD_CAN_SCHED(td, cpu)) {
 1633                         ts->ts_flags |= TSF_AFFINITY;
 1634                         break;
 1635                 }
 1636         }
 1637 
 1638         /*
 1639          * If this thread can run on all CPUs, nothing else to do.
 1640          */
 1641         if (!(ts->ts_flags & TSF_AFFINITY))
 1642                 return;
 1643 
 1644         /* Pinned threads and bound threads should be left alone. */
 1645         if (td->td_pinned != 0 || td->td_flags & TDF_BOUND)
 1646                 return;
 1647 
 1648         switch (td->td_state) {
 1649         case TDS_RUNQ:
 1650                 /*
 1651                  * If we are on a per-CPU runqueue that is in the set,
 1652                  * then nothing needs to be done.
 1653                  */
 1654                 if (ts->ts_runq != &runq &&
 1655                     THREAD_CAN_SCHED(td, ts->ts_runq - runq_pcpu))
 1656                         return;
 1657 
 1658                 /* Put this thread on a valid per-CPU runqueue. */
 1659                 sched_rem(td);
 1660                 sched_add(td, SRQ_BORING);
 1661                 break;
 1662         case TDS_RUNNING:
 1663                 /*
 1664                  * See if our current CPU is in the set.  If not, force a
 1665                  * context switch.
 1666                  */
 1667                 if (THREAD_CAN_SCHED(td, td->td_oncpu))
 1668                         return;
 1669 
 1670                 td->td_flags |= TDF_NEEDRESCHED;
 1671                 if (td != curthread)
 1672                         ipi_selected(1 << cpu, IPI_AST);
 1673                 break;
 1674         default:
 1675                 break;
 1676         }
 1677 #endif
 1678 }
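/*
 * Annotation (not part of the original file): THREAD_CAN_SCHED(), defined
 * earlier in this file, tests whether a CPU is present in the thread's
 * cpuset mask.  One caveat in the TDS_RUNNING case above: at that point
 * 'cpu' still holds the index left behind by the affinity scan -- the first
 * present CPU the thread is NOT allowed to run on -- so the AST IPI was
 * presumably meant to target td->td_oncpu, the CPU the thread is actually
 * running on.
 */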
