
FreeBSD/Linux Kernel Cross Reference
sys/kern/sched_4bsd.c


    1 /*-
    2  * Copyright (c) 1982, 1986, 1990, 1991, 1993
    3  *      The Regents of the University of California.  All rights reserved.
    4  * (c) UNIX System Laboratories, Inc.
    5  * All or some portions of this file are derived from material licensed
    6  * to the University of California by American Telephone and Telegraph
    7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
    8  * the permission of UNIX System Laboratories, Inc.
    9  *
   10  * Redistribution and use in source and binary forms, with or without
   11  * modification, are permitted provided that the following conditions
   12  * are met:
   13  * 1. Redistributions of source code must retain the above copyright
   14  *    notice, this list of conditions and the following disclaimer.
   15  * 2. Redistributions in binary form must reproduce the above copyright
   16  *    notice, this list of conditions and the following disclaimer in the
   17  *    documentation and/or other materials provided with the distribution.
   18  * 4. Neither the name of the University nor the names of its contributors
   19  *    may be used to endorse or promote products derived from this software
   20  *    without specific prior written permission.
   21  *
   22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   32  * SUCH DAMAGE.
   33  */
   34 
   35 #include <sys/cdefs.h>
   36 __FBSDID("$FreeBSD: releng/6.2/sys/kern/sched_4bsd.c 164286 2006-11-14 20:42:41Z cvs2svn $");
   37 
   38 #include "opt_hwpmc_hooks.h"
   39 
   40 #define kse td_sched
   41 
   42 #include <sys/param.h>
   43 #include <sys/systm.h>
   44 #include <sys/kernel.h>
   45 #include <sys/ktr.h>
   46 #include <sys/lock.h>
   47 #include <sys/kthread.h>
   48 #include <sys/mutex.h>
   49 #include <sys/proc.h>
   50 #include <sys/resourcevar.h>
   51 #include <sys/sched.h>
   52 #include <sys/smp.h>
   53 #include <sys/sysctl.h>
   54 #include <sys/sx.h>
   55 #include <sys/turnstile.h>
   56 #include <machine/smp.h>
   57 
   58 #ifdef HWPMC_HOOKS
   59 #include <sys/pmckern.h>
   60 #endif
   61 
   62 /*
   63  * INVERSE_ESTCPU_WEIGHT is only suitable for statclock() frequencies in
   64  * the range 100-256 Hz (approximately).
   65  */
   66 #define ESTCPULIM(e) \
   67     min((e), INVERSE_ESTCPU_WEIGHT * (NICE_WEIGHT * (PRIO_MAX - PRIO_MIN) - \
   68     RQ_PPQ) + INVERSE_ESTCPU_WEIGHT - 1)
   69 #ifdef SMP
   70 #define INVERSE_ESTCPU_WEIGHT   (8 * smp_cpus)
   71 #else
   72 #define INVERSE_ESTCPU_WEIGHT   8       /* 1 / (priorities per estcpu level). */
   73 #endif
   74 #define NICE_WEIGHT             1       /* Priorities per nice level. */
   75 
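/*
 * Editorial sketch (not part of the original file): how the constants
 * above map estcpu to a user priority, mirroring resetpriority() below.
 * With the UP values (INVERSE_ESTCPU_WEIGHT == 8, NICE_WEIGHT == 1) every
 * 8 points of kg_estcpu cost one priority level and, assuming RQ_PPQ == 4,
 * ESTCPULIM clamps kg_estcpu at 8 * (1 * 40 - 4) + 8 - 1 == 295, so the
 * result can never leave the timesharing range.
 */
#if 0	/* illustrative only */
static u_char
example_user_pri(unsigned int estcpu, int nice)
{
	unsigned int pri;

	estcpu = ESTCPULIM(estcpu);	/* clamp, as sched_clock() does */
	pri = PUSER + estcpu / INVERSE_ESTCPU_WEIGHT +
	    NICE_WEIGHT * (nice - PRIO_MIN);
	return (min(max(pri, PRI_MIN_TIMESHARE), PRI_MAX_TIMESHARE));
}
#endif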
   76 /*
   77  * The schedulable entity that can be given a context to run.
   78  * A process may have several of these. Probably one per processor
    79  * but possibly a few more. In this universe they are grouped
   80  * with a KSEG that contains the priority and niceness
   81  * for the group.
   82  */
   83 struct kse {
   84         TAILQ_ENTRY(kse) ke_procq;      /* (j/z) Run queue. */
   85         struct thread   *ke_thread;     /* (*) Active associated thread. */
   86         fixpt_t         ke_pctcpu;      /* (j) %cpu during p_swtime. */
   87         char            ke_rqindex;     /* (j) Run queue index. */
   88         enum {
   89                 KES_THREAD = 0x0,       /* slaved to thread state */
   90                 KES_ONRUNQ
   91         } ke_state;                     /* (j) KSE status. */
   92         int             ke_cpticks;     /* (j) Ticks of cpu time. */
   93         struct runq     *ke_runq;       /* runq the kse is currently on */
   94 };
   95 
   96 #define ke_proc         ke_thread->td_proc
   97 #define ke_ksegrp       ke_thread->td_ksegrp
   98 
   99 #define td_kse td_sched
  100 
  101 /* flags kept in td_flags */
  102 #define TDF_DIDRUN      TDF_SCHED0      /* KSE actually ran. */
  103 #define TDF_EXIT        TDF_SCHED1      /* KSE is being killed. */
  104 #define TDF_BOUND       TDF_SCHED2
  105 
  106 #define ke_flags        ke_thread->td_flags
  107 #define KEF_DIDRUN      TDF_DIDRUN /* KSE actually ran. */
  108 #define KEF_EXIT        TDF_EXIT /* KSE is being killed. */
  109 #define KEF_BOUND       TDF_BOUND /* stuck to one CPU */
  110 
  111 #define SKE_RUNQ_PCPU(ke)                                               \
  112     ((ke)->ke_runq != 0 && (ke)->ke_runq != &runq)
  113 
  114 struct kg_sched {
  115         struct thread   *skg_last_assigned; /* (j) Last thread assigned to */
  116                                            /* the system scheduler. */
   117         int     skg_avail_opennings;    /* (j) Num unfilled slots in group. */
  118         int     skg_concurrency;        /* (j) Num KSEs requested in group. */
  119 };
  120 #define kg_last_assigned        kg_sched->skg_last_assigned
  121 #define kg_avail_opennings      kg_sched->skg_avail_opennings
  122 #define kg_concurrency          kg_sched->skg_concurrency
  123 
  124 #define SLOT_RELEASE(kg)                                                \
  125 do {                                                                    \
  126         kg->kg_avail_opennings++;                                       \
  127         CTR3(KTR_RUNQ, "kg %p(%d) Slot released (->%d)",                \
  128         kg,                                                             \
  129         kg->kg_concurrency,                                             \
  130          kg->kg_avail_opennings);                                       \
  131 /*      KASSERT((kg->kg_avail_opennings <= kg->kg_concurrency),         \
  132             ("slots out of whack"));*/                                  \
  133 } while (0)
  134 
  135 #define SLOT_USE(kg)                                                    \
  136 do {                                                                    \
  137         kg->kg_avail_opennings--;                                       \
  138         CTR3(KTR_RUNQ, "kg %p(%d) Slot used (->%d)",                    \
  139         kg,                                                             \
  140         kg->kg_concurrency,                                             \
  141          kg->kg_avail_opennings);                                       \
  142 /*      KASSERT((kg->kg_avail_opennings >= 0),                          \
  143             ("slots out of whack"));*/                                  \
  144 } while (0)
  145 
  146 /*
  147  * KSE_CAN_MIGRATE macro returns true if the kse can migrate between
  148  * cpus.
  149  */
  150 #define KSE_CAN_MIGRATE(ke)                                             \
  151     ((ke)->ke_thread->td_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0)
  152 
  153 static struct kse kse0;
  154 static struct kg_sched kg_sched0;
  155 
  156 static int      sched_tdcnt;    /* Total runnable threads in the system. */
  157 static int      sched_quantum;  /* Roundrobin scheduling quantum in ticks. */
  158 #define SCHED_QUANTUM   (hz / 10)       /* Default sched quantum */
  159 
  160 static struct callout roundrobin_callout;
  161 
  162 static void     slot_fill(struct ksegrp *kg);
  163 static struct kse *sched_choose(void);          /* XXX Should be thread * */
  164 
  165 static void     setup_runqs(void);
  166 static void     roundrobin(void *arg);
  167 static void     schedcpu(void);
  168 static void     schedcpu_thread(void);
  169 static void     sched_priority(struct thread *td, u_char prio);
  170 static void     sched_setup(void *dummy);
  171 static void     maybe_resched(struct thread *td);
  172 static void     updatepri(struct ksegrp *kg);
  173 static void     resetpriority(struct ksegrp *kg);
  174 static void     resetpriority_thread(struct thread *td, struct ksegrp *kg);
  175 #ifdef SMP
  176 static int      forward_wakeup(int  cpunum);
  177 #endif
  178 
  179 static struct kproc_desc sched_kp = {
  180         "schedcpu",
  181         schedcpu_thread,
  182         NULL
  183 };
  184 SYSINIT(schedcpu, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, kproc_start, &sched_kp)
  185 SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)
  186 
  187 /*
  188  * Global run queue.
  189  */
  190 static struct runq runq;
  191 
  192 #ifdef SMP
  193 /*
  194  * Per-CPU run queues
  195  */
  196 static struct runq runq_pcpu[MAXCPU];
  197 #endif
  198 
  199 static void
  200 setup_runqs(void)
  201 {
  202 #ifdef SMP
  203         int i;
  204 
  205         for (i = 0; i < MAXCPU; ++i)
  206                 runq_init(&runq_pcpu[i]);
  207 #endif
  208 
  209         runq_init(&runq);
  210 }
  211 
  212 static int
  213 sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
  214 {
  215         int error, new_val;
  216 
  217         new_val = sched_quantum * tick;
  218         error = sysctl_handle_int(oidp, &new_val, 0, req);
  219         if (error != 0 || req->newptr == NULL)
  220                 return (error);
  221         if (new_val < tick)
  222                 return (EINVAL);
  223         sched_quantum = new_val / tick;
  224         hogticks = 2 * sched_quantum;
  225         return (0);
  226 }
  227 
  228 SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RD, 0, "Scheduler");
  229 
  230 SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "4BSD", 0,
  231     "Scheduler name");
  232 
  233 SYSCTL_PROC(_kern_sched, OID_AUTO, quantum, CTLTYPE_INT | CTLFLAG_RW,
  234     0, sizeof sched_quantum, sysctl_kern_quantum, "I",
  235     "Roundrobin scheduling quantum in microseconds");
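/*
 * Editorial note (not part of the original file): sched_quantum is kept
 * in ticks, while this sysctl trades in microseconds, so the handler
 * above multiplies by `tick' (microseconds per tick) on read and divides
 * on write.  For example, assuming hz == 1000 (so tick == 1000):
 *
 *	default quantum = SCHED_QUANTUM = hz / 10 = 100 ticks
 *	kern.sched.quantum reads as 100 * 1000 = 100000 us, i.e. the
 *	100 ms round-robin interval mentioned at roundrobin() below
 */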
  236 
  237 #ifdef SMP
  238 /* Enable forwarding of wakeups to all other cpus */
  239 SYSCTL_NODE(_kern_sched, OID_AUTO, ipiwakeup, CTLFLAG_RD, NULL, "Kernel SMP");
  240 
  241 static int forward_wakeup_enabled = 1;
  242 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, enabled, CTLFLAG_RW,
  243            &forward_wakeup_enabled, 0,
  244            "Forwarding of wakeup to idle CPUs");
  245 
  246 static int forward_wakeups_requested = 0;
  247 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, requested, CTLFLAG_RD,
  248            &forward_wakeups_requested, 0,
   249            "Requests for forwarding of wakeup to idle CPUs");
  250 
  251 static int forward_wakeups_delivered = 0;
  252 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, delivered, CTLFLAG_RD,
  253            &forward_wakeups_delivered, 0,
   254            "Completed forwarding of wakeup to idle CPUs");
  255 
  256 static int forward_wakeup_use_mask = 1;
  257 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, usemask, CTLFLAG_RW,
  258            &forward_wakeup_use_mask, 0,
  259            "Use the mask of idle cpus");
  260 
  261 static int forward_wakeup_use_loop = 0;
  262 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, useloop, CTLFLAG_RW,
  263            &forward_wakeup_use_loop, 0,
  264            "Use a loop to find idle cpus");
  265 
  266 static int forward_wakeup_use_single = 0;
  267 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, onecpu, CTLFLAG_RW,
  268            &forward_wakeup_use_single, 0,
  269            "Only signal one idle cpu");
  270 
  271 static int forward_wakeup_use_htt = 0;
  272 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, htt2, CTLFLAG_RW,
  273            &forward_wakeup_use_htt, 0,
  274            "account for htt");
  275 
  276 #endif
  277 static int sched_followon = 0;
  278 SYSCTL_INT(_kern_sched, OID_AUTO, followon, CTLFLAG_RW,
  279            &sched_followon, 0,
  280            "allow threads to share a quantum");
  281 
  282 static int sched_pfollowons = 0;
  283 SYSCTL_INT(_kern_sched, OID_AUTO, pfollowons, CTLFLAG_RD,
  284            &sched_pfollowons, 0,
  285            "number of followons done to a different ksegrp");
  286 
  287 static int sched_kgfollowons = 0;
  288 SYSCTL_INT(_kern_sched, OID_AUTO, kgfollowons, CTLFLAG_RD,
  289            &sched_kgfollowons, 0,
  290            "number of followons done in a ksegrp");
  291 
  292 static __inline void
  293 sched_load_add(void)
  294 {
  295         sched_tdcnt++;
  296         CTR1(KTR_SCHED, "global load: %d", sched_tdcnt);
  297 }
  298 
  299 static __inline void
  300 sched_load_rem(void)
  301 {
  302         sched_tdcnt--;
  303         CTR1(KTR_SCHED, "global load: %d", sched_tdcnt);
  304 }
  305 /*
  306  * Arrange to reschedule if necessary, taking the priorities and
  307  * schedulers into account.
  308  */
  309 static void
  310 maybe_resched(struct thread *td)
  311 {
  312 
  313         mtx_assert(&sched_lock, MA_OWNED);
  314         if (td->td_priority < curthread->td_priority)
  315                 curthread->td_flags |= TDF_NEEDRESCHED;
  316 }
  317 
  318 /*
  319  * Force switch among equal priority processes every 100ms.
  320  * We don't actually need to force a context switch of the current process.
  321  * The act of firing the event triggers a context switch to softclock() and
  322  * then switching back out again which is equivalent to a preemption, thus
  323  * no further work is needed on the local CPU.
  324  */
  325 /* ARGSUSED */
  326 static void
  327 roundrobin(void *arg)
  328 {
  329 
  330 #ifdef SMP
  331         mtx_lock_spin(&sched_lock);
  332         forward_roundrobin();
  333         mtx_unlock_spin(&sched_lock);
  334 #endif
  335 
  336         callout_reset(&roundrobin_callout, sched_quantum, roundrobin, NULL);
  337 }
  338 
  339 /*
  340  * Constants for digital decay and forget:
  341  *      90% of (kg_estcpu) usage in 5 * loadav time
  342  *      95% of (ke_pctcpu) usage in 60 seconds (load insensitive)
  343  *          Note that, as ps(1) mentions, this can let percentages
  344  *          total over 100% (I've seen 137.9% for 3 processes).
  345  *
   346  * Note that sched_clock() updates kg_estcpu and ke_cpticks asynchronously.
  347  *
  348  * We wish to decay away 90% of kg_estcpu in (5 * loadavg) seconds.
  349  * That is, the system wants to compute a value of decay such
  350  * that the following for loop:
  351  *      for (i = 0; i < (5 * loadavg); i++)
  352  *              kg_estcpu *= decay;
  353  * will compute
  354  *      kg_estcpu *= 0.1;
  355  * for all values of loadavg:
  356  *
  357  * Mathematically this loop can be expressed by saying:
  358  *      decay ** (5 * loadavg) ~= .1
  359  *
  360  * The system computes decay as:
  361  *      decay = (2 * loadavg) / (2 * loadavg + 1)
  362  *
  363  * We wish to prove that the system's computation of decay
  364  * will always fulfill the equation:
  365  *      decay ** (5 * loadavg) ~= .1
  366  *
  367  * If we compute b as:
  368  *      b = 2 * loadavg
  369  * then
  370  *      decay = b / (b + 1)
  371  *
  372  * We now need to prove two things:
  373  *      1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
  374  *      2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
  375  *
  376  * Facts:
  377  *         For x close to zero, exp(x) =~ 1 + x, since
  378  *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
  379  *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
  380  *         For x close to zero, ln(1+x) =~ x, since
  381  *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
  382  *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
  383  *         ln(.1) =~ -2.30
  384  *
  385  * Proof of (1):
  386  *    Solve (factor)**(power) =~ .1 given power (5*loadav):
  387  *      solving for factor,
  388  *      ln(factor) =~ (-2.30/5*loadav), or
  389  *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
  390  *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
  391  *
  392  * Proof of (2):
  393  *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
  394  *      solving for power,
  395  *      power*ln(b/(b+1)) =~ -2.30, or
  396  *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
  397  *
  398  * Actual power values for the implemented algorithm are as follows:
  399  *      loadav: 1       2       3       4
  400  *      power:  5.68    10.32   14.94   19.55
  401  */
  402 
  403 /* calculations for digital decay to forget 90% of usage in 5*loadav sec */
  404 #define loadfactor(loadav)      (2 * (loadav))
  405 #define decay_cpu(loadfac, cpu) (((loadfac) * (cpu)) / ((loadfac) + FSCALE))
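/*
 * Editorial sketch (not part of the original file): a worked instance of
 * the decay derivation above.  averunnable.ldavg[] is FSCALE-scaled, so
 * for a load average of 1.0 loadfactor() yields 2 * FSCALE and
 * decay_cpu() multiplies by (2 * FSCALE) / (3 * FSCALE) == 2/3 per
 * second; (2/3)^5.68 ~= 0.1, matching the power table above.
 */
#if 0	/* illustrative only; loadav is FSCALE-scaled, e.g. FSCALE for 1.0 */
static unsigned int
example_decay(unsigned int estcpu, fixpt_t loadav)
{
	fixpt_t loadfac = loadfactor(loadav);	/* 2 * loadav */
	int i;

	/* With loadav == 1.0, six passes leave roughly 10% of estcpu. */
	for (i = 0; i < 6; i++)
		estcpu = decay_cpu(loadfac, estcpu);
	return (estcpu);
}
#endif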
  406 
  407 /* decay 95% of `ke_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
  408 static fixpt_t  ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
  409 SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
  410 
  411 /*
  412  * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
  413  * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
  414  * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
  415  *
  416  * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
  417  *      1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
  418  *
  419  * If you don't want to bother with the faster/more-accurate formula, you
  420  * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
  421  * (more general) method of calculating the %age of CPU used by a process.
  422  */
  423 #define CCPU_SHIFT      11
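/*
 * Editorial sketch (not part of the original file): because ccpu is
 * exp(-1/20) in FSCALE fixed point, each once-per-second schedcpu() pass
 * scales ke_pctcpu by exp(-1/20); after 60 passes exp(-60/20) == exp(-3)
 * ~= 0.05 of the original weight survives, which is the "decay 95% ...
 * in 60 seconds" rule stated above.
 */
#if 0	/* illustrative only */
static fixpt_t
example_pctcpu_decay(fixpt_t pctcpu, int seconds)
{
	while (seconds-- > 0)
		pctcpu = (pctcpu * ccpu) >> FSHIFT;	/* as in schedcpu() */
	return (pctcpu);	/* ~5% of the input after 60 seconds */
}
#endif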
  424 
  425 /*
  426  * Recompute process priorities, every hz ticks.
  427  * MP-safe, called without the Giant mutex.
  428  */
  429 /* ARGSUSED */
  430 static void
  431 schedcpu(void)
  432 {
  433         register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
  434         struct thread *td;
  435         struct proc *p;
  436         struct kse *ke;
  437         struct ksegrp *kg;
  438         int awake, realstathz;
  439 
  440         realstathz = stathz ? stathz : hz;
  441         sx_slock(&allproc_lock);
  442         FOREACH_PROC_IN_SYSTEM(p) {
  443                 /*
  444                  * Prevent state changes and protect run queue.
  445                  */
  446                 mtx_lock_spin(&sched_lock);
  447                 /*
  448                  * Increment time in/out of memory.  We ignore overflow; with
  449                  * 16-bit int's (remember them?) overflow takes 45 days.
  450                  */
  451                 p->p_swtime++;
  452                 FOREACH_KSEGRP_IN_PROC(p, kg) { 
  453                         awake = 0;
  454                         FOREACH_THREAD_IN_GROUP(kg, td) {
  455                                 ke = td->td_kse;
  456                                 /*
  457                                  * Increment sleep time (if sleeping).  We
  458                                  * ignore overflow, as above.
  459                                  */
  460                                 /*
  461                                  * The kse slptimes are not touched in wakeup
  462                                  * because the thread may not HAVE a KSE.
  463                                  */
  464                                 if (ke->ke_state == KES_ONRUNQ) {
  465                                         awake = 1;
  466                                         ke->ke_flags &= ~KEF_DIDRUN;
  467                                 } else if ((ke->ke_state == KES_THREAD) &&
  468                                     (TD_IS_RUNNING(td))) {
  469                                         awake = 1;
  470                                         /* Do not clear KEF_DIDRUN */
  471                                 } else if (ke->ke_flags & KEF_DIDRUN) {
  472                                         awake = 1;
  473                                         ke->ke_flags &= ~KEF_DIDRUN;
  474                                 }
  475 
  476                                 /*
  477                                  * ke_pctcpu is only for ps and ttyinfo().
  478                                  * Do it per kse, and add them up at the end?
  479                                  * XXXKSE
  480                                  */
  481                                 ke->ke_pctcpu = (ke->ke_pctcpu * ccpu) >>
  482                                     FSHIFT;
  483                                 /*
  484                                  * If the kse has been idle the entire second,
  485                                  * stop recalculating its priority until
  486                                  * it wakes up.
  487                                  */
  488                                 if (ke->ke_cpticks == 0)
  489                                         continue;
  490 #if     (FSHIFT >= CCPU_SHIFT)
  491                                 ke->ke_pctcpu += (realstathz == 100)
  492                                     ? ((fixpt_t) ke->ke_cpticks) <<
  493                                     (FSHIFT - CCPU_SHIFT) :
  494                                     100 * (((fixpt_t) ke->ke_cpticks)
  495                                     << (FSHIFT - CCPU_SHIFT)) / realstathz;
  496 #else
  497                                 ke->ke_pctcpu += ((FSCALE - ccpu) *
  498                                     (ke->ke_cpticks *
  499                                     FSCALE / realstathz)) >> FSHIFT;
  500 #endif
  501                                 ke->ke_cpticks = 0;
  502                         } /* end of kse loop */
  503                         /* 
  504                          * If there are ANY running threads in this KSEGRP,
  505                          * then don't count it as sleeping.
  506                          */
  507                         if (awake) {
  508                                 if (kg->kg_slptime > 1) {
  509                                         /*
  510                                          * In an ideal world, this should not
  511                                          * happen, because whoever woke us
  512                                          * up from the long sleep should have
  513                                          * unwound the slptime and reset our
  514                                          * priority before we run at the stale
  515                                          * priority.  Should KASSERT at some
  516                                          * point when all the cases are fixed.
  517                                          */
  518                                         updatepri(kg);
  519                                 }
  520                                 kg->kg_slptime = 0;
  521                         } else
  522                                 kg->kg_slptime++;
  523                         if (kg->kg_slptime > 1)
  524                                 continue;
  525                         kg->kg_estcpu = decay_cpu(loadfac, kg->kg_estcpu);
  526                         resetpriority(kg);
  527                         FOREACH_THREAD_IN_GROUP(kg, td) {
  528                                 resetpriority_thread(td, kg);
  529                         }
  530                 } /* end of ksegrp loop */
  531                 mtx_unlock_spin(&sched_lock);
  532         } /* end of process loop */
  533         sx_sunlock(&allproc_lock);
  534 }
  535 
  536 /*
  537  * Main loop for a kthread that executes schedcpu once a second.
  538  */
  539 static void
  540 schedcpu_thread(void)
  541 {
  542         int nowake;
  543 
  544         for (;;) {
  545                 schedcpu();
  546                 tsleep(&nowake, 0, "-", hz);
  547         }
  548 }
  549 
  550 /*
  551  * Recalculate the priority of a process after it has slept for a while.
  552  * For all load averages >= 1 and max kg_estcpu of 255, sleeping for at
  553  * least six times the loadfactor will decay kg_estcpu to zero.
  554  */
  555 static void
  556 updatepri(struct ksegrp *kg)
  557 {
  558         register fixpt_t loadfac;
  559         register unsigned int newcpu;
  560 
  561         loadfac = loadfactor(averunnable.ldavg[0]);
  562         if (kg->kg_slptime > 5 * loadfac)
  563                 kg->kg_estcpu = 0;
  564         else {
  565                 newcpu = kg->kg_estcpu;
  566                 kg->kg_slptime--;       /* was incremented in schedcpu() */
  567                 while (newcpu && --kg->kg_slptime)
  568                         newcpu = decay_cpu(loadfac, newcpu);
  569                 kg->kg_estcpu = newcpu;
  570         }
  571 }
  572 
  573 /*
  574  * Compute the priority of a process when running in user mode.
  575  * Arrange to reschedule if the resulting priority is better
  576  * than that of the current process.
  577  */
  578 static void
  579 resetpriority(struct ksegrp *kg)
  580 {
  581         register unsigned int newpriority;
  582 
  583         if (kg->kg_pri_class == PRI_TIMESHARE) {
  584                 newpriority = PUSER + kg->kg_estcpu / INVERSE_ESTCPU_WEIGHT +
  585                     NICE_WEIGHT * (kg->kg_proc->p_nice - PRIO_MIN);
  586                 newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
  587                     PRI_MAX_TIMESHARE);
  588                 kg->kg_user_pri = newpriority;
  589         }
  590 }
  591 
  592 /*
  593  * Update the thread's priority when the associated ksegroup's user
  594  * priority changes.
  595  */
  596 static void
  597 resetpriority_thread(struct thread *td, struct ksegrp *kg)
  598 {
  599 
  600         /* Only change threads with a time sharing user priority. */
  601         if (td->td_priority < PRI_MIN_TIMESHARE ||
  602             td->td_priority > PRI_MAX_TIMESHARE)
  603                 return;
  604 
  605         /* XXX the whole needresched thing is broken, but not silly. */
  606         maybe_resched(td);
  607 
  608         sched_prio(td, kg->kg_user_pri);
  609 }
  610 
  611 /* ARGSUSED */
  612 static void
  613 sched_setup(void *dummy)
  614 {
  615         setup_runqs();
  616 
  617         if (sched_quantum == 0)
  618                 sched_quantum = SCHED_QUANTUM;
  619         hogticks = 2 * sched_quantum;
  620 
  621         callout_init(&roundrobin_callout, CALLOUT_MPSAFE);
  622 
   623         /* Kick off timeout driven events by calling it for the first time. */
  624         roundrobin(NULL);
  625 
  626         /* Account for thread0. */
  627         sched_load_add();
  628 }
  629 
  630 /* External interfaces start here */
  631 /*
  632  * Very early in the boot some setup of scheduler-specific
  633  * parts of proc0 and of some scheduler resources needs to be done.
  634  * Called from:
  635  *  proc0_init()
  636  */
  637 void
  638 schedinit(void)
  639 {
  640         /*
  641          * Set up the scheduler specific parts of proc0.
  642          */
  643         proc0.p_sched = NULL; /* XXX */
  644         ksegrp0.kg_sched = &kg_sched0;
  645         thread0.td_sched = &kse0;
  646         kse0.ke_thread = &thread0;
  647         kse0.ke_state = KES_THREAD;
  648         kg_sched0.skg_concurrency = 1;
  649         kg_sched0.skg_avail_opennings = 0; /* we are already running */
  650 }
  651 
  652 int
  653 sched_runnable(void)
  654 {
  655 #ifdef SMP
  656         return runq_check(&runq) + runq_check(&runq_pcpu[PCPU_GET(cpuid)]);
  657 #else
  658         return runq_check(&runq);
  659 #endif
  660 }
  661 
  662 int 
  663 sched_rr_interval(void)
  664 {
  665         if (sched_quantum == 0)
  666                 sched_quantum = SCHED_QUANTUM;
  667         return (sched_quantum);
  668 }
  669 
  670 /*
  671  * We adjust the priority of the current process.  The priority of
  672  * a process gets worse as it accumulates CPU time.  The cpu usage
  673  * estimator (kg_estcpu) is increased here.  resetpriority() will
  674  * compute a different priority each time kg_estcpu increases by
  675  * INVERSE_ESTCPU_WEIGHT
  676  * (until MAXPRI is reached).  The cpu usage estimator ramps up
  677  * quite quickly when the process is running (linearly), and decays
  678  * away exponentially, at a rate which is proportionally slower when
  679  * the system is busy.  The basic principle is that the system will
  680  * 90% forget that the process used a lot of CPU time in 5 * loadav
  681  * seconds.  This causes the system to favor processes which haven't
  682  * run much recently, and to round-robin among other processes.
  683  */
  684 void
  685 sched_clock(struct thread *td)
  686 {
  687         struct ksegrp *kg;
  688         struct kse *ke;
  689 
  690         mtx_assert(&sched_lock, MA_OWNED);
  691         kg = td->td_ksegrp;
  692         ke = td->td_kse;
  693 
  694         ke->ke_cpticks++;
  695         kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + 1);
  696         if ((kg->kg_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
  697                 resetpriority(kg);
  698                 resetpriority_thread(td, kg);
  699         }
  700 }
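/*
 * Editorial sketch (not part of the original file): the feedback loop
 * sched_clock() implements, unrolled over several stat ticks.  A running
 * thread's kg_estcpu climbs one per tick and its user priority drops one
 * level per INVERSE_ESTCPU_WEIGHT ticks, while schedcpu()'s decay lets
 * threads that stop running drift back up.
 */
#if 0	/* illustrative only; equivalent to `ticks' calls of sched_clock() */
static void
example_charge_ticks(struct thread *td, int ticks)
{
	struct ksegrp *kg = td->td_ksegrp;

	while (ticks-- > 0) {
		kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + 1);
		if ((kg->kg_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
			resetpriority(kg);
			resetpriority_thread(td, kg);
		}
	}
}
#endif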
  701 
  702 /*
   703  * Charge the child's scheduling CPU usage to the parent.
  704  *
   705  * XXXKSE: assume only one thread, kse, and ksegrp; estcpu is kept per ksegrp.
   706  * Charge it to the ksegrp that did the wait; since process estcpu is the sum
   707  * of all ksegrps, this is strictly as expected.  Assume that the child process
  708  * aggregated all the estcpu into the 'built-in' ksegrp.
  709  */
  710 void
  711 sched_exit(struct proc *p, struct thread *td)
  712 {
  713         sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), td);
  714         sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
  715 }
  716 
  717 void
  718 sched_exit_ksegrp(struct ksegrp *kg, struct thread *childtd)
  719 {
  720 
  721         mtx_assert(&sched_lock, MA_OWNED);
  722         kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + childtd->td_ksegrp->kg_estcpu);
  723 }
  724 
  725 void
  726 sched_exit_thread(struct thread *td, struct thread *child)
  727 {
  728         CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d",
  729             child, child->td_proc->p_comm, child->td_priority);
  730         if ((child->td_proc->p_flag & P_NOLOAD) == 0)
  731                 sched_load_rem();
  732 }
  733 
  734 void
  735 sched_fork(struct thread *td, struct thread *childtd)
  736 {
  737         sched_fork_ksegrp(td, childtd->td_ksegrp);
  738         sched_fork_thread(td, childtd);
  739 }
  740 
  741 void
  742 sched_fork_ksegrp(struct thread *td, struct ksegrp *child)
  743 {
  744         mtx_assert(&sched_lock, MA_OWNED);
  745         child->kg_estcpu = td->td_ksegrp->kg_estcpu;
  746 }
  747 
  748 void
  749 sched_fork_thread(struct thread *td, struct thread *childtd)
  750 {
  751         sched_newthread(childtd);
  752 }
  753 
  754 void
  755 sched_nice(struct proc *p, int nice)
  756 {
  757         struct ksegrp *kg;
  758         struct thread *td;
  759 
  760         PROC_LOCK_ASSERT(p, MA_OWNED);
  761         mtx_assert(&sched_lock, MA_OWNED);
  762         p->p_nice = nice;
  763         FOREACH_KSEGRP_IN_PROC(p, kg) {
  764                 resetpriority(kg);
  765                 FOREACH_THREAD_IN_GROUP(kg, td) {
  766                         resetpriority_thread(td, kg);
  767                 }
  768         }
  769 }
  770 
  771 void
  772 sched_class(struct ksegrp *kg, int class)
  773 {
  774         mtx_assert(&sched_lock, MA_OWNED);
  775         kg->kg_pri_class = class;
  776 }
  777 
  778 /*
  779  * Adjust the priority of a thread.
  780  * This may include moving the thread within the KSEGRP,
  781  * changing the assignment of a kse to the thread,
  782  * and moving a KSE in the system run queue.
  783  */
  784 static void
  785 sched_priority(struct thread *td, u_char prio)
  786 {
  787         CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)",
  788             td, td->td_proc->p_comm, td->td_priority, prio, curthread, 
  789             curthread->td_proc->p_comm);
  790 
  791         mtx_assert(&sched_lock, MA_OWNED);
  792         if (td->td_priority == prio)
  793                 return;
  794         if (TD_ON_RUNQ(td)) {
  795                 adjustrunqueue(td, prio);
  796         } else {
  797                 td->td_priority = prio;
  798         }
  799 }
  800 
  801 /*
  802  * Update a thread's priority when it is lent another thread's
  803  * priority.
  804  */
  805 void
  806 sched_lend_prio(struct thread *td, u_char prio)
  807 {
  808 
  809         td->td_flags |= TDF_BORROWING;
  810         sched_priority(td, prio);
  811 }
  812 
  813 /*
  814  * Restore a thread's priority when priority propagation is
  815  * over.  The prio argument is the minimum priority the thread
  816  * needs to have to satisfy other possible priority lending
   817  * requests.  If the thread's regular priority is less
  818  * important than prio the thread will keep a priority boost
  819  * of prio.
  820  */
  821 void
  822 sched_unlend_prio(struct thread *td, u_char prio)
  823 {
  824         u_char base_pri;
  825 
  826         if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
  827             td->td_base_pri <= PRI_MAX_TIMESHARE)
  828                 base_pri = td->td_ksegrp->kg_user_pri;
  829         else
  830                 base_pri = td->td_base_pri;
  831         if (prio >= base_pri) {
  832                 td->td_flags &= ~TDF_BORROWING;
  833                 sched_prio(td, base_pri);
  834         } else
  835                 sched_lend_prio(td, prio);
  836 }
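/*
 * Editorial sketch (not part of the original file): how the turnstile
 * code is expected to drive the lend/unlend pair during priority
 * propagation.  `owner', `waiter' and `best_waiter_prio' are
 * hypothetical names for illustration, not kernel symbols.
 */
#if 0	/* illustrative only */
	/* A more important waiter blocks on a lock: boost the lock owner. */
	if (waiter->td_priority < owner->td_priority)
		sched_lend_prio(owner, waiter->td_priority);

	/* On release, restore the base priority or the best boost left. */
	sched_unlend_prio(owner, best_waiter_prio);
#endif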
  837 
  838 void
  839 sched_prio(struct thread *td, u_char prio)
  840 {
  841         u_char oldprio;
  842 
  843         /* First, update the base priority. */
  844         td->td_base_pri = prio;
  845 
  846         /*
  847          * If the thread is borrowing another thread's priority, don't ever
  848          * lower the priority.
  849          */
  850         if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
  851                 return;
  852 
  853         /* Change the real priority. */
  854         oldprio = td->td_priority;
  855         sched_priority(td, prio);
  856 
  857         /*
  858          * If the thread is on a turnstile, then let the turnstile update
  859          * its state.
  860          */
  861         if (TD_ON_LOCK(td) && oldprio != prio)
  862                 turnstile_adjust(td, oldprio);
  863 }
  864 
  865 void
  866 sched_sleep(struct thread *td)
  867 {
  868 
  869         mtx_assert(&sched_lock, MA_OWNED);
  870         td->td_ksegrp->kg_slptime = 0;
  871 }
  872 
  873 static void remrunqueue(struct thread *td);
  874 
  875 void
  876 sched_switch(struct thread *td, struct thread *newtd, int flags)
  877 {
  878         struct kse *ke;
  879         struct ksegrp *kg;
  880         struct proc *p;
  881 
  882         ke = td->td_kse;
  883         p = td->td_proc;
  884 
  885         mtx_assert(&sched_lock, MA_OWNED);
  886 
  887         if ((p->p_flag & P_NOLOAD) == 0)
  888                 sched_load_rem();
  889         /* 
  890          * We are volunteering to switch out so we get to nominate
   891          * a successor for the rest of our quantum.
  892          * First try another thread in our ksegrp, and then look for 
  893          * other ksegrps in our process.
  894          */
  895         if (sched_followon &&
  896             (p->p_flag & P_HADTHREADS) &&
  897             (flags & SW_VOL) &&
  898             newtd == NULL) {
   899                 /* Let's schedule another thread from this process. */
  900                  kg = td->td_ksegrp;
  901                  if ((newtd = TAILQ_FIRST(&kg->kg_runq))) {
  902                         remrunqueue(newtd);
  903                         sched_kgfollowons++;
  904                  } else {
  905                         FOREACH_KSEGRP_IN_PROC(p, kg) {
  906                                 if ((newtd = TAILQ_FIRST(&kg->kg_runq))) {
  907                                         sched_pfollowons++;
  908                                         remrunqueue(newtd);
  909                                         break;
  910                                 }
  911                         }
  912                 }
  913         }
  914 
  915         if (newtd) 
  916                 newtd->td_flags |= (td->td_flags & TDF_NEEDRESCHED);
  917 
  918         td->td_lastcpu = td->td_oncpu;
  919         td->td_flags &= ~TDF_NEEDRESCHED;
  920         td->td_owepreempt = 0;
  921         td->td_oncpu = NOCPU;
  922         /*
  923          * At the last moment, if this thread is still marked RUNNING,
  924          * then put it back on the run queue as it has not been suspended
   925          * or stopped or anything else similar.  We never put the idle
  926          * threads on the run queue, however.
  927          */
  928         if (td == PCPU_GET(idlethread))
  929                 TD_SET_CAN_RUN(td);
  930         else {
  931                 SLOT_RELEASE(td->td_ksegrp);
  932                 if (TD_IS_RUNNING(td)) {
  933                         /* Put us back on the run queue (kse and all). */
  934                         setrunqueue(td, (flags & SW_PREEMPT) ?
  935                             SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
  936                             SRQ_OURSELF|SRQ_YIELDING);
  937                 } else if (p->p_flag & P_HADTHREADS) {
  938                         /*
  939                          * We will not be on the run queue. So we must be
  940                          * sleeping or similar. As it's available,
  941                          * someone else can use the KSE if they need it.
  942                          * It's NOT available if we are about to need it
  943                          */
  944                         if (newtd == NULL || newtd->td_ksegrp != td->td_ksegrp)
  945                                 slot_fill(td->td_ksegrp);
  946                 }
  947         }
  948         if (newtd) {
  949                 /* 
  950                  * The thread we are about to run needs to be counted
  951                  * as if it had been added to the run queue and selected.
  952                  * It came from:
  953                  * * A preemption
  954                  * * An upcall 
  955                  * * A followon
  956                  */
  957                 KASSERT((newtd->td_inhibitors == 0),
   958                         ("trying to run inhibited thread"));
  959                 SLOT_USE(newtd->td_ksegrp);
  960                 newtd->td_kse->ke_flags |= KEF_DIDRUN;
  961                 TD_SET_RUNNING(newtd);
  962                 if ((newtd->td_proc->p_flag & P_NOLOAD) == 0)
  963                         sched_load_add();
  964         } else {
  965                 newtd = choosethread();
  966         }
  967 
  968         if (td != newtd) {
  969 #ifdef  HWPMC_HOOKS
  970                 if (PMC_PROC_IS_USING_PMCS(td->td_proc))
  971                         PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
  972 #endif
  973                 cpu_switch(td, newtd);
  974 #ifdef  HWPMC_HOOKS
  975                 if (PMC_PROC_IS_USING_PMCS(td->td_proc))
  976                         PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
  977 #endif
  978         }
  979 
  980         sched_lock.mtx_lock = (uintptr_t)td;
  981         td->td_oncpu = PCPU_GET(cpuid);
  982 }
  983 
  984 void
  985 sched_wakeup(struct thread *td)
  986 {
  987         struct ksegrp *kg;
  988 
  989         mtx_assert(&sched_lock, MA_OWNED);
  990         kg = td->td_ksegrp;
  991         if (kg->kg_slptime > 1) {
  992                 updatepri(kg);
  993                 resetpriority(kg);
  994         }
  995         kg->kg_slptime = 0;
  996         setrunqueue(td, SRQ_BORING);
  997 }
  998 
  999 #ifdef SMP
  1000 /* Enable HTT_2 if you have a 2-way HTT cpu. */
 1001 static int
 1002 forward_wakeup(int  cpunum)
 1003 {
 1004         cpumask_t map, me, dontuse;
 1005         cpumask_t map2;
 1006         struct pcpu *pc;
 1007         cpumask_t id, map3;
 1008 
 1009         mtx_assert(&sched_lock, MA_OWNED);
 1010 
 1011         CTR0(KTR_RUNQ, "forward_wakeup()");
 1012 
 1013         if ((!forward_wakeup_enabled) ||
 1014              (forward_wakeup_use_mask == 0 && forward_wakeup_use_loop == 0))
 1015                 return (0);
 1016         if (!smp_started || cold || panicstr)
 1017                 return (0);
 1018 
 1019         forward_wakeups_requested++;
 1020 
  1021         /*
  1022          * Check the idle mask we received against what we calculated
  1023          * before in the old version.
  1024          */
 1025         me = PCPU_GET(cpumask);
 1026         /* 
  1027          * Don't bother if we should be doing it ourselves.
 1028          */
 1029         if ((me & idle_cpus_mask) && (cpunum == NOCPU || me == (1 << cpunum)))
 1030                 return (0);
 1031 
 1032         dontuse = me | stopped_cpus | hlt_cpus_mask;
 1033         map3 = 0;
 1034         if (forward_wakeup_use_loop) {
 1035                 SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
 1036                         id = pc->pc_cpumask;
  1037                         if ((id & dontuse) == 0 &&
 1038                             pc->pc_curthread == pc->pc_idlethread) {
 1039                                 map3 |= id;
 1040                         }
 1041                 }
 1042         }
 1043 
 1044         if (forward_wakeup_use_mask) {
  1045                 map = idle_cpus_mask & ~dontuse;
 1047 
 1048                 /* If they are both on, compare and use loop if different */
 1049                 if (forward_wakeup_use_loop) {
 1050                         if (map != map3) {
 1051                                 printf("map (%02X) != map3 (%02X)\n",
 1052                                                 map, map3);
 1053                                 map = map3;
 1054                         }
 1055                 }
 1056         } else {
 1057                 map = map3;
 1058         }
 1059         /* If we only allow a specific CPU, then mask off all the others */
 1060         if (cpunum != NOCPU) {
 1061                 KASSERT((cpunum <= mp_maxcpus),("forward_wakeup: bad cpunum."));
 1062                 map &= (1 << cpunum);
 1063         } else {
  1064                 /* Try to choose an idle die (both HTT siblings idle). */
  1065                 if (forward_wakeup_use_htt) {
  1066                         map2 = (map & (map >> 1)) & 0x5555;
 1067                         if (map2) {
 1068                                 map = map2;
 1069                         }
 1070                 }
 1071 
  1072                 /* Set only one bit: map & (~map + 1) isolates the lowest set bit. */
 1073                 if (forward_wakeup_use_single) {
 1074                         map = map & ((~map) + 1);
 1075                 }
 1076         }
 1077         if (map) {
 1078                 forward_wakeups_delivered++;
 1079                 ipi_selected(map, IPI_AST);
 1080                 return (1);
 1081         }
 1082         if (cpunum == NOCPU)
 1083                 printf("forward_wakeup: Idle processor not found\n");
 1084         return (0);
 1085 }
 1086 #endif
 1087 
 1088 #ifdef SMP
  1089 static void kick_other_cpu(int pri, int cpuid);
  1090 
  1091 static void
  1092 kick_other_cpu(int pri, int cpuid)
  1093 {
 1094         struct pcpu * pcpu = pcpu_find(cpuid);
 1095         int cpri = pcpu->pc_curthread->td_priority;
 1096 
 1097         if (idle_cpus_mask & pcpu->pc_cpumask) {
 1098                 forward_wakeups_delivered++;
 1099                 ipi_selected(pcpu->pc_cpumask, IPI_AST);
 1100                 return;
 1101         }
 1102 
 1103         if (pri >= cpri)
 1104                 return;
 1105 
 1106 #if defined(IPI_PREEMPTION) && defined(PREEMPTION)
 1107 #if !defined(FULL_PREEMPTION)
 1108         if (pri <= PRI_MAX_ITHD)
 1109 #endif /* ! FULL_PREEMPTION */
 1110         {
 1111                 ipi_selected(pcpu->pc_cpumask, IPI_PREEMPT);
 1112                 return;
 1113         }
 1114 #endif /* defined(IPI_PREEMPTION) && defined(PREEMPTION) */
 1115 
 1116         pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED;
  1117         ipi_selected(pcpu->pc_cpumask, IPI_AST);
 1118         return;
 1119 }
 1120 #endif /* SMP */
 1121 
 1122 void
 1123 sched_add(struct thread *td, int flags)
 1124 #ifdef SMP
 1125 {
 1126         struct kse *ke;
 1127         int forwarded = 0;
 1128         int cpu;
 1129         int single_cpu = 0;
 1130 
 1131         ke = td->td_kse;
 1132         mtx_assert(&sched_lock, MA_OWNED);
 1133         KASSERT(ke->ke_state != KES_ONRUNQ,
 1134             ("sched_add: kse %p (%s) already in run queue", ke,
 1135             ke->ke_proc->p_comm));
 1136         KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
 1137             ("sched_add: process swapped out"));
 1138         CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
 1139             td, td->td_proc->p_comm, td->td_priority, curthread,
 1140             curthread->td_proc->p_comm);
 1141 
 1142 
 1143         if (td->td_pinned != 0) {
 1144                 cpu = td->td_lastcpu;
 1145                 ke->ke_runq = &runq_pcpu[cpu];
 1146                 single_cpu = 1;
 1147                 CTR3(KTR_RUNQ,
 1148                     "sched_add: Put kse:%p(td:%p) on cpu%d runq", ke, td, cpu);
 1149         } else if ((ke)->ke_flags & KEF_BOUND) {
 1150                 /* Find CPU from bound runq */
 1151                 KASSERT(SKE_RUNQ_PCPU(ke),("sched_add: bound kse not on cpu runq"));
 1152                 cpu = ke->ke_runq - &runq_pcpu[0];
 1153                 single_cpu = 1;
 1154                 CTR3(KTR_RUNQ,
 1155                     "sched_add: Put kse:%p(td:%p) on cpu%d runq", ke, td, cpu);
 1156         } else {        
 1157                 CTR2(KTR_RUNQ,
 1158                     "sched_add: adding kse:%p (td:%p) to gbl runq", ke, td);
 1159                 cpu = NOCPU;
 1160                 ke->ke_runq = &runq;
 1161         }
 1162         
 1163         if (single_cpu && (cpu != PCPU_GET(cpuid))) {
 1164                 kick_other_cpu(td->td_priority,cpu);
 1165         } else {
 1166                 
 1167                 if (!single_cpu) {
 1168                         cpumask_t me = PCPU_GET(cpumask);
 1169                         int idle = idle_cpus_mask & me; 
 1170 
 1171                         if (!idle && ((flags & SRQ_INTR) == 0) &&
 1172                             (idle_cpus_mask & ~(hlt_cpus_mask | me)))
 1173                                 forwarded = forward_wakeup(cpu);
 1174                 }
 1175 
 1176                 if (!forwarded) {
 1177                         if ((flags & SRQ_YIELDING) == 0 && maybe_preempt(td))
 1178                                 return;
 1179                         else
 1180                                 maybe_resched(td);
 1181                 }
 1182         }
 1183         
 1184         if ((td->td_proc->p_flag & P_NOLOAD) == 0)
 1185                 sched_load_add();
 1186         SLOT_USE(td->td_ksegrp);
 1187         runq_add(ke->ke_runq, ke, flags);
 1188         ke->ke_state = KES_ONRUNQ;
 1189 }
 1190 #else /* SMP */
 1191 {
 1192         struct kse *ke;
 1193         ke = td->td_kse;
 1194         mtx_assert(&sched_lock, MA_OWNED);
 1195         KASSERT(ke->ke_state != KES_ONRUNQ,
 1196             ("sched_add: kse %p (%s) already in run queue", ke,
 1197             ke->ke_proc->p_comm));
 1198         KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
 1199             ("sched_add: process swapped out"));
 1200         CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
 1201             td, td->td_proc->p_comm, td->td_priority, curthread,
 1202             curthread->td_proc->p_comm);
 1203         CTR2(KTR_RUNQ, "sched_add: adding kse:%p (td:%p) to runq", ke, td);
 1204         ke->ke_runq = &runq;
 1205 
  1206         /*
  1207          * If we are yielding (on the way out anyhow)
  1208          * or the thread being saved is US,
  1209          * then don't try to be smart about preemption
  1210          * or kicking off another CPU,
  1211          * as it won't help and may hinder.
  1212          * In the YIELDING case, we are about to run whoever is
  1213          * being put in the queue anyhow, and in the
  1214          * OURSELF case, we are putting ourselves on the run queue,
  1215          * which also only happens when we are about to yield.
  1216          */
  1217         if ((flags & SRQ_YIELDING) == 0) {
  1218                 if (maybe_preempt(td))
  1219                         return;
  1220         }
 1221         if ((td->td_proc->p_flag & P_NOLOAD) == 0)
 1222                 sched_load_add();
 1223         SLOT_USE(td->td_ksegrp);
 1224         runq_add(ke->ke_runq, ke, flags);
 1225         ke->ke_state = KES_ONRUNQ;
 1226         maybe_resched(td);
 1227 }
 1228 #endif /* SMP */
 1229 
 1230 void
 1231 sched_rem(struct thread *td)
 1232 {
 1233         struct kse *ke;
 1234 
 1235         ke = td->td_kse;
 1236         KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
 1237             ("sched_rem: process swapped out"));
 1238         KASSERT((ke->ke_state == KES_ONRUNQ),
 1239             ("sched_rem: KSE not on run queue"));
 1240         mtx_assert(&sched_lock, MA_OWNED);
 1241         CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
 1242             td, td->td_proc->p_comm, td->td_priority, curthread,
 1243             curthread->td_proc->p_comm);
 1244 
 1245         if ((td->td_proc->p_flag & P_NOLOAD) == 0)
 1246                 sched_load_rem();
 1247         SLOT_RELEASE(td->td_ksegrp);
 1248         runq_remove(ke->ke_runq, ke);
 1249 
 1250         ke->ke_state = KES_THREAD;
 1251 }
 1252 
 1253 /*
 1254  * Select threads to run.
 1255  * Notice that the running threads still consume a slot.
 1256  */
 1257 struct kse *
 1258 sched_choose(void)
 1259 {
 1260         struct kse *ke;
 1261         struct runq *rq;
 1262 
 1263 #ifdef SMP
 1264         struct kse *kecpu;
 1265 
 1266         rq = &runq;
 1267         ke = runq_choose(&runq);
 1268         kecpu = runq_choose(&runq_pcpu[PCPU_GET(cpuid)]);
 1269 
 1270         if (ke == NULL || 
 1271             (kecpu != NULL && 
 1272              kecpu->ke_thread->td_priority < ke->ke_thread->td_priority)) {
 1273                 CTR2(KTR_RUNQ, "choosing kse %p from pcpu runq %d", kecpu,
 1274                      PCPU_GET(cpuid));
 1275                 ke = kecpu;
 1276                 rq = &runq_pcpu[PCPU_GET(cpuid)];
 1277         } else { 
 1278                 CTR1(KTR_RUNQ, "choosing kse %p from main runq", ke);
 1279         }
 1280 
 1281 #else
 1282         rq = &runq;
 1283         ke = runq_choose(&runq);
 1284 #endif
 1285 
 1286         if (ke != NULL) {
 1287                 runq_remove(rq, ke);
 1288                 ke->ke_state = KES_THREAD;
 1289 
 1290                 KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
 1291                     ("sched_choose: process swapped out"));
 1292         }
 1293         return (ke);
 1294 }
 1295 
 1296 void
 1297 sched_userret(struct thread *td)
 1298 {
 1299         struct ksegrp *kg;
 1300         /*
 1301          * XXX we cheat slightly on the locking here to avoid locking in
 1302          * the usual case.  Setting td_priority here is essentially an
 1303          * incomplete workaround for not setting it properly elsewhere.
 1304          * Now that some interrupt handlers are threads, not setting it
 1305          * properly elsewhere can clobber it in the window between setting
 1306          * it here and returning to user mode, so don't waste time setting
 1307          * it perfectly here.
 1308          */
 1309         KASSERT((td->td_flags & TDF_BORROWING) == 0,
 1310             ("thread with borrowed priority returning to userland"));
 1311         kg = td->td_ksegrp;
 1312         if (td->td_priority != kg->kg_user_pri) {
 1313                 mtx_lock_spin(&sched_lock);
 1314                 td->td_priority = kg->kg_user_pri;
 1315                 td->td_base_pri = kg->kg_user_pri;
 1316                 mtx_unlock_spin(&sched_lock);
 1317         }
 1318 }
 1319 
 1320 void
 1321 sched_bind(struct thread *td, int cpu)
 1322 {
 1323         struct kse *ke;
 1324 
 1325         mtx_assert(&sched_lock, MA_OWNED);
 1326         KASSERT(TD_IS_RUNNING(td),
 1327             ("sched_bind: cannot bind non-running thread"));
 1328 
 1329         ke = td->td_kse;
 1330 
 1331         ke->ke_flags |= KEF_BOUND;
 1332 #ifdef SMP
 1333         ke->ke_runq = &runq_pcpu[cpu];
 1334         if (PCPU_GET(cpuid) == cpu)
 1335                 return;
 1336 
 1337         ke->ke_state = KES_THREAD;
 1338 
 1339         mi_switch(SW_VOL, NULL);
 1340 #endif
 1341 }
 1342 
 1343 void
 1344 sched_unbind(struct thread* td)
 1345 {
 1346         mtx_assert(&sched_lock, MA_OWNED);
 1347         td->td_kse->ke_flags &= ~KEF_BOUND;
 1348 }
 1349 
 1350 int
 1351 sched_is_bound(struct thread *td)
 1352 {
 1353         mtx_assert(&sched_lock, MA_OWNED);
 1354         return (td->td_kse->ke_flags & KEF_BOUND);
 1355 }
 1356 
 1357 int
 1358 sched_load(void)
 1359 {
 1360         return (sched_tdcnt);
 1361 }
 1362 
 1363 int
 1364 sched_sizeof_ksegrp(void)
 1365 {
 1366         return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
 1367 }
 1368 int
 1369 sched_sizeof_proc(void)
 1370 {
 1371         return (sizeof(struct proc));
 1372 }
 1373 int
 1374 sched_sizeof_thread(void)
 1375 {
 1376         return (sizeof(struct thread) + sizeof(struct kse));
 1377 }
 1378 
 1379 fixpt_t
 1380 sched_pctcpu(struct thread *td)
 1381 {
 1382         struct kse *ke;
 1383 
 1384         ke = td->td_kse;
  1385         return (ke->ke_pctcpu);
 1388 }
 1389 #define KERN_SWITCH_INCLUDE 1
 1390 #include "kern/kern_switch.c"
