FreeBSD/Linux Kernel Cross Reference
sys/kern/sched_4bsd.c

    1 /*-
    2  * Copyright (c) 1982, 1986, 1990, 1991, 1993
    3  *      The Regents of the University of California.  All rights reserved.
    4  * (c) UNIX System Laboratories, Inc.
    5  * All or some portions of this file are derived from material licensed
    6  * to the University of California by American Telephone and Telegraph
    7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
    8  * the permission of UNIX System Laboratories, Inc.
    9  *
   10  * Redistribution and use in source and binary forms, with or without
   11  * modification, are permitted provided that the following conditions
   12  * are met:
   13  * 1. Redistributions of source code must retain the above copyright
   14  *    notice, this list of conditions and the following disclaimer.
   15  * 2. Redistributions in binary form must reproduce the above copyright
   16  *    notice, this list of conditions and the following disclaimer in the
   17  *    documentation and/or other materials provided with the distribution.
   18  * 3. All advertising materials mentioning features or use of this software
   19  *    must display the following acknowledgement:
   20  *      This product includes software developed by the University of
   21  *      California, Berkeley and its contributors.
   22  * 4. Neither the name of the University nor the names of its contributors
   23  *    may be used to endorse or promote products derived from this software
   24  *    without specific prior written permission.
   25  *
   26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   36  * SUCH DAMAGE.
   37  */
   38 
   39 #include <sys/cdefs.h>
   40 __FBSDID("$FreeBSD: releng/5.2/sys/kern/sched_4bsd.c 122355 2003-11-09 13:45:54Z bde $");
   41 
   42 #include <sys/param.h>
   43 #include <sys/systm.h>
   44 #include <sys/kernel.h>
   45 #include <sys/ktr.h>
   46 #include <sys/lock.h>
   47 #include <sys/mutex.h>
   48 #include <sys/proc.h>
   49 #include <sys/resourcevar.h>
   50 #include <sys/sched.h>
   51 #include <sys/smp.h>
   52 #include <sys/sysctl.h>
   53 #include <sys/sx.h>
   54 
   55 /*
   56  * INVERSE_ESTCPU_WEIGHT is only suitable for statclock() frequencies in
   57  * the range 100-256 Hz (approximately).
   58  */
   59 #define ESTCPULIM(e) \
   60     min((e), INVERSE_ESTCPU_WEIGHT * (NICE_WEIGHT * (PRIO_MAX - PRIO_MIN) - \
   61     RQ_PPQ) + INVERSE_ESTCPU_WEIGHT - 1)
   62 #ifdef SMP
   63 #define INVERSE_ESTCPU_WEIGHT   (8 * smp_cpus)
   64 #else
   65 #define INVERSE_ESTCPU_WEIGHT   8       /* 1 / (priorities per estcpu level). */
   66 #endif
   67 #define NICE_WEIGHT             1       /* Priorities per nice level. */
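
       /*
        * A worked example of the clamp, assuming the stock constants of
        * this era (PRIO_MIN == -20, PRIO_MAX == 20, and RQ_PPQ == 4 from
        * <sys/runq.h>): on a uniprocessor,
        *
        *      ESTCPULIM(e) == min(e, 8 * (1 * 40 - 4) + 8 - 1) == min(e, 295),
        *
        * so kg_estcpu can never exceed 295 there; under SMP the bound
        * scales with smp_cpus.
        */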
   68 
   69 struct ke_sched {
   70         int     ske_cpticks;    /* (j) Ticks of cpu time. */
   71 };
   72 
   73 static struct ke_sched ke_sched;
   74 
   75 struct ke_sched *kse0_sched = &ke_sched;
   76 struct kg_sched *ksegrp0_sched = NULL;
   77 struct p_sched *proc0_sched = NULL;
   78 struct td_sched *thread0_sched = NULL;
   79 
   80 static int      sched_quantum;  /* Roundrobin scheduling quantum in ticks. */
   81 #define SCHED_QUANTUM   (hz / 10)       /* Default sched quantum */
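
       /*
        * With the default, the quantum is a tenth of a second regardless
        * of hz: hz == 100 gives sched_quantum == 10 ticks and hz == 1000
        * gives sched_quantum == 100 ticks, both 100ms of CPU per slot.
        */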
   82 
   83 static struct callout schedcpu_callout;
   84 static struct callout roundrobin_callout;
   85 
   86 static void     roundrobin(void *arg);
   87 static void     schedcpu(void *arg);
   88 static void     sched_setup(void *dummy);
   89 static void     maybe_resched(struct thread *td);
   90 static void     updatepri(struct ksegrp *kg);
   91 static void     resetpriority(struct ksegrp *kg);
   92 
   93 SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL)
   94 
   95 /*
   96  * Global run queue.
   97  */
   98 static struct runq runq;
   99 SYSINIT(runq, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, runq_init, &runq)
  100 
  101 static int
  102 sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
  103 {
  104         int error, new_val;
  105 
  106         new_val = sched_quantum * tick;
  107         error = sysctl_handle_int(oidp, &new_val, 0, req);
  108         if (error != 0 || req->newptr == NULL)
  109                 return (error);
  110         if (new_val < tick)
  111                 return (EINVAL);
  112         sched_quantum = new_val / tick;
  113         hogticks = 2 * sched_quantum;
  114         return (0);
  115 }
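
       /*
        * A sketch of how this reads and writes (illustrative values, not
        * output from a real system): with hz == 100, tick is 10000
        * microseconds, so the default quantum of hz / 10 == 10 ticks
        * reads back as
        *
        *      $ sysctl kern.quantum
        *      kern.quantum: 100000
        *
        * and "sysctl kern.quantum=50000" stores 50000 / 10000 == 5 ticks
        * in sched_quantum; requests below one tick fail with EINVAL.
        */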
  116 
  117 SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW,
  118         0, sizeof sched_quantum, sysctl_kern_quantum, "I",
  119         "Roundrobin scheduling quantum in microseconds");
  120 
  121 /*
  122  * Arrange to reschedule if necessary, taking the priorities and
  123  * schedulers into account.
  124  */
  125 static void
  126 maybe_resched(struct thread *td)
  127 {
  128 
  129         mtx_assert(&sched_lock, MA_OWNED);
  130         if (td->td_priority < curthread->td_priority && curthread->td_kse)
  131                 curthread->td_flags |= TDF_NEEDRESCHED;
  132 }
  133 
  134 /*
  135  * Force switch among equal priority processes every 100ms.
  136  * We don't actually need to force a context switch of the current process.
   137  * The act of firing the event triggers a context switch to softclock()
   138  * and then a switch back out again, which is equivalent to a preemption;
   139  * thus no further work is needed on the local CPU.
  140  */
  141 /* ARGSUSED */
  142 static void
  143 roundrobin(void *arg)
  144 {
  145 
  146 #ifdef SMP
  147         mtx_lock_spin(&sched_lock);
  148         forward_roundrobin();
  149         mtx_unlock_spin(&sched_lock);
  150 #endif
  151 
  152         callout_reset(&roundrobin_callout, sched_quantum, roundrobin, NULL);
  153 }
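
       /*
        * Note that roundrobin() rearms its own callout, so once
        * sched_setup() has primed it, it fires every sched_quantum ticks
        * for the lifetime of the system; the SMP case additionally uses
        * forward_roundrobin() to nudge the other CPUs.
        */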
  154 
  155 /*
  156  * Constants for digital decay and forget:
  157  *      90% of (kg_estcpu) usage in 5 * loadav time
  158  *      95% of (ke_pctcpu) usage in 60 seconds (load insensitive)
  159  *          Note that, as ps(1) mentions, this can let percentages
  160  *          total over 100% (I've seen 137.9% for 3 processes).
  161  *
  162  * Note that schedclock() updates kg_estcpu and p_cpticks asynchronously.
  163  *
  164  * We wish to decay away 90% of kg_estcpu in (5 * loadavg) seconds.
  165  * That is, the system wants to compute a value of decay such
  166  * that the following for loop:
  167  *      for (i = 0; i < (5 * loadavg); i++)
  168  *              kg_estcpu *= decay;
  169  * will compute
  170  *      kg_estcpu *= 0.1;
  171  * for all values of loadavg:
  172  *
  173  * Mathematically this loop can be expressed by saying:
  174  *      decay ** (5 * loadavg) ~= .1
  175  *
  176  * The system computes decay as:
  177  *      decay = (2 * loadavg) / (2 * loadavg + 1)
  178  *
  179  * We wish to prove that the system's computation of decay
  180  * will always fulfill the equation:
  181  *      decay ** (5 * loadavg) ~= .1
  182  *
  183  * If we compute b as:
  184  *      b = 2 * loadavg
  185  * then
  186  *      decay = b / (b + 1)
  187  *
  188  * We now need to prove two things:
  189  *      1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
   190  *      2) Given (b/(b+1)) ** power ~= .1, prove power == (5 * loadavg)
  191  *
  192  * Facts:
  193  *         For x close to zero, exp(x) =~ 1 + x, since
  194  *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
  195  *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
  196  *         For x close to zero, ln(1+x) =~ x, since
  197  *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
  198  *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
  199  *         ln(.1) =~ -2.30
  200  *
  201  * Proof of (1):
  202  *    Solve (factor)**(power) =~ .1 given power (5*loadav):
  203  *      solving for factor,
  204  *      ln(factor) =~ (-2.30/5*loadav), or
  205  *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
  206  *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
  207  *
  208  * Proof of (2):
  209  *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
  210  *      solving for power,
  211  *      power*ln(b/(b+1)) =~ -2.30, or
  212  *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
  213  *
  214  * Actual power values for the implemented algorithm are as follows:
  215  *      loadav: 1       2       3       4
  216  *      power:  5.68    10.32   14.94   19.55
  217  */
  218 
  219 /* calculations for digital decay to forget 90% of usage in 5*loadav sec */
  220 #define loadfactor(loadav)      (2 * (loadav))
  221 #define decay_cpu(loadfac, cpu) (((loadfac) * (cpu)) / ((loadfac) + FSCALE))
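
       /*
        * A quick standalone check of the power table above (a sketch
        * meant to be compiled on its own, not as part of the kernel):
        *
        *      #include <math.h>
        *      #include <stdio.h>
        *
        *      int
        *      main(void)
        *      {
        *              int loadav;
        *
        *              for (loadav = 1; loadav <= 4; loadav++) {
        *                      double b = 2.0 * loadav;
        *
        *                      printf("%d\t%.2f\n", loadav,
        *                          log(0.1) / log(b / (b + 1.0)));
        *              }
        *              return (0);
        *      }
        *
        * prints 5.68, 10.32, 14.94 and 19.55, matching the table.  In
        * the fixed-point macros, a load average of 1.0 (ldavg == FSCALE
        * == 2048) gives loadfac == 4096, so decay_cpu() multiplies by
        * 4096 / 6144 == 2/3 each second.
        */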
  222 
  223 /* decay 95% of `ke_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
  224 static fixpt_t  ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
  225 SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
  226 
  227 /*
  228  * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
  229  * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
  230  * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
  231  *
  232  * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
   233  *      1 - exp(-1/20) ~= 0.0488 == 100 (fixed pt, *11* bits).
  234  *
  235  * If you don't want to bother with the faster/more-accurate formula, you
  236  * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
  237  * (more general) method of calculating the %age of CPU used by a process.
  238  */
  239 #define CCPU_SHIFT      11
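
       /*
        * In fixed point with FSCALE == 2048, ccpu is about 1948, so each
        * pass of schedcpu() scales ke_pctcpu by 1948/2048 =~ 0.951.
        * After 60 such passes the old contribution is 0.951**60 =~ 0.05,
        * which is the "decay 95% in 60 seconds" claimed above.
        */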
  240 
  241 /*
  242  * Recompute process priorities, every hz ticks.
  243  * MP-safe, called without the Giant mutex.
  244  */
  245 /* ARGSUSED */
  246 static void
  247 schedcpu(void *arg)
  248 {
  249         register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
  250         struct thread *td;
  251         struct proc *p;
  252         struct kse *ke;
  253         struct ksegrp *kg;
  254         int awake, realstathz;
  255 
  256         realstathz = stathz ? stathz : hz;
  257         sx_slock(&allproc_lock);
  258         FOREACH_PROC_IN_SYSTEM(p) {
  259                 /*
  260                  * Prevent state changes and protect run queue.
  261                  */
  262                 mtx_lock_spin(&sched_lock);
  263                 /*
  264                  * Increment time in/out of memory.  We ignore overflow; with
  265                  * 16-bit int's (remember them?) overflow takes 45 days.
  266                  */
  267                 p->p_swtime++;
  268                 FOREACH_KSEGRP_IN_PROC(p, kg) { 
  269                         awake = 0;
  270                         FOREACH_KSE_IN_GROUP(kg, ke) {
  271                                 /*
  272                                  * Increment sleep time (if sleeping).  We
  273                                  * ignore overflow, as above.
  274                                  */
  275                                 /*
  276                                  * The kse slptimes are not touched in wakeup
  277                                  * because the thread may not HAVE a KSE.
  278                                  */
  279                                 if (ke->ke_state == KES_ONRUNQ) {
  280                                         awake = 1;
  281                                         ke->ke_flags &= ~KEF_DIDRUN;
  282                                 } else if ((ke->ke_state == KES_THREAD) &&
  283                                     (TD_IS_RUNNING(ke->ke_thread))) {
  284                                         awake = 1;
  285                                         /* Do not clear KEF_DIDRUN */
  286                                 } else if (ke->ke_flags & KEF_DIDRUN) {
  287                                         awake = 1;
  288                                         ke->ke_flags &= ~KEF_DIDRUN;
  289                                 }
  290 
  291                                 /*
  292                                  * ke_pctcpu is only for ps and ttyinfo().
  293                                  * Do it per kse, and add them up at the end?
  294                                  * XXXKSE
  295                                  */
  296                                 ke->ke_pctcpu = (ke->ke_pctcpu * ccpu) >>
  297                                     FSHIFT;
  298                                 /*
  299                                  * If the kse has been idle the entire second,
  300                                  * stop recalculating its priority until
  301                                  * it wakes up.
  302                                  */
  303                                 if (ke->ke_sched->ske_cpticks == 0)
  304                                         continue;
  305 #if     (FSHIFT >= CCPU_SHIFT)
  306                                 ke->ke_pctcpu += (realstathz == 100)
  307                                     ? ((fixpt_t) ke->ke_sched->ske_cpticks) <<
  308                                     (FSHIFT - CCPU_SHIFT) :
  309                                     100 * (((fixpt_t) ke->ke_sched->ske_cpticks)
  310                                     << (FSHIFT - CCPU_SHIFT)) / realstathz;
  311 #else
  312                                 ke->ke_pctcpu += ((FSCALE - ccpu) *
  313                                     (ke->ke_sched->ske_cpticks *
  314                                     FSCALE / realstathz)) >> FSHIFT;
  315 #endif
  316                                 ke->ke_sched->ske_cpticks = 0;
  317                         } /* end of kse loop */
  318                         /* 
  319                          * If there are ANY running threads in this KSEGRP,
  320                          * then don't count it as sleeping.
  321                          */
  322                         if (awake) {
  323                                 if (kg->kg_slptime > 1) {
  324                                         /*
  325                                          * In an ideal world, this should not
  326                                          * happen, because whoever woke us
  327                                          * up from the long sleep should have
  328                                          * unwound the slptime and reset our
  329                                          * priority before we run at the stale
  330                                          * priority.  Should KASSERT at some
  331                                          * point when all the cases are fixed.
  332                                          */
  333                                         updatepri(kg);
  334                                 }
  335                                 kg->kg_slptime = 0;
  336                         } else
  337                                 kg->kg_slptime++;
  338                         if (kg->kg_slptime > 1)
  339                                 continue;
  340                         kg->kg_estcpu = decay_cpu(loadfac, kg->kg_estcpu);
  341                         resetpriority(kg);
  342                         FOREACH_THREAD_IN_GROUP(kg, td) {
  343                                 if (td->td_priority >= PUSER) {
  344                                         sched_prio(td, kg->kg_user_pri);
  345                                 }
  346                         }
  347                 } /* end of ksegrp loop */
  348                 mtx_unlock_spin(&sched_lock);
  349         } /* end of process loop */
  350         sx_sunlock(&allproc_lock);
  351         callout_reset(&schedcpu_callout, hz, schedcpu, NULL);
  352 }
  353 
  354 /*
  355  * Recalculate the priority of a process after it has slept for a while.
  356  * For all load averages >= 1 and max kg_estcpu of 255, sleeping for at
  357  * least six times the loadfactor will decay kg_estcpu to zero.
  358  */
  359 static void
  360 updatepri(struct ksegrp *kg)
  361 {
  362         register fixpt_t loadfac;
  363         register unsigned int newcpu;
  364 
  365         loadfac = loadfactor(averunnable.ldavg[0]);
  366         if (kg->kg_slptime > 5 * loadfac)
  367                 kg->kg_estcpu = 0;
  368         else {
  369                 newcpu = kg->kg_estcpu;
  370                 kg->kg_slptime--;       /* was incremented in schedcpu() */
  371                 while (newcpu && --kg->kg_slptime)
  372                         newcpu = decay_cpu(loadfac, newcpu);
  373                 kg->kg_estcpu = newcpu;
  374         }
  375         resetpriority(kg);
  376 }
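
       /*
        * For example, at a steady load average of 1.0 the per-second
        * decay factor is 2/3, so a ksegrp waking after 4 seconds asleep
        * has decay_cpu() applied twice (kg_slptime - 2 times in all, the
        * extra decrement undoing the increment schedcpu() already made)
        * and keeps (2/3)**2 =~ 0.44 of its old kg_estcpu.
        */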
  377 
  378 /*
  379  * Compute the priority of a process when running in user mode.
  380  * Arrange to reschedule if the resulting priority is better
  381  * than that of the current process.
  382  */
  383 static void
  384 resetpriority(struct ksegrp *kg)
  385 {
  386         register unsigned int newpriority;
  387         struct thread *td;
  388 
  389         if (kg->kg_pri_class == PRI_TIMESHARE) {
  390                 newpriority = PUSER + kg->kg_estcpu / INVERSE_ESTCPU_WEIGHT +
  391                     NICE_WEIGHT * (kg->kg_nice - PRIO_MIN);
  392                 newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
  393                     PRI_MAX_TIMESHARE);
  394                 kg->kg_user_pri = newpriority;
  395         }
  396         FOREACH_THREAD_IN_GROUP(kg, td) {
  397                 maybe_resched(td);                      /* XXXKSE silly */
  398         }
  399 }
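
       /*
        * A worked example, assuming the 5.x priority layout where PUSER
        * == PRI_MIN_TIMESHARE == 160 and PRIO_MIN == -20: a timeshare
        * ksegrp with kg_estcpu == 80 and kg_nice == 0 on a uniprocessor
        * gets
        *
        *      newpriority = 160 + 80 / 8 + 1 * (0 - (-20)) = 190,
        *
        * which already lies inside the timeshare range, so the clamp
        * leaves it alone.
        */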
  400 
  401 /* ARGSUSED */
  402 static void
  403 sched_setup(void *dummy)
  404 {
  405 
  406         if (sched_quantum == 0)
  407                 sched_quantum = SCHED_QUANTUM;
  408         hogticks = 2 * sched_quantum;
  409 
  410         callout_init(&schedcpu_callout, CALLOUT_MPSAFE);
  411         callout_init(&roundrobin_callout, 0);
  412 
   413         /* Kick off timeout driven events by calling them for the first time. */
  414         roundrobin(NULL);
  415         schedcpu(NULL);
  416 }
  417 
  418 /* External interfaces start here */
  419 int
  420 sched_runnable(void)
  421 {
   422         return (runq_check(&runq));
  423 }
  424 
  425 int 
  426 sched_rr_interval(void)
  427 {
  428         if (sched_quantum == 0)
  429                 sched_quantum = SCHED_QUANTUM;
  430         return (sched_quantum);
  431 }
  432 
  433 /*
  434  * We adjust the priority of the current process.  The priority of
  435  * a process gets worse as it accumulates CPU time.  The cpu usage
  436  * estimator (kg_estcpu) is increased here.  resetpriority() will
  437  * compute a different priority each time kg_estcpu increases by
   438  * INVERSE_ESTCPU_WEIGHT (until MAXPRI is reached).  The cpu usage
   439  * estimator ramps up
  440  * quite quickly when the process is running (linearly), and decays
  441  * away exponentially, at a rate which is proportionally slower when
  442  * the system is busy.  The basic principle is that the system will
  443  * 90% forget that the process used a lot of CPU time in 5 * loadav
  444  * seconds.  This causes the system to favor processes which haven't
  445  * run much recently, and to round-robin among other processes.
  446  */
  447 void
  448 sched_clock(struct thread *td)
  449 {
  450         struct ksegrp *kg;
  451         struct kse *ke;
  452 
  453         mtx_assert(&sched_lock, MA_OWNED);
  454         kg = td->td_ksegrp;
  455         ke = td->td_kse;
  456 
  457         ke->ke_sched->ske_cpticks++;
  458         kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + 1);
  459         if ((kg->kg_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
  460                 resetpriority(kg);
  461                 if (td->td_priority >= PUSER)
  462                         td->td_priority = kg->kg_user_pri;
  463         }
  464 }
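
       /*
        * Concretely, on a uniprocessor every 8th statclock tick charged
        * to a ksegrp (INVERSE_ESTCPU_WEIGHT == 8) moves its computed
        * user priority one step, so a thread that monopolizes the CPU
        * drifts toward PRI_MAX_TIMESHARE at roughly stathz / 8 steps per
        * second until the decay in schedcpu() catches up.
        */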
  465 
  466 /*
   467  * Charge the child's scheduling CPU usage to the parent.
   468  *
   469  * XXXKSE assume only one thread & kse & ksegrp keep estcpu in each ksegrp.
   470  * Charge it to the ksegrp that did the wait; since process estcpu is the
   471  * sum of all ksegrps, this is strictly as expected.  Assume that the child
   472  * process aggregated all the estcpu into the 'built-in' ksegrp.
  473  */
  474 void
  475 sched_exit(struct proc *p, struct proc *p1)
  476 {
  477         sched_exit_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1));
  478         sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1));
  479         sched_exit_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1));
  480 }
  481 
  482 void
  483 sched_exit_kse(struct kse *ke, struct kse *child)
  484 {
  485 }
  486 
  487 void
  488 sched_exit_ksegrp(struct ksegrp *kg, struct ksegrp *child)
  489 {
  490 
  491         mtx_assert(&sched_lock, MA_OWNED);
  492         kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + child->kg_estcpu);
  493 }
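
       /*
        * So a parent with kg_estcpu 40 that reaps a child which
        * accumulated 25 continues with 65, subject to the ESTCPULIM()
        * clamp; the child's CPU history is billed to whoever waited for
        * it rather than being lost at exit.
        */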
  494 
  495 void
  496 sched_exit_thread(struct thread *td, struct thread *child)
  497 {
  498 }
  499 
  500 void
  501 sched_fork(struct proc *p, struct proc *p1)
  502 {
  503         sched_fork_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1));
  504         sched_fork_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1));
  505         sched_fork_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1));
  506 }
  507 
  508 void
  509 sched_fork_kse(struct kse *ke, struct kse *child)
  510 {
  511         child->ke_sched->ske_cpticks = 0;
  512 }
  513 
  514 void
  515 sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child)
  516 {
  517         mtx_assert(&sched_lock, MA_OWNED);
  518         child->kg_estcpu = kg->kg_estcpu;
  519 }
  520 
  521 void
  522 sched_fork_thread(struct thread *td, struct thread *child)
  523 {
  524 }
  525 
  526 void
  527 sched_nice(struct ksegrp *kg, int nice)
  528 {
  529 
  530         PROC_LOCK_ASSERT(kg->kg_proc, MA_OWNED);
  531         mtx_assert(&sched_lock, MA_OWNED);
  532         kg->kg_nice = nice;
  533         resetpriority(kg);
  534 }
  535 
  536 void
  537 sched_class(struct ksegrp *kg, int class)
  538 {
  539         mtx_assert(&sched_lock, MA_OWNED);
  540         kg->kg_pri_class = class;
  541 }
  542 
  543 /*
  544  * Adjust the priority of a thread.
  545  * This may include moving the thread within the KSEGRP,
  546  * changing the assignment of a kse to the thread,
  547  * and moving a KSE in the system run queue.
  548  */
  549 void
  550 sched_prio(struct thread *td, u_char prio)
  551 {
  552 
  553         mtx_assert(&sched_lock, MA_OWNED);
  554         if (TD_ON_RUNQ(td)) {
  555                 adjustrunqueue(td, prio);
  556         } else {
  557                 td->td_priority = prio;
  558         }
  559 }
  560 
  561 void
  562 sched_sleep(struct thread *td, u_char prio)
  563 {
  564 
  565         mtx_assert(&sched_lock, MA_OWNED);
  566         td->td_ksegrp->kg_slptime = 0;
  567         td->td_priority = prio;
  568 }
  569 
  570 void
  571 sched_switch(struct thread *td)
  572 {
  573         struct thread *newtd;
  574         struct kse *ke;
  575         struct proc *p;
  576 
  577         ke = td->td_kse;
  578         p = td->td_proc;
  579 
  580         mtx_assert(&sched_lock, MA_OWNED);
  581         KASSERT((ke->ke_state == KES_THREAD), ("mi_switch: kse state?"));
  582 
  583         td->td_lastcpu = td->td_oncpu;
  584         td->td_last_kse = ke;
  585         td->td_oncpu = NOCPU;
  586         td->td_flags &= ~TDF_NEEDRESCHED;
  587         /*
  588          * At the last moment, if this thread is still marked RUNNING,
   589          * then put it back on the run queue, as it has not been suspended,
   590          * stopped, or anything else similar.
  591          */
  592         if (TD_IS_RUNNING(td)) {
  593                 /* Put us back on the run queue (kse and all). */
  594                 setrunqueue(td);
  595         } else if (p->p_flag & P_SA) {
  596                 /*
  597                  * We will not be on the run queue. So we must be
  598                  * sleeping or similar. As it's available,
  599                  * someone else can use the KSE if they need it.
  600                  */
  601                 kse_reassign(ke);
  602         }
  603         newtd = choosethread();
  604         if (td != newtd)
  605                 cpu_switch(td, newtd);
  606         sched_lock.mtx_lock = (uintptr_t)td;
  607         td->td_oncpu = PCPU_GET(cpuid);
  608 }
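
       /*
        * The assignment to sched_lock.mtx_lock above repairs the spin
        * lock's owner field: sched_lock is held across cpu_switch(), so
        * when this thread is switched back in, the lock must name it as
        * owner again before td_oncpu is updated.
        */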
  609 
  610 void
  611 sched_wakeup(struct thread *td)
  612 {
  613         struct ksegrp *kg;
  614 
  615         mtx_assert(&sched_lock, MA_OWNED);
  616         kg = td->td_ksegrp;
  617         if (kg->kg_slptime > 1)
  618                 updatepri(kg);
  619         kg->kg_slptime = 0;
  620         setrunqueue(td);
  621         maybe_resched(td);
  622 }
  623 
  624 void
  625 sched_add(struct thread *td)
  626 {
  627         struct kse *ke;
  628 
  629         ke = td->td_kse;
  630         mtx_assert(&sched_lock, MA_OWNED);
  631         KASSERT((ke->ke_thread != NULL), ("runq_add: No thread on KSE"));
  632         KASSERT((ke->ke_thread->td_kse != NULL),
  633             ("runq_add: No KSE on thread"));
  634         KASSERT(ke->ke_state != KES_ONRUNQ,
  635             ("runq_add: kse %p (%s) already in run queue", ke,
  636             ke->ke_proc->p_comm));
  637         KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
  638             ("runq_add: process swapped out"));
  639         ke->ke_ksegrp->kg_runq_kses++;
  640         ke->ke_state = KES_ONRUNQ;
  641 
  642         runq_add(&runq, ke);
  643 }
  644 
  645 void
  646 sched_rem(struct thread *td)
  647 {
  648         struct kse *ke;
  649 
  650         ke = td->td_kse;
  651         KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
  652             ("runq_remove: process swapped out"));
  653         KASSERT((ke->ke_state == KES_ONRUNQ), ("KSE not on run queue"));
  654         mtx_assert(&sched_lock, MA_OWNED);
  655 
  656         runq_remove(&runq, ke);
  657         ke->ke_state = KES_THREAD;
  658         ke->ke_ksegrp->kg_runq_kses--;
  659 }
  660 
  661 struct kse *
  662 sched_choose(void)
  663 {
  664         struct kse *ke;
  665 
  666         ke = runq_choose(&runq);
  667 
  668         if (ke != NULL) {
  669                 runq_remove(&runq, ke);
  670                 ke->ke_state = KES_THREAD;
  671 
  672                 KASSERT((ke->ke_thread != NULL),
  673                     ("runq_choose: No thread on KSE"));
  674                 KASSERT((ke->ke_thread->td_kse != NULL),
  675                     ("runq_choose: No KSE on thread"));
  676                 KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
  677                     ("runq_choose: process swapped out"));
  678         }
  679         return (ke);
  680 }
  681 
  682 void
  683 sched_userret(struct thread *td)
  684 {
  685         struct ksegrp *kg;
  686         /*
  687          * XXX we cheat slightly on the locking here to avoid locking in
  688          * the usual case.  Setting td_priority here is essentially an
  689          * incomplete workaround for not setting it properly elsewhere.
  690          * Now that some interrupt handlers are threads, not setting it
  691          * properly elsewhere can clobber it in the window between setting
  692          * it here and returning to user mode, so don't waste time setting
  693          * it perfectly here.
  694          */
  695         kg = td->td_ksegrp;
  696         if (td->td_priority != kg->kg_user_pri) {
  697                 mtx_lock_spin(&sched_lock);
  698                 td->td_priority = kg->kg_user_pri;
  699                 mtx_unlock_spin(&sched_lock);
  700         }
  701 }
  702 
  703 int
  704 sched_sizeof_kse(void)
  705 {
  706         return (sizeof(struct kse) + sizeof(struct ke_sched));
  707 }
  708 int
  709 sched_sizeof_ksegrp(void)
  710 {
  711         return (sizeof(struct ksegrp));
  712 }
  713 int
  714 sched_sizeof_proc(void)
  715 {
  716         return (sizeof(struct proc));
  717 }
  718 int
  719 sched_sizeof_thread(void)
  720 {
  721         return (sizeof(struct thread));
  722 }
  723 
  724 fixpt_t
  725 sched_pctcpu(struct thread *td)
  726 {
  727         struct kse *ke;
  728 
  729         ke = td->td_kse;
  730         if (ke == NULL)
  731                 ke = td->td_last_kse;
  732         if (ke)
  733                 return (ke->ke_pctcpu);
  734 
  735         return (0);
  736 }
