
FreeBSD/Linux Kernel Cross Reference
sys/kern/sched_4bsd.c


    1 /*-
    2  * Copyright (c) 1982, 1986, 1990, 1991, 1993
    3  *      The Regents of the University of California.  All rights reserved.
    4  * (c) UNIX System Laboratories, Inc.
    5  * All or some portions of this file are derived from material licensed
    6  * to the University of California by American Telephone and Telegraph
    7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
    8  * the permission of UNIX System Laboratories, Inc.
    9  *
   10  * Redistribution and use in source and binary forms, with or without
   11  * modification, are permitted provided that the following conditions
   12  * are met:
   13  * 1. Redistributions of source code must retain the above copyright
   14  *    notice, this list of conditions and the following disclaimer.
   15  * 2. Redistributions in binary form must reproduce the above copyright
   16  *    notice, this list of conditions and the following disclaimer in the
   17  *    documentation and/or other materials provided with the distribution.
   18  * 4. Neither the name of the University nor the names of its contributors
   19  *    may be used to endorse or promote products derived from this software
   20  *    without specific prior written permission.
   21  *
   22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   32  * SUCH DAMAGE.
   33  */
   34 
   35 #include <sys/cdefs.h>
   36 __FBSDID("$FreeBSD: releng/10.1/sys/kern/sched_4bsd.c 271194 2014-09-06 15:26:38Z mav $");
   37 
   38 #include "opt_hwpmc_hooks.h"
   39 #include "opt_sched.h"
   40 #include "opt_kdtrace.h"
   41 
   42 #include <sys/param.h>
   43 #include <sys/systm.h>
   44 #include <sys/cpuset.h>
   45 #include <sys/kernel.h>
   46 #include <sys/ktr.h>
   47 #include <sys/lock.h>
   48 #include <sys/kthread.h>
   49 #include <sys/mutex.h>
   50 #include <sys/proc.h>
   51 #include <sys/resourcevar.h>
   52 #include <sys/sched.h>
   53 #include <sys/sdt.h>
   54 #include <sys/smp.h>
   55 #include <sys/sysctl.h>
   56 #include <sys/sx.h>
   57 #include <sys/turnstile.h>
   58 #include <sys/umtx.h>
   59 #include <machine/pcb.h>
   60 #include <machine/smp.h>
   61 
   62 #ifdef HWPMC_HOOKS
   63 #include <sys/pmckern.h>
   64 #endif
   65 
   66 #ifdef KDTRACE_HOOKS
   67 #include <sys/dtrace_bsd.h>
   68 int                             dtrace_vtime_active;
   69 dtrace_vtime_switch_func_t      dtrace_vtime_switch_func;
   70 #endif
   71 
   72 /*
   73  * INVERSE_ESTCPU_WEIGHT is only suitable for statclock() frequencies in
   74  * the range 100-256 Hz (approximately).
   75  */
   76 #define ESTCPULIM(e) \
   77     min((e), INVERSE_ESTCPU_WEIGHT * (NICE_WEIGHT * (PRIO_MAX - PRIO_MIN) - \
   78     RQ_PPQ) + INVERSE_ESTCPU_WEIGHT - 1)
   79 #ifdef SMP
   80 #define INVERSE_ESTCPU_WEIGHT   (8 * smp_cpus)
   81 #else
   82 #define INVERSE_ESTCPU_WEIGHT   8       /* 1 / (priorities per estcpu level). */
   83 #endif
   84 #define NICE_WEIGHT             1       /* Priorities per nice level. */
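
/*
 * A minimal userland sketch of the clamp computed by ESTCPULIM() above,
 * assuming the uniprocessor constants (INVERSE_ESTCPU_WEIGHT = 8,
 * NICE_WEIGHT = 1) together with the usual stock values PRIO_MIN = -20,
 * PRIO_MAX = 20 and RQ_PPQ = 4, which are assumptions of this sketch
 * rather than definitions from this file.  Kept under #if 0 so it is
 * never built.
 */
#if 0
#include <stdio.h>

#define PRIO_MIN		(-20)
#define PRIO_MAX		20
#define RQ_PPQ			4	/* priorities per run queue */
#define INVERSE_ESTCPU_WEIGHT	8	/* uniprocessor case */
#define NICE_WEIGHT		1

int
main(void)
{
	int lim;

	lim = INVERSE_ESTCPU_WEIGHT *
	    (NICE_WEIGHT * (PRIO_MAX - PRIO_MIN) - RQ_PPQ) +
	    INVERSE_ESTCPU_WEIGHT - 1;
	/* 8 * (40 - 4) + 7 = 295: td_estcpu is never allowed past this. */
	printf("ESTCPULIM clamp = %d\n", lim);
	/* Every 8 points of estcpu cost one priority level, so at most: */
	printf("max priority penalty = %d\n", lim / INVERSE_ESTCPU_WEIGHT);
	return (0);
}
#endif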
   85 
   86 #define TS_NAME_LEN (MAXCOMLEN + sizeof(" td ") + sizeof(__XSTRING(UINT_MAX)))
   87 
   88 /*
   89  * The schedulable entity that runs a context.
    90  * This is an extension to the thread structure and is tailored to
    91  * the requirements of this scheduler.
   92  */
   93 struct td_sched {
   94         fixpt_t         ts_pctcpu;      /* (j) %cpu during p_swtime. */
   95         int             ts_cpticks;     /* (j) Ticks of cpu time. */
   96         int             ts_slptime;     /* (j) Seconds !RUNNING. */
   97         int             ts_slice;       /* Remaining part of time slice. */
   98         int             ts_flags;
   99         struct runq     *ts_runq;       /* runq the thread is currently on */
  100 #ifdef KTR
  101         char            ts_name[TS_NAME_LEN];
  102 #endif
  103 };
  104 
  105 /* flags kept in td_flags */
  106 #define TDF_DIDRUN      TDF_SCHED0      /* thread actually ran. */
  107 #define TDF_BOUND       TDF_SCHED1      /* Bound to one CPU. */
  108 #define TDF_SLICEEND    TDF_SCHED2      /* Thread time slice is over. */
  109 
  110 /* flags kept in ts_flags */
  111 #define TSF_AFFINITY    0x0001          /* Has a non-"full" CPU set. */
  112 
  113 #define SKE_RUNQ_PCPU(ts)                                               \
  114     ((ts)->ts_runq != 0 && (ts)->ts_runq != &runq)
  115 
  116 #define THREAD_CAN_SCHED(td, cpu)       \
  117     CPU_ISSET((cpu), &(td)->td_cpuset->cs_mask)
  118 
  119 static struct td_sched td_sched0;
  120 struct mtx sched_lock;
  121 
   122 static int      realstathz = 127; /* stathz is sometimes 0, so run off of hz. */
  123 static int      sched_tdcnt;    /* Total runnable threads in the system. */
  124 static int      sched_slice = 12; /* Thread run time before rescheduling. */
  125 
  126 static void     setup_runqs(void);
  127 static void     schedcpu(void);
  128 static void     schedcpu_thread(void);
  129 static void     sched_priority(struct thread *td, u_char prio);
  130 static void     sched_setup(void *dummy);
  131 static void     maybe_resched(struct thread *td);
  132 static void     updatepri(struct thread *td);
  133 static void     resetpriority(struct thread *td);
  134 static void     resetpriority_thread(struct thread *td);
  135 #ifdef SMP
  136 static int      sched_pickcpu(struct thread *td);
  137 static int      forward_wakeup(int cpunum);
  138 static void     kick_other_cpu(int pri, int cpuid);
  139 #endif
  140 
  141 static struct kproc_desc sched_kp = {
  142         "schedcpu",
  143         schedcpu_thread,
  144         NULL
  145 };
  146 SYSINIT(schedcpu, SI_SUB_LAST, SI_ORDER_FIRST, kproc_start,
  147     &sched_kp);
  148 SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL);
  149 
  150 static void sched_initticks(void *dummy);
  151 SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks,
  152     NULL);
  153 
  154 /*
  155  * Global run queue.
  156  */
  157 static struct runq runq;
  158 
  159 #ifdef SMP
  160 /*
  161  * Per-CPU run queues
  162  */
  163 static struct runq runq_pcpu[MAXCPU];
  164 long runq_length[MAXCPU];
  165 
  166 static cpuset_t idle_cpus_mask;
  167 #endif
  168 
  169 struct pcpuidlestat {
  170         u_int idlecalls;
  171         u_int oldidlecalls;
  172 };
  173 static DPCPU_DEFINE(struct pcpuidlestat, idlestat);
  174 
  175 static void
  176 setup_runqs(void)
  177 {
  178 #ifdef SMP
  179         int i;
  180 
  181         for (i = 0; i < MAXCPU; ++i)
  182                 runq_init(&runq_pcpu[i]);
  183 #endif
  184 
  185         runq_init(&runq);
  186 }
  187 
  188 static int
  189 sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
  190 {
  191         int error, new_val, period;
  192 
  193         period = 1000000 / realstathz;
  194         new_val = period * sched_slice;
  195         error = sysctl_handle_int(oidp, &new_val, 0, req);
  196         if (error != 0 || req->newptr == NULL)
  197                 return (error);
  198         if (new_val <= 0)
  199                 return (EINVAL);
  200         sched_slice = imax(1, (new_val + period / 2) / period);
  201         hogticks = imax(1, (2 * hz * sched_slice + realstathz / 2) /
  202             realstathz);
  203         return (0);
  204 }
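
/*
 * A minimal sketch of the microsecond round-trip in sysctl_kern_quantum()
 * above, using the defaults declared earlier in this file (realstathz =
 * 127, sched_slice = 12).  Hypothetical userland code under #if 0, shown
 * only to make the rounding arithmetic concrete.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	int realstathz = 127, sched_slice = 12;
	int period, quantum, new_slice;

	period = 1000000 / realstathz;	/* ~7874 us per stat tick */
	quantum = period * sched_slice;	/* ~94488 us reported by a read */
	printf("kern.sched.quantum = %d us\n", quantum);

	/* A write back rounds to the nearest whole stathz tick. */
	new_slice = (quantum + period / 2) / period;
	printf("sched_slice after write-back = %d\n", new_slice);
	return (0);
}
#endif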
  205 
  206 SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RD, 0, "Scheduler");
  207 
  208 SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "4BSD", 0,
  209     "Scheduler name");
  210 SYSCTL_PROC(_kern_sched, OID_AUTO, quantum, CTLTYPE_INT | CTLFLAG_RW,
  211     NULL, 0, sysctl_kern_quantum, "I",
  212     "Quantum for timeshare threads in microseconds");
  213 SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0,
  214     "Quantum for timeshare threads in stathz ticks");
  215 #ifdef SMP
  216 /* Enable forwarding of wakeups to all other cpus */
  217 static SYSCTL_NODE(_kern_sched, OID_AUTO, ipiwakeup, CTLFLAG_RD, NULL,
  218     "Kernel SMP");
  219 
  220 static int runq_fuzz = 1;
  221 SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");
  222 
  223 static int forward_wakeup_enabled = 1;
  224 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, enabled, CTLFLAG_RW,
  225            &forward_wakeup_enabled, 0,
  226            "Forwarding of wakeup to idle CPUs");
  227 
  228 static int forward_wakeups_requested = 0;
  229 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, requested, CTLFLAG_RD,
  230            &forward_wakeups_requested, 0,
  231            "Requests for Forwarding of wakeup to idle CPUs");
  232 
  233 static int forward_wakeups_delivered = 0;
  234 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, delivered, CTLFLAG_RD,
  235            &forward_wakeups_delivered, 0,
  236            "Completed Forwarding of wakeup to idle CPUs");
  237 
  238 static int forward_wakeup_use_mask = 1;
  239 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, usemask, CTLFLAG_RW,
  240            &forward_wakeup_use_mask, 0,
  241            "Use the mask of idle cpus");
  242 
  243 static int forward_wakeup_use_loop = 0;
  244 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, useloop, CTLFLAG_RW,
  245            &forward_wakeup_use_loop, 0,
  246            "Use a loop to find idle cpus");
  247 
  248 #endif
  249 #if 0
  250 static int sched_followon = 0;
  251 SYSCTL_INT(_kern_sched, OID_AUTO, followon, CTLFLAG_RW,
  252            &sched_followon, 0,
  253            "allow threads to share a quantum");
  254 #endif
  255 
  256 SDT_PROVIDER_DEFINE(sched);
  257 
  258 SDT_PROBE_DEFINE3(sched, , , change__pri, "struct thread *", 
  259     "struct proc *", "uint8_t");
  260 SDT_PROBE_DEFINE3(sched, , , dequeue, "struct thread *", 
  261     "struct proc *", "void *");
  262 SDT_PROBE_DEFINE4(sched, , , enqueue, "struct thread *", 
  263     "struct proc *", "void *", "int");
  264 SDT_PROBE_DEFINE4(sched, , , lend__pri, "struct thread *", 
  265     "struct proc *", "uint8_t", "struct thread *");
  266 SDT_PROBE_DEFINE2(sched, , , load__change, "int", "int");
  267 SDT_PROBE_DEFINE2(sched, , , off__cpu, "struct thread *",
  268     "struct proc *");
  269 SDT_PROBE_DEFINE(sched, , , on__cpu);
  270 SDT_PROBE_DEFINE(sched, , , remain__cpu);
  271 SDT_PROBE_DEFINE2(sched, , , surrender, "struct thread *",
  272     "struct proc *");
  273 
  274 static __inline void
  275 sched_load_add(void)
  276 {
  277 
  278         sched_tdcnt++;
  279         KTR_COUNTER0(KTR_SCHED, "load", "global load", sched_tdcnt);
  280         SDT_PROBE2(sched, , , load__change, NOCPU, sched_tdcnt);
  281 }
  282 
  283 static __inline void
  284 sched_load_rem(void)
  285 {
  286 
  287         sched_tdcnt--;
  288         KTR_COUNTER0(KTR_SCHED, "load", "global load", sched_tdcnt);
  289         SDT_PROBE2(sched, , , load__change, NOCPU, sched_tdcnt);
  290 }
  291 /*
  292  * Arrange to reschedule if necessary, taking the priorities and
  293  * schedulers into account.
  294  */
  295 static void
  296 maybe_resched(struct thread *td)
  297 {
  298 
  299         THREAD_LOCK_ASSERT(td, MA_OWNED);
  300         if (td->td_priority < curthread->td_priority)
  301                 curthread->td_flags |= TDF_NEEDRESCHED;
  302 }
  303 
  304 /*
   305  * This function is called when a thread is about to be put on a run queue
   306  * because it has been made runnable or its priority has been adjusted.  It
   307  * determines whether the new thread should preempt the current thread.  If so,
  308  * it switches to it and eventually returns true.  If not, it returns false
  309  * so that the caller may place the thread on an appropriate run queue.
  310  */
  311 int
  312 maybe_preempt(struct thread *td)
  313 {
  314 #ifdef PREEMPTION
  315         struct thread *ctd;
  316         int cpri, pri;
  317 
  318         /*
  319          * The new thread should not preempt the current thread if any of the
  320          * following conditions are true:
  321          *
  322          *  - The kernel is in the throes of crashing (panicstr).
  323          *  - The current thread has a higher (numerically lower) or
  324          *    equivalent priority.  Note that this prevents curthread from
  325          *    trying to preempt to itself.
  326          *  - It is too early in the boot for context switches (cold is set).
  327          *  - The current thread has an inhibitor set or is in the process of
  328          *    exiting.  In this case, the current thread is about to switch
  329          *    out anyways, so there's no point in preempting.  If we did,
  330          *    the current thread would not be properly resumed as well, so
  331          *    just avoid that whole landmine.
  332          *  - If the new thread's priority is not a realtime priority and
  333          *    the current thread's priority is not an idle priority and
  334          *    FULL_PREEMPTION is disabled.
  335          *
  336          * If all of these conditions are false, but the current thread is in
  337          * a nested critical section, then we have to defer the preemption
  338          * until we exit the critical section.  Otherwise, switch immediately
  339          * to the new thread.
  340          */
  341         ctd = curthread;
  342         THREAD_LOCK_ASSERT(td, MA_OWNED);
  343         KASSERT((td->td_inhibitors == 0),
  344                         ("maybe_preempt: trying to run inhibited thread"));
  345         pri = td->td_priority;
  346         cpri = ctd->td_priority;
  347         if (panicstr != NULL || pri >= cpri || cold /* || dumping */ ||
  348             TD_IS_INHIBITED(ctd))
  349                 return (0);
  350 #ifndef FULL_PREEMPTION
  351         if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE)
  352                 return (0);
  353 #endif
  354 
  355         if (ctd->td_critnest > 1) {
  356                 CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
  357                     ctd->td_critnest);
  358                 ctd->td_owepreempt = 1;
  359                 return (0);
  360         }
  361         /*
  362          * Thread is runnable but not yet put on system run queue.
  363          */
  364         MPASS(ctd->td_lock == td->td_lock);
  365         MPASS(TD_ON_RUNQ(td));
  366         TD_SET_RUNNING(td);
  367         CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
  368             td->td_proc->p_pid, td->td_name);
  369         mi_switch(SW_INVOL | SW_PREEMPT | SWT_PREEMPT, td);
  370         /*
  371          * td's lock pointer may have changed.  We have to return with it
  372          * locked.
  373          */
  374         spinlock_enter();
  375         thread_unlock(ctd);
  376         thread_lock(td);
  377         spinlock_exit();
  378         return (1);
  379 #else
  380         return (0);
  381 #endif
  382 }
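
/*
 * A minimal sketch of the !FULL_PREEMPTION gate tested above.  The
 * boundary values PRI_MAX_ITHD = 47 and PRI_MIN_IDLE = 224 are the stock
 * <sys/priority.h> numbers and are assumptions of this sketch, not
 * definitions from this file.  Lower numbers mean stronger priority.
 */
#if 0
#include <stdio.h>

#define PRI_MAX_ITHD	47	/* assumed stock value */
#define PRI_MIN_IDLE	224	/* assumed stock value */

/* Return 1 when the gate would still allow preemption. */
static int
gate_allows(int pri, int cpri)
{
	return (!(pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE));
}

int
main(void)
{
	printf("ithread 20 vs. user 140: %d\n", gate_allows(20, 140));  /* 1 */
	printf("user 130 vs. user 140:   %d\n", gate_allows(130, 140)); /* 0 */
	printf("user 130 vs. idle 255:   %d\n", gate_allows(130, 255)); /* 1 */
	return (0);
}
#endif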
  383 
  384 /*
  385  * Constants for digital decay and forget:
  386  *      90% of (td_estcpu) usage in 5 * loadav time
  387  *      95% of (ts_pctcpu) usage in 60 seconds (load insensitive)
  388  *          Note that, as ps(1) mentions, this can let percentages
  389  *          total over 100% (I've seen 137.9% for 3 processes).
  390  *
  391  * Note that schedclock() updates td_estcpu and p_cpticks asynchronously.
  392  *
  393  * We wish to decay away 90% of td_estcpu in (5 * loadavg) seconds.
  394  * That is, the system wants to compute a value of decay such
  395  * that the following for loop:
  396  *      for (i = 0; i < (5 * loadavg); i++)
  397  *              td_estcpu *= decay;
  398  * will compute
  399  *      td_estcpu *= 0.1;
  400  * for all values of loadavg:
  401  *
  402  * Mathematically this loop can be expressed by saying:
  403  *      decay ** (5 * loadavg) ~= .1
  404  *
  405  * The system computes decay as:
  406  *      decay = (2 * loadavg) / (2 * loadavg + 1)
  407  *
  408  * We wish to prove that the system's computation of decay
  409  * will always fulfill the equation:
  410  *      decay ** (5 * loadavg) ~= .1
  411  *
  412  * If we compute b as:
  413  *      b = 2 * loadavg
  414  * then
  415  *      decay = b / (b + 1)
  416  *
  417  * We now need to prove two things:
  418  *      1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
  419  *      2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
  420  *
  421  * Facts:
  422  *         For x close to zero, exp(x) =~ 1 + x, since
  423  *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
  424  *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
  425  *         For x close to zero, ln(1+x) =~ x, since
  426  *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
  427  *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
  428  *         ln(.1) =~ -2.30
  429  *
  430  * Proof of (1):
  431  *    Solve (factor)**(power) =~ .1 given power (5*loadav):
  432  *      solving for factor,
  433  *      ln(factor) =~ (-2.30/5*loadav), or
  434  *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
  435  *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
  436  *
  437  * Proof of (2):
  438  *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
  439  *      solving for power,
  440  *      power*ln(b/(b+1)) =~ -2.30, or
  441  *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
  442  *
  443  * Actual power values for the implemented algorithm are as follows:
  444  *      loadav: 1       2       3       4
  445  *      power:  5.68    10.32   14.94   19.55
  446  */
  447 
  448 /* calculations for digital decay to forget 90% of usage in 5*loadav sec */
  449 #define loadfactor(loadav)      (2 * (loadav))
  450 #define decay_cpu(loadfac, cpu) (((loadfac) * (cpu)) / ((loadfac) + FSCALE))
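
/*
 * A minimal sketch iterating the decay_cpu() recurrence above to check
 * the "forget 90% in 5*loadav seconds" claim for a load average of 1.0.
 * FSHIFT = 11 (FSCALE = 2048) is the stock <sys/param.h> value and an
 * assumption of this sketch; the loop mirrors the once-per-second work
 * of schedcpu() below.
 */
#if 0
#include <stdio.h>

#define FSHIFT	11
#define FSCALE	(1 << FSHIFT)
#define loadfactor(loadav)	(2 * (loadav))
#define decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))

int
main(void)
{
	long loadav = 1 * FSCALE;	/* load average of 1.0, fixed point */
	long loadfac = loadfactor(loadav);
	long estcpu = 255;
	int sec;

	/* decay = 2/3 per second; ~5.68 iterations reach the 10% mark. */
	for (sec = 1; sec <= 6; sec++) {
		estcpu = decay_cpu(loadfac, estcpu);
		printf("after %ds: estcpu = %ld\n", sec, estcpu);
	}
	return (0);
}
#endif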
  451 
  452 /* decay 95% of `ts_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
  453 static fixpt_t  ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
  454 SYSCTL_UINT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
  455 
  456 /*
  457  * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
  458  * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
  459  * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
  460  *
  461  * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
  462  *      1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
  463  *
  464  * If you don't want to bother with the faster/more-accurate formula, you
  465  * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
  466  * (more general) method of calculating the %age of CPU used by a process.
  467  */
  468 #define CCPU_SHIFT      11
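
/*
 * A minimal sketch applying the once-per-second ts_pctcpu decay used by
 * schedcpu() below for 60 iterations.  With ccpu = exp(-1/20) in
 * FSHIFT = 11 fixed point, roughly exp(-3) =~ 5% of the original %cpu
 * should survive, matching the "decay 95% in 60 seconds" comment above.
 */
#if 0
#include <stdio.h>

#define FSHIFT	11
#define FSCALE	(1 << FSHIFT)

int
main(void)
{
	unsigned int ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
	unsigned int pctcpu = FSCALE;	/* start at 100% of one CPU */
	int sec;

	for (sec = 1; sec <= 60; sec++)
		pctcpu = (pctcpu * ccpu) >> FSHIFT;
	printf("remaining after 60s: %.1f%%\n", 100.0 * pctcpu / FSCALE);
	return (0);
}
#endif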
  469 
  470 /*
   471  * Recompute process priorities, once per second (every hz clock ticks).
  472  * MP-safe, called without the Giant mutex.
  473  */
  474 /* ARGSUSED */
  475 static void
  476 schedcpu(void)
  477 {
  478         register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
  479         struct thread *td;
  480         struct proc *p;
  481         struct td_sched *ts;
  482         int awake;
  483 
  484         sx_slock(&allproc_lock);
  485         FOREACH_PROC_IN_SYSTEM(p) {
  486                 PROC_LOCK(p);
  487                 if (p->p_state == PRS_NEW) {
  488                         PROC_UNLOCK(p);
  489                         continue;
  490                 }
  491                 FOREACH_THREAD_IN_PROC(p, td) {
  492                         awake = 0;
  493                         thread_lock(td);
  494                         ts = td->td_sched;
  495                         /*
  496                          * Increment sleep time (if sleeping).  We
  497                          * ignore overflow, as above.
  498                          */
  499                         /*
  500                          * The td_sched slptimes are not touched in wakeup
  501                          * because the thread may not HAVE everything in
  502                          * memory? XXX I think this is out of date.
  503                          */
  504                         if (TD_ON_RUNQ(td)) {
  505                                 awake = 1;
  506                                 td->td_flags &= ~TDF_DIDRUN;
  507                         } else if (TD_IS_RUNNING(td)) {
  508                                 awake = 1;
  509                                 /* Do not clear TDF_DIDRUN */
  510                         } else if (td->td_flags & TDF_DIDRUN) {
  511                                 awake = 1;
  512                                 td->td_flags &= ~TDF_DIDRUN;
  513                         }
  514 
  515                         /*
  516                          * ts_pctcpu is only for ps and ttyinfo().
  517                          */
  518                         ts->ts_pctcpu = (ts->ts_pctcpu * ccpu) >> FSHIFT;
  519                         /*
  520                          * If the td_sched has been idle the entire second,
  521                          * stop recalculating its priority until
  522                          * it wakes up.
  523                          */
  524                         if (ts->ts_cpticks != 0) {
  525 #if     (FSHIFT >= CCPU_SHIFT)
  526                                 ts->ts_pctcpu += (realstathz == 100)
  527                                     ? ((fixpt_t) ts->ts_cpticks) <<
  528                                     (FSHIFT - CCPU_SHIFT) :
  529                                     100 * (((fixpt_t) ts->ts_cpticks)
  530                                     << (FSHIFT - CCPU_SHIFT)) / realstathz;
  531 #else
  532                                 ts->ts_pctcpu += ((FSCALE - ccpu) *
  533                                     (ts->ts_cpticks *
  534                                     FSCALE / realstathz)) >> FSHIFT;
  535 #endif
  536                                 ts->ts_cpticks = 0;
  537                         }
  538                         /*
  539                          * If there are ANY running threads in this process,
  540                          * then don't count it as sleeping.
  541                          * XXX: this is broken.
  542                          */
  543                         if (awake) {
  544                                 if (ts->ts_slptime > 1) {
  545                                         /*
  546                                          * In an ideal world, this should not
  547                                          * happen, because whoever woke us
  548                                          * up from the long sleep should have
  549                                          * unwound the slptime and reset our
  550                                          * priority before we run at the stale
  551                                          * priority.  Should KASSERT at some
  552                                          * point when all the cases are fixed.
  553                                          */
  554                                         updatepri(td);
  555                                 }
  556                                 ts->ts_slptime = 0;
  557                         } else
  558                                 ts->ts_slptime++;
  559                         if (ts->ts_slptime > 1) {
  560                                 thread_unlock(td);
  561                                 continue;
  562                         }
  563                         td->td_estcpu = decay_cpu(loadfac, td->td_estcpu);
  564                         resetpriority(td);
  565                         resetpriority_thread(td);
  566                         thread_unlock(td);
  567                 }
  568                 PROC_UNLOCK(p);
  569         }
  570         sx_sunlock(&allproc_lock);
  571 }
  572 
  573 /*
  574  * Main loop for a kthread that executes schedcpu once a second.
  575  */
  576 static void
  577 schedcpu_thread(void)
  578 {
  579 
  580         for (;;) {
  581                 schedcpu();
  582                 pause("-", hz);
  583         }
  584 }
  585 
  586 /*
  587  * Recalculate the priority of a process after it has slept for a while.
  588  * For all load averages >= 1 and max td_estcpu of 255, sleeping for at
  589  * least six times the loadfactor will decay td_estcpu to zero.
  590  */
  591 static void
  592 updatepri(struct thread *td)
  593 {
  594         struct td_sched *ts;
  595         fixpt_t loadfac;
  596         unsigned int newcpu;
  597 
  598         ts = td->td_sched;
  599         loadfac = loadfactor(averunnable.ldavg[0]);
  600         if (ts->ts_slptime > 5 * loadfac)
  601                 td->td_estcpu = 0;
  602         else {
  603                 newcpu = td->td_estcpu;
  604                 ts->ts_slptime--;       /* was incremented in schedcpu() */
  605                 while (newcpu && --ts->ts_slptime)
  606                         newcpu = decay_cpu(loadfac, newcpu);
  607                 td->td_estcpu = newcpu;
  608         }
  609 }
  610 
  611 /*
  612  * Compute the priority of a process when running in user mode.
  613  * Arrange to reschedule if the resulting priority is better
  614  * than that of the current process.
  615  */
  616 static void
  617 resetpriority(struct thread *td)
  618 {
  619         register unsigned int newpriority;
  620 
  621         if (td->td_pri_class == PRI_TIMESHARE) {
  622                 newpriority = PUSER + td->td_estcpu / INVERSE_ESTCPU_WEIGHT +
  623                     NICE_WEIGHT * (td->td_proc->p_nice - PRIO_MIN);
  624                 newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
  625                     PRI_MAX_TIMESHARE);
  626                 sched_user_prio(td, newpriority);
  627         }
  628 }
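
/*
 * A minimal sketch of the resetpriority() arithmetic above, with the
 * stock FreeBSD 10 priority constants (PUSER = PRI_MIN_TIMESHARE = 120,
 * PRI_MAX_TIMESHARE = 223) assumed by this sketch rather than defined
 * in this file.
 */
#if 0
#include <stdio.h>

#define PUSER			120
#define PRI_MIN_TIMESHARE	120
#define PRI_MAX_TIMESHARE	223
#define PRIO_MIN		(-20)
#define INVERSE_ESTCPU_WEIGHT	8	/* uniprocessor case */
#define NICE_WEIGHT		1

static unsigned int
user_prio(unsigned int estcpu, int nice)
{
	unsigned int p;

	p = PUSER + estcpu / INVERSE_ESTCPU_WEIGHT +
	    NICE_WEIGHT * (nice - PRIO_MIN);
	if (p < PRI_MIN_TIMESHARE)
		p = PRI_MIN_TIMESHARE;
	if (p > PRI_MAX_TIMESHARE)
		p = PRI_MAX_TIMESHARE;
	return (p);
}

int
main(void)
{
	/* A hog sits at the estcpu clamp (295); a fresh thread at 0. */
	printf("nice 0 hog:   %u\n", user_prio(295, 0));   /* 176 */
	printf("nice 0 fresh: %u\n", user_prio(0, 0));     /* 140 */
	printf("nice -20 hog: %u\n", user_prio(295, -20)); /* 156 */
	return (0);
}
#endif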
  629 
  630 /*
  631  * Update the thread's priority when the associated process's user
  632  * priority changes.
  633  */
  634 static void
  635 resetpriority_thread(struct thread *td)
  636 {
  637 
  638         /* Only change threads with a time sharing user priority. */
  639         if (td->td_priority < PRI_MIN_TIMESHARE ||
  640             td->td_priority > PRI_MAX_TIMESHARE)
  641                 return;
  642 
  643         /* XXX the whole needresched thing is broken, but not silly. */
  644         maybe_resched(td);
  645 
  646         sched_prio(td, td->td_user_pri);
  647 }
  648 
  649 /* ARGSUSED */
  650 static void
  651 sched_setup(void *dummy)
  652 {
  653 
  654         setup_runqs();
  655 
  656         /* Account for thread0. */
  657         sched_load_add();
  658 }
  659 
  660 /*
  661  * This routine determines time constants after stathz and hz are setup.
  662  */
  663 static void
  664 sched_initticks(void *dummy)
  665 {
  666 
  667         realstathz = stathz ? stathz : hz;
  668         sched_slice = realstathz / 10;  /* ~100ms */
  669         hogticks = imax(1, (2 * hz * sched_slice + realstathz / 2) /
  670             realstathz);
  671 }
  672 
  673 /* External interfaces start here */
  674 
  675 /*
  676  * Very early in the boot some setup of scheduler-specific
  677  * parts of proc0 and of some scheduler resources needs to be done.
  678  * Called from:
  679  *  proc0_init()
  680  */
  681 void
  682 schedinit(void)
  683 {
  684         /*
  685          * Set up the scheduler specific parts of proc0.
  686          */
  687         proc0.p_sched = NULL; /* XXX */
  688         thread0.td_sched = &td_sched0;
  689         thread0.td_lock = &sched_lock;
  690         td_sched0.ts_slice = sched_slice;
  691         mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
  692 }
  693 
  694 int
  695 sched_runnable(void)
  696 {
  697 #ifdef SMP
  698         return runq_check(&runq) + runq_check(&runq_pcpu[PCPU_GET(cpuid)]);
  699 #else
  700         return runq_check(&runq);
  701 #endif
  702 }
  703 
  704 int
  705 sched_rr_interval(void)
  706 {
  707 
  708         /* Convert sched_slice from stathz to hz. */
  709         return (imax(1, (sched_slice * hz + realstathz / 2) / realstathz));
  710 }
  711 
  712 /*
  713  * We adjust the priority of the current process.  The priority of
  714  * a process gets worse as it accumulates CPU time.  The cpu usage
  715  * estimator (td_estcpu) is increased here.  resetpriority() will
  716  * compute a different priority each time td_estcpu increases by
  717  * INVERSE_ESTCPU_WEIGHT
  718  * (until MAXPRI is reached).  The cpu usage estimator ramps up
  719  * quite quickly when the process is running (linearly), and decays
  720  * away exponentially, at a rate which is proportionally slower when
  721  * the system is busy.  The basic principle is that the system will
  722  * 90% forget that the process used a lot of CPU time in 5 * loadav
  723  * seconds.  This causes the system to favor processes which haven't
  724  * run much recently, and to round-robin among other processes.
  725  */
  726 void
  727 sched_clock(struct thread *td)
  728 {
  729         struct pcpuidlestat *stat;
  730         struct td_sched *ts;
  731 
  732         THREAD_LOCK_ASSERT(td, MA_OWNED);
  733         ts = td->td_sched;
  734 
  735         ts->ts_cpticks++;
  736         td->td_estcpu = ESTCPULIM(td->td_estcpu + 1);
  737         if ((td->td_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
  738                 resetpriority(td);
  739                 resetpriority_thread(td);
  740         }
  741 
  742         /*
  743          * Force a context switch if the current thread has used up a full
  744          * time slice (default is 100ms).
  745          */
  746         if (!TD_IS_IDLETHREAD(td) && --ts->ts_slice <= 0) {
  747                 ts->ts_slice = sched_slice;
  748                 td->td_flags |= TDF_NEEDRESCHED | TDF_SLICEEND;
  749         }
  750 
  751         stat = DPCPU_PTR(idlestat);
  752         stat->oldidlecalls = stat->idlecalls;
  753         stat->idlecalls = 0;
  754 }
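
/*
 * A minimal sketch of what sched_clock() above does to a thread that
 * runs flat out for one second of stat ticks, using this file's defaults
 * (realstathz = 127, sched_slice = 12) and the uniprocessor
 * INVERSE_ESTCPU_WEIGHT of 8.  Hypothetical standalone code.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	int realstathz = 127, sched_slice = 12;
	int estcpu = 0, slice = sched_slice, tick;

	for (tick = 1; tick <= realstathz; tick++) {
		estcpu++;		/* ESTCPULIM clamp omitted */
		if (estcpu % 8 == 0)	/* one level per 8 estcpu */
			printf("tick %3d: priority penalty %d\n",
			    tick, estcpu / 8);
		if (--slice <= 0) {	/* ~94 ms slice expires */
			slice = sched_slice;
			printf("tick %3d: NEEDRESCHED|SLICEEND\n", tick);
		}
	}
	return (0);
}
#endif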
  755 
  756 /*
  757  * Charge child's scheduling CPU usage to parent.
  758  */
  759 void
  760 sched_exit(struct proc *p, struct thread *td)
  761 {
  762 
  763         KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "proc exit",
  764             "prio:%d", td->td_priority);
  765 
  766         PROC_LOCK_ASSERT(p, MA_OWNED);
  767         sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
  768 }
  769 
  770 void
  771 sched_exit_thread(struct thread *td, struct thread *child)
  772 {
  773 
  774         KTR_STATE1(KTR_SCHED, "thread", sched_tdname(child), "exit",
  775             "prio:%d", child->td_priority);
  776         thread_lock(td);
  777         td->td_estcpu = ESTCPULIM(td->td_estcpu + child->td_estcpu);
  778         thread_unlock(td);
  779         thread_lock(child);
  780         if ((child->td_flags & TDF_NOLOAD) == 0)
  781                 sched_load_rem();
  782         thread_unlock(child);
  783 }
  784 
  785 void
  786 sched_fork(struct thread *td, struct thread *childtd)
  787 {
  788         sched_fork_thread(td, childtd);
  789 }
  790 
  791 void
  792 sched_fork_thread(struct thread *td, struct thread *childtd)
  793 {
  794         struct td_sched *ts;
  795 
  796         childtd->td_estcpu = td->td_estcpu;
  797         childtd->td_lock = &sched_lock;
  798         childtd->td_cpuset = cpuset_ref(td->td_cpuset);
  799         childtd->td_priority = childtd->td_base_pri;
  800         ts = childtd->td_sched;
  801         bzero(ts, sizeof(*ts));
  802         ts->ts_flags |= (td->td_sched->ts_flags & TSF_AFFINITY);
  803         ts->ts_slice = 1;
  804 }
  805 
  806 void
  807 sched_nice(struct proc *p, int nice)
  808 {
  809         struct thread *td;
  810 
  811         PROC_LOCK_ASSERT(p, MA_OWNED);
  812         p->p_nice = nice;
  813         FOREACH_THREAD_IN_PROC(p, td) {
  814                 thread_lock(td);
  815                 resetpriority(td);
  816                 resetpriority_thread(td);
  817                 thread_unlock(td);
  818         }
  819 }
  820 
  821 void
  822 sched_class(struct thread *td, int class)
  823 {
  824         THREAD_LOCK_ASSERT(td, MA_OWNED);
  825         td->td_pri_class = class;
  826 }
  827 
  828 /*
  829  * Adjust the priority of a thread.
  830  */
  831 static void
  832 sched_priority(struct thread *td, u_char prio)
  833 {
  834 
  835 
  836         KTR_POINT3(KTR_SCHED, "thread", sched_tdname(td), "priority change",
  837             "prio:%d", td->td_priority, "new prio:%d", prio, KTR_ATTR_LINKED,
  838             sched_tdname(curthread));
  839         SDT_PROBE3(sched, , , change__pri, td, td->td_proc, prio);
  840         if (td != curthread && prio > td->td_priority) {
  841                 KTR_POINT3(KTR_SCHED, "thread", sched_tdname(curthread),
  842                     "lend prio", "prio:%d", td->td_priority, "new prio:%d",
  843                     prio, KTR_ATTR_LINKED, sched_tdname(td));
  844                 SDT_PROBE4(sched, , , lend__pri, td, td->td_proc, prio, 
  845                     curthread);
  846         }
  847         THREAD_LOCK_ASSERT(td, MA_OWNED);
  848         if (td->td_priority == prio)
  849                 return;
  850         td->td_priority = prio;
  851         if (TD_ON_RUNQ(td) && td->td_rqindex != (prio / RQ_PPQ)) {
  852                 sched_rem(td);
  853                 sched_add(td, SRQ_BORING);
  854         }
  855 }
  856 
  857 /*
  858  * Update a thread's priority when it is lent another thread's
  859  * priority.
  860  */
  861 void
  862 sched_lend_prio(struct thread *td, u_char prio)
  863 {
  864 
  865         td->td_flags |= TDF_BORROWING;
  866         sched_priority(td, prio);
  867 }
  868 
  869 /*
  870  * Restore a thread's priority when priority propagation is
  871  * over.  The prio argument is the minimum priority the thread
  872  * needs to have to satisfy other possible priority lending
   873  * requests.  If the thread's regular priority is less
   874  * important than prio, the thread will keep a priority boost
  875  * of prio.
  876  */
  877 void
  878 sched_unlend_prio(struct thread *td, u_char prio)
  879 {
  880         u_char base_pri;
  881 
  882         if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
  883             td->td_base_pri <= PRI_MAX_TIMESHARE)
  884                 base_pri = td->td_user_pri;
  885         else
  886                 base_pri = td->td_base_pri;
  887         if (prio >= base_pri) {
  888                 td->td_flags &= ~TDF_BORROWING;
  889                 sched_prio(td, base_pri);
  890         } else
  891                 sched_lend_prio(td, prio);
  892 }
  893 
  894 void
  895 sched_prio(struct thread *td, u_char prio)
  896 {
  897         u_char oldprio;
  898 
  899         /* First, update the base priority. */
  900         td->td_base_pri = prio;
  901 
  902         /*
  903          * If the thread is borrowing another thread's priority, don't ever
  904          * lower the priority.
  905          */
  906         if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
  907                 return;
  908 
  909         /* Change the real priority. */
  910         oldprio = td->td_priority;
  911         sched_priority(td, prio);
  912 
  913         /*
  914          * If the thread is on a turnstile, then let the turnstile update
  915          * its state.
  916          */
  917         if (TD_ON_LOCK(td) && oldprio != prio)
  918                 turnstile_adjust(td, oldprio);
  919 }
  920 
  921 void
  922 sched_user_prio(struct thread *td, u_char prio)
  923 {
  924 
  925         THREAD_LOCK_ASSERT(td, MA_OWNED);
  926         td->td_base_user_pri = prio;
  927         if (td->td_lend_user_pri <= prio)
  928                 return;
  929         td->td_user_pri = prio;
  930 }
  931 
  932 void
  933 sched_lend_user_prio(struct thread *td, u_char prio)
  934 {
  935 
  936         THREAD_LOCK_ASSERT(td, MA_OWNED);
  937         td->td_lend_user_pri = prio;
  938         td->td_user_pri = min(prio, td->td_base_user_pri);
  939         if (td->td_priority > td->td_user_pri)
  940                 sched_prio(td, td->td_user_pri);
  941         else if (td->td_priority != td->td_user_pri)
  942                 td->td_flags |= TDF_NEEDRESCHED;
  943 }
  944 
  945 void
  946 sched_sleep(struct thread *td, int pri)
  947 {
  948 
  949         THREAD_LOCK_ASSERT(td, MA_OWNED);
  950         td->td_slptick = ticks;
  951         td->td_sched->ts_slptime = 0;
  952         if (pri != 0 && PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
  953                 sched_prio(td, pri);
  954         if (TD_IS_SUSPENDED(td) || pri >= PSOCK)
  955                 td->td_flags |= TDF_CANSWAP;
  956 }
  957 
  958 void
  959 sched_switch(struct thread *td, struct thread *newtd, int flags)
  960 {
  961         struct mtx *tmtx;
  962         struct td_sched *ts;
  963         struct proc *p;
  964         int preempted;
  965 
  966         tmtx = NULL;
  967         ts = td->td_sched;
  968         p = td->td_proc;
  969 
  970         THREAD_LOCK_ASSERT(td, MA_OWNED);
  971 
  972         /* 
  973          * Switch to the sched lock to fix things up and pick
  974          * a new thread.
  975          * Block the td_lock in order to avoid breaking the critical path.
  976          */
  977         if (td->td_lock != &sched_lock) {
  978                 mtx_lock_spin(&sched_lock);
  979                 tmtx = thread_lock_block(td);
  980         }
  981 
  982         if ((td->td_flags & TDF_NOLOAD) == 0)
  983                 sched_load_rem();
  984 
  985         td->td_lastcpu = td->td_oncpu;
  986         preempted = !((td->td_flags & TDF_SLICEEND) ||
  987             (flags & SWT_RELINQUISH));
  988         td->td_flags &= ~(TDF_NEEDRESCHED | TDF_SLICEEND);
  989         td->td_owepreempt = 0;
  990         td->td_oncpu = NOCPU;
  991 
  992         /*
  993          * At the last moment, if this thread is still marked RUNNING,
  994          * then put it back on the run queue as it has not been suspended
   995  * or stopped or anything else similar.  We never put the idle
  996          * threads on the run queue, however.
  997          */
  998         if (td->td_flags & TDF_IDLETD) {
  999                 TD_SET_CAN_RUN(td);
 1000 #ifdef SMP
 1001                 CPU_CLR(PCPU_GET(cpuid), &idle_cpus_mask);
 1002 #endif
 1003         } else {
 1004                 if (TD_IS_RUNNING(td)) {
 1005                         /* Put us back on the run queue. */
 1006                         sched_add(td, preempted ?
 1007                             SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
 1008                             SRQ_OURSELF|SRQ_YIELDING);
 1009                 }
 1010         }
 1011         if (newtd) {
 1012                 /*
 1013                  * The thread we are about to run needs to be counted
 1014                  * as if it had been added to the run queue and selected.
 1015                  * It came from:
 1016                  * * A preemption
 1017                  * * An upcall
 1018                  * * A followon
 1019                  */
 1020                 KASSERT((newtd->td_inhibitors == 0),
 1021                         ("trying to run inhibited thread"));
 1022                 newtd->td_flags |= TDF_DIDRUN;
 1023                 TD_SET_RUNNING(newtd);
 1024                 if ((newtd->td_flags & TDF_NOLOAD) == 0)
 1025                         sched_load_add();
 1026         } else {
 1027                 newtd = choosethread();
 1028                 MPASS(newtd->td_lock == &sched_lock);
 1029         }
 1030 
 1031         if (td != newtd) {
 1032 #ifdef  HWPMC_HOOKS
 1033                 if (PMC_PROC_IS_USING_PMCS(td->td_proc))
 1034                         PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
 1035 #endif
 1036 
 1037                 SDT_PROBE2(sched, , , off__cpu, newtd, newtd->td_proc);
 1038 
 1039                 /* I feel sleepy */
 1040                 lock_profile_release_lock(&sched_lock.lock_object);
 1041 #ifdef KDTRACE_HOOKS
 1042                 /*
 1043                  * If DTrace has set the active vtime enum to anything
 1044                  * other than INACTIVE (0), then it should have set the
 1045                  * function to call.
 1046                  */
 1047                 if (dtrace_vtime_active)
 1048                         (*dtrace_vtime_switch_func)(newtd);
 1049 #endif
 1050 
 1051                 cpu_switch(td, newtd, tmtx != NULL ? tmtx : td->td_lock);
 1052                 lock_profile_obtain_lock_success(&sched_lock.lock_object,
 1053                     0, 0, __FILE__, __LINE__);
 1054                 /*
 1055                  * Where am I?  What year is it?
 1056                  * We are in the same thread that went to sleep above,
 1057                  * but any amount of time may have passed. All our context
 1058                  * will still be available as will local variables.
 1059                  * PCPU values however may have changed as we may have
 1060                  * changed CPU so don't trust cached values of them.
 1061                  * New threads will go to fork_exit() instead of here
 1062                  * so if you change things here you may need to change
 1063                  * things there too.
 1064                  *
 1065                  * If the thread above was exiting it will never wake
 1066                  * up again here, so either it has saved everything it
 1067                  * needed to, or the thread_wait() or wait() will
 1068                  * need to reap it.
 1069                  */
 1070 
 1071                 SDT_PROBE0(sched, , , on__cpu);
 1072 #ifdef  HWPMC_HOOKS
 1073                 if (PMC_PROC_IS_USING_PMCS(td->td_proc))
 1074                         PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
 1075 #endif
 1076         } else
 1077                 SDT_PROBE0(sched, , , remain__cpu);
 1078 
 1079 #ifdef SMP
 1080         if (td->td_flags & TDF_IDLETD)
 1081                 CPU_SET(PCPU_GET(cpuid), &idle_cpus_mask);
 1082 #endif
 1083         sched_lock.mtx_lock = (uintptr_t)td;
 1084         td->td_oncpu = PCPU_GET(cpuid);
 1085         MPASS(td->td_lock == &sched_lock);
 1086 }
 1087 
 1088 void
 1089 sched_wakeup(struct thread *td)
 1090 {
 1091         struct td_sched *ts;
 1092 
 1093         THREAD_LOCK_ASSERT(td, MA_OWNED);
 1094         ts = td->td_sched;
 1095         td->td_flags &= ~TDF_CANSWAP;
 1096         if (ts->ts_slptime > 1) {
 1097                 updatepri(td);
 1098                 resetpriority(td);
 1099         }
 1100         td->td_slptick = 0;
 1101         ts->ts_slptime = 0;
 1102         ts->ts_slice = sched_slice;
 1103         sched_add(td, SRQ_BORING);
 1104 }
 1105 
 1106 #ifdef SMP
 1107 static int
 1108 forward_wakeup(int cpunum)
 1109 {
 1110         struct pcpu *pc;
 1111         cpuset_t dontuse, map, map2;
 1112         u_int id, me;
 1113         int iscpuset;
 1114 
 1115         mtx_assert(&sched_lock, MA_OWNED);
 1116 
 1117         CTR0(KTR_RUNQ, "forward_wakeup()");
 1118 
 1119         if ((!forward_wakeup_enabled) ||
 1120              (forward_wakeup_use_mask == 0 && forward_wakeup_use_loop == 0))
 1121                 return (0);
 1122         if (!smp_started || cold || panicstr)
 1123                 return (0);
 1124 
 1125         forward_wakeups_requested++;
 1126 
 1127         /*
 1128          * Check the idle mask we received against what we calculated
 1129          * before in the old version.
 1130          */
 1131         me = PCPU_GET(cpuid);
 1132 
 1133         /* Don't bother if we should be doing it ourself. */
 1134         if (CPU_ISSET(me, &idle_cpus_mask) &&
 1135             (cpunum == NOCPU || me == cpunum))
 1136                 return (0);
 1137 
 1138         CPU_SETOF(me, &dontuse);
 1139         CPU_OR(&dontuse, &stopped_cpus);
 1140         CPU_OR(&dontuse, &hlt_cpus_mask);
 1141         CPU_ZERO(&map2);
 1142         if (forward_wakeup_use_loop) {
 1143                 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
 1144                         id = pc->pc_cpuid;
 1145                         if (!CPU_ISSET(id, &dontuse) &&
 1146                             pc->pc_curthread == pc->pc_idlethread) {
 1147                                 CPU_SET(id, &map2);
 1148                         }
 1149                 }
 1150         }
 1151 
 1152         if (forward_wakeup_use_mask) {
 1153                 map = idle_cpus_mask;
 1154                 CPU_NAND(&map, &dontuse);
 1155 
 1156                 /* If they are both on, compare and use loop if different. */
 1157                 if (forward_wakeup_use_loop) {
 1158                         if (CPU_CMP(&map, &map2)) {
 1159                                 printf("map != map2, loop method preferred\n");
 1160                                 map = map2;
 1161                         }
 1162                 }
 1163         } else {
 1164                 map = map2;
 1165         }
 1166 
 1167         /* If we only allow a specific CPU, then mask off all the others. */
 1168         if (cpunum != NOCPU) {
 1169                 KASSERT((cpunum <= mp_maxcpus),("forward_wakeup: bad cpunum."));
 1170                 iscpuset = CPU_ISSET(cpunum, &map);
 1171                 if (iscpuset == 0)
 1172                         CPU_ZERO(&map);
 1173                 else
 1174                         CPU_SETOF(cpunum, &map);
 1175         }
 1176         if (!CPU_EMPTY(&map)) {
 1177                 forward_wakeups_delivered++;
 1178                 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
 1179                         id = pc->pc_cpuid;
 1180                         if (!CPU_ISSET(id, &map))
 1181                                 continue;
 1182                         if (cpu_idle_wakeup(pc->pc_cpuid))
 1183                                 CPU_CLR(id, &map);
 1184                 }
 1185                 if (!CPU_EMPTY(&map))
 1186                         ipi_selected(map, IPI_AST);
 1187                 return (1);
 1188         }
 1189         if (cpunum == NOCPU)
 1190                 printf("forward_wakeup: Idle processor not found\n");
 1191         return (0);
 1192 }
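
/*
 * A minimal sketch of the candidate-mask arithmetic in forward_wakeup()
 * above, collapsed onto a 64-bit integer instead of cpuset_t.  The CPU
 * numbering and mask values are hypothetical; the point is only how
 * dontuse carves the sender and stopped/halted CPUs out of the idle
 * mask.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t idle_cpus_mask = 0x0b;	/* CPUs 0, 1 and 3 are idle */
	uint64_t stopped_cpus = 0x00;
	uint64_t hlt_cpus_mask = 0x08;	/* CPU 3 is halted */
	uint64_t me = 1ULL << 0;	/* the waking CPU is CPU 0 */
	uint64_t dontuse, map;

	dontuse = me | stopped_cpus | hlt_cpus_mask;
	map = idle_cpus_mask & ~dontuse;
	/* Only CPU 1 is left as a target for the IPI_AST. */
	printf("wakeup candidates: %#jx\n", (uintmax_t)map);
	return (0);
}
#endif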
 1193 
 1194 static void
 1195 kick_other_cpu(int pri, int cpuid)
 1196 {
 1197         struct pcpu *pcpu;
 1198         int cpri;
 1199 
 1200         pcpu = pcpu_find(cpuid);
 1201         if (CPU_ISSET(cpuid, &idle_cpus_mask)) {
 1202                 forward_wakeups_delivered++;
 1203                 if (!cpu_idle_wakeup(cpuid))
 1204                         ipi_cpu(cpuid, IPI_AST);
 1205                 return;
 1206         }
 1207 
 1208         cpri = pcpu->pc_curthread->td_priority;
 1209         if (pri >= cpri)
 1210                 return;
 1211 
 1212 #if defined(IPI_PREEMPTION) && defined(PREEMPTION)
 1213 #if !defined(FULL_PREEMPTION)
 1214         if (pri <= PRI_MAX_ITHD)
 1215 #endif /* ! FULL_PREEMPTION */
 1216         {
 1217                 ipi_cpu(cpuid, IPI_PREEMPT);
 1218                 return;
 1219         }
 1220 #endif /* defined(IPI_PREEMPTION) && defined(PREEMPTION) */
 1221 
 1222         pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED;
 1223         ipi_cpu(cpuid, IPI_AST);
 1224         return;
 1225 }
 1226 #endif /* SMP */
 1227 
 1228 #ifdef SMP
 1229 static int
 1230 sched_pickcpu(struct thread *td)
 1231 {
 1232         int best, cpu;
 1233 
 1234         mtx_assert(&sched_lock, MA_OWNED);
 1235 
 1236         if (THREAD_CAN_SCHED(td, td->td_lastcpu))
 1237                 best = td->td_lastcpu;
 1238         else
 1239                 best = NOCPU;
 1240         CPU_FOREACH(cpu) {
 1241                 if (!THREAD_CAN_SCHED(td, cpu))
 1242                         continue;
 1243         
 1244                 if (best == NOCPU)
 1245                         best = cpu;
 1246                 else if (runq_length[cpu] < runq_length[best])
 1247                         best = cpu;
 1248         }
 1249         KASSERT(best != NOCPU, ("no valid CPUs"));
 1250 
 1251         return (best);
 1252 }
 1253 #endif
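
/*
 * A minimal sketch of the shortest-queue choice made by sched_pickcpu()
 * above, with a plain bitmask standing in for THREAD_CAN_SCHED() and the
 * cpuset machinery.  All values are hypothetical.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	long runq_length[4] = { 3, 1, 2, 0 };
	int allowed = 0x7;	/* the thread may run on CPUs 0-2 */
	int best = -1, cpu;

	for (cpu = 0; cpu < 4; cpu++) {
		if ((allowed & (1 << cpu)) == 0)
			continue;
		if (best == -1 || runq_length[cpu] < runq_length[best])
			best = cpu;
	}
	printf("picked cpu%d\n", best);	/* cpu1: shortest allowed queue */
	return (0);
}
#endif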
 1254 
 1255 void
 1256 sched_add(struct thread *td, int flags)
 1257 #ifdef SMP
 1258 {
 1259         cpuset_t tidlemsk;
 1260         struct td_sched *ts;
 1261         u_int cpu, cpuid;
 1262         int forwarded = 0;
 1263         int single_cpu = 0;
 1264 
 1265         ts = td->td_sched;
 1266         THREAD_LOCK_ASSERT(td, MA_OWNED);
 1267         KASSERT((td->td_inhibitors == 0),
 1268             ("sched_add: trying to run inhibited thread"));
 1269         KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
 1270             ("sched_add: bad thread state"));
 1271         KASSERT(td->td_flags & TDF_INMEM,
 1272             ("sched_add: thread swapped out"));
 1273 
 1274         KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add",
 1275             "prio:%d", td->td_priority, KTR_ATTR_LINKED,
 1276             sched_tdname(curthread));
 1277         KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup",
 1278             KTR_ATTR_LINKED, sched_tdname(td));
 1279         SDT_PROBE4(sched, , , enqueue, td, td->td_proc, NULL, 
 1280             flags & SRQ_PREEMPTED);
 1281 
 1282 
 1283         /*
 1284          * Now that the thread is moving to the run-queue, set the lock
 1285          * to the scheduler's lock.
 1286          */
 1287         if (td->td_lock != &sched_lock) {
 1288                 mtx_lock_spin(&sched_lock);
 1289                 thread_lock_set(td, &sched_lock);
 1290         }
 1291         TD_SET_RUNQ(td);
 1292 
 1293         /*
 1294          * If SMP is started and the thread is pinned or otherwise limited to
 1295          * a specific set of CPUs, queue the thread to a per-CPU run queue.
 1296          * Otherwise, queue the thread to the global run queue.
 1297          *
 1298          * If SMP has not yet been started we must use the global run queue
 1299          * as per-CPU state may not be initialized yet and we may crash if we
 1300          * try to access the per-CPU run queues.
 1301          */
 1302         if (smp_started && (td->td_pinned != 0 || td->td_flags & TDF_BOUND ||
 1303             ts->ts_flags & TSF_AFFINITY)) {
 1304                 if (td->td_pinned != 0)
 1305                         cpu = td->td_lastcpu;
 1306                 else if (td->td_flags & TDF_BOUND) {
 1307                         /* Find CPU from bound runq. */
 1308                         KASSERT(SKE_RUNQ_PCPU(ts),
 1309                             ("sched_add: bound td_sched not on cpu runq"));
 1310                         cpu = ts->ts_runq - &runq_pcpu[0];
 1311                 } else
 1312                         /* Find a valid CPU for our cpuset */
 1313                         cpu = sched_pickcpu(td);
 1314                 ts->ts_runq = &runq_pcpu[cpu];
 1315                 single_cpu = 1;
 1316                 CTR3(KTR_RUNQ,
 1317                     "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td,
 1318                     cpu);
 1319         } else {
 1320                 CTR2(KTR_RUNQ,
 1321                     "sched_add: adding td_sched:%p (td:%p) to gbl runq", ts,
 1322                     td);
 1323                 cpu = NOCPU;
 1324                 ts->ts_runq = &runq;
 1325         }
 1326 
 1327         cpuid = PCPU_GET(cpuid);
 1328         if (single_cpu && cpu != cpuid) {
 1329                 kick_other_cpu(td->td_priority, cpu);
 1330         } else {
 1331                 if (!single_cpu) {
 1332                         tidlemsk = idle_cpus_mask;
 1333                         CPU_NAND(&tidlemsk, &hlt_cpus_mask);
 1334                         CPU_CLR(cpuid, &tidlemsk);
 1335 
 1336                         if (!CPU_ISSET(cpuid, &idle_cpus_mask) &&
 1337                             ((flags & SRQ_INTR) == 0) &&
 1338                             !CPU_EMPTY(&tidlemsk))
 1339                                 forwarded = forward_wakeup(cpu);
 1340                 }
 1341 
 1342                 if (!forwarded) {
 1343                         if ((flags & SRQ_YIELDING) == 0 && maybe_preempt(td))
 1344                                 return;
 1345                         else
 1346                                 maybe_resched(td);
 1347                 }
 1348         }
 1349 
 1350         if ((td->td_flags & TDF_NOLOAD) == 0)
 1351                 sched_load_add();
 1352         runq_add(ts->ts_runq, td, flags);
 1353         if (cpu != NOCPU)
 1354                 runq_length[cpu]++;
 1355 }
 1356 #else /* SMP */
 1357 {
 1358         struct td_sched *ts;
 1359 
 1360         ts = td->td_sched;
 1361         THREAD_LOCK_ASSERT(td, MA_OWNED);
 1362         KASSERT((td->td_inhibitors == 0),
 1363             ("sched_add: trying to run inhibited thread"));
 1364         KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
 1365             ("sched_add: bad thread state"));
 1366         KASSERT(td->td_flags & TDF_INMEM,
 1367             ("sched_add: thread swapped out"));
 1368         KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add",
 1369             "prio:%d", td->td_priority, KTR_ATTR_LINKED,
 1370             sched_tdname(curthread));
 1371         KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup",
 1372             KTR_ATTR_LINKED, sched_tdname(td));
 1373         SDT_PROBE4(sched, , , enqueue, td, td->td_proc, NULL, 
 1374             flags & SRQ_PREEMPTED);
 1375 
 1376         /*
 1377          * Now that the thread is moving to the run-queue, set the lock
 1378          * to the scheduler's lock.
 1379          */
 1380         if (td->td_lock != &sched_lock) {
 1381                 mtx_lock_spin(&sched_lock);
 1382                 thread_lock_set(td, &sched_lock);
 1383         }
 1384         TD_SET_RUNQ(td);
 1385         CTR2(KTR_RUNQ, "sched_add: adding td_sched:%p (td:%p) to runq", ts, td);
 1386         ts->ts_runq = &runq;
 1387 
 1388         /*
 1389          * If we are yielding (on the way out anyhow) or the thread
  1390  * being saved is US, then don't try to be smart about preemption
  1391  * or kicking off another CPU as it won't help and may hinder.
  1392  * In the YIELDING case, we are about to run whoever is being
  1393  * put in the queue anyhow, and in the OURSELF case, we are
  1394  * putting ourselves on the run queue, which also only happens
 1395          * when we are about to yield.
 1396          */
 1397         if ((flags & SRQ_YIELDING) == 0) {
 1398                 if (maybe_preempt(td))
 1399                         return;
 1400         }
 1401         if ((td->td_flags & TDF_NOLOAD) == 0)
 1402                 sched_load_add();
 1403         runq_add(ts->ts_runq, td, flags);
 1404         maybe_resched(td);
 1405 }
 1406 #endif /* SMP */
 1407 
 1408 void
 1409 sched_rem(struct thread *td)
 1410 {
 1411         struct td_sched *ts;
 1412 
 1413         ts = td->td_sched;
 1414         KASSERT(td->td_flags & TDF_INMEM,
 1415             ("sched_rem: thread swapped out"));
 1416         KASSERT(TD_ON_RUNQ(td),
 1417             ("sched_rem: thread not on run queue"));
 1418         mtx_assert(&sched_lock, MA_OWNED);
 1419         KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq rem",
 1420             "prio:%d", td->td_priority, KTR_ATTR_LINKED,
 1421             sched_tdname(curthread));
 1422         SDT_PROBE3(sched, , , dequeue, td, td->td_proc, NULL);
 1423 
 1424         if ((td->td_flags & TDF_NOLOAD) == 0)
 1425                 sched_load_rem();
 1426 #ifdef SMP
 1427         if (ts->ts_runq != &runq)
 1428                 runq_length[ts->ts_runq - runq_pcpu]--;
 1429 #endif
 1430         runq_remove(ts->ts_runq, td);
 1431         TD_SET_CAN_RUN(td);
 1432 }
 1433 
 1434 /*
 1435  * Select threads to run.  Note that running threads still consume a
 1436  * slot.
 1437  */
 1438 struct thread *
 1439 sched_choose(void)
 1440 {
 1441         struct thread *td;
 1442         struct runq *rq;
 1443 
 1444         mtx_assert(&sched_lock, MA_OWNED);
 1445 #ifdef SMP
 1446         struct thread *tdcpu;
 1447 
 1448         rq = &runq;
 1449         td = runq_choose_fuzz(&runq, runq_fuzz);
 1450         tdcpu = runq_choose(&runq_pcpu[PCPU_GET(cpuid)]);
 1451 
 1452         if (td == NULL ||
 1453             (tdcpu != NULL &&
 1454              tdcpu->td_priority < td->td_priority)) {
 1455                 CTR2(KTR_RUNQ, "choosing td %p from pcpu runq %d", tdcpu,
 1456                      PCPU_GET(cpuid));
 1457                 td = tdcpu;
 1458                 rq = &runq_pcpu[PCPU_GET(cpuid)];
 1459         } else {
 1460                 CTR1(KTR_RUNQ, "choosing td %p from main runq", td);
 1461         }
 1462 
 1463 #else
 1464         rq = &runq;
 1465         td = runq_choose(&runq);
 1466 #endif
 1467 
 1468         if (td) {
 1469 #ifdef SMP
 1470                 if (td == tdcpu)
 1471                         runq_length[PCPU_GET(cpuid)]--;
 1472 #endif
 1473                 runq_remove(rq, td);
 1474                 td->td_flags |= TDF_DIDRUN;
 1475 
 1476                 KASSERT(td->td_flags & TDF_INMEM,
 1477                     ("sched_choose: thread swapped out"));
 1478                 return (td);
 1479         }
 1480         return (PCPU_GET(idlethread));
 1481 }
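      /*
       * An illustrative example of the selection above: if the global
       * queue's best candidate has priority 120 while this CPU's private
       * queue holds a pinned thread at priority 100 (a lower value is
       * more important), the per-CPU thread runs.  On a tie the global
       * queue wins, since the per-CPU candidate is taken only when its
       * priority is strictly better.
       */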
 1482 
 1483 void
 1484 sched_preempt(struct thread *td)
 1485 {
 1486 
 1487         SDT_PROBE2(sched, , , surrender, td, td->td_proc);
 1488         thread_lock(td);
 1489         if (td->td_critnest > 1)
 1490                 td->td_owepreempt = 1;
 1491         else
 1492                 mi_switch(SW_INVOL | SW_PREEMPT | SWT_PREEMPT, NULL);
 1493         thread_unlock(td);
 1494 }
 1495 
 1496 void
 1497 sched_userret(struct thread *td)
 1498 {
 1499         /*
 1500          * XXX we cheat slightly on the locking here to avoid locking in
 1501          * the usual case.  Setting td_priority here is essentially an
 1502          * incomplete workaround for not setting it properly elsewhere.
 1503          * Now that some interrupt handlers are threads, not setting it
 1504          * properly elsewhere can clobber it in the window between setting
 1505          * it here and returning to user mode, so don't waste time setting
 1506          * it perfectly here.
 1507          */
 1508         KASSERT((td->td_flags & TDF_BORROWING) == 0,
 1509             ("thread with borrowed priority returning to userland"));
 1510         if (td->td_priority != td->td_user_pri) {
 1511                 thread_lock(td);
 1512                 td->td_priority = td->td_user_pri;
 1513                 td->td_base_pri = td->td_user_pri;
 1514                 thread_unlock(td);
 1515         }
 1516 }
 1517 
 1518 void
 1519 sched_bind(struct thread *td, int cpu)
 1520 {
 1521         struct td_sched *ts;
 1522 
 1523         THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED);
 1524         KASSERT(td == curthread, ("sched_bind: can only bind curthread"));
 1525 
 1526         ts = td->td_sched;
 1527 
 1528         td->td_flags |= TDF_BOUND;
 1529 #ifdef SMP
 1530         ts->ts_runq = &runq_pcpu[cpu];
 1531         if (PCPU_GET(cpuid) == cpu)
 1532                 return;
 1533 
 1534         mi_switch(SW_VOL, NULL);
 1535 #endif
 1536 }
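      /*
       * A typical (sketched) usage pattern for sched_bind() and
       * sched_unbind(): pin the current thread to a CPU, do per-CPU
       * work, then unbind.  Note that sched_bind() may context-switch
       * so that execution resumes on the target CPU:
       *
       *      thread_lock(curthread);
       *      sched_bind(curthread, cpu);
       *      thread_unlock(curthread);
       *      ... this code now runs on 'cpu' ...
       *      thread_lock(curthread);
       *      sched_unbind(curthread);
       *      thread_unlock(curthread);
       */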
 1537 
 1538 void
 1539 sched_unbind(struct thread* td)
 1540 {
 1541         THREAD_LOCK_ASSERT(td, MA_OWNED);
 1542         KASSERT(td == curthread, ("sched_unbind: can only unbind curthread"));
 1543         td->td_flags &= ~TDF_BOUND;
 1544 }
 1545 
 1546 int
 1547 sched_is_bound(struct thread *td)
 1548 {
 1549         THREAD_LOCK_ASSERT(td, MA_OWNED);
 1550         return (td->td_flags & TDF_BOUND);
 1551 }
 1552 
 1553 void
 1554 sched_relinquish(struct thread *td)
 1555 {
 1556         thread_lock(td);
 1557         mi_switch(SW_VOL | SWT_RELINQUISH, NULL);
 1558         thread_unlock(td);
 1559 }
 1560 
 1561 int
 1562 sched_load(void)
 1563 {
 1564         return (sched_tdcnt);
 1565 }
 1566 
 1567 int
 1568 sched_sizeof_proc(void)
 1569 {
 1570         return (sizeof(struct proc));
 1571 }
 1572 
 1573 int
 1574 sched_sizeof_thread(void)
 1575 {
 1576         return (sizeof(struct thread) + sizeof(struct td_sched));
 1577 }
 1578 
 1579 fixpt_t
 1580 sched_pctcpu(struct thread *td)
 1581 {
 1582         struct td_sched *ts;
 1583 
 1584         THREAD_LOCK_ASSERT(td, MA_OWNED);
 1585         ts = td->td_sched;
 1586         return (ts->ts_pctcpu);
 1587 }
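      /*
       * ts_pctcpu is a fixed-point fraction scaled by FSCALE
       * (1 << FSHIFT).  Assuming the stock FSHIFT of 11, a value of
       * 1024 corresponds to 1024/2048 = 50% CPU; consumers such as
       * ps(1) perform this conversion in userland.
       */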
 1588 
 1589 #ifdef  RACCT
 1590 /*
 1591  * Calculates the contribution to the thread cpu usage for the latest
 1592  * (unfinished) second.
 1593  */
 1594 fixpt_t
 1595 sched_pctcpu_delta(struct thread *td)
 1596 {
 1597         struct td_sched *ts;
 1598         fixpt_t delta;
 1599         int realstathz;
 1600 
 1601         THREAD_LOCK_ASSERT(td, MA_OWNED);
 1602         ts = td->td_sched;
 1603         delta = 0;
 1604         realstathz = stathz ? stathz : hz;
 1605         if (ts->ts_cpticks != 0) {
 1606 #if     (FSHIFT >= CCPU_SHIFT)
 1607                 delta = (realstathz == 100)
 1608                     ? ((fixpt_t) ts->ts_cpticks) <<
 1609                     (FSHIFT - CCPU_SHIFT) :
 1610                     100 * (((fixpt_t) ts->ts_cpticks)
 1611                     << (FSHIFT - CCPU_SHIFT)) / realstathz;
 1612 #else
 1613                 delta = ((FSCALE - ccpu) *
 1614                     (ts->ts_cpticks *
 1615                     FSCALE / realstathz)) >> FSHIFT;
 1616 #endif
 1617         }
 1618 
 1619         return (delta);
 1620 }
 1621 #endif
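      /*
       * The two branches above are roughly equivalent evaluations of the
       * decayed-tick contribution: when FSHIFT >= CCPU_SHIFT the scaling
       * collapses to a shift, and with the historical stathz of 100 and
       * the stock FSHIFT == CCPU_SHIFT == 11 the fast path reduces to
       * the raw tick count, e.g. ts_cpticks = 50 yields 50 << 0 = 50
       * (worked example assumes the stock constants).  The #else branch
       * applies the full (FSCALE - ccpu) weighting instead.
       */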
 1622 
 1623 void
 1624 sched_tick(int cnt)
 1625 {
 1626 }
 1627 
 1628 /*
 1629  * The actual idle process.
 1630  */
 1631 void
 1632 sched_idletd(void *dummy)
 1633 {
 1634         struct pcpuidlestat *stat;
 1635 
 1636         THREAD_NO_SLEEPING();
 1637         stat = DPCPU_PTR(idlestat);
 1638         for (;;) {
 1639                 mtx_assert(&Giant, MA_NOTOWNED);
 1640 
 1641                 while (sched_runnable() == 0) {
 1642                         cpu_idle(stat->idlecalls + stat->oldidlecalls > 64);
 1643                         stat->idlecalls++;
 1644                 }
 1645 
 1646                 mtx_lock_spin(&sched_lock);
 1647                 mi_switch(SW_VOL | SWT_IDLE, NULL);
 1648                 mtx_unlock_spin(&sched_lock);
 1649         }
 1650 }
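      /*
       * The argument to cpu_idle() above is a "busy" hint: once the
       * current and previous sampling windows have accumulated more
       * than 64 idle entries, the CPU is presumed to be waking often
       * and cpu_idle() may pick a shallower idle state.  This reading
       * is inferred from the expression; the actual policy lives in
       * the machine-dependent cpu_idle() implementation.
       */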
 1651 
 1652 /*
 1653  * A CPU is entering for the first time or a thread is exiting.
 1654  */
 1655 void
 1656 sched_throw(struct thread *td)
 1657 {
 1658         /*
 1659          * Correct spinlock nesting.  The idle thread context that we are
 1660          * borrowing was created so that it would start out with a single
 1661          * spin lock (sched_lock) held in fork_trampoline().  Since we've
 1662          * explicitly acquired locks in this function, the nesting count
 1663          * is now 2 rather than 1.  Since we are nested, calling
 1664          * spinlock_exit() will simply adjust the counts without allowing
 1665          * spin lock using code to interrupt us.
 1666          */
 1667         if (td == NULL) {
 1668                 mtx_lock_spin(&sched_lock);
 1669                 spinlock_exit();
 1670                 PCPU_SET(switchtime, cpu_ticks());
 1671                 PCPU_SET(switchticks, ticks);
 1672         } else {
 1673                 lock_profile_release_lock(&sched_lock.lock_object);
 1674                 MPASS(td->td_lock == &sched_lock);
 1675         }
 1676         mtx_assert(&sched_lock, MA_OWNED);
 1677         KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
 1678         cpu_throw(td, choosethread());  /* doesn't return */
 1679 }
 1680 
 1681 void
 1682 sched_fork_exit(struct thread *td)
 1683 {
 1684 
 1685         /*
 1686          * Finish setting up thread glue so that it begins execution in a
 1687          * non-nested critical section with sched_lock held but not recursed.
 1688          */
 1689         td->td_oncpu = PCPU_GET(cpuid);
 1690         sched_lock.mtx_lock = (uintptr_t)td;
 1691         lock_profile_obtain_lock_success(&sched_lock.lock_object,
 1692             0, 0, __FILE__, __LINE__);
 1693         THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
 1694 }
 1695 
 1696 char *
 1697 sched_tdname(struct thread *td)
 1698 {
 1699 #ifdef KTR
 1700         struct td_sched *ts;
 1701 
 1702         ts = td->td_sched;
 1703         if (ts->ts_name[0] == '\0')
 1704                 snprintf(ts->ts_name, sizeof(ts->ts_name),
 1705                     "%s tid %d", td->td_name, td->td_tid);
 1706         return (ts->ts_name);
 1707 #else   
 1708         return (td->td_name);
 1709 #endif
 1710 }
 1711 
 1712 #ifdef KTR
 1713 void
 1714 sched_clear_tdname(struct thread *td)
 1715 {
 1716         struct td_sched *ts;
 1717 
 1718         ts = td->td_sched;
 1719         ts->ts_name[0] = '\0';
 1720 }
 1721 #endif
 1722 
 1723 void
 1724 sched_affinity(struct thread *td)
 1725 {
 1726 #ifdef SMP
 1727         struct td_sched *ts;
 1728         int cpu;
 1729 
 1730         THREAD_LOCK_ASSERT(td, MA_OWNED);       
 1731 
 1732         /*
 1733          * Set the TSF_AFFINITY flag if there is at least one CPU this
 1734          * thread can't run on.
 1735          */
 1736         ts = td->td_sched;
 1737         ts->ts_flags &= ~TSF_AFFINITY;
 1738         CPU_FOREACH(cpu) {
 1739                 if (!THREAD_CAN_SCHED(td, cpu)) {
 1740                         ts->ts_flags |= TSF_AFFINITY;
 1741                         break;
 1742                 }
 1743         }
 1744 
 1745         /*
 1746          * If this thread can run on all CPUs, nothing else to do.
 1747          */
 1748         if (!(ts->ts_flags & TSF_AFFINITY))
 1749                 return;
 1750 
 1751         /* Pinned threads and bound threads should be left alone. */
 1752         if (td->td_pinned != 0 || td->td_flags & TDF_BOUND)
 1753                 return;
 1754 
 1755         switch (td->td_state) {
 1756         case TDS_RUNQ:
 1757                 /*
 1758                  * If we are on a per-CPU runqueue that is in the set,
 1759                  * then nothing needs to be done.
 1760                  */
 1761                 if (ts->ts_runq != &runq &&
 1762                     THREAD_CAN_SCHED(td, ts->ts_runq - runq_pcpu))
 1763                         return;
 1764 
 1765                 /* Put this thread on a valid per-CPU runqueue. */
 1766                 sched_rem(td);
 1767                 sched_add(td, SRQ_BORING);
 1768                 break;
 1769         case TDS_RUNNING:
 1770                 /*
 1771                  * See if our current CPU is in the set.  If not, force a
 1772                  * context switch.
 1773                  */
 1774                 if (THREAD_CAN_SCHED(td, td->td_oncpu))
 1775                         return;
 1776 
 1777                 td->td_flags |= TDF_NEEDRESCHED;
 1778                 if (td != curthread)
 1779                         ipi_cpu(td->td_oncpu, IPI_AST);
 1780                 break;
 1781         default:
 1782                 break;
 1783         }
 1784 #endif
 1785 }
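      /*
       * sched_affinity() is normally reached with the thread lock held
       * after a thread's CPU mask has been edited, e.g. along the
       * cpuset_setaffinity(2) path.  A sketched kernel-side sequence:
       *
       *      thread_lock(td);
       *      ... update td's cpuset mask ...
       *      sched_affinity(td);
       *      thread_unlock(td);
       */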
