/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/8.0/sys/kern/sched_4bsd.c 187679 2009-01-25 07:35:10Z jeff $");

#include "opt_hwpmc_hooks.h"
#include "opt_sched.h"
#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpuset.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/kthread.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sx.h>
#include <sys/turnstile.h>
#include <sys/umtx.h>
#include <machine/pcb.h>
#include <machine/smp.h>

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
int				dtrace_vtime_active;
dtrace_vtime_switch_func_t	dtrace_vtime_switch_func;
#endif

/*
 * INVERSE_ESTCPU_WEIGHT is only suitable for statclock() frequencies in
 * the range 100-256 Hz (approximately).
 */
#define	ESTCPULIM(e) \
    min((e), INVERSE_ESTCPU_WEIGHT * (NICE_WEIGHT * (PRIO_MAX - PRIO_MIN) - \
    RQ_PPQ) + INVERSE_ESTCPU_WEIGHT - 1)
#ifdef SMP
#define	INVERSE_ESTCPU_WEIGHT	(8 * smp_cpus)
#else
#define	INVERSE_ESTCPU_WEIGHT	8	/* 1 / (priorities per estcpu level). */
#endif
#define	NICE_WEIGHT		1	/* Priorities per nice level. */
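/*
 * Worked example (uniprocessor, stock parameters): with PRIO_MIN = -20,
 * PRIO_MAX = 20 and RQ_PPQ = 4, ESTCPULIM clamps td_estcpu to
 * 8 * (1 * 40 - 4) + 8 - 1 = 295, i.e. just enough estcpu to walk a
 * timeshare thread across the whole nice/priority range without
 * pushing resetpriority() past it.
 */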

#define	TS_NAME_LEN (MAXCOMLEN + sizeof(" td ") + sizeof(__XSTRING(UINT_MAX)))

/*
 * The schedulable entity that runs a context.
 * This is an extension to the thread structure and is tailored to
 * the requirements of this scheduler.
 */
struct td_sched {
	fixpt_t		ts_pctcpu;	/* (j) %cpu during p_swtime. */
	int		ts_cpticks;	/* (j) Ticks of cpu time. */
	int		ts_slptime;	/* (j) Seconds !RUNNING. */
	int		ts_flags;
	struct runq	*ts_runq;	/* runq the thread is currently on */
#ifdef KTR
	char		ts_name[TS_NAME_LEN];
#endif
};

/* flags kept in td_flags */
#define	TDF_DIDRUN	TDF_SCHED0	/* thread actually ran. */
#define	TDF_BOUND	TDF_SCHED1	/* Bound to one CPU. */

/* flags kept in ts_flags */
#define	TSF_AFFINITY	0x0001		/* Has a non-"full" CPU set. */

#define	SKE_RUNQ_PCPU(ts)						\
    ((ts)->ts_runq != 0 && (ts)->ts_runq != &runq)

#define	THREAD_CAN_SCHED(td, cpu)	\
    CPU_ISSET((cpu), &(td)->td_cpuset->cs_mask)

static struct td_sched td_sched0;
struct mtx sched_lock;

static int	sched_tdcnt;	/* Total runnable threads in the system. */
static int	sched_quantum;	/* Roundrobin scheduling quantum in ticks. */
#define	SCHED_QUANTUM	(hz / 10)	/* Default sched quantum */
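/*
 * For example, with the common hz = 1000 the default quantum is
 * 1000 / 10 = 100 ticks, i.e. 100 ms of CPU time before a thread is
 * asked to yield to a same-priority peer.
 */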

static void	setup_runqs(void);
static void	schedcpu(void);
static void	schedcpu_thread(void);
static void	sched_priority(struct thread *td, u_char prio);
static void	sched_setup(void *dummy);
static void	maybe_resched(struct thread *td);
static void	updatepri(struct thread *td);
static void	resetpriority(struct thread *td);
static void	resetpriority_thread(struct thread *td);
#ifdef SMP
static int	sched_pickcpu(struct thread *td);
static int	forward_wakeup(int cpunum);
static void	kick_other_cpu(int pri, int cpuid);
#endif

static struct kproc_desc sched_kp = {
	"schedcpu",
	schedcpu_thread,
	NULL
};
SYSINIT(schedcpu, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, kproc_start,
    &sched_kp);
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL);

/*
 * Global run queue.
 */
static struct runq runq;

#ifdef SMP
/*
 * Per-CPU run queues
 */
static struct runq runq_pcpu[MAXCPU];
long runq_length[MAXCPU];
#endif

static void
setup_runqs(void)
{
#ifdef SMP
	int i;

	for (i = 0; i < MAXCPU; ++i)
		runq_init(&runq_pcpu[i]);
#endif

	runq_init(&runq);
}

static int
sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;

	new_val = sched_quantum * tick;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < tick)
		return (EINVAL);
	sched_quantum = new_val / tick;
	hogticks = 2 * sched_quantum;
	return (0);
}
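/*
 * The handler above exports the quantum in microseconds: `tick' is the
 * length of one hz tick in microseconds, so assuming hz = 1000 the
 * default kern.sched.quantum reads 100 * 1000 = 100000.  Writing, say,
 * 200000 would then set sched_quantum to 200 ticks and hogticks to 400.
 */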

SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RD, 0, "Scheduler");

SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "4BSD", 0,
    "Scheduler name");

SYSCTL_PROC(_kern_sched, OID_AUTO, quantum, CTLTYPE_INT | CTLFLAG_RW,
    0, sizeof sched_quantum, sysctl_kern_quantum, "I",
    "Roundrobin scheduling quantum in microseconds");

#ifdef SMP
/* Enable forwarding of wakeups to all other cpus */
SYSCTL_NODE(_kern_sched, OID_AUTO, ipiwakeup, CTLFLAG_RD, NULL, "Kernel SMP");

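/*
 * With runq_fuzz > 1, runq_choose_fuzz() scans roughly the first
 * runq_fuzz threads on the best queue and prefers one that last ran on
 * the current CPU, trading strict FIFO order for cache affinity.
 */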
static int runq_fuzz = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");

static int forward_wakeup_enabled = 1;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, enabled, CTLFLAG_RW,
	   &forward_wakeup_enabled, 0,
	   "Forwarding of wakeup to idle CPUs");

static int forward_wakeups_requested = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, requested, CTLFLAG_RD,
	   &forward_wakeups_requested, 0,
	   "Requests for Forwarding of wakeup to idle CPUs");

static int forward_wakeups_delivered = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, delivered, CTLFLAG_RD,
	   &forward_wakeups_delivered, 0,
	   "Completed Forwarding of wakeup to idle CPUs");

static int forward_wakeup_use_mask = 1;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, usemask, CTLFLAG_RW,
	   &forward_wakeup_use_mask, 0,
	   "Use the mask of idle cpus");

static int forward_wakeup_use_loop = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, useloop, CTLFLAG_RW,
	   &forward_wakeup_use_loop, 0,
	   "Use a loop to find idle cpus");

static int forward_wakeup_use_single = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, onecpu, CTLFLAG_RW,
	   &forward_wakeup_use_single, 0,
	   "Only signal one idle cpu");

static int forward_wakeup_use_htt = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, htt2, CTLFLAG_RW,
	   &forward_wakeup_use_htt, 0,
	   "account for htt");

#endif
#if 0
static int sched_followon = 0;
SYSCTL_INT(_kern_sched, OID_AUTO, followon, CTLFLAG_RW,
	   &sched_followon, 0,
	   "allow threads to share a quantum");
#endif

static __inline void
sched_load_add(void)
{

	sched_tdcnt++;
	KTR_COUNTER0(KTR_SCHED, "load", "global load", sched_tdcnt);
}

static __inline void
sched_load_rem(void)
{

	sched_tdcnt--;
	KTR_COUNTER0(KTR_SCHED, "load", "global load", sched_tdcnt);
}
/*
 * Arrange to reschedule if necessary, taking the priorities and
 * schedulers into account.
 */
static void
maybe_resched(struct thread *td)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	if (td->td_priority < curthread->td_priority)
		curthread->td_flags |= TDF_NEEDRESCHED;
}

/*
 * This function is called when a thread is about to be put on a run queue
 * because it has been made runnable or its priority has been adjusted.  It
 * determines whether the new thread should preempt the current thread.  If
 * so, it switches to it and eventually returns true.  If not, it returns
 * false so that the caller may place the thread on an appropriate run queue.
 */
int
maybe_preempt(struct thread *td)
{
#ifdef PREEMPTION
	struct thread *ctd;
	int cpri, pri;

	/*
	 * The new thread should not preempt the current thread if any of the
	 * following conditions are true:
	 *
	 *  - The kernel is in the throes of crashing (panicstr).
	 *  - The current thread has a higher (numerically lower) or
	 *    equivalent priority.  Note that this prevents curthread from
	 *    trying to preempt to itself.
	 *  - It is too early in the boot for context switches (cold is set).
	 *  - The current thread has an inhibitor set or is in the process of
	 *    exiting.  In this case, the current thread is about to switch
	 *    out anyway, so there's no point in preempting.  If we did,
	 *    the current thread would not be resumed properly either, so
	 *    just avoid that whole landmine.
	 *  - The new thread's priority is not a realtime priority, the
	 *    current thread's priority is not an idle priority, and
	 *    FULL_PREEMPTION is disabled.
	 *
	 * If all of these conditions are false, but the current thread is in
	 * a nested critical section, then we have to defer the preemption
	 * until we exit the critical section.  Otherwise, switch immediately
	 * to the new thread.
	 */
	ctd = curthread;
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT((td->td_inhibitors == 0),
	    ("maybe_preempt: trying to run inhibited thread"));
	pri = td->td_priority;
	cpri = ctd->td_priority;
	if (panicstr != NULL || pri >= cpri || cold /* || dumping */ ||
	    TD_IS_INHIBITED(ctd))
		return (0);
#ifndef FULL_PREEMPTION
	if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE)
		return (0);
#endif

	if (ctd->td_critnest > 1) {
		CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
		    ctd->td_critnest);
		ctd->td_owepreempt = 1;
		return (0);
	}
	/*
	 * Thread is runnable but not yet put on system run queue.
	 */
	MPASS(ctd->td_lock == td->td_lock);
	MPASS(TD_ON_RUNQ(td));
	TD_SET_RUNNING(td);
	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
	    td->td_proc->p_pid, td->td_name);
	mi_switch(SW_INVOL | SW_PREEMPT | SWT_PREEMPT, td);
	/*
	 * td's lock pointer may have changed.  We have to return with it
	 * locked.
	 */
	spinlock_enter();
	thread_unlock(ctd);
	thread_lock(td);
	spinlock_exit();
	return (1);
#else
	return (0);
#endif
}

/*
 * Constants for digital decay and forget:
 *	90% of (td_estcpu) usage in 5 * loadav time
 *	95% of (ts_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that schedclock() updates td_estcpu and p_cpticks asynchronously.
 *
 * We wish to decay away 90% of td_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		td_estcpu *= decay;
 * will compute
 * 	td_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))
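/*
 * Worked example: with a steady load average of 2, b = 2 * loadavg = 4
 * and the decay factor is 4/5.  After 5 * loadavg = 10 once-a-second
 * schedcpu() passes, td_estcpu has been scaled by 0.8**10 ~= 0.107,
 * i.e. roughly the 90% forgotten that the analysis above promises.
 * (decay_cpu() computes the same ratio in fixed point, since ldavg[0]
 * is FSCALE-scaled.)
 */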

/* decay 95% of `ts_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11
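/*
 * Concretely, with the stock FSHIFT of 11 (FSCALE = 2048), ccpu is about
 * 0.951229 * 2048 ~= 1948.  Applied once a second, a tick's contribution
 * to ts_pctcpu has decayed to exp(-60/20) = exp(-3) ~= 5% after a
 * minute, which is where the "decay 95% in 60 seconds" figure above
 * comes from.
 */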

/*
 * Recompute process priorities, every hz ticks.
 * MP-safe, called without the Giant mutex.
 */
/* ARGSUSED */
static void
schedcpu(void)
{
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct thread *td;
	struct proc *p;
	struct td_sched *ts;
	int awake, realstathz;

	realstathz = stathz ? stathz : hz;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		FOREACH_THREAD_IN_PROC(p, td) {
			awake = 0;
			thread_lock(td);
			ts = td->td_sched;
			/*
			 * Increment sleep time (if sleeping).  We
			 * ignore overflow, as above.
			 */
			/*
			 * The td_sched slptimes are not touched in wakeup
			 * because the thread may not HAVE everything in
			 * memory? XXX I think this is out of date.
			 */
			if (TD_ON_RUNQ(td)) {
				awake = 1;
				td->td_flags &= ~TDF_DIDRUN;
			} else if (TD_IS_RUNNING(td)) {
				awake = 1;
				/* Do not clear TDF_DIDRUN */
			} else if (td->td_flags & TDF_DIDRUN) {
				awake = 1;
				td->td_flags &= ~TDF_DIDRUN;
			}

			/*
			 * ts_pctcpu is only for ps and ttyinfo().
			 */
			ts->ts_pctcpu = (ts->ts_pctcpu * ccpu) >> FSHIFT;
			/*
			 * If the td_sched has been idle the entire second,
			 * stop recalculating its priority until
			 * it wakes up.
			 */
			if (ts->ts_cpticks != 0) {
#if	(FSHIFT >= CCPU_SHIFT)
				ts->ts_pctcpu += (realstathz == 100)
				    ? ((fixpt_t) ts->ts_cpticks) <<
				    (FSHIFT - CCPU_SHIFT) :
				    100 * (((fixpt_t) ts->ts_cpticks)
				    << (FSHIFT - CCPU_SHIFT)) / realstathz;
#else
				ts->ts_pctcpu += ((FSCALE - ccpu) *
				    (ts->ts_cpticks *
				    FSCALE / realstathz)) >> FSHIFT;
#endif
				ts->ts_cpticks = 0;
			}
			/*
			 * If there are ANY running threads in this process,
			 * then don't count it as sleeping.
			 * XXX: this is broken.
			 */
			if (awake) {
				if (ts->ts_slptime > 1) {
					/*
					 * In an ideal world, this should not
					 * happen, because whoever woke us
					 * up from the long sleep should have
					 * unwound the slptime and reset our
					 * priority before we run at the stale
					 * priority.  Should KASSERT at some
					 * point when all the cases are fixed.
					 */
					updatepri(td);
				}
				ts->ts_slptime = 0;
			} else
				ts->ts_slptime++;
			if (ts->ts_slptime > 1) {
				thread_unlock(td);
				continue;
			}
			td->td_estcpu = decay_cpu(loadfac, td->td_estcpu);
			resetpriority(td);
			resetpriority_thread(td);
			thread_unlock(td);
		}
		PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);
}

/*
 * Main loop for a kthread that executes schedcpu once a second.
 */
static void
schedcpu_thread(void)
{

	for (;;) {
		schedcpu();
		pause("-", hz);
	}
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max td_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay td_estcpu to zero.
 */
static void
updatepri(struct thread *td)
{
	struct td_sched *ts;
	fixpt_t loadfac;
	unsigned int newcpu;

	ts = td->td_sched;
	loadfac = loadfactor(averunnable.ldavg[0]);
	if (ts->ts_slptime > 5 * loadfac)
		td->td_estcpu = 0;
	else {
		newcpu = td->td_estcpu;
		ts->ts_slptime--;	/* was incremented in schedcpu() */
		while (newcpu && --ts->ts_slptime)
			newcpu = decay_cpu(loadfac, newcpu);
		td->td_estcpu = newcpu;
	}
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
static void
resetpriority(struct thread *td)
{
	register unsigned int newpriority;

	if (td->td_pri_class == PRI_TIMESHARE) {
		newpriority = PUSER + td->td_estcpu / INVERSE_ESTCPU_WEIGHT +
		    NICE_WEIGHT * (td->td_proc->p_nice - PRIO_MIN);
		newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
		    PRI_MAX_TIMESHARE);
		sched_user_prio(td, newpriority);
	}
}

/*
 * Update the thread's priority when the associated process's user
 * priority changes.
 */
static void
resetpriority_thread(struct thread *td)
{

	/* Only change threads with a time sharing user priority. */
	if (td->td_priority < PRI_MIN_TIMESHARE ||
	    td->td_priority > PRI_MAX_TIMESHARE)
		return;

	/* XXX the whole needresched thing is broken, but not silly. */
	maybe_resched(td);

	sched_prio(td, td->td_user_pri);
}

/* ARGSUSED */
static void
sched_setup(void *dummy)
{
	setup_runqs();

	if (sched_quantum == 0)
		sched_quantum = SCHED_QUANTUM;
	hogticks = 2 * sched_quantum;

	/* Account for thread0. */
	sched_load_add();
}

/* External interfaces start here */

/*
 * Very early in the boot some setup of scheduler-specific
 * parts of proc0 and of some scheduler resources needs to be done.
 * Called from:
 *  proc0_init()
 */
void
schedinit(void)
{
	/*
	 * Set up the scheduler specific parts of proc0.
	 */
	proc0.p_sched = NULL; /* XXX */
	thread0.td_sched = &td_sched0;
	thread0.td_lock = &sched_lock;
	mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
}

int
sched_runnable(void)
{
#ifdef SMP
	return runq_check(&runq) + runq_check(&runq_pcpu[PCPU_GET(cpuid)]);
#else
	return runq_check(&runq);
#endif
}

int
sched_rr_interval(void)
{
	if (sched_quantum == 0)
		sched_quantum = SCHED_QUANTUM;
	return (sched_quantum);
}

/*
 * We adjust the priority of the current process.  The priority of
 * a process gets worse as it accumulates CPU time.  The cpu usage
 * estimator (td_estcpu) is increased here.  resetpriority() will
 * compute a different priority each time td_estcpu increases by
 * INVERSE_ESTCPU_WEIGHT
 * (until MAXPRI is reached).  The cpu usage estimator ramps up
 * quite quickly when the process is running (linearly), and decays
 * away exponentially, at a rate which is proportionally slower when
 * the system is busy.  The basic principle is that the system will
 * 90% forget that the process used a lot of CPU time in 5 * loadav
 * seconds.  This causes the system to favor processes which haven't
 * run much recently, and to round-robin among other processes.
 */
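/*
 * For a sense of scale: sched_clock() is driven at stathz (typically on
 * the order of 128 Hz), so under that assumption a continuously running
 * thread accumulates one priority step (INVERSE_ESTCPU_WEIGHT = 8 estcpu
 * points) roughly every 62 ms of CPU time, until the decay above
 * catches up with the ramp.
 */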
void
sched_clock(struct thread *td)
{
	struct td_sched *ts;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	ts = td->td_sched;

	ts->ts_cpticks++;
	td->td_estcpu = ESTCPULIM(td->td_estcpu + 1);
	if ((td->td_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
		resetpriority(td);
		resetpriority_thread(td);
	}

	/*
	 * Force a context switch if the current thread has used up a full
	 * quantum (default quantum is 100ms).
	 */
	if (!TD_IS_IDLETHREAD(td) &&
	    ticks - PCPU_GET(switchticks) >= sched_quantum)
		td->td_flags |= TDF_NEEDRESCHED;
}

/*
 * Charge child's scheduling CPU usage to parent.
 */
void
sched_exit(struct proc *p, struct thread *td)
{

	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "proc exit",
	    "prio:td", td->td_priority);

	PROC_LOCK_ASSERT(p, MA_OWNED);
	sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
}

void
sched_exit_thread(struct thread *td, struct thread *child)
{

	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(child), "exit",
	    "prio:td", child->td_priority);
	thread_lock(td);
	td->td_estcpu = ESTCPULIM(td->td_estcpu + child->td_estcpu);
	thread_unlock(td);
	mtx_lock_spin(&sched_lock);
	if ((child->td_proc->p_flag & P_NOLOAD) == 0)
		sched_load_rem();
	mtx_unlock_spin(&sched_lock);
}

void
sched_fork(struct thread *td, struct thread *childtd)
{
	sched_fork_thread(td, childtd);
}

void
sched_fork_thread(struct thread *td, struct thread *childtd)
{
	struct td_sched *ts;

	childtd->td_estcpu = td->td_estcpu;
	childtd->td_lock = &sched_lock;
	childtd->td_cpuset = cpuset_ref(td->td_cpuset);
	ts = childtd->td_sched;
	bzero(ts, sizeof(*ts));
	ts->ts_flags |= (td->td_sched->ts_flags & TSF_AFFINITY);
}

void
sched_nice(struct proc *p, int nice)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_nice = nice;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		resetpriority(td);
		resetpriority_thread(td);
		thread_unlock(td);
	}
}

void
sched_class(struct thread *td, int class)
{
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	td->td_pri_class = class;
}

/*
 * Adjust the priority of a thread.
 */
static void
sched_priority(struct thread *td, u_char prio)
{

	KTR_POINT3(KTR_SCHED, "thread", sched_tdname(td), "priority change",
	    "prio:%d", td->td_priority, "new prio:%d", prio, KTR_ATTR_LINKED,
	    sched_tdname(curthread));
	if (td != curthread && prio > td->td_priority) {
		KTR_POINT3(KTR_SCHED, "thread", sched_tdname(curthread),
		    "lend prio", "prio:%d", td->td_priority, "new prio:%d",
		    prio, KTR_ATTR_LINKED, sched_tdname(td));
	}
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	if (td->td_priority == prio)
		return;
	td->td_priority = prio;
	if (TD_ON_RUNQ(td) && td->td_rqindex != (prio / RQ_PPQ)) {
		sched_rem(td);
		sched_add(td, SRQ_BORING);
	}
}

/*
 * Update a thread's priority when it is lent another thread's
 * priority.
 */
void
sched_lend_prio(struct thread *td, u_char prio)
{

	td->td_flags |= TDF_BORROWING;
	sched_priority(td, prio);
}

/*
 * Restore a thread's priority when priority propagation is
 * over.  The prio argument is the minimum priority the thread
 * needs to have to satisfy other possible priority lending
 * requests.  If the thread's regular priority is less
 * important than prio, the thread will keep a priority boost
 * of prio.
 */
void
sched_unlend_prio(struct thread *td, u_char prio)
{
	u_char base_pri;

	if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
	    td->td_base_pri <= PRI_MAX_TIMESHARE)
		base_pri = td->td_user_pri;
	else
		base_pri = td->td_base_pri;
	if (prio >= base_pri) {
		td->td_flags &= ~TDF_BORROWING;
		sched_prio(td, base_pri);
	} else
		sched_lend_prio(td, prio);
}

void
sched_prio(struct thread *td, u_char prio)
{
	u_char oldprio;

	/* First, update the base priority. */
	td->td_base_pri = prio;

	/*
	 * If the thread is borrowing another thread's priority, don't ever
	 * lower the priority.
	 */
	if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
		return;

	/* Change the real priority. */
	oldprio = td->td_priority;
	sched_priority(td, prio);

	/*
	 * If the thread is on a turnstile, then let the turnstile update
	 * its state.
	 */
	if (TD_ON_LOCK(td) && oldprio != prio)
		turnstile_adjust(td, oldprio);
}

void
sched_user_prio(struct thread *td, u_char prio)
{
	u_char oldprio;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	td->td_base_user_pri = prio;
	if (td->td_flags & TDF_UBORROWING && td->td_user_pri <= prio)
		return;
	oldprio = td->td_user_pri;
	td->td_user_pri = prio;
}

void
sched_lend_user_prio(struct thread *td, u_char prio)
{
	u_char oldprio;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	td->td_flags |= TDF_UBORROWING;
	oldprio = td->td_user_pri;
	td->td_user_pri = prio;
}

void
sched_unlend_user_prio(struct thread *td, u_char prio)
{
	u_char base_pri;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	base_pri = td->td_base_user_pri;
	if (prio >= base_pri) {
		td->td_flags &= ~TDF_UBORROWING;
		sched_user_prio(td, base_pri);
	} else {
		sched_lend_user_prio(td, prio);
	}
}

void
sched_sleep(struct thread *td, int pri)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	td->td_slptick = ticks;
	td->td_sched->ts_slptime = 0;
	if (pri)
		sched_prio(td, pri);
	if (TD_IS_SUSPENDED(td) || pri <= PSOCK)
		td->td_flags |= TDF_CANSWAP;
}

void
sched_switch(struct thread *td, struct thread *newtd, int flags)
{
	struct td_sched *ts;
	struct proc *p;

	ts = td->td_sched;
	p = td->td_proc;

	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/*
	 * Switch to the sched lock to fix things up and pick
	 * a new thread.
	 */
	if (td->td_lock != &sched_lock) {
		mtx_lock_spin(&sched_lock);
		thread_unlock(td);
	}

	if ((p->p_flag & P_NOLOAD) == 0)
		sched_load_rem();

	if (newtd)
		newtd->td_flags |= (td->td_flags & TDF_NEEDRESCHED);

	td->td_lastcpu = td->td_oncpu;
	td->td_flags &= ~TDF_NEEDRESCHED;
	td->td_owepreempt = 0;
	td->td_oncpu = NOCPU;

	/*
	 * At the last moment, if this thread is still marked RUNNING,
	 * then put it back on the run queue as it has not been suspended
	 * or stopped or anything else similar.  We never put the idle
	 * threads on the run queue, however.
	 */
	if (td->td_flags & TDF_IDLETD) {
		TD_SET_CAN_RUN(td);
#ifdef SMP
		idle_cpus_mask &= ~PCPU_GET(cpumask);
#endif
	} else {
		if (TD_IS_RUNNING(td)) {
			/* Put us back on the run queue. */
			sched_add(td, (flags & SW_PREEMPT) ?
			    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
			    SRQ_OURSELF|SRQ_YIELDING);
		}
	}
	if (newtd) {
		/*
		 * The thread we are about to run needs to be counted
		 * as if it had been added to the run queue and selected.
		 * It came from:
		 * * A preemption
		 * * An upcall
		 * * A followon
		 */
		KASSERT((newtd->td_inhibitors == 0),
		    ("trying to run inhibited thread"));
		newtd->td_flags |= TDF_DIDRUN;
		TD_SET_RUNNING(newtd);
		if ((newtd->td_proc->p_flag & P_NOLOAD) == 0)
			sched_load_add();
	} else {
		newtd = choosethread();
	}
	MPASS(newtd->td_lock == &sched_lock);

	if (td != newtd) {
#ifdef HWPMC_HOOKS
		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
#endif
		/* I feel sleepy */
		lock_profile_release_lock(&sched_lock.lock_object);
#ifdef KDTRACE_HOOKS
		/*
		 * If DTrace has set the active vtime enum to anything
		 * other than INACTIVE (0), then it should have set the
		 * function to call.
		 */
		if (dtrace_vtime_active)
			(*dtrace_vtime_switch_func)(newtd);
#endif

		cpu_switch(td, newtd, td->td_lock);
		lock_profile_obtain_lock_success(&sched_lock.lock_object,
		    0, 0, __FILE__, __LINE__);
		/*
		 * Where am I?  What year is it?
		 * We are in the same thread that went to sleep above,
		 * but any amount of time may have passed.  All our context
		 * will still be available as will local variables.
		 * PCPU values however may have changed as we may have
		 * changed CPU so don't trust cached values of them.
		 * New threads will go to fork_exit() instead of here
		 * so if you change things here you may need to change
		 * things there too.
		 *
		 * If the thread above was exiting it will never wake
		 * up again here, so either it has saved everything it
		 * needed to, or the thread_wait() or wait() will
		 * need to reap it.
		 */
#ifdef HWPMC_HOOKS
		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
#endif
	}

#ifdef SMP
	if (td->td_flags & TDF_IDLETD)
		idle_cpus_mask |= PCPU_GET(cpumask);
#endif
	sched_lock.mtx_lock = (uintptr_t)td;
	td->td_oncpu = PCPU_GET(cpuid);
	MPASS(td->td_lock == &sched_lock);
}

void
sched_wakeup(struct thread *td)
{
	struct td_sched *ts;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	ts = td->td_sched;
	td->td_flags &= ~TDF_CANSWAP;
	if (ts->ts_slptime > 1) {
		updatepri(td);
		resetpriority(td);
	}
	td->td_slptick = ticks;
	ts->ts_slptime = 0;
	sched_add(td, SRQ_BORING);
}

#ifdef SMP
static int
forward_wakeup(int cpunum)
{
	struct pcpu *pc;
	cpumask_t dontuse, id, map, map2, map3, me;

	mtx_assert(&sched_lock, MA_OWNED);

	CTR0(KTR_RUNQ, "forward_wakeup()");

	if ((!forward_wakeup_enabled) ||
	     (forward_wakeup_use_mask == 0 && forward_wakeup_use_loop == 0))
		return (0);
	if (!smp_started || cold || panicstr)
		return (0);

	forward_wakeups_requested++;

	/*
	 * Check the idle mask we received against what we calculated
	 * before in the old version.
	 */
	me = PCPU_GET(cpumask);

	/* Don't bother if we should be doing it ourself. */
	if ((me & idle_cpus_mask) && (cpunum == NOCPU || me == (1 << cpunum)))
		return (0);

	dontuse = me | stopped_cpus | hlt_cpus_mask;
	map3 = 0;
	if (forward_wakeup_use_loop) {
		SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
			id = pc->pc_cpumask;
			if ((id & dontuse) == 0 &&
			    pc->pc_curthread == pc->pc_idlethread) {
				map3 |= id;
			}
		}
	}

	if (forward_wakeup_use_mask) {
		map = 0;
		map = idle_cpus_mask & ~dontuse;

		/* If they are both on, compare and use loop if different. */
		if (forward_wakeup_use_loop) {
			if (map != map3) {
				printf("map (%02X) != map3 (%02X)\n", map,
				    map3);
				map = map3;
			}
		}
	} else {
		map = map3;
	}

	/* If we only allow a specific CPU, then mask off all the others. */
	if (cpunum != NOCPU) {
		KASSERT((cpunum <= mp_maxcpus),("forward_wakeup: bad cpunum."));
		map &= (1 << cpunum);
	} else {
		/* Try to choose an idle die. */
		if (forward_wakeup_use_htt) {
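			/*
			 * Illustrative note: assuming HTT siblings occupy
			 * adjacent bits (cpus 2i and 2i+1), map & (map >> 1)
			 * leaves bit i set only when bits i and i+1 are
			 * both set, and the 0x5555 mask keeps the even
			 * sibling.  E.g. map = 0b1100 (cpus 2 and 3 idle)
			 * yields map2 = 0b0100, i.e. wake cpu 2.
			 */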
			map2 = (map & (map >> 1)) & 0x5555;
			if (map2) {
				map = map2;
			}
		}

		/* Set only one bit. */
		if (forward_wakeup_use_single) {
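			/*
			 * map & (~map + 1) is the two's-complement trick
			 * (map & -map): it isolates the lowest set bit,
			 * e.g. 0b0110 becomes 0b0010.
			 */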
			map = map & ((~map) + 1);
		}
	}
	if (map) {
		forward_wakeups_delivered++;
		ipi_selected(map, IPI_AST);
		return (1);
	}
	if (cpunum == NOCPU)
		printf("forward_wakeup: Idle processor not found\n");
	return (0);
}

static void
kick_other_cpu(int pri, int cpuid)
{
	struct pcpu *pcpu;
	int cpri;

	pcpu = pcpu_find(cpuid);
	if (idle_cpus_mask & pcpu->pc_cpumask) {
		forward_wakeups_delivered++;
		ipi_selected(pcpu->pc_cpumask, IPI_AST);
		return;
	}

	cpri = pcpu->pc_curthread->td_priority;
	if (pri >= cpri)
		return;

#if defined(IPI_PREEMPTION) && defined(PREEMPTION)
#if !defined(FULL_PREEMPTION)
	if (pri <= PRI_MAX_ITHD)
#endif /* ! FULL_PREEMPTION */
	{
		ipi_selected(pcpu->pc_cpumask, IPI_PREEMPT);
		return;
	}
#endif /* defined(IPI_PREEMPTION) && defined(PREEMPTION) */

	pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED;
	ipi_selected(pcpu->pc_cpumask, IPI_AST);
	return;
}
#endif /* SMP */

#ifdef SMP
static int
sched_pickcpu(struct thread *td)
{
	int best, cpu;

	mtx_assert(&sched_lock, MA_OWNED);

	if (THREAD_CAN_SCHED(td, td->td_lastcpu))
		best = td->td_lastcpu;
	else
		best = NOCPU;
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (CPU_ABSENT(cpu))
			continue;
		if (!THREAD_CAN_SCHED(td, cpu))
			continue;

		if (best == NOCPU)
			best = cpu;
		else if (runq_length[cpu] < runq_length[best])
			best = cpu;
	}
	KASSERT(best != NOCPU, ("no valid CPUs"));

	return (best);
}
#endif

void
sched_add(struct thread *td, int flags)
#ifdef SMP
{
	struct td_sched *ts;
	int forwarded = 0;
	int cpu;
	int single_cpu = 0;

	ts = td->td_sched;
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT((td->td_inhibitors == 0),
	    ("sched_add: trying to run inhibited thread"));
	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
	    ("sched_add: bad thread state"));
	KASSERT(td->td_flags & TDF_INMEM,
	    ("sched_add: thread swapped out"));

	KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add",
	    "prio:%d", td->td_priority, KTR_ATTR_LINKED,
	    sched_tdname(curthread));
	KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup",
	    KTR_ATTR_LINKED, sched_tdname(td));

	/*
	 * Now that the thread is moving to the run-queue, set the lock
	 * to the scheduler's lock.
	 */
	if (td->td_lock != &sched_lock) {
		mtx_lock_spin(&sched_lock);
		thread_lock_set(td, &sched_lock);
	}
	TD_SET_RUNQ(td);

	if (td->td_pinned != 0) {
		cpu = td->td_lastcpu;
		ts->ts_runq = &runq_pcpu[cpu];
		single_cpu = 1;
		CTR3(KTR_RUNQ,
		    "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td,
		    cpu);
	} else if (td->td_flags & TDF_BOUND) {
		/* Find CPU from bound runq. */
		KASSERT(SKE_RUNQ_PCPU(ts),
		    ("sched_add: bound td_sched not on cpu runq"));
		cpu = ts->ts_runq - &runq_pcpu[0];
		single_cpu = 1;
		CTR3(KTR_RUNQ,
		    "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td,
		    cpu);
	} else if (ts->ts_flags & TSF_AFFINITY) {
		/* Find a valid CPU for our cpuset */
		cpu = sched_pickcpu(td);
		ts->ts_runq = &runq_pcpu[cpu];
		single_cpu = 1;
		CTR3(KTR_RUNQ,
		    "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td,
		    cpu);
	} else {
		CTR2(KTR_RUNQ,
		    "sched_add: adding td_sched:%p (td:%p) to gbl runq", ts,
		    td);
		cpu = NOCPU;
		ts->ts_runq = &runq;
	}

	if (single_cpu && (cpu != PCPU_GET(cpuid))) {
		kick_other_cpu(td->td_priority, cpu);
	} else {
		if (!single_cpu) {
			cpumask_t me = PCPU_GET(cpumask);
			cpumask_t idle = idle_cpus_mask & me;

			if (!idle && ((flags & SRQ_INTR) == 0) &&
			    (idle_cpus_mask & ~(hlt_cpus_mask | me)))
				forwarded = forward_wakeup(cpu);
		}

		if (!forwarded) {
			if ((flags & SRQ_YIELDING) == 0 && maybe_preempt(td))
				return;
			else
				maybe_resched(td);
		}
	}

	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
		sched_load_add();
	runq_add(ts->ts_runq, td, flags);
	if (cpu != NOCPU)
		runq_length[cpu]++;
}
#else /* SMP */
{
	struct td_sched *ts;

	ts = td->td_sched;
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT((td->td_inhibitors == 0),
	    ("sched_add: trying to run inhibited thread"));
	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
	    ("sched_add: bad thread state"));
	KASSERT(td->td_flags & TDF_INMEM,
	    ("sched_add: thread swapped out"));
	KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add",
	    "prio:%d", td->td_priority, KTR_ATTR_LINKED,
	    sched_tdname(curthread));
	KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup",
	    KTR_ATTR_LINKED, sched_tdname(td));

	/*
	 * Now that the thread is moving to the run-queue, set the lock
	 * to the scheduler's lock.
	 */
	if (td->td_lock != &sched_lock) {
		mtx_lock_spin(&sched_lock);
		thread_lock_set(td, &sched_lock);
	}
	TD_SET_RUNQ(td);
	CTR2(KTR_RUNQ, "sched_add: adding td_sched:%p (td:%p) to runq", ts, td);
	ts->ts_runq = &runq;

	/*
	 * If we are yielding (on the way out anyhow) or the thread
	 * being saved is US, then don't try to be smart about preemption
	 * or kicking off another CPU as it won't help and may hinder.
	 * In the YIELDING case, we are about to run whoever is being
	 * put in the queue anyhow, and in the OURSELF case, we are
	 * putting ourselves on the run queue which also only happens
	 * when we are about to yield.
	 */
	if ((flags & SRQ_YIELDING) == 0) {
		if (maybe_preempt(td))
			return;
	}
	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
		sched_load_add();
	runq_add(ts->ts_runq, td, flags);
	maybe_resched(td);
}
#endif /* SMP */

void
sched_rem(struct thread *td)
{
	struct td_sched *ts;

	ts = td->td_sched;
	KASSERT(td->td_flags & TDF_INMEM,
	    ("sched_rem: thread swapped out"));
	KASSERT(TD_ON_RUNQ(td),
	    ("sched_rem: thread not on run queue"));
	mtx_assert(&sched_lock, MA_OWNED);
	KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq rem",
	    "prio:%d", td->td_priority, KTR_ATTR_LINKED,
	    sched_tdname(curthread));

	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
		sched_load_rem();
#ifdef SMP
	if (ts->ts_runq != &runq)
		runq_length[ts->ts_runq - runq_pcpu]--;
#endif
	runq_remove(ts->ts_runq, td);
	TD_SET_CAN_RUN(td);
}

/*
 * Select threads to run.  Note that running threads still consume a
 * slot.
 */
struct thread *
sched_choose(void)
{
	struct thread *td;
	struct runq *rq;

	mtx_assert(&sched_lock,  MA_OWNED);
#ifdef SMP
	struct thread *tdcpu;

	rq = &runq;
	td = runq_choose_fuzz(&runq, runq_fuzz);
	tdcpu = runq_choose(&runq_pcpu[PCPU_GET(cpuid)]);

	if (td == NULL ||
	    (tdcpu != NULL &&
	     tdcpu->td_priority < td->td_priority)) {
		CTR2(KTR_RUNQ, "choosing td %p from pcpu runq %d", tdcpu,
		     PCPU_GET(cpuid));
		td = tdcpu;
		rq = &runq_pcpu[PCPU_GET(cpuid)];
	} else {
		CTR1(KTR_RUNQ, "choosing td_sched %p from main runq", td);
	}

#else
	rq = &runq;
	td = runq_choose(&runq);
#endif

	if (td) {
#ifdef SMP
		if (td == tdcpu)
			runq_length[PCPU_GET(cpuid)]--;
#endif
		runq_remove(rq, td);
		td->td_flags |= TDF_DIDRUN;

		KASSERT(td->td_flags & TDF_INMEM,
		    ("sched_choose: thread swapped out"));
		return (td);
	}
	return (PCPU_GET(idlethread));
}

void
sched_preempt(struct thread *td)
{
	thread_lock(td);
	if (td->td_critnest > 1)
		td->td_owepreempt = 1;
	else
		mi_switch(SW_INVOL | SW_PREEMPT | SWT_PREEMPT, NULL);
	thread_unlock(td);
}

void
sched_userret(struct thread *td)
{
	/*
	 * XXX we cheat slightly on the locking here to avoid locking in
	 * the usual case.  Setting td_priority here is essentially an
	 * incomplete workaround for not setting it properly elsewhere.
	 * Now that some interrupt handlers are threads, not setting it
	 * properly elsewhere can clobber it in the window between setting
	 * it here and returning to user mode, so don't waste time setting
	 * it perfectly here.
	 */
	KASSERT((td->td_flags & TDF_BORROWING) == 0,
	    ("thread with borrowed priority returning to userland"));
	if (td->td_priority != td->td_user_pri) {
		thread_lock(td);
		td->td_priority = td->td_user_pri;
		td->td_base_pri = td->td_user_pri;
		thread_unlock(td);
	}
}

void
sched_bind(struct thread *td, int cpu)
{
	struct td_sched *ts;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_RUNNING(td),
	    ("sched_bind: cannot bind non-running thread"));

	ts = td->td_sched;

	td->td_flags |= TDF_BOUND;
#ifdef SMP
	ts->ts_runq = &runq_pcpu[cpu];
	if (PCPU_GET(cpuid) == cpu)
		return;

	mi_switch(SW_VOL, NULL);
#endif
}

void
sched_unbind(struct thread* td)
{
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	td->td_flags &= ~TDF_BOUND;
}

int
sched_is_bound(struct thread *td)
{
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	return (td->td_flags & TDF_BOUND);
}

void
sched_relinquish(struct thread *td)
{
	thread_lock(td);
	mi_switch(SW_VOL | SWT_RELINQUISH, NULL);
	thread_unlock(td);
}

int
sched_load(void)
{
	return (sched_tdcnt);
}

int
sched_sizeof_proc(void)
{
	return (sizeof(struct proc));
}

int
sched_sizeof_thread(void)
{
	return (sizeof(struct thread) + sizeof(struct td_sched));
}

fixpt_t
sched_pctcpu(struct thread *td)
{
	struct td_sched *ts;

	ts = td->td_sched;
	return (ts->ts_pctcpu);
}

void
sched_tick(void)
{
}

/*
 * The actual idle process.
 */
void
sched_idletd(void *dummy)
{

	for (;;) {
		mtx_assert(&Giant, MA_NOTOWNED);

		while (sched_runnable() == 0)
			cpu_idle(0);

		mtx_lock_spin(&sched_lock);
		mi_switch(SW_VOL | SWT_IDLE, NULL);
		mtx_unlock_spin(&sched_lock);
	}
}

/*
 * A CPU is entering for the first time or a thread is exiting.
 */
void
sched_throw(struct thread *td)
{
	/*
	 * Correct spinlock nesting.  The idle thread context that we are
	 * borrowing was created so that it would start out with a single
	 * spin lock (sched_lock) held in fork_trampoline().  Since we've
	 * explicitly acquired locks in this function, the nesting count
	 * is now 2 rather than 1.  Since we are nested, calling
	 * spinlock_exit() will simply adjust the counts without allowing
	 * spin lock using code to interrupt us.
	 */
	if (td == NULL) {
		mtx_lock_spin(&sched_lock);
		spinlock_exit();
	} else {
		lock_profile_release_lock(&sched_lock.lock_object);
		MPASS(td->td_lock == &sched_lock);
	}
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
	PCPU_SET(switchtime, cpu_ticks());
	PCPU_SET(switchticks, ticks);
	cpu_throw(td, choosethread());	/* doesn't return */
}

void
sched_fork_exit(struct thread *td)
{

	/*
	 * Finish setting up thread glue so that it begins execution in a
	 * non-nested critical section with sched_lock held but not recursed.
	 */
	td->td_oncpu = PCPU_GET(cpuid);
	sched_lock.mtx_lock = (uintptr_t)td;
	lock_profile_obtain_lock_success(&sched_lock.lock_object,
	    0, 0, __FILE__, __LINE__);
	THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
}

char *
sched_tdname(struct thread *td)
{
#ifdef KTR
	struct td_sched *ts;

	ts = td->td_sched;
	if (ts->ts_name[0] == '\0')
		snprintf(ts->ts_name, sizeof(ts->ts_name),
		    "%s tid %d", td->td_name, td->td_tid);
	return (ts->ts_name);
#else
	return (td->td_name);
#endif
}

void
sched_affinity(struct thread *td)
{
#ifdef SMP
	struct td_sched *ts;
	int cpu;

	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/*
	 * Set the TSF_AFFINITY flag if there is at least one CPU this
	 * thread can't run on.
	 */
	ts = td->td_sched;
	ts->ts_flags &= ~TSF_AFFINITY;
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (CPU_ABSENT(cpu))
			continue;
		if (!THREAD_CAN_SCHED(td, cpu)) {
			ts->ts_flags |= TSF_AFFINITY;
			break;
		}
	}

	/*
	 * If this thread can run on all CPUs, nothing else to do.
	 */
	if (!(ts->ts_flags & TSF_AFFINITY))
		return;

	/* Pinned threads and bound threads should be left alone. */
	if (td->td_pinned != 0 || td->td_flags & TDF_BOUND)
		return;

	switch (td->td_state) {
	case TDS_RUNQ:
		/*
		 * If we are on a per-CPU runqueue that is in the set,
		 * then nothing needs to be done.
		 */
		if (ts->ts_runq != &runq &&
		    THREAD_CAN_SCHED(td, ts->ts_runq - runq_pcpu))
			return;

		/* Put this thread on a valid per-CPU runqueue. */
		sched_rem(td);
		sched_add(td, SRQ_BORING);
		break;
	case TDS_RUNNING:
		/*
		 * See if our current CPU is in the set.  If not, force a
		 * context switch.
		 */
		if (THREAD_CAN_SCHED(td, td->td_oncpu))
			return;

		td->td_flags |= TDF_NEEDRESCHED;
		if (td != curthread)
			/* Send the AST to the CPU the thread is running on. */
			ipi_selected(1 << td->td_oncpu, IPI_AST);
		break;
	default:
		break;
	}
#endif
}