sys/kern/sched_4bsd.c
1 /*-
2 * Copyright (c) 1982, 1986, 1990, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD: releng/8.3/sys/kern/sched_4bsd.c 230080 2012-01-13 20:15:49Z jhb $");
37
38 #include "opt_hwpmc_hooks.h"
39 #include "opt_sched.h"
40 #include "opt_kdtrace.h"
41
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/cpuset.h>
45 #include <sys/kernel.h>
46 #include <sys/ktr.h>
47 #include <sys/lock.h>
48 #include <sys/kthread.h>
49 #include <sys/mutex.h>
50 #include <sys/proc.h>
51 #include <sys/resourcevar.h>
52 #include <sys/sched.h>
53 #include <sys/smp.h>
54 #include <sys/sysctl.h>
55 #include <sys/sx.h>
56 #include <sys/turnstile.h>
57 #include <sys/umtx.h>
58 #include <machine/pcb.h>
59 #include <machine/smp.h>
60
61 #ifdef HWPMC_HOOKS
62 #include <sys/pmckern.h>
63 #endif
64
65 #ifdef KDTRACE_HOOKS
66 #include <sys/dtrace_bsd.h>
67 int dtrace_vtime_active;
68 dtrace_vtime_switch_func_t dtrace_vtime_switch_func;
69 #endif
70
71 /*
72 * INVERSE_ESTCPU_WEIGHT is only suitable for statclock() frequencies in
73 * the range 100-256 Hz (approximately).
74 */
75 #define ESTCPULIM(e) \
76 min((e), INVERSE_ESTCPU_WEIGHT * (NICE_WEIGHT * (PRIO_MAX - PRIO_MIN) - \
77 RQ_PPQ) + INVERSE_ESTCPU_WEIGHT - 1)
78 #ifdef SMP
79 #define INVERSE_ESTCPU_WEIGHT (8 * smp_cpus)
80 #else
81 #define INVERSE_ESTCPU_WEIGHT 8 /* 1 / (priorities per estcpu level). */
82 #endif
83 #define NICE_WEIGHT 1 /* Priorities per nice level. */
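/*
 * Illustrative example (not in the original file), assuming the stock
 * values PRIO_MIN == -20, PRIO_MAX == 20 and RQ_PPQ == 4: on a UP kernel
 * ESTCPULIM(e) caps td_estcpu at 8 * (1 * 40 - 4) + 8 - 1 == 295; the
 * SMP limit scales with smp_cpus through INVERSE_ESTCPU_WEIGHT.
 */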
84
85 #define TS_NAME_LEN (MAXCOMLEN + sizeof(" td ") + sizeof(__XSTRING(UINT_MAX)))
86
87 /*
88 * The schedulable entity that runs a context.
89 * This is an extension to the thread structure and is tailored to
90 * the requirements of this scheduler.
91 */
92 struct td_sched {
93 fixpt_t ts_pctcpu; /* (j) %cpu during p_swtime. */
94 int ts_cpticks; /* (j) Ticks of cpu time. */
95 int ts_slptime; /* (j) Seconds !RUNNING. */
96 int ts_flags;
97 struct runq *ts_runq; /* runq the thread is currently on */
98 #ifdef KTR
99 char ts_name[TS_NAME_LEN];
100 #endif
101 };
102
103 /* flags kept in td_flags */
104 #define TDF_DIDRUN TDF_SCHED0 /* thread actually ran. */
105 #define TDF_BOUND TDF_SCHED1 /* Bound to one CPU. */
106
107 /* flags kept in ts_flags */
108 #define TSF_AFFINITY 0x0001 /* Has a non-"full" CPU set. */
109
110 #define SKE_RUNQ_PCPU(ts) \
111 ((ts)->ts_runq != 0 && (ts)->ts_runq != &runq)
112
113 #define THREAD_CAN_SCHED(td, cpu) \
114 CPU_ISSET((cpu), &(td)->td_cpuset->cs_mask)
115
116 static struct td_sched td_sched0;
117 struct mtx sched_lock;
118
119 static int sched_tdcnt; /* Total runnable threads in the system. */
120 static int sched_quantum; /* Roundrobin scheduling quantum in ticks. */
121 #define SCHED_QUANTUM (hz / 10) /* Default sched quantum */
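/*
 * Note (added for illustration): hz / 10 ticks is one tenth of a second
 * regardless of the configured hz, so the default quantum is ~100 ms and
 * sched_setup() below sets hogticks to two quanta (~200 ms).
 */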
122
123 static void setup_runqs(void);
124 static void schedcpu(void);
125 static void schedcpu_thread(void);
126 static void sched_priority(struct thread *td, u_char prio);
127 static void sched_setup(void *dummy);
128 static void maybe_resched(struct thread *td);
129 static void updatepri(struct thread *td);
130 static void resetpriority(struct thread *td);
131 static void resetpriority_thread(struct thread *td);
132 #ifdef SMP
133 static int sched_pickcpu(struct thread *td);
134 static int forward_wakeup(int cpunum);
135 static void kick_other_cpu(int pri, int cpuid);
136 #endif
137
138 static struct kproc_desc sched_kp = {
139 "schedcpu",
140 schedcpu_thread,
141 NULL
142 };
143 SYSINIT(schedcpu, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, kproc_start,
144 &sched_kp);
145 SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL);
146
147 /*
148 * Global run queue.
149 */
150 static struct runq runq;
151
152 #ifdef SMP
153 /*
154 * Per-CPU run queues
155 */
156 static struct runq runq_pcpu[MAXCPU];
157 long runq_length[MAXCPU];
158 #endif
159
160 static void
161 setup_runqs(void)
162 {
163 #ifdef SMP
164 int i;
165
166 for (i = 0; i < MAXCPU; ++i)
167 runq_init(&runq_pcpu[i]);
168 #endif
169
170 runq_init(&runq);
171 }
172
173 static int
174 sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
175 {
176 int error, new_val;
177
178 new_val = sched_quantum * tick;
179 error = sysctl_handle_int(oidp, &new_val, 0, req);
180 if (error != 0 || req->newptr == NULL)
181 return (error);
182 if (new_val < tick)
183 return (EINVAL);
184 sched_quantum = new_val / tick;
185 hogticks = 2 * sched_quantum;
186 return (0);
187 }
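/*
 * Usage sketch (illustrative, assuming the standard sysctl(8) tool): the
 * handler above exports the quantum in microseconds, so for example
 *
 *	sysctl kern.sched.quantum=200000
 *
 * requests a ~200 ms quantum; the value is rounded down to whole clock
 * ticks (new_val / tick) and hogticks is reset to two quanta.
 */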
188
189 SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RD, 0, "Scheduler");
190
191 SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "4BSD", 0,
192 "Scheduler name");
193
194 SYSCTL_PROC(_kern_sched, OID_AUTO, quantum, CTLTYPE_INT | CTLFLAG_RW,
195 0, sizeof sched_quantum, sysctl_kern_quantum, "I",
196 "Roundrobin scheduling quantum in microseconds");
197
198 #ifdef SMP
199 /* Enable forwarding of wakeups to all other cpus */
200 SYSCTL_NODE(_kern_sched, OID_AUTO, ipiwakeup, CTLFLAG_RD, NULL, "Kernel SMP");
201
202 static int runq_fuzz = 1;
203 SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");
204
205 static int forward_wakeup_enabled = 1;
206 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, enabled, CTLFLAG_RW,
207 &forward_wakeup_enabled, 0,
208 "Forwarding of wakeup to idle CPUs");
209
210 static int forward_wakeups_requested = 0;
211 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, requested, CTLFLAG_RD,
212 &forward_wakeups_requested, 0,
213 "Requests for Forwarding of wakeup to idle CPUs");
214
215 static int forward_wakeups_delivered = 0;
216 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, delivered, CTLFLAG_RD,
217 &forward_wakeups_delivered, 0,
218 "Completed Forwarding of wakeup to idle CPUs");
219
220 static int forward_wakeup_use_mask = 1;
221 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, usemask, CTLFLAG_RW,
222 &forward_wakeup_use_mask, 0,
223 "Use the mask of idle cpus");
224
225 static int forward_wakeup_use_loop = 0;
226 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, useloop, CTLFLAG_RW,
227 &forward_wakeup_use_loop, 0,
228 "Use a loop to find idle cpus");
229
230 static int forward_wakeup_use_single = 0;
231 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, onecpu, CTLFLAG_RW,
232 &forward_wakeup_use_single, 0,
233 "Only signal one idle cpu");
234
235 static int forward_wakeup_use_htt = 0;
236 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, htt2, CTLFLAG_RW,
237 &forward_wakeup_use_htt, 0,
238 "account for htt");
239
240 #endif
241 #if 0
242 static int sched_followon = 0;
243 SYSCTL_INT(_kern_sched, OID_AUTO, followon, CTLFLAG_RW,
244 &sched_followon, 0,
245 "allow threads to share a quantum");
246 #endif
247
248 static __inline void
249 sched_load_add(void)
250 {
251
252 sched_tdcnt++;
253 KTR_COUNTER0(KTR_SCHED, "load", "global load", sched_tdcnt);
254 }
255
256 static __inline void
257 sched_load_rem(void)
258 {
259
260 sched_tdcnt--;
261 KTR_COUNTER0(KTR_SCHED, "load", "global load", sched_tdcnt);
262 }
263 /*
264 * Arrange to reschedule if necessary, taking the priorities and
265 * schedulers into account.
266 */
267 static void
268 maybe_resched(struct thread *td)
269 {
270
271 THREAD_LOCK_ASSERT(td, MA_OWNED);
272 if (td->td_priority < curthread->td_priority)
273 curthread->td_flags |= TDF_NEEDRESCHED;
274 }
275
276 /*
277 * This function is called when a thread is about to be put on run queue
278 * because it has been made runnable or its priority has been adjusted. It
279 * determines whether to preempt to the new thread immediately. If so,
280 * it switches to it and eventually returns true. If not, it returns false
281 * so that the caller may place the thread on an appropriate run queue.
282 */
283 int
284 maybe_preempt(struct thread *td)
285 {
286 #ifdef PREEMPTION
287 struct thread *ctd;
288 int cpri, pri;
289
290 /*
291 * The new thread should not preempt the current thread if any of the
292 * following conditions are true:
293 *
294 * - The kernel is in the throes of crashing (panicstr).
295 * - The current thread has a higher (numerically lower) or
296 * equivalent priority. Note that this prevents curthread from
297 * trying to preempt to itself.
298 * - It is too early in the boot for context switches (cold is set).
299 * - The current thread has an inhibitor set or is in the process of
300 * exiting. In this case, the current thread is about to switch
301 * out anyway, so there's no point in preempting. If we did,
302 * the current thread would not be properly resumed as well, so
303 * just avoid that whole landmine.
304 * - If the new thread's priority is not a realtime priority and
305 * the current thread's priority is not an idle priority and
306 * FULL_PREEMPTION is disabled.
307 *
308 * If all of these conditions are false, but the current thread is in
309 * a nested critical section, then we have to defer the preemption
310 * until we exit the critical section. Otherwise, switch immediately
311 * to the new thread.
312 */
313 ctd = curthread;
314 THREAD_LOCK_ASSERT(td, MA_OWNED);
315 KASSERT((td->td_inhibitors == 0),
316 ("maybe_preempt: trying to run inhibited thread"));
317 pri = td->td_priority;
318 cpri = ctd->td_priority;
319 if (panicstr != NULL || pri >= cpri || cold /* || dumping */ ||
320 TD_IS_INHIBITED(ctd))
321 return (0);
322 #ifndef FULL_PREEMPTION
323 if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE)
324 return (0);
325 #endif
326
327 if (ctd->td_critnest > 1) {
328 CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
329 ctd->td_critnest);
330 ctd->td_owepreempt = 1;
331 return (0);
332 }
333 /*
334 * Thread is runnable but not yet put on system run queue.
335 */
336 MPASS(ctd->td_lock == td->td_lock);
337 MPASS(TD_ON_RUNQ(td));
338 TD_SET_RUNNING(td);
339 CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
340 td->td_proc->p_pid, td->td_name);
341 mi_switch(SW_INVOL | SW_PREEMPT | SWT_PREEMPT, td);
342 /*
343 * td's lock pointer may have changed. We have to return with it
344 * locked.
345 */
346 spinlock_enter();
347 thread_unlock(ctd);
348 thread_lock(td);
349 spinlock_exit();
350 return (1);
351 #else
352 return (0);
353 #endif
354 }
355
356 /*
357 * Constants for digital decay and forget:
358 * 90% of (td_estcpu) usage in 5 * loadav time
359 * 95% of (ts_pctcpu) usage in 60 seconds (load insensitive)
360 * Note that, as ps(1) mentions, this can let percentages
361 * total over 100% (I've seen 137.9% for 3 processes).
362 *
363 * Note that schedclock() updates td_estcpu and p_cpticks asynchronously.
364 *
365 * We wish to decay away 90% of td_estcpu in (5 * loadavg) seconds.
366 * That is, the system wants to compute a value of decay such
367 * that the following for loop:
368 * for (i = 0; i < (5 * loadavg); i++)
369 * td_estcpu *= decay;
370 * will compute
371 * td_estcpu *= 0.1;
372 * for all values of loadavg:
373 *
374 * Mathematically this loop can be expressed by saying:
375 * decay ** (5 * loadavg) ~= .1
376 *
377 * The system computes decay as:
378 * decay = (2 * loadavg) / (2 * loadavg + 1)
379 *
380 * We wish to prove that the system's computation of decay
381 * will always fulfill the equation:
382 * decay ** (5 * loadavg) ~= .1
383 *
384 * If we compute b as:
385 * b = 2 * loadavg
386 * then
387 * decay = b / (b + 1)
388 *
389 * We now need to prove two things:
390 * 1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
391 * 2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
392 *
393 * Facts:
394 * For x close to zero, exp(x) =~ 1 + x, since
395 * exp(x) = 0! + x**1/1! + x**2/2! + ... .
396 * therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
397 * For x close to zero, ln(1+x) =~ x, since
398 * ln(1+x) = x - x**2/2 + x**3/3 - ... -1 < x < 1
399 * therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
400 * ln(.1) =~ -2.30
401 *
402 * Proof of (1):
403 * Solve (factor)**(power) =~ .1 given power (5*loadav):
404 * solving for factor,
405 * ln(factor) =~ (-2.30/5*loadav), or
406 * factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
407 * exp(-1/b) =~ (b-1)/b =~ b/(b+1). QED
408 *
409 * Proof of (2):
410 * Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
411 * solving for power,
412 * power*ln(b/(b+1)) =~ -2.30, or
413 * power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav. QED
414 *
415 * Actual power values for the implemented algorithm are as follows:
416 * loadav: 1 2 3 4
417 * power: 5.68 10.32 14.94 19.55
418 */
419
420 /* calculations for digital decay to forget 90% of usage in 5*loadav sec */
421 #define loadfactor(loadav) (2 * (loadav))
422 #define decay_cpu(loadfac, cpu) (((loadfac) * (cpu)) / ((loadfac) + FSCALE))
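/*
 * Worked example (not part of the original source): averunnable.ldavg[]
 * is FSCALE-scaled, so for a load average of 1.0 loadfactor() yields
 * 2 * FSCALE and decay_cpu() multiplies td_estcpu by
 * 2*FSCALE / (2*FSCALE + FSCALE) == 2/3.  Applied once a second for the
 * 5.68 seconds tabulated above, (2/3)^5.68 ~= 0.1, i.e. 90% of the old
 * usage is forgotten, as intended.
 */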
423
424 /* decay 95% of `ts_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
425 static fixpt_t ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
426 SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
427
428 /*
429 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
430 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
431 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
432 *
433 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
434 * 1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
435 *
436 * If you don't want to bother with the faster/more-accurate formula, you
437 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
438 * (more general) method of calculating the percentage of CPU used by a process.
439 */
440 #define CCPU_SHIFT 11
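/*
 * Sanity check (added for illustration): ccpu == exp(-1/20) ~= 0.9512,
 * and schedcpu() applies it once per second, so after 60 seconds an idle
 * thread's ts_pctcpu retains exp(-3) ~= 0.05 of its old value -- the
 * "decay 95% in 60 seconds" behaviour noted above.
 */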
441
442 /*
443 * Recompute process priorities, every hz ticks.
444 * MP-safe, called without the Giant mutex.
445 */
446 /* ARGSUSED */
447 static void
448 schedcpu(void)
449 {
450 register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
451 struct thread *td;
452 struct proc *p;
453 struct td_sched *ts;
454 int awake, realstathz;
455
456 realstathz = stathz ? stathz : hz;
457 sx_slock(&allproc_lock);
458 FOREACH_PROC_IN_SYSTEM(p) {
459 PROC_LOCK(p);
460 if (p->p_state == PRS_NEW) {
461 PROC_UNLOCK(p);
462 continue;
463 }
464 FOREACH_THREAD_IN_PROC(p, td) {
465 awake = 0;
466 thread_lock(td);
467 ts = td->td_sched;
468 /*
469 * Increment sleep time (if sleeping). We
470 * ignore overflow, as above.
471 */
472 /*
473 * The td_sched slptimes are not touched in wakeup
474 * because the thread may not HAVE everything in
475 * memory? XXX I think this is out of date.
476 */
477 if (TD_ON_RUNQ(td)) {
478 awake = 1;
479 td->td_flags &= ~TDF_DIDRUN;
480 } else if (TD_IS_RUNNING(td)) {
481 awake = 1;
482 /* Do not clear TDF_DIDRUN */
483 } else if (td->td_flags & TDF_DIDRUN) {
484 awake = 1;
485 td->td_flags &= ~TDF_DIDRUN;
486 }
487
488 /*
489 * ts_pctcpu is only for ps and ttyinfo().
490 */
491 ts->ts_pctcpu = (ts->ts_pctcpu * ccpu) >> FSHIFT;
492 /*
493 * If the td_sched has been idle the entire second,
494 * stop recalculating its priority until
495 * it wakes up.
496 */
497 if (ts->ts_cpticks != 0) {
498 #if (FSHIFT >= CCPU_SHIFT)
499 ts->ts_pctcpu += (realstathz == 100)
500 ? ((fixpt_t) ts->ts_cpticks) <<
501 (FSHIFT - CCPU_SHIFT) :
502 100 * (((fixpt_t) ts->ts_cpticks)
503 << (FSHIFT - CCPU_SHIFT)) / realstathz;
504 #else
505 ts->ts_pctcpu += ((FSCALE - ccpu) *
506 (ts->ts_cpticks *
507 FSCALE / realstathz)) >> FSHIFT;
508 #endif
509 ts->ts_cpticks = 0;
510 }
511 /*
512 * If there are ANY running threads in this process,
513 * then don't count it as sleeping.
514 * XXX: this is broken.
515 */
516 if (awake) {
517 if (ts->ts_slptime > 1) {
518 /*
519 * In an ideal world, this should not
520 * happen, because whoever woke us
521 * up from the long sleep should have
522 * unwound the slptime and reset our
523 * priority before we run at the stale
524 * priority. Should KASSERT at some
525 * point when all the cases are fixed.
526 */
527 updatepri(td);
528 }
529 ts->ts_slptime = 0;
530 } else
531 ts->ts_slptime++;
532 if (ts->ts_slptime > 1) {
533 thread_unlock(td);
534 continue;
535 }
536 td->td_estcpu = decay_cpu(loadfac, td->td_estcpu);
537 resetpriority(td);
538 resetpriority_thread(td);
539 thread_unlock(td);
540 }
541 PROC_UNLOCK(p);
542 }
543 sx_sunlock(&allproc_lock);
544 }
545
546 /*
547 * Main loop for a kthread that executes schedcpu once a second.
548 */
549 static void
550 schedcpu_thread(void)
551 {
552
553 for (;;) {
554 schedcpu();
555 pause("-", hz);
556 }
557 }
558
559 /*
560 * Recalculate the priority of a process after it has slept for a while.
561 * For all load averages >= 1 and max td_estcpu of 255, sleeping for at
562 * least six times the loadfactor will decay td_estcpu to zero.
563 */
564 static void
565 updatepri(struct thread *td)
566 {
567 struct td_sched *ts;
568 fixpt_t loadfac;
569 unsigned int newcpu;
570
571 ts = td->td_sched;
572 loadfac = loadfactor(averunnable.ldavg[0]);
573 if (ts->ts_slptime > 5 * loadfac)
574 td->td_estcpu = 0;
575 else {
576 newcpu = td->td_estcpu;
577 ts->ts_slptime--; /* was incremented in schedcpu() */
578 while (newcpu && --ts->ts_slptime)
579 newcpu = decay_cpu(loadfac, newcpu);
580 td->td_estcpu = newcpu;
581 }
582 }
583
584 /*
585 * Compute the priority of a process when running in user mode.
586 * Arrange to reschedule if the resulting priority is better
587 * than that of the current process.
588 */
589 static void
590 resetpriority(struct thread *td)
591 {
592 register unsigned int newpriority;
593
594 if (td->td_pri_class == PRI_TIMESHARE) {
595 newpriority = PUSER + td->td_estcpu / INVERSE_ESTCPU_WEIGHT +
596 NICE_WEIGHT * (td->td_proc->p_nice - PRIO_MIN);
597 newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
598 PRI_MAX_TIMESHARE);
599 sched_user_prio(td, newpriority);
600 }
601 }
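/*
 * Worked example (illustrative, assuming the stock PRIO_MIN of -20 and a
 * UP INVERSE_ESTCPU_WEIGHT of 8): a timeshare thread with td_estcpu == 64
 * and nice 0 gets newpriority = PUSER + 64/8 + 1 * (0 - (-20)) =
 * PUSER + 28, which resetpriority() then clamps into
 * [PRI_MIN_TIMESHARE, PRI_MAX_TIMESHARE].
 */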
602
603 /*
604 * Update the thread's priority when the associated process's user
605 * priority changes.
606 */
607 static void
608 resetpriority_thread(struct thread *td)
609 {
610
611 /* Only change threads with a time sharing user priority. */
612 if (td->td_priority < PRI_MIN_TIMESHARE ||
613 td->td_priority > PRI_MAX_TIMESHARE)
614 return;
615
616 /* XXX the whole needresched thing is broken, but not silly. */
617 maybe_resched(td);
618
619 sched_prio(td, td->td_user_pri);
620 }
621
622 /* ARGSUSED */
623 static void
624 sched_setup(void *dummy)
625 {
626 setup_runqs();
627
628 if (sched_quantum == 0)
629 sched_quantum = SCHED_QUANTUM;
630 hogticks = 2 * sched_quantum;
631
632 /* Account for thread0. */
633 sched_load_add();
634 }
635
636 /* External interfaces start here */
637
638 /*
639 * Very early in the boot some setup of scheduler-specific
640 * parts of proc0 and of some scheduler resources needs to be done.
641 * Called from:
642 * proc0_init()
643 */
644 void
645 schedinit(void)
646 {
647 /*
648 * Set up the scheduler specific parts of proc0.
649 */
650 proc0.p_sched = NULL; /* XXX */
651 thread0.td_sched = &td_sched0;
652 thread0.td_lock = &sched_lock;
653 mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
654 }
655
656 int
657 sched_runnable(void)
658 {
659 #ifdef SMP
660 return runq_check(&runq) + runq_check(&runq_pcpu[PCPU_GET(cpuid)]);
661 #else
662 return runq_check(&runq);
663 #endif
664 }
665
666 int
667 sched_rr_interval(void)
668 {
669 if (sched_quantum == 0)
670 sched_quantum = SCHED_QUANTUM;
671 return (sched_quantum);
672 }
673
674 /*
675 * We adjust the priority of the current process. The priority of
676 * a process gets worse as it accumulates CPU time. The cpu usage
677 * estimator (td_estcpu) is increased here. resetpriority() will
678 * compute a different priority each time td_estcpu increases by
679 * INVERSE_ESTCPU_WEIGHT
680 * (until MAXPRI is reached). The cpu usage estimator ramps up
681 * quite quickly when the process is running (linearly), and decays
682 * away exponentially, at a rate which is proportionally slower when
683 * the system is busy. The basic principle is that the system will
684 * 90% forget that the process used a lot of CPU time in 5 * loadav
685 * seconds. This causes the system to favor processes which haven't
686 * run much recently, and to round-robin among other processes.
687 */
688 void
689 sched_clock(struct thread *td)
690 {
691 struct td_sched *ts;
692
693 THREAD_LOCK_ASSERT(td, MA_OWNED);
694 ts = td->td_sched;
695
696 ts->ts_cpticks++;
697 td->td_estcpu = ESTCPULIM(td->td_estcpu + 1);
698 if ((td->td_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
699 resetpriority(td);
700 resetpriority_thread(td);
701 }
702
703 /*
704 * Force a context switch if the current thread has used up a full
705 * quantum (default quantum is 100ms).
706 */
707 if (!TD_IS_IDLETHREAD(td) &&
708 ticks - PCPU_GET(switchticks) >= sched_quantum)
709 td->td_flags |= TDF_NEEDRESCHED;
710 }
711
712 /*
713 * Charge child's scheduling CPU usage to parent.
714 */
715 void
716 sched_exit(struct proc *p, struct thread *td)
717 {
718
719 KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "proc exit",
720 "prio:%d", td->td_priority);
721
722 PROC_LOCK_ASSERT(p, MA_OWNED);
723 sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
724 }
725
726 void
727 sched_exit_thread(struct thread *td, struct thread *child)
728 {
729
730 KTR_STATE1(KTR_SCHED, "thread", sched_tdname(child), "exit",
731 "prio:%d", child->td_priority);
732 thread_lock(td);
733 td->td_estcpu = ESTCPULIM(td->td_estcpu + child->td_estcpu);
734 thread_unlock(td);
735 thread_lock(child);
736 if ((child->td_flags & TDF_NOLOAD) == 0)
737 sched_load_rem();
738 thread_unlock(child);
739 }
740
741 void
742 sched_fork(struct thread *td, struct thread *childtd)
743 {
744 sched_fork_thread(td, childtd);
745 }
746
747 void
748 sched_fork_thread(struct thread *td, struct thread *childtd)
749 {
750 struct td_sched *ts;
751
752 childtd->td_estcpu = td->td_estcpu;
753 childtd->td_lock = &sched_lock;
754 childtd->td_cpuset = cpuset_ref(td->td_cpuset);
755 childtd->td_priority = childtd->td_base_pri;
756 ts = childtd->td_sched;
757 bzero(ts, sizeof(*ts));
758 ts->ts_flags |= (td->td_sched->ts_flags & TSF_AFFINITY);
759 }
760
761 void
762 sched_nice(struct proc *p, int nice)
763 {
764 struct thread *td;
765
766 PROC_LOCK_ASSERT(p, MA_OWNED);
767 p->p_nice = nice;
768 FOREACH_THREAD_IN_PROC(p, td) {
769 thread_lock(td);
770 resetpriority(td);
771 resetpriority_thread(td);
772 thread_unlock(td);
773 }
774 }
775
776 void
777 sched_class(struct thread *td, int class)
778 {
779 THREAD_LOCK_ASSERT(td, MA_OWNED);
780 td->td_pri_class = class;
781 }
782
783 /*
784 * Adjust the priority of a thread.
785 */
786 static void
787 sched_priority(struct thread *td, u_char prio)
788 {
789
790
791 KTR_POINT3(KTR_SCHED, "thread", sched_tdname(td), "priority change",
792 "prio:%d", td->td_priority, "new prio:%d", prio, KTR_ATTR_LINKED,
793 sched_tdname(curthread));
794 if (td != curthread && prio > td->td_priority) {
795 KTR_POINT3(KTR_SCHED, "thread", sched_tdname(curthread),
796 "lend prio", "prio:%d", td->td_priority, "new prio:%d",
797 prio, KTR_ATTR_LINKED, sched_tdname(td));
798 }
799 THREAD_LOCK_ASSERT(td, MA_OWNED);
800 if (td->td_priority == prio)
801 return;
802 td->td_priority = prio;
803 if (TD_ON_RUNQ(td) && td->td_rqindex != (prio / RQ_PPQ)) {
804 sched_rem(td);
805 sched_add(td, SRQ_BORING);
806 }
807 }
808
809 /*
810 * Update a thread's priority when it is lent another thread's
811 * priority.
812 */
813 void
814 sched_lend_prio(struct thread *td, u_char prio)
815 {
816
817 td->td_flags |= TDF_BORROWING;
818 sched_priority(td, prio);
819 }
820
821 /*
822 * Restore a thread's priority when priority propagation is
823 * over. The prio argument is the minimum priority the thread
824 * needs to have to satisfy other possible priority lending
825 * requests. If the thread's regular priority is less
826 * important than prio, the thread will keep a priority boost
827 * of prio.
828 */
829 void
830 sched_unlend_prio(struct thread *td, u_char prio)
831 {
832 u_char base_pri;
833
834 if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
835 td->td_base_pri <= PRI_MAX_TIMESHARE)
836 base_pri = td->td_user_pri;
837 else
838 base_pri = td->td_base_pri;
839 if (prio >= base_pri) {
840 td->td_flags &= ~TDF_BORROWING;
841 sched_prio(td, base_pri);
842 } else
843 sched_lend_prio(td, prio);
844 }
845
846 void
847 sched_prio(struct thread *td, u_char prio)
848 {
849 u_char oldprio;
850
851 /* First, update the base priority. */
852 td->td_base_pri = prio;
853
854 /*
855 * If the thread is borrowing another thread's priority, don't ever
856 * lower the priority.
857 */
858 if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
859 return;
860
861 /* Change the real priority. */
862 oldprio = td->td_priority;
863 sched_priority(td, prio);
864
865 /*
866 * If the thread is on a turnstile, then let the turnstile update
867 * its state.
868 */
869 if (TD_ON_LOCK(td) && oldprio != prio)
870 turnstile_adjust(td, oldprio);
871 }
872
873 void
874 sched_user_prio(struct thread *td, u_char prio)
875 {
876 u_char oldprio;
877
878 THREAD_LOCK_ASSERT(td, MA_OWNED);
879 td->td_base_user_pri = prio;
880 if (td->td_flags & TDF_UBORROWING && td->td_user_pri <= prio)
881 return;
882 oldprio = td->td_user_pri;
883 td->td_user_pri = prio;
884 }
885
886 void
887 sched_lend_user_prio(struct thread *td, u_char prio)
888 {
889 u_char oldprio;
890
891 THREAD_LOCK_ASSERT(td, MA_OWNED);
892 td->td_flags |= TDF_UBORROWING;
893 oldprio = td->td_user_pri;
894 td->td_user_pri = prio;
895 }
896
897 void
898 sched_unlend_user_prio(struct thread *td, u_char prio)
899 {
900 u_char base_pri;
901
902 THREAD_LOCK_ASSERT(td, MA_OWNED);
903 base_pri = td->td_base_user_pri;
904 if (prio >= base_pri) {
905 td->td_flags &= ~TDF_UBORROWING;
906 sched_user_prio(td, base_pri);
907 } else {
908 sched_lend_user_prio(td, prio);
909 }
910 }
911
912 void
913 sched_sleep(struct thread *td, int pri)
914 {
915
916 THREAD_LOCK_ASSERT(td, MA_OWNED);
917 td->td_slptick = ticks;
918 td->td_sched->ts_slptime = 0;
919 if (pri)
920 sched_prio(td, pri);
921 if (TD_IS_SUSPENDED(td) || pri >= PSOCK)
922 td->td_flags |= TDF_CANSWAP;
923 }
924
925 void
926 sched_switch(struct thread *td, struct thread *newtd, int flags)
927 {
928 struct mtx *tmtx;
929 struct td_sched *ts;
930 struct proc *p;
931
932 tmtx = NULL;
933 ts = td->td_sched;
934 p = td->td_proc;
935
936 THREAD_LOCK_ASSERT(td, MA_OWNED);
937
938 /*
939 * Switch to the sched lock to fix things up and pick
940 * a new thread.
941 * Block the td_lock in order to avoid breaking the critical path.
942 */
943 if (td->td_lock != &sched_lock) {
944 mtx_lock_spin(&sched_lock);
945 tmtx = thread_lock_block(td);
946 }
947
948 if ((td->td_flags & TDF_NOLOAD) == 0)
949 sched_load_rem();
950
951 td->td_lastcpu = td->td_oncpu;
952 if (!(flags & SW_PREEMPT))
953 td->td_flags &= ~TDF_NEEDRESCHED;
954 td->td_owepreempt = 0;
955 td->td_oncpu = NOCPU;
956
957 /*
958 * At the last moment, if this thread is still marked RUNNING,
959 * then put it back on the run queue as it has not been suspended
960 * or stopped or any thing else similar. We never put the idle
961 * threads on the run queue, however.
962 */
963 if (td->td_flags & TDF_IDLETD) {
964 TD_SET_CAN_RUN(td);
965 #ifdef SMP
966 idle_cpus_mask &= ~PCPU_GET(cpumask);
967 #endif
968 } else {
969 if (TD_IS_RUNNING(td)) {
970 /* Put us back on the run queue. */
971 sched_add(td, (flags & SW_PREEMPT) ?
972 SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
973 SRQ_OURSELF|SRQ_YIELDING);
974 }
975 }
976 if (newtd) {
977 /*
978 * The thread we are about to run needs to be counted
979 * as if it had been added to the run queue and selected.
980 * It came from:
981 * * A preemption
982 * * An upcall
983 * * A followon
984 */
985 KASSERT((newtd->td_inhibitors == 0),
986 ("trying to run inhibited thread"));
987 newtd->td_flags |= TDF_DIDRUN;
988 TD_SET_RUNNING(newtd);
989 if ((newtd->td_flags & TDF_NOLOAD) == 0)
990 sched_load_add();
991 } else {
992 newtd = choosethread();
993 MPASS(newtd->td_lock == &sched_lock);
994 }
995
996 if (td != newtd) {
997 #ifdef HWPMC_HOOKS
998 if (PMC_PROC_IS_USING_PMCS(td->td_proc))
999 PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
1000 #endif
1001 /* I feel sleepy */
1002 lock_profile_release_lock(&sched_lock.lock_object);
1003 #ifdef KDTRACE_HOOKS
1004 /*
1005 * If DTrace has set the active vtime enum to anything
1006 * other than INACTIVE (0), then it should have set the
1007 * function to call.
1008 */
1009 if (dtrace_vtime_active)
1010 (*dtrace_vtime_switch_func)(newtd);
1011 #endif
1012
1013 cpu_switch(td, newtd, tmtx != NULL ? tmtx : td->td_lock);
1014 lock_profile_obtain_lock_success(&sched_lock.lock_object,
1015 0, 0, __FILE__, __LINE__);
1016 /*
1017 * Where am I? What year is it?
1018 * We are in the same thread that went to sleep above,
1019 * but any amount of time may have passed. All our context
1020 * will still be available as will local variables.
1021 * PCPU values however may have changed as we may have
1022 * changed CPU so don't trust cached values of them.
1023 * New threads will go to fork_exit() instead of here
1024 * so if you change things here you may need to change
1025 * things there too.
1026 *
1027 * If the thread above was exiting it will never wake
1028 * up again here, so either it has saved everything it
1029 * needed to, or the thread_wait() or wait() will
1030 * need to reap it.
1031 */
1032 #ifdef HWPMC_HOOKS
1033 if (PMC_PROC_IS_USING_PMCS(td->td_proc))
1034 PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
1035 #endif
1036 }
1037
1038 #ifdef SMP
1039 if (td->td_flags & TDF_IDLETD)
1040 idle_cpus_mask |= PCPU_GET(cpumask);
1041 #endif
1042 sched_lock.mtx_lock = (uintptr_t)td;
1043 td->td_oncpu = PCPU_GET(cpuid);
1044 MPASS(td->td_lock == &sched_lock);
1045 }
1046
1047 void
1048 sched_wakeup(struct thread *td)
1049 {
1050 struct td_sched *ts;
1051
1052 THREAD_LOCK_ASSERT(td, MA_OWNED);
1053 ts = td->td_sched;
1054 td->td_flags &= ~TDF_CANSWAP;
1055 if (ts->ts_slptime > 1) {
1056 updatepri(td);
1057 resetpriority(td);
1058 }
1059 td->td_slptick = 0;
1060 ts->ts_slptime = 0;
1061 sched_add(td, SRQ_BORING);
1062 }
1063
1064 #ifdef SMP
1065 static int
1066 forward_wakeup(int cpunum)
1067 {
1068 struct pcpu *pc;
1069 cpumask_t dontuse, id, map, map2, map3, me;
1070
1071 mtx_assert(&sched_lock, MA_OWNED);
1072
1073 CTR0(KTR_RUNQ, "forward_wakeup()");
1074
1075 if ((!forward_wakeup_enabled) ||
1076 (forward_wakeup_use_mask == 0 && forward_wakeup_use_loop == 0))
1077 return (0);
1078 if (!smp_started || cold || panicstr)
1079 return (0);
1080
1081 forward_wakeups_requested++;
1082
1083 /*
1084 * Check the idle mask we received against what we calculated
1085 * before in the old version.
1086 */
1087 me = PCPU_GET(cpumask);
1088
1089 /* Don't bother if we should be doing it ourself. */
1090 if ((me & idle_cpus_mask) && (cpunum == NOCPU || me == (1 << cpunum)))
1091 return (0);
1092
1093 dontuse = me | stopped_cpus | hlt_cpus_mask;
1094 map3 = 0;
1095 if (forward_wakeup_use_loop) {
1096 SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
1097 id = pc->pc_cpumask;
1098 if ((id & dontuse) == 0 &&
1099 pc->pc_curthread == pc->pc_idlethread) {
1100 map3 |= id;
1101 }
1102 }
1103 }
1104
1105 if (forward_wakeup_use_mask) {
1106 map = 0;
1107 map = idle_cpus_mask & ~dontuse;
1108
1109 /* If they are both on, compare and use loop if different. */
1110 if (forward_wakeup_use_loop) {
1111 if (map != map3) {
1112 printf("map (%02X) != map3 (%02X)\n", map,
1113 map3);
1114 map = map3;
1115 }
1116 }
1117 } else {
1118 map = map3;
1119 }
1120
1121 /* If we only allow a specific CPU, then mask off all the others. */
1122 if (cpunum != NOCPU) {
1123 KASSERT((cpunum <= mp_maxcpus),("forward_wakeup: bad cpunum."));
1124 map &= (1 << cpunum);
1125 } else {
1126 /* Try to choose an idle die. */
1127 if (forward_wakeup_use_htt) {
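/*
 * (map & (map >> 1)) & 0x5555 keeps only even-numbered CPUs whose
 * odd-numbered sibling is also idle; the heuristic assumes 2-way HTT
 * with siblings numbered consecutively and covers at most the first
 * 16 CPUs.
 */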
1128 map2 = (map & (map >> 1)) & 0x5555;
1129 if (map2) {
1130 map = map2;
1131 }
1132 }
1133
1134 /* Set only one bit. */
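/* (map & (~map + 1), i.e. map & -map, isolates the lowest set bit.) */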
1135 if (forward_wakeup_use_single) {
1136 map = map & ((~map) + 1);
1137 }
1138 }
1139 if (map) {
1140 forward_wakeups_delivered++;
1141 ipi_selected(map, IPI_AST);
1142 return (1);
1143 }
1144 if (cpunum == NOCPU)
1145 printf("forward_wakeup: Idle processor not found\n");
1146 return (0);
1147 }
1148
1149 static void
1150 kick_other_cpu(int pri, int cpuid)
1151 {
1152 struct pcpu *pcpu;
1153 int cpri;
1154
1155 pcpu = pcpu_find(cpuid);
1156 if (idle_cpus_mask & pcpu->pc_cpumask) {
1157 forward_wakeups_delivered++;
1158 ipi_cpu(cpuid, IPI_AST);
1159 return;
1160 }
1161
1162 cpri = pcpu->pc_curthread->td_priority;
1163 if (pri >= cpri)
1164 return;
1165
1166 #if defined(IPI_PREEMPTION) && defined(PREEMPTION)
1167 #if !defined(FULL_PREEMPTION)
1168 if (pri <= PRI_MAX_ITHD)
1169 #endif /* ! FULL_PREEMPTION */
1170 {
1171 ipi_cpu(cpuid, IPI_PREEMPT);
1172 return;
1173 }
1174 #endif /* defined(IPI_PREEMPTION) && defined(PREEMPTION) */
1175
1176 pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED;
1177 ipi_cpu(cpuid, IPI_AST);
1178 return;
1179 }
1180 #endif /* SMP */
1181
1182 #ifdef SMP
1183 static int
1184 sched_pickcpu(struct thread *td)
1185 {
1186 int best, cpu;
1187
1188 mtx_assert(&sched_lock, MA_OWNED);
1189
1190 if (THREAD_CAN_SCHED(td, td->td_lastcpu))
1191 best = td->td_lastcpu;
1192 else
1193 best = NOCPU;
1194 CPU_FOREACH(cpu) {
1195 if (!THREAD_CAN_SCHED(td, cpu))
1196 continue;
1197
1198 if (best == NOCPU)
1199 best = cpu;
1200 else if (runq_length[cpu] < runq_length[best])
1201 best = cpu;
1202 }
1203 KASSERT(best != NOCPU, ("no valid CPUs"));
1204
1205 return (best);
1206 }
1207 #endif
1208
1209 void
1210 sched_add(struct thread *td, int flags)
1211 #ifdef SMP
1212 {
1213 struct td_sched *ts;
1214 int forwarded = 0;
1215 int cpu;
1216 int single_cpu = 0;
1217
1218 ts = td->td_sched;
1219 THREAD_LOCK_ASSERT(td, MA_OWNED);
1220 KASSERT((td->td_inhibitors == 0),
1221 ("sched_add: trying to run inhibited thread"));
1222 KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
1223 ("sched_add: bad thread state"));
1224 KASSERT(td->td_flags & TDF_INMEM,
1225 ("sched_add: thread swapped out"));
1226
1227 KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add",
1228 "prio:%d", td->td_priority, KTR_ATTR_LINKED,
1229 sched_tdname(curthread));
1230 KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup",
1231 KTR_ATTR_LINKED, sched_tdname(td));
1232
1233
1234 /*
1235 * Now that the thread is moving to the run-queue, set the lock
1236 * to the scheduler's lock.
1237 */
1238 if (td->td_lock != &sched_lock) {
1239 mtx_lock_spin(&sched_lock);
1240 thread_lock_set(td, &sched_lock);
1241 }
1242 TD_SET_RUNQ(td);
1243
1244 /*
1245 * If SMP is started and the thread is pinned or otherwise limited to
1246 * a specific set of CPUs, queue the thread to a per-CPU run queue.
1247 * Otherwise, queue the thread to the global run queue.
1248 *
1249 * If SMP has not yet been started we must use the global run queue
1250 * as per-CPU state may not be initialized yet and we may crash if we
1251 * try to access the per-CPU run queues.
1252 */
1253 if (smp_started && (td->td_pinned != 0 || td->td_flags & TDF_BOUND ||
1254 ts->ts_flags & TSF_AFFINITY)) {
1255 if (td->td_pinned != 0)
1256 cpu = td->td_lastcpu;
1257 else if (td->td_flags & TDF_BOUND) {
1258 /* Find CPU from bound runq. */
1259 KASSERT(SKE_RUNQ_PCPU(ts),
1260 ("sched_add: bound td_sched not on cpu runq"));
1261 cpu = ts->ts_runq - &runq_pcpu[0];
1262 } else
1263 /* Find a valid CPU for our cpuset */
1264 cpu = sched_pickcpu(td);
1265 ts->ts_runq = &runq_pcpu[cpu];
1266 single_cpu = 1;
1267 CTR3(KTR_RUNQ,
1268 "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td,
1269 cpu);
1270 } else {
1271 CTR2(KTR_RUNQ,
1272 "sched_add: adding td_sched:%p (td:%p) to gbl runq", ts,
1273 td);
1274 cpu = NOCPU;
1275 ts->ts_runq = &runq;
1276 }
1277
1278 if (single_cpu && (cpu != PCPU_GET(cpuid))) {
1279 kick_other_cpu(td->td_priority, cpu);
1280 } else {
1281 if (!single_cpu) {
1282 cpumask_t me = PCPU_GET(cpumask);
1283 cpumask_t idle = idle_cpus_mask & me;
1284
1285 if (!idle && ((flags & SRQ_INTR) == 0) &&
1286 (idle_cpus_mask & ~(hlt_cpus_mask | me)))
1287 forwarded = forward_wakeup(cpu);
1288 }
1289
1290 if (!forwarded) {
1291 if ((flags & SRQ_YIELDING) == 0 && maybe_preempt(td))
1292 return;
1293 else
1294 maybe_resched(td);
1295 }
1296 }
1297
1298 if ((td->td_flags & TDF_NOLOAD) == 0)
1299 sched_load_add();
1300 runq_add(ts->ts_runq, td, flags);
1301 if (cpu != NOCPU)
1302 runq_length[cpu]++;
1303 }
1304 #else /* SMP */
1305 {
1306 struct td_sched *ts;
1307
1308 ts = td->td_sched;
1309 THREAD_LOCK_ASSERT(td, MA_OWNED);
1310 KASSERT((td->td_inhibitors == 0),
1311 ("sched_add: trying to run inhibited thread"));
1312 KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
1313 ("sched_add: bad thread state"));
1314 KASSERT(td->td_flags & TDF_INMEM,
1315 ("sched_add: thread swapped out"));
1316 KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add",
1317 "prio:%d", td->td_priority, KTR_ATTR_LINKED,
1318 sched_tdname(curthread));
1319 KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup",
1320 KTR_ATTR_LINKED, sched_tdname(td));
1321
1322 /*
1323 * Now that the thread is moving to the run-queue, set the lock
1324 * to the scheduler's lock.
1325 */
1326 if (td->td_lock != &sched_lock) {
1327 mtx_lock_spin(&sched_lock);
1328 thread_lock_set(td, &sched_lock);
1329 }
1330 TD_SET_RUNQ(td);
1331 CTR2(KTR_RUNQ, "sched_add: adding td_sched:%p (td:%p) to runq", ts, td);
1332 ts->ts_runq = &runq;
1333
1334 /*
1335 * If we are yielding (on the way out anyhow) or the thread
1336 * being saved is US, then don't try to be smart about preemption
1337 * or kicking off another CPU as it won't help and may hinder.
1338 * In the YIELDING case, we are about to run whoever is being
1339 * put in the queue anyhow, and in the OURSELF case, we are
1340 * putting ourselves on the run queue, which also only happens
1341 * when we are about to yield.
1342 */
1343 if ((flags & SRQ_YIELDING) == 0) {
1344 if (maybe_preempt(td))
1345 return;
1346 }
1347 if ((td->td_flags & TDF_NOLOAD) == 0)
1348 sched_load_add();
1349 runq_add(ts->ts_runq, td, flags);
1350 maybe_resched(td);
1351 }
1352 #endif /* SMP */
1353
1354 void
1355 sched_rem(struct thread *td)
1356 {
1357 struct td_sched *ts;
1358
1359 ts = td->td_sched;
1360 KASSERT(td->td_flags & TDF_INMEM,
1361 ("sched_rem: thread swapped out"));
1362 KASSERT(TD_ON_RUNQ(td),
1363 ("sched_rem: thread not on run queue"));
1364 mtx_assert(&sched_lock, MA_OWNED);
1365 KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq rem",
1366 "prio:%d", td->td_priority, KTR_ATTR_LINKED,
1367 sched_tdname(curthread));
1368
1369 if ((td->td_flags & TDF_NOLOAD) == 0)
1370 sched_load_rem();
1371 #ifdef SMP
1372 if (ts->ts_runq != &runq)
1373 runq_length[ts->ts_runq - runq_pcpu]--;
1374 #endif
1375 runq_remove(ts->ts_runq, td);
1376 TD_SET_CAN_RUN(td);
1377 }
1378
1379 /*
1380 * Select threads to run. Note that running threads still consume a
1381 * slot.
1382 */
1383 struct thread *
1384 sched_choose(void)
1385 {
1386 struct thread *td;
1387 struct runq *rq;
1388
1389 mtx_assert(&sched_lock, MA_OWNED);
1390 #ifdef SMP
1391 struct thread *tdcpu;
1392
1393 rq = &runq;
1394 td = runq_choose_fuzz(&runq, runq_fuzz);
1395 tdcpu = runq_choose(&runq_pcpu[PCPU_GET(cpuid)]);
1396
1397 if (td == NULL ||
1398 (tdcpu != NULL &&
1399 tdcpu->td_priority < td->td_priority)) {
1400 CTR2(KTR_RUNQ, "choosing td %p from pcpu runq %d", tdcpu,
1401 PCPU_GET(cpuid));
1402 td = tdcpu;
1403 rq = &runq_pcpu[PCPU_GET(cpuid)];
1404 } else {
1405 CTR1(KTR_RUNQ, "choosing td_sched %p from main runq", td);
1406 }
1407
1408 #else
1409 rq = &runq;
1410 td = runq_choose(&runq);
1411 #endif
1412
1413 if (td) {
1414 #ifdef SMP
1415 if (td == tdcpu)
1416 runq_length[PCPU_GET(cpuid)]--;
1417 #endif
1418 runq_remove(rq, td);
1419 td->td_flags |= TDF_DIDRUN;
1420
1421 KASSERT(td->td_flags & TDF_INMEM,
1422 ("sched_choose: thread swapped out"));
1423 return (td);
1424 }
1425 return (PCPU_GET(idlethread));
1426 }
1427
1428 void
1429 sched_preempt(struct thread *td)
1430 {
1431 thread_lock(td);
1432 if (td->td_critnest > 1)
1433 td->td_owepreempt = 1;
1434 else
1435 mi_switch(SW_INVOL | SW_PREEMPT | SWT_PREEMPT, NULL);
1436 thread_unlock(td);
1437 }
1438
1439 void
1440 sched_userret(struct thread *td)
1441 {
1442 /*
1443 * XXX we cheat slightly on the locking here to avoid locking in
1444 * the usual case. Setting td_priority here is essentially an
1445 * incomplete workaround for not setting it properly elsewhere.
1446 * Now that some interrupt handlers are threads, not setting it
1447 * properly elsewhere can clobber it in the window between setting
1448 * it here and returning to user mode, so don't waste time setting
1449 * it perfectly here.
1450 */
1451 KASSERT((td->td_flags & TDF_BORROWING) == 0,
1452 ("thread with borrowed priority returning to userland"));
1453 if (td->td_priority != td->td_user_pri) {
1454 thread_lock(td);
1455 td->td_priority = td->td_user_pri;
1456 td->td_base_pri = td->td_user_pri;
1457 thread_unlock(td);
1458 }
1459 }
1460
1461 void
1462 sched_bind(struct thread *td, int cpu)
1463 {
1464 struct td_sched *ts;
1465
1466 THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED);
1467 KASSERT(td == curthread, ("sched_bind: can only bind curthread"));
1468
1469 ts = td->td_sched;
1470
1471 td->td_flags |= TDF_BOUND;
1472 #ifdef SMP
1473 ts->ts_runq = &runq_pcpu[cpu];
1474 if (PCPU_GET(cpuid) == cpu)
1475 return;
1476
1477 mi_switch(SW_VOL, NULL);
1478 #endif
1479 }
1480
1481 void
1482 sched_unbind(struct thread* td)
1483 {
1484 THREAD_LOCK_ASSERT(td, MA_OWNED);
1485 KASSERT(td == curthread, ("sched_unbind: can only bind curthread"));
1486 td->td_flags &= ~TDF_BOUND;
1487 }
1488
1489 int
1490 sched_is_bound(struct thread *td)
1491 {
1492 THREAD_LOCK_ASSERT(td, MA_OWNED);
1493 return (td->td_flags & TDF_BOUND);
1494 }
1495
1496 void
1497 sched_relinquish(struct thread *td)
1498 {
1499 thread_lock(td);
1500 mi_switch(SW_VOL | SWT_RELINQUISH, NULL);
1501 thread_unlock(td);
1502 }
1503
1504 int
1505 sched_load(void)
1506 {
1507 return (sched_tdcnt);
1508 }
1509
1510 int
1511 sched_sizeof_proc(void)
1512 {
1513 return (sizeof(struct proc));
1514 }
1515
1516 int
1517 sched_sizeof_thread(void)
1518 {
1519 return (sizeof(struct thread) + sizeof(struct td_sched));
1520 }
1521
1522 fixpt_t
1523 sched_pctcpu(struct thread *td)
1524 {
1525 struct td_sched *ts;
1526
1527 THREAD_LOCK_ASSERT(td, MA_OWNED);
1528 ts = td->td_sched;
1529 return (ts->ts_pctcpu);
1530 }
1531
1532 void
1533 sched_tick(void)
1534 {
1535 }
1536
1537 /*
1538 * The actual idle process.
1539 */
1540 void
1541 sched_idletd(void *dummy)
1542 {
1543
1544 for (;;) {
1545 mtx_assert(&Giant, MA_NOTOWNED);
1546
1547 while (sched_runnable() == 0)
1548 cpu_idle(0);
1549
1550 mtx_lock_spin(&sched_lock);
1551 mi_switch(SW_VOL | SWT_IDLE, NULL);
1552 mtx_unlock_spin(&sched_lock);
1553 }
1554 }
1555
1556 /*
1557 * A CPU is entering for the first time or a thread is exiting.
1558 */
1559 void
1560 sched_throw(struct thread *td)
1561 {
1562 /*
1563 * Correct spinlock nesting. The idle thread context that we are
1564 * borrowing was created so that it would start out with a single
1565 * spin lock (sched_lock) held in fork_trampoline(). Since we've
1566 * explicitly acquired locks in this function, the nesting count
1567 * is now 2 rather than 1. Since we are nested, calling
1568 * spinlock_exit() will simply adjust the counts without allowing
1569 * spin lock using code to interrupt us.
1570 */
1571 if (td == NULL) {
1572 mtx_lock_spin(&sched_lock);
1573 spinlock_exit();
1574 PCPU_SET(switchtime, cpu_ticks());
1575 PCPU_SET(switchticks, ticks);
1576 } else {
1577 lock_profile_release_lock(&sched_lock.lock_object);
1578 MPASS(td->td_lock == &sched_lock);
1579 }
1580 mtx_assert(&sched_lock, MA_OWNED);
1581 KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
1582 cpu_throw(td, choosethread()); /* doesn't return */
1583 }
1584
1585 void
1586 sched_fork_exit(struct thread *td)
1587 {
1588
1589 /*
1590 * Finish setting up thread glue so that it begins execution in a
1591 * non-nested critical section with sched_lock held but not recursed.
1592 */
1593 td->td_oncpu = PCPU_GET(cpuid);
1594 sched_lock.mtx_lock = (uintptr_t)td;
1595 lock_profile_obtain_lock_success(&sched_lock.lock_object,
1596 0, 0, __FILE__, __LINE__);
1597 THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
1598 }
1599
1600 char *
1601 sched_tdname(struct thread *td)
1602 {
1603 #ifdef KTR
1604 struct td_sched *ts;
1605
1606 ts = td->td_sched;
1607 if (ts->ts_name[0] == '\0')
1608 snprintf(ts->ts_name, sizeof(ts->ts_name),
1609 "%s tid %d", td->td_name, td->td_tid);
1610 return (ts->ts_name);
1611 #else
1612 return (td->td_name);
1613 #endif
1614 }
1615
1616 void
1617 sched_affinity(struct thread *td)
1618 {
1619 #ifdef SMP
1620 struct td_sched *ts;
1621 int cpu;
1622
1623 THREAD_LOCK_ASSERT(td, MA_OWNED);
1624
1625 /*
1626 * Set the TSF_AFFINITY flag if there is at least one CPU this
1627 * thread can't run on.
1628 */
1629 ts = td->td_sched;
1630 ts->ts_flags &= ~TSF_AFFINITY;
1631 CPU_FOREACH(cpu) {
1632 if (!THREAD_CAN_SCHED(td, cpu)) {
1633 ts->ts_flags |= TSF_AFFINITY;
1634 break;
1635 }
1636 }
1637
1638 /*
1639 * If this thread can run on all CPUs, nothing else to do.
1640 */
1641 if (!(ts->ts_flags & TSF_AFFINITY))
1642 return;
1643
1644 /* Pinned threads and bound threads should be left alone. */
1645 if (td->td_pinned != 0 || td->td_flags & TDF_BOUND)
1646 return;
1647
1648 switch (td->td_state) {
1649 case TDS_RUNQ:
1650 /*
1651 * If we are on a per-CPU runqueue that is in the set,
1652 * then nothing needs to be done.
1653 */
1654 if (ts->ts_runq != &runq &&
1655 THREAD_CAN_SCHED(td, ts->ts_runq - runq_pcpu))
1656 return;
1657
1658 /* Put this thread on a valid per-CPU runqueue. */
1659 sched_rem(td);
1660 sched_add(td, SRQ_BORING);
1661 break;
1662 case TDS_RUNNING:
1663 /*
1664 * See if our current CPU is in the set. If not, force a
1665 * context switch.
1666 */
1667 if (THREAD_CAN_SCHED(td, td->td_oncpu))
1668 return;
1669
1670 td->td_flags |= TDF_NEEDRESCHED;
1671 if (td != curthread)
1672 ipi_cpu(cpu, IPI_AST);
1673 break;
1674 default:
1675 break;
1676 }
1677 #endif
1678 }