/*-
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_sched.h"

#ifndef KERN_SWITCH_INCLUDE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#else  /* KERN_SWITCH_INCLUDE */
#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
#include <sys/smp.h>
#endif
#if defined(SMP) && defined(SCHED_4BSD)
#include <sys/sysctl.h>
#endif

#include <machine/cpu.h>

/* Uncomment this to enable logging of critical_enter/exit. */
#if 0
#define	KTR_CRITICAL	KTR_SCHED
#else
#define	KTR_CRITICAL	0
#endif

#ifdef FULL_PREEMPTION
#ifndef PREEMPTION
#error "The FULL_PREEMPTION option requires the PREEMPTION option"
#endif
#endif

CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);
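
/*
 * The status bitmap must provide exactly one bit per run queue.  As a
 * concrete sketch (the constants live in <sys/runq.h> and vary by
 * platform): with RQ_NQS == 64 queues and RQB_BPW == 32 bits per status
 * word, RQB_LEN must be 2 words; with 64-bit words a single word suffices.
 */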

/*
 * kern.sched.preemption allows user space to determine whether preemption
 * support is compiled in.  It is read-only: preemption cannot currently be
 * enabled or disabled as a boot or run-time flag.
 */
#ifdef PREEMPTION
static int kern_sched_preemption = 1;
#else
static int kern_sched_preemption = 0;
#endif
SYSCTL_INT(_kern_sched, OID_AUTO, preemption, CTLFLAG_RD,
    &kern_sched_preemption, 0, "Kernel preemption enabled");
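
/*
 * From user space this reads as, e.g. on a kernel built with
 * options PREEMPTION:
 *
 *	$ sysctl kern.sched.preemption
 *	kern.sched.preemption: 1
 */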

#ifdef SCHED_STATS
long switch_preempt;
long switch_owepreempt;
long switch_turnstile;
long switch_sleepq;
long switch_sleepqtimo;
long switch_relinquish;
long switch_needresched;
static SYSCTL_NODE(_kern_sched, OID_AUTO, stats, CTLFLAG_RW, 0,
    "switch stats");
SYSCTL_LONG(_kern_sched_stats, OID_AUTO, preempt, CTLFLAG_RD,
    &switch_preempt, 0, "");
SYSCTL_LONG(_kern_sched_stats, OID_AUTO, owepreempt, CTLFLAG_RD,
    &switch_owepreempt, 0, "");
SYSCTL_LONG(_kern_sched_stats, OID_AUTO, turnstile, CTLFLAG_RD,
    &switch_turnstile, 0, "");
SYSCTL_LONG(_kern_sched_stats, OID_AUTO, sleepq, CTLFLAG_RD,
    &switch_sleepq, 0, "");
SYSCTL_LONG(_kern_sched_stats, OID_AUTO, sleepqtimo, CTLFLAG_RD,
    &switch_sleepqtimo, 0, "");
SYSCTL_LONG(_kern_sched_stats, OID_AUTO, relinquish, CTLFLAG_RD,
    &switch_relinquish, 0, "");
SYSCTL_LONG(_kern_sched_stats, OID_AUTO, needresched, CTLFLAG_RD,
    &switch_needresched, 0, "");
static int
sysctl_stats_reset(SYSCTL_HANDLER_ARGS)
{
	int error;
	int val;

	val = 0;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (val == 0)
		return (0);
	switch_preempt = 0;
	switch_owepreempt = 0;
	switch_turnstile = 0;
	switch_sleepq = 0;
	switch_sleepqtimo = 0;
	switch_relinquish = 0;
	switch_needresched = 0;

	return (0);
}

SYSCTL_PROC(_kern_sched_stats, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_WR, NULL,
    0, sysctl_stats_reset, "I", "Reset scheduler statistics");
#endif
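
/*
 * On a SCHED_STATS kernel the counters can be inspected and cleared from
 * user space; writing any non-zero value to the reset node zeroes them
 * all, e.g.:
 *
 *	$ sysctl kern.sched.stats
 *	$ sysctl kern.sched.stats.reset=1
 */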

/************************************************************************
 * Functions that manipulate runnability from a thread perspective.	*
 ************************************************************************/
/*
 * Select the thread that will be run next.
 */
struct thread *
choosethread(void)
{
	struct thread *td;

retry:
	td = sched_choose();

	/*
	 * If we are panicking, only allow system threads, plus the thread
	 * we are already running in, to be run.
	 */
	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
	    (td->td_flags & TDF_INPANIC) == 0)) {
		/* Note that it is no longer on the run queue. */
		TD_SET_CAN_RUN(td);
		goto retry;
	}

	TD_SET_RUNNING(td);
	return (td);
}

/*
 * Kernel thread preemption implementation.  Critical sections mark
 * regions of code in which preemption is not allowed.
 */
void
critical_enter(void)
{
	struct thread *td;

	td = curthread;
	td->td_critnest++;
	CTR4(KTR_CRITICAL, "critical_enter by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
}

void
critical_exit(void)
{
	struct thread *td;

	td = curthread;
	KASSERT(td->td_critnest != 0,
	    ("critical_exit: td_critnest == 0"));

	if (td->td_critnest == 1) {
		td->td_critnest = 0;
		if (td->td_owepreempt) {
			/*
			 * Re-enter a critical section while acquiring the
			 * thread lock so the deferred switch itself cannot
			 * be preempted, then drop the nesting level before
			 * switching away.
			 */
			td->td_critnest = 1;
			thread_lock(td);
			td->td_critnest--;
			SCHED_STAT_INC(switch_owepreempt);
			mi_switch(SW_INVOL|SW_PREEMPT, NULL);
			thread_unlock(td);
		}
	} else
		td->td_critnest--;

	CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
}
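
/*
 * The canonical usage is a balanced pair bracketing code that must not
 * be preempted, for example (a sketch; the body is illustrative only):
 *
 *	critical_enter();
 *	... access per-CPU state that must not be preempted ...
 *	critical_exit();
 *
 * Sections nest via td_critnest, and a preemption owed while nested is
 * only paid when the outermost critical_exit() drops the count to zero.
 */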

/*
 * This function is called when a thread is about to be put on a run queue
 * because it has been made runnable or its priority has been adjusted.  It
 * determines whether the current thread should immediately be preempted in
 * favor of the new thread.  If so, it switches to the new thread and
 * eventually returns true.  If not, it returns false so that the caller
 * may place the thread on an appropriate run queue.
 */
int
maybe_preempt(struct thread *td)
{
#ifdef PREEMPTION
	struct thread *ctd;
	int cpri, pri;
#endif

#ifdef PREEMPTION
	/*
	 * The new thread should not preempt the current thread if any of the
	 * following conditions are true:
	 *
	 *  - The kernel is in the throes of crashing (panicstr).
	 *  - The current thread has a higher (numerically lower) or
	 *    equivalent priority.  Note that this prevents curthread from
	 *    trying to preempt to itself.
	 *  - It is too early in the boot for context switches (cold is set).
	 *  - The current thread has an inhibitor set or is in the process of
	 *    exiting.  In this case, the current thread is about to switch
	 *    out anyway, so there's no point in preempting.  If we did,
	 *    the current thread would not be properly resumed either, so
	 *    just avoid that whole landmine.
	 *  - The new thread's priority is not a real-time priority, the
	 *    current thread's priority is not an idle priority, and
	 *    FULL_PREEMPTION is disabled.
	 *
	 * If all of these conditions are false, but the current thread is in
	 * a nested critical section, then we have to defer the preemption
	 * until we exit the critical section.  Otherwise, switch immediately
	 * to the new thread.
	 */
	ctd = curthread;
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT((ctd->td_sched != NULL && ctd->td_sched->ts_thread == ctd),
	    ("thread has no (or wrong) sched-private part."));
	KASSERT((td->td_inhibitors == 0),
	    ("maybe_preempt: trying to run inhibited thread"));
	pri = td->td_priority;
	cpri = ctd->td_priority;
	if (panicstr != NULL || pri >= cpri || cold /* || dumping */ ||
	    TD_IS_INHIBITED(ctd))
		return (0);
#ifndef FULL_PREEMPTION
	if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE)
		return (0);
#endif

	if (ctd->td_critnest > 1) {
		CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
		    ctd->td_critnest);
		ctd->td_owepreempt = 1;
		return (0);
	}
	/*
	 * Thread is runnable but not yet put on the system run queue.
	 */
	MPASS(ctd->td_lock == td->td_lock);
	MPASS(TD_ON_RUNQ(td));
	TD_SET_RUNNING(td);
	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)", td,
	    td->td_proc->p_pid, td->td_proc->p_comm);
	SCHED_STAT_INC(switch_preempt);
	mi_switch(SW_INVOL|SW_PREEMPT, td);
	/*
	 * td's lock pointer may have changed.  We have to return with it
	 * locked.
	 */
	spinlock_enter();
	thread_unlock(ctd);
	thread_lock(td);
	spinlock_exit();
	return (1);
#else
	return (0);
#endif
}
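
/*
 * In other words, without FULL_PREEMPTION maybe_preempt() only preempts
 * when the incoming thread runs at interrupt-thread priority or better
 * (pri <= PRI_MAX_ITHD), or when the current thread is in the idle class
 * (cpri >= PRI_MIN_IDLE); two ordinary time-sharing threads never
 * preempt one another here and the new thread is simply queued.
 */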

#if 0
#ifndef PREEMPTION
/* XXX: There should be a non-static version of this. */
static void
printf_caddr_t(void *data)
{
	printf("%s", (char *)data);
}
static char preempt_warning[] =
    "WARNING: Kernel preemption is disabled, expect reduced performance.\n";
SYSINIT(preempt_warning, SI_SUB_COPYRIGHT, SI_ORDER_ANY, printf_caddr_t,
    preempt_warning);
#endif
#endif

/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests				*
 ************************************************************************/
/*
 * Initialize a run structure.
 */
void
runq_init(struct runq *rq)
{
	int i;

	bzero(rq, sizeof *rq);
	for (i = 0; i < RQ_NQS; i++)
		TAILQ_INIT(&rq->rq_queues[i]);
}

/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}

/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits; a set bit indicates a non-empty queue.
 */
static __inline int
runq_findbit(struct runq *rq)
{
	struct rqbits *rqb;
	int pri;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}

	return (-1);
}
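
/*
 * A worked example of the index arithmetic above, assuming RQB_FFS()
 * yields a zero-based bit index and RQB_BPW == 32 (so RQB_L2BPW == 5):
 * if word 1 holds its lowest set bit at position 5, then
 * pri = 5 + (1 << 5) = 37, i.e. queue 37 is the first non-empty one.
 */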

static __inline int
runq_findbit_from(struct runq *rq, u_char pri)
{
	struct rqbits *rqb;
	rqb_word_t mask;
	int i;

	/*
	 * Set the mask for the first word so we ignore priorities
	 * before 'pri'.
	 */
	mask = (rqb_word_t)-1 << (pri & (RQB_BPW - 1));
	rqb = &rq->rq_status;
again:
	for (i = RQB_WORD(pri); i < RQB_LEN; mask = -1, i++) {
		mask = rqb->rqb_bits[i] & mask;
		if (mask == 0)
			continue;
		pri = RQB_FFS(mask) + (i << RQB_L2BPW);
		CTR3(KTR_RUNQ, "runq_findbit_from: bits=%#x i=%d pri=%d",
		    mask, i, pri);
		return (pri);
	}
	if (pri == 0)
		return (-1);
	/*
	 * Wrap back around to the beginning of the list just once so we
	 * scan the whole thing.
	 */
	pri = 0;
	goto again;
}
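
/*
 * For example: starting from pri == 40 when only queue 10 is occupied,
 * the first pass masks off bits below 40 and finds nothing, so the scan
 * restarts once from queue 0 and returns 10.  The pri == 0 test prevents
 * a second wrap, so an entirely empty bitmap yields -1.
 */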

/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}

/*
 * Add the thread to the queue specified by its priority, and set the
 * corresponding status bit.
 */
void
runq_add(struct runq *rq, struct td_sched *ts, int flags)
{
	struct rqhead *rqh;
	int pri;

	pri = ts->ts_thread->td_priority / RQ_PPQ;
	ts->ts_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_add: td=%p ts=%p pri=%d %d rqh=%p",
	    ts->ts_thread, ts, ts->ts_thread->td_priority, pri, rqh);
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, ts, ts_procq);
	} else {
		TAILQ_INSERT_TAIL(rqh, ts, ts_procq);
	}
}
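
/*
 * A quick example of the priority-to-queue mapping above, assuming the
 * historical RQ_PPQ of 4: a thread at td_priority 100 lands on queue
 * 100 / 4 == 25, shared with priorities 100..103, so the run queue only
 * orders threads between queues, not within a single queue.
 */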

void
runq_add_pri(struct runq *rq, struct td_sched *ts, u_char pri, int flags)
{
	struct rqhead *rqh;

	KASSERT(pri < RQ_NQS, ("runq_add_pri: %d out of range", pri));
	ts->ts_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_add_pri: td=%p ts=%p pri=%d idx=%d rqh=%p",
	    ts->ts_thread, ts, ts->ts_thread->td_priority, pri, rqh);
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, ts, ts_procq);
	} else {
		TAILQ_INSERT_TAIL(rqh, ts, ts_procq);
	}
}

/*
 * Return true if there are runnable threads of any priority on the run
 * queue, false otherwise.  This has no side effects and does not modify
 * the run queue structure.
 */
int
runq_check(struct runq *rq)
{
	struct rqbits *rqb;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
			    rqb->rqb_bits[i], i);
			return (1);
		}
	CTR0(KTR_RUNQ, "runq_check: empty");

	return (0);
}

#if defined(SMP) && defined(SCHED_4BSD)
int runq_fuzz = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0,
    "Number of queue entries examined for last-CPU affinity");
#endif
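
/*
 * For example, "sysctl kern.sched.runq_fuzz=4" makes runq_choose() below
 * scan up to the first four entries of the winning queue for a thread
 * that last ran on the current CPU before settling for the queue head.
 */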

/*
 * Find the highest priority thread on the run queue.
 */
struct td_sched *
runq_choose(struct runq *rq)
{
	struct rqhead *rqh;
	struct td_sched *ts;
	int pri;

	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
#if defined(SMP) && defined(SCHED_4BSD)
		/* fuzz == 1 is the normal setting; 0 or less acts the same. */
		if (runq_fuzz > 1) {
			/*
			 * In the first couple of entries, check if
			 * there is one for our CPU as a preference.
			 */
			int count = runq_fuzz;
			int cpu = PCPU_GET(cpuid);
			struct td_sched *ts2;
			ts2 = ts = TAILQ_FIRST(rqh);

			while (count-- && ts2) {
				if (ts2->ts_thread->td_lastcpu == cpu) {
					ts = ts2;
					break;
				}
				ts2 = TAILQ_NEXT(ts2, ts_procq);
			}
		} else
#endif
			ts = TAILQ_FIRST(rqh);
		KASSERT(ts != NULL, ("runq_choose: no proc on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose: pri=%d td_sched=%p rqh=%p", pri, ts, rqh);
		return (ts);
	}
	CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);

	return (NULL);
}

struct td_sched *
runq_choose_from(struct runq *rq, u_char idx)
{
	struct rqhead *rqh;
	struct td_sched *ts;
	int pri;

	if ((pri = runq_findbit_from(rq, idx)) != -1) {
		rqh = &rq->rq_queues[pri];
		ts = TAILQ_FIRST(rqh);
		KASSERT(ts != NULL, ("runq_choose_from: no proc on busy queue"));
		CTR4(KTR_RUNQ,
		    "runq_choose_from: pri=%d ts=%p idx=%d rqh=%p",
		    pri, ts, ts->ts_rqindex, rqh);
		return (ts);
	}
	CTR1(KTR_RUNQ, "runq_choose_from: idleproc pri=%d", pri);

	return (NULL);
}

/*
 * Remove the thread from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * The caller must set the thread's state afterwards.
 */
void
runq_remove(struct runq *rq, struct td_sched *ts)
{

	runq_remove_idx(rq, ts, NULL);
}

void
runq_remove_idx(struct runq *rq, struct td_sched *ts, u_char *idx)
{
	struct rqhead *rqh;
	u_char pri;

	KASSERT(ts->ts_thread->td_flags & TDF_INMEM,
	    ("runq_remove_idx: thread swapped out"));
	pri = ts->ts_rqindex;
	KASSERT(pri < RQ_NQS, ("runq_remove_idx: Invalid index %d", pri));
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_remove_idx: td=%p, ts=%p pri=%d %d rqh=%p",
	    ts->ts_thread, ts, ts->ts_thread->td_priority, pri, rqh);
	{
		/*
		 * Sanity check: the thread must actually be on the queue
		 * its ts_rqindex claims.
		 */
		struct td_sched *nts;

		TAILQ_FOREACH(nts, rqh, ts_procq)
			if (nts == ts)
				break;
		if (ts != nts)
			panic("runq_remove_idx: ts %p not on rqindex %d",
			    ts, pri);
	}
	TAILQ_REMOVE(rqh, ts, ts_procq);
	if (TAILQ_EMPTY(rqh)) {
		CTR0(KTR_RUNQ, "runq_remove_idx: empty");
		runq_clrbit(rq, pri);
		if (idx != NULL && *idx == pri)
			*idx = (pri + 1) % RQ_NQS;
	}
}

/****** functions that are temporarily here ***********/
#include <vm/uma.h>

/*
 * Allocate scheduler specific per-process resources.
 * The thread and proc have already been linked in.
 *
 * Called from:
 *  proc_init() (UMA init method)
 */
void
sched_newproc(struct proc *p, struct thread *td)
{
}

/*
 * A thread is being either created or recycled.
 * Fix up the per-scheduler resources associated with it.
 * Called from:
 *  sched_fork_thread()
 *  thread_dtor()  (*may go away)
 *  thread_init()  (*may go away)
 */
void
sched_newthread(struct thread *td)
{
	struct td_sched *ts;

	ts = (struct td_sched *)(td + 1);
	bzero(ts, sizeof(*ts));
	td->td_sched = ts;
	ts->ts_thread = td;
}
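
/*
 * The (td + 1) cast above relies on the thread allocator reserving room
 * for the td_sched immediately after the thread structure in the same
 * allocation:
 *
 *	+---------------+-----------------+
 *	| struct thread | struct td_sched |
 *	+---------------+-----------------+
 *	^ td            ^ (td + 1)
 */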

#endif /* KERN_SWITCH_INCLUDE */