sys/kern/sched_prim.c
1 /*
2 * Mach Operating System
3 * Copyright (c) 1987-1993 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26 /*
27 * HISTORY
28 * $Log: sched_prim.c,v $
29 * Revision 2.24 93/05/15 18:54:09 mrt
30 * machparam.h -> machspl.h
31 *
32 * Revision 2.23 93/01/14 17:36:13 danner
33 * Added assert_wait argument casts.
34 * [93/01/12 danner]
35 *
36 * Added ANSI function prototypes.
37 * [92/12/28 dbg]
38 *
43 * 64bit cleanup. Proper spl typing.
44 * NOTE NOTE NOTE: events must be 'vm_offset_t' and don't you forget it.
45 * [92/12/01 af]
46 *
47 * Added function prototypes.
48 * [92/11/23 dbg]
49 *
50 * Fixed locking in do_thread_scan: thread lock must be taken
51 * before runq lock. Removed obsolete swap states from
52 * documentation of state machine.
53 * [92/10/29 dbg]
54 *
55 * Revision 2.22 92/08/05 18:05:25 jfriedl
56 * Added call to machine_idle in idle loop if power_save option is
57 * selected.
58 * [92/08/05 mrt]
59 *
60 * Revision 2.21 92/07/20 13:32:37 cmaeda
61 * Fast tas support: Check if current thread is in ras during
62 * thread_block.
63 * [92/05/11 14:34:42 cmaeda]
64 *
65 * Revision 2.20 92/05/21 17:15:31 jfriedl
66 * Added void to fcns that yet needed it.
67 * Added init to 'restart_needed' in do_thread_scan().
68 * [92/05/16 jfriedl]
69 *
70 * Revision 2.19 92/02/19 16:06:35 elf
71 * Added argument to compute_priority. We don't always want to do
72 * a reschedule right away.
73 * [92/01/19 rwd]
74 *
75 * Revision 2.18 91/09/04 11:28:26 jsb
76 * Add a temporary hack to thread_dispatch for i860 support:
77 * don't panic if thread->state is TH_WAIT.
78 * [91/09/04 09:24:43 jsb]
79 *
80 * Revision 2.17 91/08/24 11:59:46 af
81 * Final form of do_priority_computation was missing a pair of parentheses.
82 * [91/07/19 danner]
83 *
84 * Revision 2.16.2.1 91/08/19 13:45:35 danner
85 * Final form of do_priority_computation was missing a pair of parentheses.
86 * [91/07/19 danner]
87 *
88 * Revision 2.16 91/07/31 17:47:27 dbg
89 * When re-invoking the running thread (thread_block, thread_run):
90 * . Mark the thread interruptible.
91 * . If there is a continuation, call it instead of returning.
92 * [91/07/26 dbg]
93 *
94 * Fix timeout race.
95 * [91/05/23 dbg]
96 *
97 * Revised scheduling state machine.
98 * [91/05/22 dbg]
99 *
100 * Revision 2.15 91/05/18 14:32:58 rpd
101 * Added check_simple_locks to thread_block and thread_run.
102 * [91/05/02 rpd]
103 * Changed recompute_priorities to use a private timer.
104 * Changed thread_timeout_setup to initialize depress_timer.
105 * [91/03/31 rpd]
106 *
107 * Updated thread_invoke to check stack_privilege.
108 * [91/03/30 rpd]
109 *
110 * Revision 2.14 91/05/14 16:46:16 mrt
111 * Correcting copyright
112 *
113 * Revision 2.13 91/05/08 12:48:33 dbg
114 * Distinguish processor sets from run queues in choose_pset_thread!
115 * Remove (long dead) 'someday' code.
116 * [91/04/26 14:43:26 dbg]
117 *
118 * Revision 2.12 91/03/16 14:51:09 rpd
119 * Added idle_thread_continue, sched_thread_continue.
120 * [91/01/20 rpd]
121 *
122 * Allow swapped threads on the run queues.
123 * Added thread_invoke, thread_select.
124 * Reorganized thread_block, thread_run.
125 * Changed the AST interface; idle_thread checks for ASTs now.
126 * [91/01/17 rpd]
127 *
128 * Revision 2.11 91/02/05 17:28:57 mrt
129 * Changed to new Mach copyright
130 * [91/02/01 16:16:51 mrt]
131 *
132 * Revision 2.10 91/01/08 15:16:38 rpd
133 * Added KEEP_STACKS support.
134 * [91/01/06 rpd]
135 * Added thread_continue_calls counter.
136 * [91/01/03 22:07:43 rpd]
137 *
138 * Added continuation argument to thread_run.
139 * [90/12/11 rpd]
140 * Added continuation argument to thread_block/thread_continue.
141 * Removed FAST_CSW conditionals.
142 * [90/12/08 rpd]
143 *
144 * Removed thread_swap_tick.
145 * [90/11/11 rpd]
146 *
147 * Revision 2.9 90/09/09 14:32:36 rpd
148 * Removed do_pset_scan call from sched_thread.
149 * [90/08/30 rpd]
150 *
151 * Revision 2.8 90/08/07 22:22:54 rpd
152 * Fixed casting of a volatile comparison: the non-volatile operand should be cast as well.
153 * Removed silly set_leds() mips thingy.
154 * [90/08/07 15:56:08 af]
155 *
156 * Revision 2.7 90/08/07 17:58:55 rpd
157 * Removed sched_debug; converted set_pri, update_priority,
158 * and compute_my_priority to real functions.
159 * Picked up fix for thread_block, to check for processor set mismatch.
160 * Record last processor info on all multiprocessors.
161 * [90/08/07 rpd]
162 *
163 * Revision 2.6 90/06/02 14:55:49 rpd
164 * Updated to new scheduling technology.
165 * [90/03/26 22:16:03 rpd]
166 *
167 * Revision 2.5 90/01/11 11:43:51 dbg
168 * Check for cpu shutting down on exit from idle loop - next_thread
169 * will be THREAD_NULL in this case.
170 * [90/01/03 dbg]
171 *
172 * Make sure cpu is marked active on all exits from idle loop.
173 * [89/12/11 dbg]
174 *
175 * Removed more lint.
176 * [89/12/05 dbg]
177 *
178 * DLB's scheduling changes in thread_block don't work if partially
179 * applied; a thread can run in two places at once. Revert to old
180 * code, pending a complete merge.
181 * [89/12/04 dbg]
182 *
183 * Revision 2.4 89/11/29 14:09:11 af
184 * On Mips, delay setting of active_threads inside load_context,
185 * or we might take exceptions in an embarrassing state.
186 * [89/11/03 17:00:04 af]
187 *
188 * Long overdue fix: the pointers that the idle thread uses to check
189 * for someone to become runnable are now "volatile". This prevents
190 * smart compilers from overoptimizing (e.g. Mips).
191 *
192 * While looking for someone to run in the idle_thread(), rotate
193 * console lights on Mips to show we're alive [useful when machine
194 * becomes catatonic].
195 * [89/10/28 af]
196 *
197 * Revision 2.3 89/09/08 11:26:22 dbg
198 * Add extra thread state cases to thread_switch, since it may now
199 * be called by a thread about to block.
200 * [89/08/22 dbg]
201 *
202 * 19-Dec-88 David Golub (dbg) at Carnegie-Mellon University
203 * Changes for MACH_KERNEL:
204 * . Import timing definitions from sys/time_out.h.
205 * . Split uses of PMAP_ACTIVATE and PMAP_DEACTIVATE into
206 * separate _USER and _KERNEL macros.
207 *
208 * Revision 2.8 88/12/19 02:46:33 mwyoung
209 * Corrected include file references. Use <kern/macro_help.h>.
210 * [88/11/22 mwyoung]
211 *
212 * In thread_wakeup_with_result(), only lock threads that have the
213 * appropriate wait_event. Both the wait_event and the hash bucket
214 * links are only modified with both the thread *and* hash bucket
215 * locked, so it should be safe to read them with either locked.
216 *
217 * Documented the wait event mechanism.
218 *
219 * Summarized ancient history.
220 * [88/11/21 mwyoung]
221 *
222 * Revision 2.7 88/08/25 18:18:00 mwyoung
223 * Corrected include file references.
224 * [88/08/22 mwyoung]
225 *
226 * Avoid unsigned computation in wait_hash.
227 * [88/08/16 00:29:51 mwyoung]
228 *
229 * Add priority check to thread_check; make queue index unsigned,
230 * so that checking works correctly at all.
231 * [88/08/11 18:47:55 mwyoung]
232 *
233 * 11-Aug-88 David Black (dlb) at Carnegie-Mellon University
234 * Support ast mechanism for threads. Thread from local_runq gets
235 * minimum quantum to start.
236 *
237 * 9-Aug-88 David Black (dlb) at Carnegie-Mellon University
238 * Moved logic to detect and clear next_thread[] dispatch to
239 * idle_thread() from thread_block().
240 * Maintain first_quantum field in thread instead of runrun.
241 * Changed preempt logic in thread_setrun.
242 * Avoid context switch if current thread is still runnable and
243 * processor would go idle as a result.
244 * Added scanner to unstick stuck threads.
245 *
246 * Revision 2.6 88/08/06 18:25:03 rpd
247 * Eliminated use of kern/mach_ipc_defs.h.
248 *
249 * 10-Jul-88 David Golub (dbg) at Carnegie-Mellon University
250 * Check for negative priority (BUG) in thread_setrun.
251 *
252 * Revision 2.5 88/07/20 16:39:35 rpd
253 * Changed "NCPUS > 1" conditionals that were eliminating dead
254 * simple locking code to MACH_SLOCKS conditionals.
255 *
256 * 7-Jul-88 David Golub (dbg) at Carnegie-Mellon University
257 * Split uses of PMAP_ACTIVATE and PMAP_DEACTIVATE into separate
258 * _USER and _KERNEL macros.
259 *
260 * 15-Jun-88 Michael Young (mwyoung) at Carnegie-Mellon University
261 * Removed excessive thread_unlock() occurrences in thread_wakeup.
262 * Problem discovered and solved by Richard Draves.
263 *
264 * Historical summary:
265 *
266 * Redo priority recomputation. [dlb, 29 feb 88]
267 * New accurate timing. [dlb, 19 feb 88]
268 * Simplified choose_thread and thread_block. [dlb, 18 dec 87]
269 * Add machine-dependent hooks in idle loop. [dbg, 24 nov 87]
270 * Quantum scheduling changes. [dlb, 14 oct 87]
271 * Replaced scheduling logic with a state machine, and included
272 * timeout handling. [dbg, 05 oct 87]
273 * Deactivate kernel pmap in idle_thread. [dlb, 23 sep 87]
274 * Favor local_runq in choose_thread. [dlb, 23 sep 87]
275 * Hacks for master processor handling. [rvb, 12 sep 87]
276 * Improved idle cpu and idle threads logic. [dlb, 24 aug 87]
277 * Priority computation improvements. [dlb, 26 jun 87]
278 * Quantum-based scheduling. [avie, dlb, apr 87]
279 * Improved thread swapper. [avie, 13 mar 87]
280 * Lots of bug fixes. [dbg, mar 87]
281 * Accurate timing support. [dlb, 27 feb 87]
282 * Reductions in scheduler lock contention. [dlb, 18 feb 87]
283 * Revise thread suspension mechanism. [avie, 17 feb 87]
284 * Real thread handling [avie, 31 jan 87]
285 * Direct idle cpu dispatching. [dlb, 19 jan 87]
286 * Initial processor binding. [avie, 30 sep 86]
287 * Initial sleep/wakeup. [dbg, 12 jun 86]
288 * Created. [avie, 08 apr 86]
289 */
290 /*
291 * File: sched_prim.c
292 * Author: Avadis Tevanian, Jr.
293 * Date: 1986
294 *
295 * Scheduling primitives
296 *
297 */
298
299 #include <cpus.h>
300 #include <simple_clock.h>
301 #include <mach_fixpri.h>
302 #include <mach_host.h>
303 #include <hw_footprint.h>
304 #include <fast_tas.h>
305 #include <power_save.h>
306
307 #include <mach/machine.h>
308 #include <kern/ast.h>
309 #include <kern/counters.h>
310 #include <kern/cpu_number.h>
311 #include <kern/lock.h>
312 #include <kern/macro_help.h>
313 #include <kern/processor.h>
314 #include <kern/queue.h>
315 #include <kern/sched.h>
316 #include <kern/sched_prim.h>
317 #include <kern/syscall_subr.h>
318 #include <kern/thread.h>
319 #include <kern/thread_swap.h>
320 #include <kern/time_out.h>
321 #include <vm/pmap.h>
322 #include <vm/vm_kern.h>
323 #include <vm/vm_map.h>
324 #include <machine/machspl.h> /* For def'n of splsched() */
325
326 #if MACH_FIXPRI
327 #include <mach/policy.h>
328 #endif /* MACH_FIXPRI */
329
330
331 extern int hz;
332
333 int min_quantum; /* defines max context switch rate */
334
335 unsigned sched_tick;
336
337 #if SIMPLE_CLOCK
338 int sched_usec;
339 #endif /* SIMPLE_CLOCK */
340
341 thread_t sched_thread_id;
342
343 void recompute_priorities(void); /* forward */
344 void update_priority(thread_t);
345 void set_pri(thread_t, int, boolean_t);
346 void do_thread_scan(void);
347
348 thread_t choose_pset_thread();
349
350 timer_elt_data_t recompute_priorities_timer;
351
352 #if DEBUG
353 void checkrq(run_queue_t, char *);
354 void thread_check(thread_t, run_queue_t);
355 #endif
356
357 /*
358 * State machine
359 *
360 * states are combinations of:
361 * R running
362 * W waiting (or on wait queue)
363 * S suspended (or will suspend)
364 * N non-interruptible
365 *
366 * init action
367 * assert_wait thread_block clear_wait suspend resume
368 *
369 * R RW, RWN R; setrun - RS -
370 * RS RWS, RWNS S; wake_active - - R
371 * RN RWN RN; setrun - RNS -
372 * RNS RWNS RNS; setrun - - RN
373 *
374 * RW W R RWS -
375 * RWN WN RN RWNS -
376 * RWS WS; wake_active RS - RW
377 * RWNS WNS RNS - RWN
378 *
379 * W R; setrun WS -
380 * WN RN; setrun WNS -
381 * WNS RNS; setrun - WN
382 *
383 * S - - R
384 * WS S - W
385 *
386 */
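
/*
 * Example reading of the table above (exposition, not in the original
 * source): an interruptible sleep/wakeup cycle moves
 *
 *	R --assert_wait--> RW --thread_block--> W --clear_wait--> R (setrun)
 *
 * while a suspend racing with the same sleep goes
 *
 *	RW --suspend--> RWS --thread_block--> WS (wake_active) --resume--> W
 */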
387
388 /*
389 * Waiting protocols and implementation:
390 *
391 * Each thread may be waiting for exactly one event; this event
392 * is set using assert_wait(). That thread may be awakened either
393 * by performing a thread_wakeup_prim() on its event,
394 * or by directly waking that thread up with clear_wait().
395 *
396 * The implementation of wait events uses a hash table. Each
397 * bucket is a queue of threads having the same hash function
398 * value; the chain for the queue (linked list) is the run queue
399 * field. [It is not possible to be waiting and runnable at the
400 * same time.]
401 *
402 * Locks on both the thread and on the hash buckets govern the
403 * wait event field and the queue chain field. Because wakeup
404 * operations only have the event as an argument, the event hash
405 * bucket must be locked before any thread.
406 *
407 * Scheduling operations may also occur at interrupt level; therefore,
408 * interrupts below splsched() must be prevented when holding
409 * thread or hash bucket locks.
410 *
411 * The wait event hash table declarations are as follows:
412 */
413
414 #define NUMQUEUES 59
415
416 queue_head_t wait_queue[NUMQUEUES];
417 decl_simple_lock_data(, wait_lock[NUMQUEUES])
418
419 /* NOTE: we want a small positive integer out of this */
420 #define wait_hash(event) \
421 ((((int)(event) < 0) ? ~(int)(event) : (int)(event)) % NUMQUEUES)
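
/*
 * Worked example (exposition, assuming 32-bit int): an event is a
 * kernel address, so the cast to int can go negative.  For event
 * 0xc0001234, (int)event < 0, and the complement ~0xc0001234 =
 * 0x3fffedcb (1073737163) gives 1073737163 % 59 = bucket 57.  The
 * complement is taken because the sign of % with a negative operand
 * was implementation-defined in pre-ANSI C.
 */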
422
423 void wait_queue_init(void)
424 {
425 register int i;
426
427 for (i = 0; i < NUMQUEUES; i++) {
428 queue_init(&wait_queue[i]);
429 simple_lock_init(&wait_lock[i]);
430 }
431 }
432
433 void sched_init(void)
434 {
435 recompute_priorities_timer.fcn = (int (*)())recompute_priorities;
436 recompute_priorities_timer.param = (char *)0;
437
438 min_quantum = hz / 10; /* context switch 10 times/second */
439 wait_queue_init();
440 pset_sys_bootstrap(); /* initialize processor mgmt. */
441 queue_init(&action_queue);
442 simple_lock_init(&action_lock);
443 sched_tick = 0;
444 #if SIMPLE_CLOCK
445 sched_usec = 0;
446 #endif /* SIMPLE_CLOCK */
447 ast_init();
448 }
449
450 /*
451 * Thread timeout routine, called when timer expires.
452 * Called at splsoftclock.
453 */
454 void thread_timeout(
455 thread_t thread)
456 {
457 assert(thread->timer.set == TELT_UNSET);
458
459 clear_wait(thread, THREAD_TIMED_OUT, FALSE);
460 }
461
462 /*
463 * thread_set_timeout:
464 *
465 * Set a timer for the current thread, if the thread
466 * is ready to wait. Must be called between assert_wait()
467 * and thread_block().
468 */
469
470 void thread_set_timeout(
471 int t) /* timeout interval in ticks */
472 {
473 register thread_t thread = current_thread();
474 register spl_t s;
475
476 s = splsched();
477 thread_lock(thread);
478 if ((thread->state & TH_WAIT) != 0) {
479 set_timeout(&thread->timer, t);
480 }
481 thread_unlock(thread);
482 splx(s);
483 }
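
/*
 * Typical use, a sketch (the object and its lock are hypothetical,
 * not from this source).  The timer must be set after assert_wait()
 * and before thread_block(), as noted above:
 *
 *	assert_wait((event_t) &object->done, TRUE);
 *	simple_unlock(&object->lock);
 *	thread_set_timeout(5 * hz);		(at most five seconds)
 *	thread_block((void (*)()) 0);
 *	if (current_thread()->wait_result == THREAD_TIMED_OUT)
 *		handle the timeout;
 */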
484
485 /*
486 * Set up thread timeout element when thread is created.
487 */
488 void thread_timeout_setup(
489 register thread_t thread)
490 {
491 thread->timer.fcn = (int (*)())thread_timeout;
492 thread->timer.param = (char *)thread;
493 thread->depress_timer.fcn = (int (*)())thread_depress_timeout;
494 thread->depress_timer.param = (char *)thread;
495 }
496
497 /*
498 * assert_wait:
499 *
500 * Assert that the current thread is about to go to
501 * sleep until the specified event occurs.
502 */
503 void assert_wait(
504 event_t event,
505 boolean_t interruptible)
506 {
507 register queue_t q;
508 register int index;
509 register thread_t thread;
510 #if MACH_SLOCKS
511 register simple_lock_t lock;
512 #endif /* MACH_SLOCKS */
513 spl_t s;
514
515 thread = current_thread();
516 if (thread->wait_event != 0) {
517 panic("assert_wait: already asserted event %#x\n",
518 thread->wait_event);
519 }
520 s = splsched();
521 if (event != 0) {
522 index = wait_hash(event);
523 q = &wait_queue[index];
524 #if MACH_SLOCKS
525 lock = &wait_lock[index];
526 #endif /* MACH_SLOCKS */
527 simple_lock(lock);
528 thread_lock(thread);
529 enqueue_tail(q, (queue_entry_t) thread);
530 thread->wait_event = event;
531 if (interruptible)
532 thread->state |= TH_WAIT;
533 else
534 thread->state |= TH_WAIT | TH_UNINT;
535 thread_unlock(thread);
536 simple_unlock(lock);
537 }
538 else {
539 thread_lock(thread);
540 if (interruptible)
541 thread->state |= TH_WAIT;
542 else
543 thread->state |= TH_WAIT | TH_UNINT;
544 thread_unlock(thread);
545 }
546 splx(s);
547 }
548
549 /*
550 * clear_wait:
551 *
552 * Clear the wait condition for the specified thread. Start the thread
553 * executing if that is appropriate.
554 *
555 * parameters:
556 * thread thread to awaken
557 * result Wakeup result the thread should see
558 * interrupt_only Don't wake up the thread if it isn't
559 * interruptible.
560 */
561 void clear_wait(
562 register thread_t thread,
563 int result,
564 boolean_t interrupt_only)
565 {
566 register int index;
567 register queue_t q;
568 #if MACH_SLOCKS
569 register simple_lock_t lock;
570 #endif /* MACH_SLOCKS */
571 register event_t event;
572 spl_t s;
573
574 s = splsched();
575 thread_lock(thread);
576 if (interrupt_only && (thread->state & TH_UNINT)) {
577 /*
578 * can`t interrupt thread
579 */
580 thread_unlock(thread);
581 splx(s);
582 return;
583 }
584
585 event = thread->wait_event;
586 if (event != 0) {
587 thread_unlock(thread);
588 index = wait_hash(event);
589 q = &wait_queue[index];
590 #if MACH_SLOCKS
591 lock = &wait_lock[index];
592 #endif /* MACH_SLOCKS */
593 simple_lock(lock);
594 /*
595 * If the thread is still waiting on that event,
596 * then remove it from the list. If it is waiting
597 * on a different event, or no event at all, then
598 * someone else did our job for us.
599 */
600 thread_lock(thread);
601 if (thread->wait_event == event) {
602 remqueue(q, (queue_entry_t)thread);
603 thread->wait_event = 0;
604 event = 0; /* cause to run below */
605 }
606 simple_unlock(lock);
607 }
608 if (event == 0) {
609 register int state = thread->state;
610
611 reset_timeout_check(&thread->timer);
612
613 switch (state & TH_SCHED_STATE) {
614 case TH_WAIT | TH_SUSP | TH_UNINT:
615 case TH_WAIT | TH_UNINT:
616 case TH_WAIT:
617 /*
618 * Sleeping and not suspendable - put
619 * on run queue.
620 */
621 thread->state = (state &~ TH_WAIT) | TH_RUN;
622 thread->wait_result = result;
623 thread_setrun(thread, TRUE);
624 break;
625
626 case TH_WAIT | TH_SUSP:
627 case TH_RUN | TH_WAIT:
628 case TH_RUN | TH_WAIT | TH_SUSP:
629 case TH_RUN | TH_WAIT | TH_UNINT:
630 case TH_RUN | TH_WAIT | TH_SUSP | TH_UNINT:
631 /*
632 * Either already running, or suspended.
633 */
634 thread->state = state &~ TH_WAIT;
635 thread->wait_result = result;
636 break;
637
638 default:
639 /*
640 * Not waiting.
641 */
642 break;
643 }
644 }
645 thread_unlock(thread);
646 splx(s);
647 }
648
649 /*
650 * thread_wakeup_prim:
651 *
652 * Common routine for thread_wakeup, thread_wakeup_with_result,
653 * and thread_wakeup_one.
654 *
655 */
656 void thread_wakeup_prim(
657 event_t event,
658 boolean_t one_thread,
659 int result)
660 {
661 register queue_t q;
662 register int index;
663 register thread_t thread, next_th;
664 #if MACH_SLOCKS
665 register simple_lock_t lock;
666 #endif /* MACH_SLOCKS */
667 spl_t s;
668 register int state;
669
670 index = wait_hash(event);
671 q = &wait_queue[index];
672 s = splsched();
673 #if MACH_SLOCKS
674 lock = &wait_lock[index];
675 #endif /* MACH_SLOCKS */
676 simple_lock(lock);
677 thread = (thread_t) queue_first(q);
678 while (!queue_end(q, (queue_entry_t)thread)) {
679 next_th = (thread_t) queue_next((queue_t) thread);
680
681 if (thread->wait_event == event) {
682 thread_lock(thread);
683 remqueue(q, (queue_entry_t) thread);
684 thread->wait_event = 0;
685 reset_timeout_check(&thread->timer);
686
687 state = thread->state;
688 switch (state & TH_SCHED_STATE) {
689
690 case TH_WAIT | TH_SUSP | TH_UNINT:
691 case TH_WAIT | TH_UNINT:
692 case TH_WAIT:
693 /*
694 * Sleeping and not suspendable - put
695 * on run queue.
696 */
697 thread->state = (state &~ TH_WAIT) | TH_RUN;
698 thread->wait_result = result;
699 thread_setrun(thread, TRUE);
700 break;
701
702 case TH_WAIT | TH_SUSP:
703 case TH_RUN | TH_WAIT:
704 case TH_RUN | TH_WAIT | TH_SUSP:
705 case TH_RUN | TH_WAIT | TH_UNINT:
706 case TH_RUN | TH_WAIT | TH_SUSP | TH_UNINT:
707 /*
708 * Either already running, or suspended.
709 */
710 thread->state = state &~ TH_WAIT;
711 thread->wait_result = result;
712 break;
713
714 default:
715 panic("thread_wakeup");
716 break;
717 }
718 thread_unlock(thread);
719 if (one_thread)
720 break;
721 }
722 thread = next_th;
723 }
724 simple_unlock(lock);
725 splx(s);
726 }
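
/*
 * The wrappers named in the comment above are, in this kernel, thin
 * macros over this primitive (see kern/sched_prim.h):
 *
 *	thread_wakeup(e)		== thread_wakeup_prim(e, FALSE, THREAD_AWAKENED)
 *	thread_wakeup_with_result(e,r)	== thread_wakeup_prim(e, FALSE, r)
 *	thread_wakeup_one(e)		== thread_wakeup_prim(e, TRUE, THREAD_AWAKENED)
 */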
727
728 /*
729 * thread_sleep:
730 *
731 * Cause the current thread to wait until the specified event
732 * occurs. The specified lock is unlocked before releasing
733 * the cpu. (This is a convenient way to sleep without manually
734 * calling assert_wait).
735 */
736 void thread_sleep(
737 event_t event,
738 simple_lock_t lock,
739 boolean_t interruptible)
740 {
741 assert_wait(event, interruptible); /* assert event */
742 simple_unlock(lock); /* release the lock */
743 thread_block((void (*)()) 0); /* block ourselves */
744 }
745
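/*
 * Usage sketch (the counter and its lock are hypothetical): since
 * thread_sleep() returns with the lock released, the caller re-locks
 * and re-tests, because another thread may consume the event first:
 *
 *	simple_lock(&counter_lock);
 *	while (counter == 0) {
 *		thread_sleep((event_t) &counter, &counter_lock, TRUE);
 *		simple_lock(&counter_lock);
 *	}
 *	counter--;
 *	simple_unlock(&counter_lock);
 */
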
746 /*
747 * thread_bind:
748 *
749 * Force a thread to execute on the specified processor.
750 * If the thread is currently executing, it may wait until its
751 * time slice is up before switching onto the specified processor.
752 *
753 * A processor of PROCESSOR_NULL causes the thread to be unbound.
754 * xxx - DO NOT export this to users.
755 */
756 void thread_bind(
757 register thread_t thread,
758 processor_t processor)
759 {
760 spl_t s;
761
762 s = splsched();
763 thread_lock(thread);
764 thread->bound_processor = processor;
765 thread_unlock(thread);
766 (void) splx(s);
767 }
768
769 /*
770 * Select a thread for this processor (the current processor) to run.
771 * May select the current thread.
772 * Assumes splsched.
773 */
774
775 thread_t thread_select(
776 register processor_t myprocessor)
777 {
778 register thread_t thread;
779
780 myprocessor->first_quantum = TRUE;
781 /*
782 * Check for obvious simple case; local runq is
783 * empty and global runq has entry at hint.
784 */
785 if (myprocessor->runq.count > 0) {
786 thread = choose_thread(myprocessor);
787 myprocessor->quantum = min_quantum;
788 }
789 else {
790 register processor_set_t pset;
791
792 #if MACH_HOST
793 pset = myprocessor->processor_set;
794 #else /* MACH_HOST */
795 pset = &default_pset;
796 #endif /* MACH_HOST */
797 simple_lock(&pset->runq.lock);
798 #if DEBUG
799 checkrq(&pset->runq, "thread_select");
800 #endif /* DEBUG */
801 if (pset->runq.count == 0) {
802 /*
803 * Nothing else runnable. Return if this
804 * thread is still runnable on this processor.
805 * Check for priority update if required.
806 */
807 thread = current_thread();
808 if ((thread->state == TH_RUN) &&
809 #if MACH_HOST
810 (thread->processor_set == pset) &&
811 #endif /* MACH_HOST */
812 ((thread->bound_processor == PROCESSOR_NULL) ||
813 (thread->bound_processor == myprocessor))) {
814
815 simple_unlock(&pset->runq.lock);
816 thread_lock(thread);
817 if (thread->sched_stamp != sched_tick)
818 update_priority(thread);
819 thread_unlock(thread);
820 }
821 else {
822 thread = choose_pset_thread(myprocessor, pset);
823 }
824 }
825 else {
826 register queue_t q;
827
828 /*
829 * If there is a thread at hint, grab it,
830 * else call choose_pset_thread.
831 */
832 q = pset->runq.runq + pset->runq.low;
833
834 if (queue_empty(q)) {
835 pset->runq.low++;
836 thread = choose_pset_thread(myprocessor, pset);
837 }
838 else {
839 thread = (thread_t) dequeue_head(q);
840 thread->runq = RUN_QUEUE_NULL;
841 pset->runq.count--;
842 #if MACH_FIXPRI
843 /*
844 * Cannot lazy evaluate pset->runq.low for
845 * fixed priority policy
846 */
847 if ((pset->runq.count > 0) &&
848 (pset->policies & POLICY_FIXEDPRI)) {
849 while (queue_empty(q)) {
850 pset->runq.low++;
851 q++;
852 }
853 }
854 #endif /* MACH_FIXPRI */
855 #if DEBUG
856 checkrq(&pset->runq, "thread_select: after");
857 #endif /* DEBUG */
858 simple_unlock(&pset->runq.lock);
859 }
860 }
861
862 #if MACH_FIXPRI
863 if (thread->policy == POLICY_TIMESHARE) {
864 #endif /* MACH_FIXPRI */
865 myprocessor->quantum = pset->set_quantum;
866 #if MACH_FIXPRI
867 }
868 else {
869 /*
870 * POLICY_FIXEDPRI
871 */
872 myprocessor->quantum = thread->sched_data;
873 }
874 #endif /* MACH_FIXPRI */
875 }
876
877 return thread;
878 }
879
880 /*
881 * Stop running the current thread and start running the new thread.
882 * If continuation is non-zero, and the current thread is blocked,
883 * then it will resume by executing continuation on a new stack.
884 * Returns TRUE if the hand-off succeeds.
885 * Assumes splsched.
886 */
887
888 boolean_t thread_invoke(
889 register thread_t old_thread,
890 continuation_t continuation,
891 register thread_t new_thread)
892 {
893 /*
894 * Check for invoking the same thread.
895 */
896 if (old_thread == new_thread) {
897 /*
898 * Mark thread interruptible.
899 * Run continuation if there is one.
900 */
901 thread_lock(new_thread);
902 new_thread->state &= ~TH_UNINT;
903 thread_unlock(new_thread);
904
905 if (continuation != (void (*)()) 0) {
906 (void) spl0();
907 call_continuation(continuation);
908 /*NOTREACHED*/
909 }
910 return TRUE;
911 }
912
913 /*
914 * Check for stack-handoff.
915 */
916 thread_lock(new_thread);
917 if ((old_thread->stack_privilege != current_stack()) &&
918 (continuation != (void (*)()) 0))
919 {
920 switch (new_thread->state & TH_SWAP_STATE) {
921 case TH_SWAPPED:
922
923 new_thread->state &= ~(TH_SWAPPED | TH_UNINT);
924 thread_unlock(new_thread);
925
926 #if NCPUS > 1
927 new_thread->last_processor = current_processor();
928 #endif /* NCPUS > 1 */
929
930 /*
931 * Set up ast context of new thread and
932 * switch to its timer.
933 */
934 ast_context(new_thread, cpu_number());
935 timer_switch(&new_thread->system_timer);
936
937 stack_handoff(old_thread, new_thread);
938
939 /*
940 * We can dispatch the old thread now.
941 * This is like thread_dispatch, except
942 * that the old thread is left swapped
943 * *without* freeing its stack.
944 * This path is also much more frequent
945 * than actual calls to thread_dispatch.
946 */
947
948 thread_lock(old_thread);
949 old_thread->swap_func = continuation;
950
951 switch (old_thread->state) {
952 case TH_RUN | TH_SUSP:
953 case TH_RUN | TH_SUSP | TH_HALTED:
954 case TH_RUN | TH_WAIT | TH_SUSP:
955 /*
956 * Suspend the thread
957 */
958 old_thread->state = (old_thread->state & ~TH_RUN)
959 | TH_SWAPPED;
960 if (old_thread->wake_active) {
961 old_thread->wake_active = FALSE;
962 thread_unlock(old_thread);
963 thread_wakeup((event_t)&old_thread->wake_active);
964
965 goto after_old_thread;
966 }
967 break;
968
969 case TH_RUN | TH_SUSP | TH_UNINT:
970 case TH_RUN | TH_UNINT:
971 case TH_RUN:
972 /*
973 * We can`t suspend the thread yet,
974 * or it`s still running.
975 * Put back on a run queue.
976 */
977 old_thread->state |= TH_SWAPPED;
978 thread_setrun(old_thread, FALSE);
979 break;
980
981 case TH_RUN | TH_WAIT | TH_SUSP | TH_UNINT:
982 case TH_RUN | TH_WAIT | TH_UNINT:
983 case TH_RUN | TH_WAIT:
984 /*
985 * Waiting, and not suspendable.
986 */
987 old_thread->state = (old_thread->state & ~TH_RUN)
988 | TH_SWAPPED;
989 break;
990
991 case TH_RUN | TH_IDLE:
992 /*
993 * Drop idle thread -- it is already in
994 * idle_thread_array.
995 */
996 old_thread->state = TH_RUN | TH_IDLE | TH_SWAPPED;
997 break;
998
999 default:
1000 panic("thread_invoke");
1001 }
1002 thread_unlock(old_thread);
1003 after_old_thread:
1004
1005 /*
1006 * call_continuation calls the continuation
1007 * after resetting the current stack pointer
1008 * to recover stack space. If we called
1009 * the continuation directly, we would risk
1010 * running out of stack.
1011 */
1012
1013 counter_always(c_thread_invoke_hits++);
1014 (void) spl0();
1015 call_continuation(new_thread->swap_func);
1016 /*NOTREACHED*/
1017 return TRUE; /* help for the compiler */
1018
1019 case TH_SW_COMING_IN:
1020 /*
1021 * Waiting for a stack
1022 */
1023 thread_swapin(new_thread);
1024 thread_unlock(new_thread);
1025 counter_always(c_thread_invoke_misses++);
1026 return FALSE;
1027
1028 case 0:
1029 /*
1030 * Already has a stack - can`t handoff.
1031 */
1032 break;
1033 }
1034 }
1035
1036 else {
1037 /*
1038 * Check that the thread is swapped-in.
1039 */
1040 if (new_thread->state & TH_SWAPPED) {
1041 if ((new_thread->state & TH_SW_COMING_IN) ||
1042 !stack_alloc_try(new_thread, thread_continue))
1043 {
1044 thread_swapin(new_thread);
1045 thread_unlock(new_thread);
1046 counter_always(c_thread_invoke_misses++);
1047 return FALSE;
1048 }
1049 }
1050 }
1051
1052 new_thread->state &= ~(TH_SWAPPED | TH_UNINT);
1053 thread_unlock(new_thread);
1054
1055 /*
1056 * Thread is now interruptible.
1057 */
1058 #if NCPUS > 1
1059 new_thread->last_processor = current_processor();
1060 #endif /* NCPUS > 1 */
1061
1062 /*
1063 * Set up ast context of new thread and switch to its timer.
1064 */
1065 ast_context(new_thread, cpu_number());
1066 timer_switch(&new_thread->system_timer);
1067
1068 /*
1069 * switch_context is machine-dependent. It does the
1070 * machine-dependent components of a context-switch, like
1071 * changing address spaces. It updates active_threads.
1072 * It returns only if a continuation is not supplied.
1073 */
1074 counter_always(c_thread_invoke_csw++);
1075 old_thread = switch_context(old_thread, continuation, new_thread);
1076
1077 /*
1078 * We're back. Now old_thread is the thread that resumed
1079 * us, and we have to dispatch it.
1080 */
1081 thread_dispatch(old_thread);
1082
1083 return TRUE;
1084 }
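
/*
 * Summary of the paths above (exposition): a call to thread_invoke
 * either (1) hands the current stack directly to new_thread and does
 * not return (counted by c_thread_invoke_hits), (2) returns FALSE
 * because new_thread is swapped out and cannot cheaply get a stack,
 * so the caller must select another thread (c_thread_invoke_misses),
 * or (3) does a full switch_context and returns TRUE when the
 * original thread is next resumed (c_thread_invoke_csw).
 */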
1085
1086 /*
1087 * thread_continue:
1088 *
1089 * Called when the current thread is given a new stack.
1090 * Called at splsched.
1091 */
1092 void thread_continue(
1093 register thread_t old_thread)
1094 {
1095 register continuation_t continuation = current_thread()->swap_func;
1096
1097 /*
1098 * We must dispatch the old thread and then
1099 * call the current thread's continuation.
1100 * There might not be an old thread, if we are
1101 * the first thread to run on this processor.
1102 */
1103
1104 if (old_thread != THREAD_NULL)
1105 thread_dispatch(old_thread);
1106 (void) spl0();
1107 (*continuation)();
1108 /*NOTREACHED*/
1109 }
1110
1111
1112 /*
1113 * thread_block:
1114 *
1115 * Block the current thread. If the thread is runnable
1116 * then someone must have woken it up between its request
1117 * to sleep and now. In this case, it goes back on a
1118 * run queue.
1119 *
1120 * If a continuation is specified, then thread_block will
1121 * attempt to discard the thread's kernel stack. When the
1122 * thread resumes, it will execute the continuation function
1123 * on a new kernel stack.
1124 */
1125
1126 void thread_block(
1127 continuation_t continuation)
1128 {
1129 register thread_t thread = current_thread();
1130 register processor_t myprocessor = cpu_to_processor(cpu_number());
1131 register thread_t new_thread;
1132 spl_t s;
1133
1134 check_simple_locks();
1135
1136 s = splsched();
1137
1138 #if FAST_TAS
1139 {
1140 extern void recover_ras();
1141
1142 if (csw_needed(thread, myprocessor))
1143 recover_ras(thread);
1144 }
1145 #endif /* FAST_TAS */
1146
1147 ast_off(cpu_number(), AST_BLOCK);
1148
1149 do
1150 new_thread = thread_select(myprocessor);
1151 while (!thread_invoke(thread, continuation, new_thread));
1152
1153 splx(s);
1154 }
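
/*
 * Continuation sketch (the names are hypothetical): the kernel stack
 * is discarded, so no locals survive the block; anything needed after
 * resume must live in the thread or in global state.
 *
 *	void request_continue()
 *	{
 *		control resumes here, on a fresh kernel stack
 *	}
 *
 *	assert_wait((event_t) &request_queue, TRUE);
 *	thread_block(request_continue);
 *	NOTREACHED - the blocked path never returns here
 */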
1155
1156 /*
1157 * thread_run:
1158 *
1159 * Switch directly from the current thread to a specified
1160 * thread. Both the current and new threads must be
1161 * runnable.
1162 *
1163 * If a continuation is specified, then thread_run will
1164 * attempt to discard the current thread's kernel stack. When the
1165 * thread resumes, it will execute the continuation function
1166 * on a new kernel stack.
1167 */
1168 void thread_run(
1169 continuation_t continuation,
1170 register thread_t new_thread)
1171 {
1172 register thread_t thread = current_thread();
1173 register processor_t myprocessor = cpu_to_processor(cpu_number());
1174 spl_t s;
1175
1176 check_simple_locks();
1177
1178 s = splsched();
1179
1180 while (!thread_invoke(thread, continuation, new_thread))
1181 new_thread = thread_select(myprocessor);
1182
1183 splx(s);
1184 }
1185
1186 /*
1187 * Dispatches a running thread that is not on a runq.
1188 * Called at splsched.
1189 */
1190
1191 void thread_dispatch(
1192 register thread_t thread)
1193 {
1194 /*
1195 * If we are discarding the thread's stack, we must do it
1196 * before the thread has a chance to run.
1197 */
1198
1199 thread_lock(thread);
1200
1201 if (thread->swap_func != (void (*)()) 0) {
1202 assert((thread->state & TH_SWAP_STATE) == 0);
1203 thread->state |= TH_SWAPPED;
1204 stack_free(thread);
1205 }
1206
1207 switch (thread->state &~ TH_SWAP_STATE) {
1208 case TH_RUN | TH_SUSP:
1209 case TH_RUN | TH_SUSP | TH_HALTED:
1210 case TH_RUN | TH_WAIT | TH_SUSP:
1211 /*
1212 * Suspend the thread
1213 */
1214 thread->state &= ~TH_RUN;
1215 if (thread->wake_active) {
1216 thread->wake_active = FALSE;
1217 thread_unlock(thread);
1218 thread_wakeup((event_t)&thread->wake_active);
1219 return;
1220 }
1221 break;
1222
1223 case TH_RUN | TH_SUSP | TH_UNINT:
1224 case TH_RUN | TH_UNINT:
1225 case TH_RUN:
1226 /*
1227 * No reason to stop. Put back on a run queue.
1228 */
1229 thread_setrun(thread, FALSE);
1230 break;
1231
1232 case TH_RUN | TH_WAIT | TH_SUSP | TH_UNINT:
1233 case TH_RUN | TH_WAIT | TH_UNINT:
1234 case TH_RUN | TH_WAIT:
1235 /*
1236 * Waiting, and not suspendable.
1237 */
1238 thread->state &= ~TH_RUN;
1239 break;
1240
1241 case TH_RUN | TH_IDLE:
1242 /*
1243 * Drop idle thread -- it is already in
1244 * idle_thread_array.
1245 */
1246 break;
1247
1248 default:
1249 panic("thread_dispatch");
1250 }
1251 thread_unlock(thread);
1252 }
1253
1254
1255 /*
1256 * Define shifts for simulating (5/8)**n
1257 */
1258
1259 shift_data_t wait_shift[32] = {
1260 {1,1},{1,3},{1,-3},{2,-7},{3,5},{3,-5},{4,-8},{5,7},
1261 {5,-7},{6,-10},{7,10},{7,-9},{8,-11},{9,12},{9,-11},{10,-13},
1262 {11,14},{11,-13},{12,-15},{13,17},{13,-15},{14,-17},{15,19},{16,18},
1263 {16,-19},{17,22},{18,20},{18,-20},{19,26},{20,22},{20,-22},{21,-27}};
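
/*
 * Spot check of the approximation (exposition): for n = 1, the pair
 * {1,3} gives x>>1 + x>>3 = 0.625x = (5/8)x exactly.  For n = 2, the
 * pair {1,-3} (a negative shift2 selects the subtracting branch in
 * update_priority) gives x>>1 - x>>3 = 0.375x, close to (5/8)^2 =
 * 0.390625.
 */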
1264
1265 /*
1266 * do_priority_computation:
1267 *
1268 * Calculate new priority for thread based on its base priority plus
1269 * accumulated usage. PRI_SHIFT and PRI_SHIFT_2 convert from
1270 * usage to priorities. SCHED_SHIFT converts for the scaling
1271 * of the sched_usage field by SCHED_SCALE. This scaling comes
1272 * from the multiplication by sched_load (thread_timer_delta)
1273 * in sched.h. sched_load is calculated as a scaled overload
1274 * factor in compute_mach_factor (mach_factor.c).
1275 */
1276
1277 #ifdef PRI_SHIFT_2
1278 #if PRI_SHIFT_2 > 0
1279 #define do_priority_computation(th, pri) \
1280 MACRO_BEGIN \
1281 (pri) = (th)->priority /* start with base priority */ \
1282 + ((th)->sched_usage >> (PRI_SHIFT + SCHED_SHIFT)) \
1283 + ((th)->sched_usage >> (PRI_SHIFT_2 + SCHED_SHIFT)); \
1284 if ((pri) > 31) (pri) = 31; \
1285 MACRO_END
1286 #else /* PRI_SHIFT_2 */
1287 #define do_priority_computation(th, pri) \
1288 MACRO_BEGIN \
1289 (pri) = (th)->priority /* start with base priority */ \
1290 + ((th)->sched_usage >> (PRI_SHIFT + SCHED_SHIFT)) \
1291 - ((th)->sched_usage >> (SCHED_SHIFT - PRI_SHIFT_2)); \
1292 if ((pri) > 31) (pri) = 31; \
1293 MACRO_END
1294 #endif /* PRI_SHIFT_2 */
1295 #else /* defined(PRI_SHIFT_2) */
1296 #define do_priority_computation(th, pri) \
1297 MACRO_BEGIN \
1298 (pri) = (th)->priority /* start with base priority */ \
1299 + ((th)->sched_usage >> (PRI_SHIFT + SCHED_SHIFT)); \
1300 if ((pri) > 31) (pri) = 31; \
1301 MACRO_END
1302 #endif /* defined(PRI_SHIFT_2) */
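
/*
 * Numeric example (illustrative; the shift constants are machine
 * configuration values): with PRI_SHIFT + SCHED_SHIFT = 18 and no
 * PRI_SHIFT_2, a thread with base priority 12 and sched_usage 5 << 18
 * gets pri = 12 + 5 = 17.  Larger numbers are less important, so
 * accumulated usage pushes a thread down toward the clamp at 31.
 */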
1303
1304 /*
1305 * compute_priority:
1306 *
1307 * Compute the effective priority of the specified thread.
1308 * The effective priority computation is as follows:
1309 *
1310 * Take the base priority for this thread and add
1311 * to it an increment derived from its cpu_usage.
1312 *
1313 * The thread *must* be locked by the caller.
1314 */
1315
1316 void compute_priority(
1317 register thread_t thread,
1318 boolean_t resched)
1319 {
1320 register int pri;
1321
1322 #if MACH_FIXPRI
1323 if (thread->policy == POLICY_TIMESHARE) {
1324 #endif /* MACH_FIXPRI */
1325 do_priority_computation(thread, pri);
1326 if (thread->depress_priority < 0)
1327 set_pri(thread, pri, resched);
1328 else
1329 thread->depress_priority = pri;
1330 #if MACH_FIXPRI
1331 }
1332 else {
1333 set_pri(thread, thread->priority, resched);
1334 }
1335 #endif /* MACH_FIXPRI */
1336 }
1337
1338 /*
1339 * compute_my_priority:
1340 *
1341 * Version of compute priority for current thread or thread
1342 * being manipulated by scheduler (going on or off a runq).
1343 * Only used for priority updates. Policy or priority changes
1344 * must call compute_priority above. Caller must have thread
1345 * locked and know it is timesharing and not depressed.
1346 */
1347
1348 void compute_my_priority(
1349 register thread_t thread)
1350 {
1351 register int temp_pri;
1352
1353 do_priority_computation(thread,temp_pri);
1354 thread->sched_pri = temp_pri;
1355 }
1356
1357 /*
1358 * recompute_priorities:
1359 *
1360 * Update the priorities of all threads periodically.
1361 */
1362 void recompute_priorities(void)
1363 {
1364 #if SIMPLE_CLOCK
1365 int new_usec;
1366 #endif /* SIMPLE_CLOCK */
1367
1368 sched_tick++; /* age usage one more time */
1369 set_timeout(&recompute_priorities_timer, hz);
1370 #if SIMPLE_CLOCK
1371 /*
1372 * Compensate for clock drift. sched_usec is an
1373 * exponential average of the number of microseconds in
1374 * a second. It decays in the same fashion as cpu_usage.
1375 */
1376 new_usec = sched_usec_elapsed();
1377 sched_usec = (5*sched_usec + 3*new_usec)/8;
1378 #endif /* SIMPLE_CLOCK */
1379 /*
1380 * Wakeup scheduler thread.
1381 */
1382 if (sched_thread_id != THREAD_NULL) {
1383 clear_wait(sched_thread_id, THREAD_AWAKENED, FALSE);
1384 }
1385 }
1386
1387 /*
1388 * update_priority
1389 *
1390 * Cause the priority computation of a thread that has been
1391 * sleeping or suspended to "catch up" with the system. Thread
1392 * *MUST* be locked by caller. If thread is running, then this
1393 * can only be called by the thread on itself.
1394 */
1395 void update_priority(
1396 register thread_t thread)
1397 {
1398 register unsigned int ticks;
1399 register shift_t shiftp;
1400 register int temp_pri;
1401
1402 ticks = sched_tick - thread->sched_stamp;
1403
1404 assert(ticks != 0);
1405
1406 /*
1407 * If asleep for more than 30 seconds forget all
1408 * cpu_usage, else catch up on missed aging.
1409 * 5/8 ** n is approximated by the two shifts
1410 * in the wait_shift array.
1411 */
1412 thread->sched_stamp += ticks;
1413 thread_timer_delta(thread);
1414 if (ticks > 30) {
1415 thread->cpu_usage = 0;
1416 thread->sched_usage = 0;
1417 }
1418 else {
1419 thread->cpu_usage += thread->cpu_delta;
1420 thread->sched_usage += thread->sched_delta;
1421 shiftp = &wait_shift[ticks];
1422 if (shiftp->shift2 > 0) {
1423 thread->cpu_usage =
1424 (thread->cpu_usage >> shiftp->shift1) +
1425 (thread->cpu_usage >> shiftp->shift2);
1426 thread->sched_usage =
1427 (thread->sched_usage >> shiftp->shift1) +
1428 (thread->sched_usage >> shiftp->shift2);
1429 }
1430 else {
1431 thread->cpu_usage =
1432 (thread->cpu_usage >> shiftp->shift1) -
1433 (thread->cpu_usage >> -(shiftp->shift2));
1434 thread->sched_usage =
1435 (thread->sched_usage >> shiftp->shift1) -
1436 (thread->sched_usage >> -(shiftp->shift2));
1437 }
1438 }
1439 thread->cpu_delta = 0;
1440 thread->sched_delta = 0;
1441 /*
1442 * Recompute priority if appropriate.
1443 */
1444 if (
1445 #if MACH_FIXPRI
1446 (thread->policy == POLICY_TIMESHARE) &&
1447 #endif /* MACH_FIXPRI */
1448 (thread->depress_priority < 0)) {
1449 do_priority_computation(thread, temp_pri);
1450 thread->sched_pri = temp_pri;
1451 }
1452 }
1453
1454 /*
1455 * run_queue_enqueue macro for thread_setrun().
1456 */
1457 #if DEBUG
1458 #define run_queue_enqueue(rq, th) \
1459 MACRO_BEGIN \
1460 register unsigned int whichq; \
1461 \
1462 whichq = (th)->sched_pri; \
1463 if (whichq >= NRQS) { \
1464 printf("thread_setrun: pri too high (%d)\n", (th)->sched_pri); \
1465 whichq = NRQS - 1; \
1466 } \
1467 \
1468 simple_lock(&(rq)->lock); /* lock the run queue */ \
1469 checkrq((rq), "thread_setrun: before adding thread"); \
1470 enqueue_tail(&(rq)->runq[whichq], (queue_entry_t) (th)); \
1471 \
1472 if (whichq < (rq)->low || (rq)->count == 0) \
1473 (rq)->low = whichq; /* minimize */ \
1474 \
1475 (rq)->count++; \
1476 (th)->runq = (rq); \
1477 thread_check((th), (rq)); \
1478 checkrq((rq), "thread_setrun: after adding thread"); \
1479 simple_unlock(&(rq)->lock); \
1480 MACRO_END
1481 #else /* DEBUG */
1482 #define run_queue_enqueue(rq, th) \
1483 MACRO_BEGIN \
1484 register unsigned int whichq; \
1485 \
1486 whichq = (th)->sched_pri; \
1487 if (whichq >= NRQS) { \
1488 printf("thread_setrun: pri too high (%d)\n", (th)->sched_pri); \
1489 whichq = NRQS - 1; \
1490 } \
1491 \
1492 simple_lock(&(rq)->lock); /* lock the run queue */ \
1493 enqueue_tail(&(rq)->runq[whichq], (queue_entry_t) (th)); \
1494 \
1495 if (whichq < (rq)->low || (rq)->count == 0) \
1496 (rq)->low = whichq; /* minimize */ \
1497 \
1498 (rq)->count++; \
1499 (th)->runq = (rq); \
1500 simple_unlock(&(rq)->lock); \
1501 MACRO_END
1502 #endif /* DEBUG */
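
/*
 * Note on the low hint (exposition): runq[0] holds the most important
 * threads and runq[NRQS-1] the least, so low tracks the lowest-indexed
 * (highest-priority) queue that may be non-empty.  Timesharing lets it
 * go stale and advances it lazily in choose_thread/choose_pset_thread;
 * under POLICY_FIXEDPRI it must be kept exact, as the MACH_FIXPRI code
 * in those routines does.
 */
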
1503 /*
1504 * thread_setrun:
1505 *
1506 * Make thread runnable; dispatch directly onto an idle processor
1507 * if possible. Else put on appropriate run queue (processor
1508 * if bound, else processor set). Caller must have lock on thread.
1509 * This is always called at splsched.
1510 */
1511
1512 void thread_setrun(
1513 register thread_t th,
1514 boolean_t may_preempt)
1515 {
1516 register processor_t processor;
1517 register run_queue_t rq;
1518 #if NCPUS > 1
1519 register processor_set_t pset;
1520 #endif /* NCPUS > 1 */
1521
1522 /*
1523 * Update priority if needed.
1524 */
1525 if (th->sched_stamp != sched_tick) {
1526 update_priority(th);
1527 }
1528
1529 assert(th->runq == RUN_QUEUE_NULL);
1530
1531 #if NCPUS > 1
1532 /*
1533 * Try to dispatch the thread directly onto an idle processor.
1534 */
1535 if ((processor = th->bound_processor) == PROCESSOR_NULL) {
1536 /*
1537 * Not bound, any processor in the processor set is ok.
1538 */
1539 pset = th->processor_set;
1540 #if HW_FOOTPRINT
1541 /*
1542 * But first check the last processor it ran on.
1543 */
1544 processor = th->last_processor;
1545 if (processor->state == PROCESSOR_IDLE) {
1546 simple_lock(&processor->lock);
1547 simple_lock(&pset->idle_lock);
1548 if ((processor->state == PROCESSOR_IDLE)
1549 #if MACH_HOST
1550 && (processor->processor_set == pset)
1551 #endif /* MACH_HOST */
1552 ) {
1553 queue_remove(&pset->idle_queue, processor,
1554 processor_t, processor_queue);
1555 pset->idle_count--;
1556 processor->next_thread = th;
1557 processor->state = PROCESSOR_DISPATCHING;
1558 simple_unlock(&pset->idle_lock);
1559 simple_unlock(&processor->lock);
1560 return;
1561 }
1562 simple_unlock(&pset->idle_lock);
1563 simple_unlock(&processor->lock);
1564 }
1565 #endif /* HW_FOOTPRINT */
1566
1567 if (pset->idle_count > 0) {
1568 simple_lock(&pset->idle_lock);
1569 if (pset->idle_count > 0) {
1570 processor = (processor_t) queue_first(&pset->idle_queue);
1571 queue_remove(&(pset->idle_queue), processor, processor_t,
1572 processor_queue);
1573 pset->idle_count--;
1574 processor->next_thread = th;
1575 processor->state = PROCESSOR_DISPATCHING;
1576 simple_unlock(&pset->idle_lock);
1577 return;
1578 }
1579 simple_unlock(&pset->idle_lock);
1580 }
1581 rq = &(pset->runq);
1582 run_queue_enqueue(rq,th);
1583 /*
1584 * Preempt check
1585 */
1586 if (may_preempt &&
1587 #if MACH_HOST
1588 (pset == current_processor()->processor_set) &&
1589 #endif /* MACH_HOST */
1590 (current_thread()->sched_pri > th->sched_pri)) {
1591 /*
1592 * Turn off first_quantum to allow csw.
1593 */
1594 current_processor()->first_quantum = FALSE;
1595 ast_on(cpu_number(), AST_BLOCK);
1596 }
1597 }
1598 else {
1599 /*
1600 * Bound, can only run on bound processor. Have to lock
1601 * processor here because it may not be the current one.
1602 */
1603 if (processor->state == PROCESSOR_IDLE) {
1604 simple_lock(&processor->lock);
1605 pset = processor->processor_set;
1606 simple_lock(&pset->idle_lock);
1607 if (processor->state == PROCESSOR_IDLE) {
1608 queue_remove(&pset->idle_queue, processor,
1609 processor_t, processor_queue);
1610 pset->idle_count--;
1611 processor->next_thread = th;
1612 processor->state = PROCESSOR_DISPATCHING;
1613 simple_unlock(&pset->idle_lock);
1614 simple_unlock(&processor->lock);
1615 return;
1616 }
1617 simple_unlock(&pset->idle_lock);
1618 simple_unlock(&processor->lock);
1619 }
1620 rq = &(processor->runq);
1621 run_queue_enqueue(rq,th);
1622
1623 /*
1624 * Cause ast on processor if processor is on line.
1625 *
1626 * XXX Don't do this remotely to master because this will
1627 * XXX send an interprocessor interrupt, and that's too
1628 * XXX expensive for all the unparallelized U*x code.
1629 */
1630 if (processor == current_processor()) {
1631 ast_on(cpu_number(), AST_BLOCK);
1632 }
1633 else if ((processor != master_processor) &&
1634 (processor->state != PROCESSOR_OFF_LINE)) {
1635 cause_ast_check(processor);
1636 }
1637 }
1638 #else /* NCPUS > 1 */
1639 /*
1640 * XXX should replace queue with a boolean in this case.
1641 */
1642 if (default_pset.idle_count > 0) {
1643 processor = (processor_t) queue_first(&default_pset.idle_queue);
1644 queue_remove(&default_pset.idle_queue, processor,
1645 processor_t, processor_queue);
1646 default_pset.idle_count--;
1647 processor->next_thread = th;
1648 processor->state = PROCESSOR_DISPATCHING;
1649 return;
1650 }
1651 if (th->bound_processor == PROCESSOR_NULL) {
1652 rq = &(default_pset.runq);
1653 }
1654 else {
1655 rq = &(master_processor->runq);
1656 ast_on(cpu_number(), AST_BLOCK);
1657 }
1658 run_queue_enqueue(rq,th);
1659
1660 /*
1661 * Preempt check
1662 */
1663 if (may_preempt && (current_thread()->sched_pri > th->sched_pri)) {
1664 /*
1665 * Turn off first_quantum to allow context switch.
1666 */
1667 current_processor()->first_quantum = FALSE;
1668 ast_on(cpu_number(), AST_BLOCK);
1669 }
1670 #endif /* NCPUS > 1 */
1671 }
1672
1673 /*
1674 * set_pri:
1675 *
1676 * Set the priority of the specified thread to the specified
1677 * priority. This may cause the thread to change queues.
1678 *
1679 * The thread *must* be locked by the caller.
1680 */
1681
1682 void set_pri(
1683 thread_t th,
1684 int pri,
1685 boolean_t resched)
1686 {
1687 register struct run_queue *rq;
1688
1689 rq = rem_runq(th);
1690 th->sched_pri = pri;
1691 if (rq != RUN_QUEUE_NULL) {
1692 if (resched)
1693 thread_setrun(th, TRUE);
1694 else
1695 run_queue_enqueue(rq, th);
1696 }
1697 }
1698
1699 /*
1700 * rem_runq:
1701 *
1702 * Remove a thread from its run queue.
1703 * The run queue that the process was on is returned
1704 * (or RUN_QUEUE_NULL if not on a run queue). Thread *must* be locked
1705 * before calling this routine. Unusual locking protocol on runq
1706 * field in thread structure makes this code interesting; see thread.h.
1707 */
1708
1709 struct run_queue *rem_runq(
1710 thread_t th)
1711 {
1712 register struct run_queue *rq;
1713
1714 rq = th->runq;
1715 /*
1716 * If rq is RUN_QUEUE_NULL, the thread will stay out of the
1717 * run_queues because the caller locked the thread. Otherwise
1718 * the thread is on a runq, but could leave.
1719 */
1720 if (rq != RUN_QUEUE_NULL) {
1721 simple_lock(&rq->lock);
1722 #if DEBUG
1723 checkrq(rq, "rem_runq: at entry");
1724 #endif /* DEBUG */
1725 if (rq == th->runq) {
1726 /*
1727 * Thread is in a runq and we have a lock on
1728 * that runq.
1729 */
1730 #if DEBUG
1731 checkrq(rq, "rem_runq: before removing thread");
1732 thread_check(th, rq);
1733 #endif /* DEBUG */
1734 remqueue(&rq->runq[0], (queue_entry_t) th);
1735 rq->count--;
1736 #if DEBUG
1737 checkrq(rq, "rem_runq: after removing thread");
1738 #endif /* DEBUG */
1739 th->runq = RUN_QUEUE_NULL;
1740 simple_unlock(&rq->lock);
1741 }
1742 else {
1743 /*
1744 * The thread left the runq before we could
1745 * lock the runq. It is not on a runq now, and
1746 * can't move again because this routine's
1747 * caller locked the thread.
1748 */
1749 simple_unlock(&rq->lock);
1750 rq = RUN_QUEUE_NULL;
1751 }
1752 }
1753
1754 return rq;
1755 }
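
/*
 * The unusual protocol as a two-processor sketch (exposition):
 *
 *	cpu A: rem_runq			cpu B: choose_thread
 *	rq = th->runq;
 *					dequeues th, th->runq = NULL
 *	simple_lock(&rq->lock);
 *	rq != th->runq			so return RUN_QUEUE_NULL
 *
 * The thread cannot re-enter a run queue in the window because the
 * caller holds the thread lock, which thread_setrun requires.
 */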
1756
1757
1758 /*
1759 * choose_thread:
1760 *
1761 * Choose a thread to execute. The thread chosen is removed
1762 * from its run queue. Note that this requires only that the runq
1763 * lock be held.
1764 *
1765 * Strategy:
1766 * Check processor runq first; if anything found, run it.
1767 * Else check pset runq; if nothing found, return idle thread.
1768 *
1769 * Second line of strategy is implemented by choose_pset_thread.
1770 * This is only called on processor startup and when thread_block
1771 * thinks there's something in the processor runq.
1772 */
1773
1774 thread_t choose_thread(
1775 processor_t myprocessor)
1776 {
1777 thread_t th;
1778 register queue_t q;
1779 register run_queue_t runq;
1780 register int i;
1781 register processor_set_t pset;
1782
1783 runq = &myprocessor->runq;
1784
1785 simple_lock(&runq->lock);
1786 if (runq->count > 0) {
1787 q = runq->runq + runq->low;
1788 for (i = runq->low; i < NRQS ; i++, q++) {
1789 if (!queue_empty(q)) {
1790 th = (thread_t) dequeue_head(q);
1791 th->runq = RUN_QUEUE_NULL;
1792 runq->count--;
1793 runq->low = i;
1794 simple_unlock(&runq->lock);
1795 return th;
1796 }
1797 }
1798 panic("choose_thread");
1799 /*NOTREACHED*/
1800 }
1801 simple_unlock(&runq->lock);
1802
1803 pset = myprocessor->processor_set;
1804
1805 simple_lock(&pset->runq.lock);
1806 return choose_pset_thread(myprocessor,pset);
1807 }
1808
1809 /*
1810 * choose_pset_thread: choose a thread from processor_set runq or
1811 * set processor idle and choose its idle thread.
1812 *
1813 * Caller must be at splsched and have a lock on the runq. This
1814 * lock is released by this routine. myprocessor is always the current
1815 * processor, and pset must be its processor set.
1816 * This routine chooses and removes a thread from the runq if there
1817 * is one (and returns it), else it sets the processor idle and
1818 * returns its idle thread.
1819 */
1820
1821 thread_t choose_pset_thread(
1822 register processor_t myprocessor,
1823 processor_set_t pset)
1824 {
1825 register run_queue_t runq;
1826 register thread_t th;
1827 register queue_t q;
1828 register int i;
1829
1830 runq = &pset->runq;
1831
1832 if (runq->count > 0) {
1833 q = runq->runq + runq->low;
1834 for (i = runq->low; i < NRQS ; i++, q++) {
1835 if (!queue_empty(q)) {
1836 th = (thread_t) dequeue_head(q);
1837 th->runq = RUN_QUEUE_NULL;
1838 runq->count--;
1839 /*
1840 * For POLICY_FIXEDPRI, runq->low must be
1841 * accurate!
1842 */
1843 #if MACH_FIXPRI
1844 if ((runq->count > 0) &&
1845 (pset->policies & POLICY_FIXEDPRI)) {
1846 while (queue_empty(q)) {
1847 q++;
1848 i++;
1849 }
1850 }
1851 #endif /* MACH_FIXPRI */
1852 runq->low = i;
1853 #if DEBUG
1854 checkrq(runq, "choose_pset_thread");
1855 #endif /* DEBUG */
1856 simple_unlock(&runq->lock);
1857 return th;
1858 }
1859 }
1860 panic("choose_pset_thread");
1861 /*NOTREACHED*/
1862 }
1863 simple_unlock(&runq->lock);
1864
1865 /*
1866 * Nothing is runnable, so set this processor idle if it
1867 * was running. If it was in an assignment or shutdown,
1868 * leave it alone. Return its idle thread.
1869 */
1870 simple_lock(&pset->idle_lock);
1871 if (myprocessor->state == PROCESSOR_RUNNING) {
1872 myprocessor->state = PROCESSOR_IDLE;
1873 /*
1874 * XXX Until it goes away, put master on end of queue, others
1875 * XXX on front so master gets used last.
1876 */
1877 if (myprocessor == master_processor) {
1878 queue_enter(&(pset->idle_queue), myprocessor,
1879 processor_t, processor_queue);
1880 }
1881 else {
1882 queue_enter_first(&(pset->idle_queue), myprocessor,
1883 processor_t, processor_queue);
1884 }
1885
1886 pset->idle_count++;
1887 }
1888 simple_unlock(&pset->idle_lock);
1889
1890 return myprocessor->idle_thread;
1891 }
1892
1893 /*
1894 * no_dispatch_count counts number of times processors go non-idle
1895 * without being dispatched. This should be very rare.
1896 */
1897 int no_dispatch_count = 0;
1898
1899 /*
1900 * This is the idle thread, which just looks for other threads
1901 * to execute.
1902 */
1903
1904 void idle_thread_continue(void)
1905 {
1906 register processor_t myprocessor;
1907 register volatile thread_t *threadp;
1908 register volatile int *gcount;
1909 register volatile int *lcount;
1910 register thread_t new_thread;
1911 register int state;
1912 int mycpu;
1913 spl_t s;
1914
1915 mycpu = cpu_number();
1916 myprocessor = current_processor();
1917 threadp = (volatile thread_t *) &myprocessor->next_thread;
1918 lcount = (volatile int *) &myprocessor->runq.count;
1919
1920 while (TRUE) {
1921 #ifdef MARK_CPU_IDLE
1922 MARK_CPU_IDLE(mycpu);
1923 #endif /* MARK_CPU_IDLE */
1924
1925 #if MACH_HOST
1926 gcount = (volatile int *)
1927 &myprocessor->processor_set->runq.count;
1928 #else /* MACH_HOST */
1929 gcount = (volatile int *) &default_pset.runq.count;
1930 #endif /* MACH_HOST */
1931
1932 /*
1933 * This cpu will be dispatched (by thread_setrun) by setting next_thread
1934 * to the value of the thread to run next. Also check runq counts.
1935 */
1936 while ((*threadp == (volatile thread_t)THREAD_NULL) &&
1937 (*gcount == 0) && (*lcount == 0)) {
1938
1939 /* check for ASTs while we wait */
1940
1941 if (need_ast[mycpu] &~ AST_SCHEDULING) {
1942 (void) splsched();
1943 /* don't allow scheduling ASTs */
1944 need_ast[mycpu] &= ~AST_SCHEDULING;
1945 ast_taken();
1946 /* back at spl0 */
1947 }
1948
1949 /*
1950 * machine_idle is a machine dependent function,
1951 * to conserve power.
1952 */
1953 #if POWER_SAVE
1954 machine_idle(mycpu);
1955 #endif /* POWER_SAVE */
1956 }
1957
1958 #ifdef MARK_CPU_ACTIVE
1959 MARK_CPU_ACTIVE(mycpu);
1960 #endif /* MARK_CPU_ACTIVE */
1961
1962 s = splsched();
1963
1964 /*
1965 * This is not a switch statement to avoid the
1966 * bounds checking code in the common case.
1967 */
1968 retry:
1969 state = myprocessor->state;
1970 if (state == PROCESSOR_DISPATCHING) {
1971 /*
1972 * Common case -- cpu dispatched.
1973 */
1974 new_thread = (thread_t) *threadp;
1975 *threadp = (volatile thread_t) THREAD_NULL;
1976 myprocessor->state = PROCESSOR_RUNNING;
1977 /*
1978 * set up quantum for new thread.
1979 */
1980 #if MACH_FIXPRI
1981 if (new_thread->policy == POLICY_TIMESHARE) {
1982 #endif /* MACH_FIXPRI */
1983 /*
1984 * Just use set quantum. No point in
1985 * checking for shorter local runq quantum;
1986 * csw_needed will handle correctly.
1987 */
1988 #if MACH_HOST
1989 myprocessor->quantum = new_thread->
1990 processor_set->set_quantum;
1991 #else /* MACH_HOST */
1992 myprocessor->quantum =
1993 default_pset.set_quantum;
1994 #endif /* MACH_HOST */
1995
1996 #if MACH_FIXPRI
1997 }
1998 else {
1999 /*
2000 * POLICY_FIXEDPRI
2001 */
2002 myprocessor->quantum = new_thread->sched_data;
2003 }
2004 #endif /* MACH_FIXPRI */
2005 myprocessor->first_quantum = TRUE;
2006 counter(c_idle_thread_handoff++);
2007 thread_run(idle_thread_continue, new_thread);
2008 }
2009 else if (state == PROCESSOR_IDLE) {
2010 register processor_set_t pset;
2011
2012 pset = myprocessor->processor_set;
2013 simple_lock(&pset->idle_lock);
2014 if (myprocessor->state != PROCESSOR_IDLE) {
2015 /*
2016 * Something happened, try again.
2017 */
2018 simple_unlock(&pset->idle_lock);
2019 goto retry;
2020 }
2021 /*
2022 * Processor was not dispatched (Rare).
2023 * Set it running again.
2024 */
2025 no_dispatch_count++;
2026 pset->idle_count--;
2027 queue_remove(&pset->idle_queue, myprocessor,
2028 processor_t, processor_queue);
2029 myprocessor->state = PROCESSOR_RUNNING;
2030 simple_unlock(&pset->idle_lock);
2031 counter(c_idle_thread_block++);
2032 thread_block(idle_thread_continue);
2033 }
2034 else if ((state == PROCESSOR_ASSIGN) ||
2035 (state == PROCESSOR_SHUTDOWN)) {
2036 /*
2037 * Changing processor sets, or going off-line.
2038 * Release next_thread if there is one. Actual
2039 * thread to run is on a runq.
2040 */
2041 if ((new_thread = (thread_t) *threadp) != THREAD_NULL) {
2042 *threadp = (volatile thread_t) THREAD_NULL;
2043 thread_setrun(new_thread, FALSE);
2044 }
2045
2046 counter(c_idle_thread_block++);
2047 thread_block(idle_thread_continue);
2048 }
2049 else {
2050 printf("Bad processor state %d (cpu %d)\n",
2051 cpu_state(mycpu), mycpu);
2052 panic("idle_thread");
2053 }
2054
2055 (void) splx(s);
2056 }
2057 }
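/*
 * A minimal sketch of the dispatching side of the handshake that the
 * idle loop above polls on, following the conventions of this file.
 * This is NOT the actual thread_setrun(): the real routine also
 * handles runq insertion and preemption, and its locking may differ.
 */
#if 0	/* illustrative only */
void
dispatch_idle_processor_sketch(
	processor_set_t	pset,
	thread_t	thread)
{
	processor_t	myprocessor;

	simple_lock(&pset->idle_lock);
	if (pset->idle_count > 0) {
		pset->idle_count--;
		myprocessor = (processor_t) queue_first(&pset->idle_queue);
		queue_remove(&pset->idle_queue, myprocessor,
			     processor_t, processor_queue);
		/*
		 * Publish the thread before the state change; the idle
		 * loop exits its wait when next_thread becomes non-null,
		 * then acts on seeing PROCESSOR_DISPATCHING.
		 */
		myprocessor->next_thread = thread;
		myprocessor->state = PROCESSOR_DISPATCHING;
	}
	simple_unlock(&pset->idle_lock);
}
#endif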
2058
2059 void idle_thread(void)
2060 {
2061 register thread_t self = current_thread();
2062 spl_t s;
2063
2064 stack_privilege(self);
2065
2066 s = splsched();
2067 self->priority = 31;	/* lowest priority: runs only when nothing else is runnable */
2068 self->sched_pri = 31;
2069
2070 /*
2071 * Set the idle flag to indicate that this is an idle thread,
2072 * enter ourselves in the idle array, and thread_block() to get
2073 * out of the run queues (and set the processor idle when we
2074 * run next time).
2075 */
2076 thread_lock(self);
2077 self->state |= TH_IDLE;
2078 thread_unlock(self);
2079 current_processor()->idle_thread = self;
2080 (void) splx(s);
2081
2082 counter(c_idle_thread_block++);
2083 thread_block(idle_thread_continue);
2084 idle_thread_continue();
2085 /*NOTREACHED*/
2086 }
2087
2088 /*
2089 * sched_thread: scheduler thread.
2090 *
2091 * This thread handles the scheduler's periodic calculations
2092 * that cannot be done at interrupt level because they may
2093 * need to block.
2094 */
2095 void sched_thread_continue(void)
2096 {
2097 while (TRUE) {
2098 (void) compute_mach_factor();
2099
2100 /*
2101 * Check for stuck threads. This can't be done off of
2102 * the callout queue because it requires operations that
2103 * can't be used from interrupt level.
2104 */
2105 if (sched_tick & 1)	/* only on alternate ticks */
2106 do_thread_scan();
2107
2108 assert_wait((event_t) 0, FALSE);
2109 counter(c_sched_thread_block++);
2110 thread_block(sched_thread_continue);
2111 }
2112 }
2113
2114 void sched_thread(void)
2115 {
2116 sched_thread_id = current_thread();
2117
2118 /*
2119 * Sleep on event 0, recompute_priorities() will awaken
2120 * us by calling clear_wait().
2121 */
2122 assert_wait((event_t) 0, FALSE);
2123 counter(c_sched_thread_block++);
2124 thread_block(sched_thread_continue);
2125 sched_thread_continue();
2126 /*NOTREACHED*/
2127 }
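/*
 * A sketch of the wakeup side referred to in the comment above: the
 * clock-driven recompute_priorities() (defined elsewhere) wakes the
 * scheduler thread with clear_wait(). This is a simplification, not
 * the actual routine, which also recomputes priorities and loading.
 */
#if 0	/* illustrative only */
void
recompute_priorities_sketch(void)
{
	sched_tick++;			/* advances about once per second */
	if (sched_thread_id != THREAD_NULL)
		clear_wait(sched_thread_id, THREAD_AWAKENED, FALSE);
}
#endif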
2128
2129 #define MAX_STUCK_THREADS 16
2130
2131 /*
2132 * do_thread_scan: scan for stuck threads. A thread is stuck if
2133 * it is runnable but its priority is so low that it has not
2134 * run for several seconds. Its priority should be higher, but
2135 * won't be until it runs and calls update_priority. The scanner
2136 * finds these threads and does the updates.
2137 *
2138 * Scanner runs in two passes. Pass one squirrels likely
2139 * thread ids away in an array, and removes them from the run queue.
2140 * Pass two does the priority updates. This is necessary because
2141 * the run queue lock is required for the candidate scan, but
2142 * cannot be held during updates [set_pri will deadlock].
2143 *
2144 * The array should be long enough that a restart is never
2145 * necessary, but restart logic is included. Only the pset runqs
2146 * and the master processor's local runq are scanned.
2147 */
2148
2149 boolean_t do_thread_scan_debug = FALSE;
2150
2151 thread_t stuck_threads[MAX_STUCK_THREADS];
2152 int stuck_count = 0;
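/*
 * stuck_threads/stuck_count are intentionally unlocked; this appears
 * safe only because do_thread_scan runs solely in the scheduler
 * thread, so at most one scan is ever in progress.
 */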
2153
2154 /*
2155 * do_runq_scan is the guts of pass 1. It scans a runq for
2156 * stuck threads. A boolean is returned indicating whether
2157 * it ran out of space.
2158 */
2159
2160 boolean_t
2161 do_runq_scan(
2162 run_queue_t runq)
2163 {
2164 register spl_t s;
2165 register queue_t q;
2166 register thread_t thread;
2167 register int count;
2168
2169 s = splsched();
2170 simple_lock(&runq->lock);
2171 if ((count = runq->count) > 0) {
2172 q = runq->runq + runq->low;
2173 while (count > 0) {
2174 thread = (thread_t) queue_first(q);
2175 while (!queue_end(q, (queue_entry_t) thread)) {
2176 /*
2177 * Get the next thread now, since we may
2178 * remove this thread from the run queue.
2179 */
2180 thread_t next = (thread_t) queue_next(&thread->links);
2181
2182 if ((thread->state & TH_SCHED_STATE) == TH_RUN &&
2183 sched_tick - thread->sched_stamp > 1) {
2184 /*
2185 * Stuck, save its id for later.
2186 */
2187 if (stuck_count == MAX_STUCK_THREADS) {
2188 /*
2189 * !@#$% No more room.
2190 */
2191 simple_unlock(&runq->lock);
2192 splx(s);
2193 return TRUE;
2194 }
2195 /*
2196 * We can't take the thread_lock here,
2197 * since we already have the runq lock.
2198 * So we can't grab a reference to the
2199 * thread. However, a thread that is
2200 * in RUN state cannot be deallocated
2201 * until it stops running. If it isn't
2202 * on the runq, then thread_halt cannot
2203 * see it. So we remove the thread
2204 * from the runq to make it safe.
2205 */
2206 remqueue(q, (queue_entry_t) thread);
2207 runq->count--;
2208 thread->runq = RUN_QUEUE_NULL;
2209
2210 stuck_threads[stuck_count++] = thread;
2211 if (do_thread_scan_debug)
2212 printf("do_runq_scan: adding thread %#x\n", thread);
2213 }
2214 count--;
2215 thread = next;
2216 }
2217 q++;
2218 }
2219 }
2220 simple_unlock(&runq->lock);
2221 splx(s);
2222
2223 return FALSE;
2224 }
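/*
 * The staleness test above, spelled out: sched_tick advances about
 * once per second, so a runnable thread whose sched_stamp trails by
 * more than one tick has sat on a run queue for over a second
 * without running. For example:
 *
 *	sched_tick = 105, thread->sched_stamp = 103
 *	105 - 103 = 2 > 1  =>  saved in stuck_threads for pass 2
 */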
2225
2226 void do_thread_scan(void)
2227 {
2228 register spl_t s;
2229 register boolean_t restart_needed = FALSE;
2230 register thread_t thread;
2231 #if MACH_HOST
2232 register processor_set_t pset;
2233 #endif /* MACH_HOST */
2234
2235 do {
2236 #if MACH_HOST
2237 simple_lock(&all_psets_lock);
2238 queue_iterate(&all_psets, pset, processor_set_t, all_psets) {
2239 if ((restart_needed = do_runq_scan(&pset->runq)))
2240 break;
2241 }
2242 simple_unlock(&all_psets_lock);
2243 #else /* MACH_HOST */
2244 restart_needed = do_runq_scan(&default_pset.runq);
2245 #endif /* MACH_HOST */
2246 if (!restart_needed)
2247 restart_needed = do_runq_scan(&master_processor->runq);
2248
2249 /*
2250 * Ok, we now have a collection of candidates -- fix them.
2251 */
2252
2253 while (stuck_count > 0) {
2254 thread = stuck_threads[--stuck_count];
2255 stuck_threads[stuck_count] = THREAD_NULL;
2256 s = splsched();
2257 thread_lock(thread);
2258 if ((thread->state & TH_SCHED_STATE) == TH_RUN) {
2259 /*
2260 * Do the priority update. Call
2261 * thread_setrun because thread is
2262 * off the run queues.
2263 */
2264 update_priority(thread);
2265 thread_setrun(thread, TRUE);
2266 }
2267 thread_unlock(thread);
2268 splx(s);
2269 }
2270 } while (restart_needed);
2271 }
2272
2273 #if DEBUG
2274 void checkrq(
2275 run_queue_t rq,
2276 char *msg)
2277 {
2278 register queue_t q1;
2279 register int i, j;
2280 register queue_entry_t e;
2281 register int low;
2282
2283 low = -1;
2284 j = 0;
2285 q1 = rq->runq;
2286 for (i = 0; i < NRQS; i++) {
2287 if (q1->next == q1) {
2288 if (q1->prev != q1)
2289 panic("checkrq: empty at %s", msg);
2290 }
2291 else {
2292 if (low == -1)
2293 low = i;
2294
2295 for (e = q1->next; e != q1; e = e->next) {
2296 j++;
2297 if (e->next->prev != e)
2298 panic("checkrq-2 at %s", msg);
2299 if (e->prev->next != e)
2300 panic("checkrq-3 at %s", msg);
2301 }
2302 }
2303 q1++;
2304 }
2305 if (j != rq->count)
2306 panic("checkrq: count wrong at %s", msg);
2307 if (rq->count != 0 && low < rq->low)
2308 panic("checkrq: low wrong at %s", msg);
2309 }
2310
2311 void thread_check(
2312 register thread_t th,
2313 register run_queue_t rq)
2314 {
2315 register unsigned int whichq;
2316
2317 whichq = th->sched_pri;
2318 if (whichq >= NRQS) {
2319 printf("thread_check: priority too high\n");
2320 whichq = NRQS-1;
2321 }
2322 if ((th->links.next == &rq->runq[whichq]) &&
2323 (rq->runq[whichq].prev != (queue_entry_t)th))
2324 panic("thread_check");	/* th is last in runq[whichq], but header doesn't point back */
2325 }
2326 #endif /* DEBUG */
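/*
 * A hypothetical use of the DEBUG checks above: bracket a run-queue
 * manipulation with checkrq(), and verify an individual thread with
 * thread_check() after insertion. The enqueue shown assumes the
 * queue primitives used elsewhere in this file.
 */
#if 0	/* illustrative only */
	s = splsched();
	simple_lock(&rq->lock);
	checkrq(rq, "before enqueue");
	enqueue_tail(&rq->runq[th->sched_pri], (queue_entry_t) th);
	th->runq = rq;
	rq->count++;
	if (rq->low > th->sched_pri)
		rq->low = th->sched_pri;	/* keep low consistent, or checkrq panics */
	thread_check(th, rq);
	checkrq(rq, "after enqueue");
	simple_unlock(&rq->lock);
	splx(s);
#endif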