FreeBSD/Linux Kernel Cross Reference
sys/kern/run_queues.c
1 /*
2 * Mach Operating System
3 * Copyright (c) 1993-1987 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26 /*
27 * HISTORY
28 * $Log: run_queues.c,v $
29 * Revision 2.2 93/11/17 17:20:25 dbg
30 * Fixed csw_needed to check per-processor run queue for the
31 * current thread's scheduling class on MP. Made thread_bind
32 * temporarily bind thread to the BP (bound-thread) policy when
33 * binding to a particular CPU (the same way that depressing a
34 * thread binds it to the background policy). NOTE that this won't
35 * work if a bound thread can be depressed; but on a symmetrical
36 * MP, we only bind the action thread. Fixing this (under
37 * MACH_IO_BINDING) is left as an exercise for the reader.
38 * [93/09/01 dbg]
39 *
40 * Use runq pointers in processor if NCPUS > 1 - processor_shutdown
41 * does not depend on MACH_HOST.
42 * [93/07/21 dbg]
43 *
44 * Break up thread lock.
45 * [93/05/26 dbg]
46 *
47 * machparam.h -> machspl.h
48 * [93/05/21 dbg]
49 *
50 * Changed policy to policy_index in thread.
51 * [93/05/05 dbg]
52 *
53 * Moved common code from thread_setrun and rem_runq here.
54 * [93/04/10 dbg]
55 *
56 * Processor set now has one run queue structure per scheduling
57 * policy. The actual run queues are policy-specific.
58 * [93/04/06 dbg]
59 *
60 * Always enable fixed-priority threads.
61 * [93/03/27 dbg]
62 *
63 * Moved routines here from kern/sched_prim.c.
64 * [93/01/12 dbg]
65 *
66 */
67
68 /*
69 * Run queue routines.
70 *
71 * Contains routines to move threads on and off run queues.
72 * The major routine to add a thread to a run queue, thread_setrun,
73 * is specific to the thread's policy.
74 *
75 * If there is no thread to run, the idle thread runs instead.
76 *
77 * The runq routines also maintain the processor's quantum,
78 * which is only used by the timesharing scheduling policy.
79 * It is ignored by all of the others, so setting it is harmless.
80 */
81
82 #include <cpus.h>
83 #include <mach_host.h>
84 #include <mach_io_binding.h>
85
86 #include <kern/assert.h>
87 #include <kern/counters.h>
88 #include <kern/run_queues.h>
89 #include <kern/processor.h>
90 #include <kern/thread.h>
91 #include <kern/sched.h> /* sched_tick */
92 #include <kern/sched_policy.h>
93 #include <kern/sched_prim.h>
94 #include <kern/stack.h>
95 #include <kern/thread_swap.h>
96 #include <machine/machspl.h>
97
98 #if NCPUS > 1 && !MACH_IO_BINDING
99 #include <sched_policy/bp.h>
100 #endif
101
102 thread_t choose_idle_thread(
103 register processor_t myprocessor,
104 processor_set_t pset); /* forward */
105
106 /*
107 * Initialize the run queue header structure.
108 */
109 void run_queue_head_init(
110 run_queue_head_t runq)
111 {
112 int i;
113
114 simple_lock_init(&runq->lock);
115 runq->count = 0;
116 for (i = 0; i < NUM_POLICIES; i++)
117 runq->runqs[i] = RUN_QUEUE_NULL;
118 runq->last = 0;
119 }
120
121 void run_queue_head_dealloc(
122 run_queue_head_t runq)
123 {
124 int i;
125 run_queue_t rq;
126
127 for (i = runq->last; i >= 0; i--) {
128 rq = runq->runqs[i];
129 RUNQ_FREE(rq);
130 }
131 }
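
/*
 * Editorial sketch (not part of the original file): the
 * run_queue_head layout implied by the two routines above.
 * The real definition lives in kern/run_queues.h; the field
 * names here are inferred from their uses in this file, and
 * decl_simple_lock_data is assumed for the lock member.
 */
#if 0	/* illustration only */
struct run_queue_head {
	decl_simple_lock_data(,lock)	/* protects count and runqs */
	int		count;		/* total runnable threads */
	run_queue_t	runqs[NUM_POLICIES];
					/* one queue per policy rank */
	int		last;		/* highest valid index in runqs */
};
#endif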
132
133 /*
134 * thread_setrun:
135 *
136 * Make thread runnable; dispatch directly onto an idle processor
137 * if possible. Else put on appropriate run queue (processor
138 * if bound, else processor set). Caller must have lock on thread.
139 * This is always called at splsched.
140 */
141
142 void thread_setrun(
143 register thread_t thread,
144 boolean_t may_preempt)
145 {
146 register processor_set_t pset = thread->processor_set;
147 run_queue_t rq;
148 #if NCPUS > 1
149 register processor_t processor;
150 #endif
151
152 /*
153 * Update priority if needed.
154 */
155 if (thread->sched_stamp != sched_tick)
156 UPDATE_PRIORITY(thread);
157
158 assert(thread->runq == RUN_QUEUE_HEAD_NULL);
159
160 #if NCPUS > 1
161 /*
162 * Try to dispatch the thread directly onto an idle processor.
163 */
164 if ((processor = thread->bound_processor) != PROCESSOR_NULL) {
165 /*
166 * Bound, can only run on bound processor. Have to lock
167 * processor here because it may not be the current one.
168 */
169 if (processor->state == PROCESSOR_IDLE) {
170 simple_lock(&processor->lock);
171 pset = processor->processor_set;
172 simple_lock(&pset->idle_lock);
173 if (processor->state == PROCESSOR_IDLE) {
174 queue_remove(&pset->idle_queue, processor,
175 processor_t, processor_queue);
176 pset->idle_count--;
177 processor->next_thread = thread;
178 processor->state = PROCESSOR_DISPATCHING;
179 simple_unlock(&pset->idle_lock);
180 simple_unlock(&processor->lock);
181 return;
182 }
183 simple_unlock(&pset->idle_lock);
184 simple_unlock(&processor->lock);
185 }
186
187 #if MACH_IO_BINDING
188
189 simple_lock(&processor->runq.lock);
190 rq = processor->runq.runqs[thread->policy_index];
191 (void) THREAD_ENQUEUE(rq, thread, FALSE);
192 processor->runq.count++;
193 thread->runq = &processor->runq;
194 simple_unlock(&processor->runq.lock);
195
196 #else /* MACH_IO_BINDING */
197
198
199 simple_lock(&processor->lock);
200 pset = processor->processor_set;
201 simple_lock(&pset->runq.lock);
202 rq = processor->runq.runqs[BOUND_POLICY_INDEX];
203 (void) THREAD_ENQUEUE(rq, thread, FALSE);
204 pset->runq.count++;
205 thread->runq = &pset->runq;
206 simple_unlock(&pset->runq.lock);
207 simple_unlock(&processor->lock);
208
209 #endif /* MACH_IO_BINDING */
210
211 /*
212 * Cause an ast on the processor if it is on line.
213 */
214 if (processor == current_processor()) {
215 ast_on(cpu_number(), AST_BLOCK);
216 }
217 else if (processor->state != PROCESSOR_OFF_LINE) {
218 cause_ast_check(processor);
219 }
220 return;
221 }
222
223 /*
224 * Not bound, any processor in the processor set is ok.
225 */
226 #if HW_FOOTPRINT
227 /*
228 * But first check the last processor it ran on.
229 */
230 processor = thread->last_processor;
231 if (processor->state == PROCESSOR_IDLE) {
232 simple_lock(&processor->lock);
233 simple_lock(&pset->idle_lock);
234 if (processor->state == PROCESSOR_IDLE
235 #if MACH_HOST
236 && processor->processor_set == pset
237 #endif /* MACH_HOST */
238 )
239 {
240 queue_remove(&pset->idle_queue, processor,
241 processor_t, processor_queue);
242 pset->idle_count--;
243 processor->next_thread = thread;
244 processor->state = PROCESSOR_DISPATCHING;
245 simple_unlock(&pset->idle_lock);
246 simple_unlock(&processor->lock);
247 return;
248 }
249 simple_unlock(&pset->idle_lock);
250 simple_unlock(&processor->lock);
251 }
252 #endif /* HW_FOOTPRINT */
253
254 if (pset->idle_count > 0) {
255 simple_lock(&pset->idle_lock);
256 if (pset->idle_count > 0) {
257 queue_remove_first(&pset->idle_queue, processor,
258 processor_t, processor_queue);
259 pset->idle_count--;
260 processor->next_thread = thread;
261 processor->state = PROCESSOR_DISPATCHING;
262 simple_unlock(&pset->idle_lock);
263 return;
264 }
265 simple_unlock(&pset->idle_lock);
266 }
267
268 #else /* NCPUS == 1 */
269
270 /*
271 * Check for idle processor.
272 */
273 if (pset->idle_count > 0) {
274 pset->idle_count = 0;
275 processor_array[0].next_thread = thread;
276 processor_array[0].state = PROCESSOR_DISPATCHING;
277 return;
278 }
279
280 #endif /* NCPUS */
281
282 /*
283 * Queue to processor set.
284 */
285 simple_lock(&pset->runq.lock);
286 rq = pset->runq.runqs[thread->policy_index];
287 may_preempt = THREAD_ENQUEUE(rq, thread, may_preempt);
288 pset->runq.count++;
289 thread->runq = &pset->runq;
290 simple_unlock(&pset->runq.lock);
291
292 /*
293 * Preemption check. The new thread must first be
294 * in the current processor's processor set.
295 */
296 if (may_preempt
297 #if MACH_HOST
298 && pset == current_processor()->processor_set
299 #endif
300 )
301 {
302 /*
303 * New thread preempts current thread if it is
304 * in a higher scheduler class (higher index).
305 *
306 * Per-policy result is used if new thread and
307 * current thread are in the same scheduler class.
308 *
309 * No preemption if new thread is in a lower
310 * scheduler class than current thread.
311 */
312 if (thread->policy_index > current_thread()->policy_index ||
313 (thread->policy_index == current_thread()->policy_index &&
314 may_preempt))
315 {
316 /*
317 * Turn off first_quantum to allow context switch.
318 */
319 current_processor()->first_quantum = FALSE;
320 ast_on(cpu_number(), AST_BLOCK);
321 }
322 }
323 }
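
/*
 * Editorial sketch (not part of the original file): the calling
 * convention documented above -- thread locked, at splsched --
 * as it appears in a typical caller, given a thread_t thread.
 * This mirrors the PROCESSOR_ASSIGN path in idle_thread_continue
 * below.
 */
#if 0	/* illustration only */
	spl_t s;

	s = splsched();
	thread_sched_lock(thread);
	thread_setrun(thread, TRUE);	/* TRUE: allow preemption check */
	thread_sched_unlock(thread);
	splx(s);
#endif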
324
325 /*
326 * Remove a thread from the run queues. Returns the
327 * run queue that the thread was on, or RUN_QUEUE_HEAD_NULL
328 * if the thread was not found on the run queues.
329 *
330 * Called with the thread locked, at splsched.
331 */
332 run_queue_head_t
333 rem_runq(
334 thread_t thread)
335 {
336 run_queue_head_t runq;
337
338 runq = thread->runq;
339
340 /*
341 * If runq is RUN_QUEUE_HEAD_NULL, the thread will stay off the
342 * run queues because the caller locked the thread. Otherwise
343 * the thread is on a runq, but could leave it.
344 */
345 if (runq != RUN_QUEUE_HEAD_NULL) {
346 simple_lock(&runq->lock);
347 if (runq == thread->runq) {
348 /*
349 * Thread is in a runq and we have a lock on
350 * that runq.
351 */
352 run_queue_t rq;
353
354 #if NCPUS > 1 && !MACH_IO_BINDING
355 {
356 processor_t processor;
357 if ((processor = thread->bound_processor) != PROCESSOR_NULL)
358 rq = processor->runq.runqs[BOUND_POLICY_INDEX];
359 else
360 rq = runq->runqs[thread->policy_index];
361 }
362 #else
363 rq = runq->runqs[thread->policy_index];
364 #endif
365 THREAD_REMQUEUE(rq, thread);
366
367 runq->count--;
368
369 thread->runq = RUN_QUEUE_HEAD_NULL;
370 simple_unlock(&runq->lock);
371 }
372 else {
373 /*
374 * The thread left the runq before we could
375 * lock the runq. It is not on a runq now, and
376 * can't move again because this routine's
377 * caller locked the thread.
378 */
379 simple_unlock(&runq->lock);
380 runq = RUN_QUEUE_HEAD_NULL;
381 }
382 }
383
384 return runq;
385 }
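
/*
 * Editorial sketch (not part of the original file): typical use
 * of rem_runq under the locking rules above. The return value
 * distinguishes "was runnable, now removed" from "was not on any
 * run queue".
 */
#if 0	/* illustration only */
	spl_t s;

	s = splsched();
	thread_sched_lock(thread);
	if (rem_runq(thread) != RUN_QUEUE_HEAD_NULL) {
		/* thread was runnable; it is now off the run queues */
	}
	thread_sched_unlock(thread);
	splx(s);
#endif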
386
387 /*
388 * Select the next thread to run on a processor.
389 *
390 * Called at splsched.
391 */
392 thread_t
393 thread_select(
394 processor_t processor)
395 {
396 thread_t thread;
397 processor_set_t pset = processor->processor_set;
398 run_queue_t runq;
399
400 #if MACH_IO_BINDING
401 /*
402 * Find the highest-priority policy in both the
403 * local and global run queues. If they are the
404 * same, call a policy-specific routine to choose
405 * between the two queues. If they are different,
406 * just pick the first thread from the
407 * highest-priority policy.
408 */
409 {
410 int gi, li;
411 run_queue_head_t rqh;
412
413 simple_lock(&pset->runq.lock);
414 simple_lock(&processor->runq.lock);
415
416 assert(pset->runq.last == processor->runq.last);
417
418 for (gi = pset->runq.last; gi >= 0; gi--) {
419 if (pset->runq.runqs[gi]->rq_count > 0) {
420 break;
421 }
422 }
423
424 for (li = processor->runq.last; li > gi; li--) {
425 if (processor->runq.runqs[li]->rq_count > 0) {
426 break;
427 }
428 }
429
430 if (li >= 0) {
431 /*
432 * Found something, in one or both run queues.
433 * Pick the one with the higher priority.
434 */
435 if (gi > li) {
436 /*
437 * Global run queue wins.
438 */
439 rqh = &pset->runq;
440 runq = rqh->runqs[gi];
441 }
442 else if (li > gi) {
443 /*
444 * Local run queue wins.
445 */
446 rqh = &processor->runq;
447 runq = rqh->runqs[li];
448 }
449 else {
450 /*
451 * A tie. Must call policy-specific function
452 * to decide between the two run queues.
453 */
454 run_queue_t global_runq, local_runq;
455
456 global_runq = pset->runq.runqs[gi];
457 local_runq = processor->runq.runqs[li];
458
459 if (RUNQ_HEAD_PREEMPT(global_runq, local_runq)) {
460 rqh = &pset->runq;
461 runq = global_runq;
462 }
463 else {
464 rqh = &processor->runq;
465 runq = local_runq;
466 }
467 }
468 thread = THREAD_DEQUEUE(runq);
469 thread->runq = RUN_QUEUE_HEAD_NULL;
470 rqh->count--;
471 simple_unlock(&processor->runq.lock);
472 simple_unlock(&pset->runq.lock);
473 return thread;
474 }
475
476 simple_unlock(&processor->runq.lock);
477 simple_unlock(&pset->runq.lock);
478 }
479 #else /* MACH_IO_BINDING */
480
481 /*
482 * Run down the list of scheduling policies for
483 * the processor set, until we find one with
484 * runnable threads. Then call its choose_thread
485 * routine.
486 */
487 {
488 int i;
489
490 simple_lock(&pset->runq.lock);
491
492 #if NCPUS > 1
493 /*
494 * Look at the per-processor run queue pointers,
495 * to pick up bound threads.
496 */
497 for (i = processor->runq.last; i >= 0; i--) {
498 runq = processor->runq.runqs[i];
499 if (runq->rq_count > 0) {
500 /*
501 * Found one.
502 */
503 thread = THREAD_DEQUEUE(runq);
504 thread->runq = RUN_QUEUE_HEAD_NULL;
505 pset->runq.count--;
506 simple_unlock(&pset->runq.lock);
507 return thread;
508 }
509 }
510 #else /* NCPUS == 1 */
511 for (i = pset->runq.last; i >= 0; i--) {
512 runq = pset->runq.runqs[i];
513 if (runq->rq_count > 0) {
514 /*
515 * Found one.
516 */
517 thread = THREAD_DEQUEUE(runq);
518 thread->runq = RUN_QUEUE_HEAD_NULL;
519 pset->runq.count--;
520 simple_unlock(&pset->runq.lock);
521 return thread;
522 }
523 }
524 #endif /* NCPUS > 1 */
525 simple_unlock(&pset->runq.lock);
526 }
527
528 #endif /* MACH_IO_BINDING */
529
530 /*
531 * No threads to run. Check whether the current
532 * thread is still runnable on this processor.
533 */
534 thread = current_thread();
535 if (thread->state == TH_RUN &&
536 check_processor_set(thread) &&
537 check_bound_processor(thread))
538 {
539 thread_sched_lock(thread);
540 if (thread->sched_stamp != sched_tick)
541 UPDATE_PRIORITY(thread);
542 thread_sched_unlock(thread);
543
544 return thread;
545 }
546
547 /*
548 * Really nothing to do.
549 */
550 return choose_idle_thread(processor, pset);
551 }
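
/*
 * Editorial sketch (hypothetical helper, not part of the original
 * file): the descending policy scan used by thread_select,
 * factored out. As elsewhere in this file, a higher index in
 * runqs[] means a higher-priority scheduling class, and the
 * caller must hold the run queue head's lock.
 */
#if 0	/* illustration only */
run_queue_t
runq_head_first_nonempty(
	run_queue_head_t rqh)
{
	int i;

	for (i = rqh->last; i >= 0; i--) {
		if (rqh->runqs[i]->rq_count > 0)
			return rqh->runqs[i];
	}
	return RUN_QUEUE_NULL;
}
#endif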
552
553 /*
554 * Check for preemption.
555 *
556 * The current thread is always preempted by any thread in
557 * a higher-priority scheduling class. The scheduler's
558 * CSW_NEEDED routine checks whether the thread is preempted
559 * by a thread in the same class. Threads in lower-priority
560 * scheduling classes cannot preempt the current thread.
561 */
562 boolean_t csw_needed(
563 thread_t thread,
564 processor_t processor)
565 {
566 run_queue_t runq;
567 int i, cur_index;
568
569 if ((thread->state & TH_SUSP) != 0)
570 return TRUE;
571
572 cur_index = thread->policy_index;
573
574 #if MACH_IO_BINDING
575 /*
576 * Must check both local and global run queues.
577 */
578 {
579 processor_set_t pset = processor->processor_set;
580 int gi, li;
581
582 for (gi = pset->runq.last; gi > cur_index; gi--) {
583 if (pset->runq.runqs[gi]->rq_count > 0)
584 return TRUE;
585 }
586
587 for (li = processor->runq.last; li > cur_index; li--) {
588 if (processor->runq.runqs[li]->rq_count > 0)
589 return TRUE;
590 }
591
592 runq = pset->runq.runqs[cur_index];
593 if (runq->rq_count > 0 && CSW_NEEDED(runq, thread))
594 return TRUE;
595
596 runq = processor->runq.runqs[cur_index];
597 if (runq->rq_count > 0 && CSW_NEEDED(runq, thread))
598 return TRUE;
599
600 return FALSE;
601 }
602 #else /* MACH_IO_BINDING */
603
604 #if NCPUS > 1
605 for (i = processor->runq.last; i > cur_index; i--) {
606 runq = processor->runq.runqs[i];
607 if (runq->rq_count > 0)
608 return TRUE;
609 }
610
611 runq = processor->runq.runqs[cur_index];
612 if (runq->rq_count > 0 && CSW_NEEDED(runq, thread))
613 return TRUE;
614 return FALSE;
615
616 #else /* NCPUS == 1 */
617 {
618 processor_set_t pset = processor->processor_set;
619 for (i = pset->runq.last; i > cur_index; i--) {
620 runq = pset->runq.runqs[i];
621 if (runq->rq_count > 0)
622 return TRUE;
623 }
624
625 runq = pset->runq.runqs[cur_index];
626 if (runq->rq_count > 0 && CSW_NEEDED(runq, thread))
627 return TRUE;
628 return FALSE;
629 }
630 #endif /* NCPUS > 1 */
631
632 #endif /* MACH_IO_BINDING */
633 }
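
/*
 * Editorial sketch (hypothetical caller, not part of the original
 * file): a clock-tick or ast-check path might use csw_needed like
 * this, mirroring the ast_on() usage in thread_setrun above.
 */
#if 0	/* illustration only */
	if (csw_needed(current_thread(), current_processor()))
		ast_on(cpu_number(), AST_BLOCK);
#endif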
634
635
636 /*
637 * choose_idle_thread:
638 * Set the processor idle and return its idle thread.
639 *
640 * myprocessor must be the current
641 * processor, and pset must be its processor set.
642 * If the processor is being assigned or shut down,
643 * it is left alone.
644 */
645
646 thread_t choose_idle_thread(
647 register processor_t myprocessor,
648 processor_set_t pset)
649 {
650 /*
651 * Nothing is runnable, so set this processor idle if it
652 * was running. If it was in an assignment or shutdown,
653 * leave it alone. Return its idle thread.
654 */
655 simple_lock(&pset->idle_lock);
656 if (myprocessor->state == PROCESSOR_RUNNING) {
657 myprocessor->state = PROCESSOR_IDLE;
658
659 #if NCPUS > 1
660 queue_enter_first(&(pset->idle_queue), myprocessor,
661 processor_t, processor_queue);
662 pset->idle_count++;
663 #else /* NCPUS > 1 */
664 pset->idle_count = 1;
665 #endif /* NCPUS > 1 */
666 }
667 simple_unlock(&pset->idle_lock);
668
669 return myprocessor->idle_thread;
670 }
671
672 /*
673 * no_dispatch_count counts number of times processors go non-idle
674 * without being dispatched. This should be very rare.
675 */
676 int no_dispatch_count = 0;
677
678 /*
679 * This is the idle thread, which just looks for other threads
680 * to execute.
681 */
682
683 no_return idle_thread_continue(void)
684 {
685 register processor_t myprocessor;
686 register thread_t volatile *threadp;
687 register volatile int *gcount;
688 #if MACH_IO_BINDING
689 register volatile int *lcount;
690 #endif
691 register thread_t new_thread;
692 register int state;
693 int mycpu;
694 spl_t s;
695
696 mycpu = cpu_number();
697 myprocessor = current_processor();
698 threadp = (thread_t volatile *) &myprocessor->next_thread;
699 #if MACH_IO_BINDING
700 lcount = (volatile int *) &myprocessor->runq.count;
701 #endif
702
703 while (TRUE) {
704
705 #ifdef MARK_CPU_IDLE
706 MARK_CPU_IDLE(mycpu);
707 #endif /* MARK_CPU_IDLE */
708
709 #if MACH_HOST
710 gcount = (volatile int *)
711 &myprocessor->processor_set->runq.count;
712 #else /* MACH_HOST */
713 gcount = (volatile int *) &default_pset.runq.count;
714 #endif /* MACH_HOST */
715
716 /*
717 * This cpu will be dispatched (by thread_setrun) by setting
718 * next_thread to the value of the thread to run next. Also
719 * check runq counts.
720 */
721
722 while ((*threadp == THREAD_NULL) &&
723 (*gcount == 0)
724 #if MACH_IO_BINDING
725 && (*lcount == 0)
726 #endif
727 )
728 {
729 /*
730 * check for kernel ASTs while we wait
731 */
732 AST_KERNEL_CHECK(mycpu);
733
734 /*
735 * machine_idle is a machine-dependent function
736 * that conserves power.
737 */
738 #if POWER_SAVE
739 machine_idle(mycpu);
740 #endif /* POWER_SAVE */
741 }
742
743 #ifdef MARK_CPU_ACTIVE
744 MARK_CPU_ACTIVE(mycpu);
745 #endif /* MARK_CPU_ACTIVE */
746
747 s = splsched();
748
749 /*
750 * This is not a switch statement, so as to avoid
751 * the bounds-checking code in the common case.
752 */
753 retry:
754 state = myprocessor->state;
755 if (state == PROCESSOR_DISPATCHING) {
756
757 /*
758 * Common case -- cpu dispatched.
759 */
760 new_thread = *threadp;
761 *threadp = THREAD_NULL;
762 myprocessor->state = PROCESSOR_RUNNING;
763
764 /*
765 * Just use the set quantum. No point in
766 * checking for a shorter local runq quantum;
767 * csw_needed will handle it correctly.
768 */
769 #if MACH_HOST
770 myprocessor->quantum = new_thread->processor_set->set_quantum;
771 #else /* MACH_HOST */
772 myprocessor->quantum = default_pset.set_quantum;
773 #endif /* MACH_HOST */
774 myprocessor->first_quantum = TRUE;
775 counter(c_idle_thread_handoff++);
776 thread_run_noreturn(idle_thread_continue, new_thread);
777 /*NOTREACHED*/
778 }
779 else if (state == PROCESSOR_IDLE) {
780 register processor_set_t pset;
781
782 pset = myprocessor->processor_set;
783 simple_lock(&pset->idle_lock);
784 if (myprocessor->state != PROCESSOR_IDLE) {
785 /*
786 * Something happened, try again.
787 */
788 simple_unlock(&pset->idle_lock);
789 goto retry;
790 }
791
792 /*
793 * Processor was not dispatched (rare).
794 * Set it running again.
795 */
796 no_dispatch_count++;
797 pset->idle_count--;
798 #if NCPUS > 1
799 queue_remove(&pset->idle_queue, myprocessor,
800 processor_t, processor_queue);
801 #endif
802 myprocessor->state = PROCESSOR_RUNNING;
803 simple_unlock(&pset->idle_lock);
804 counter(c_idle_thread_block++);
805 thread_block_noreturn(idle_thread_continue);
806 /*NOTREACHED*/
807 }
808 else if ((state == PROCESSOR_ASSIGN) ||
809 (state == PROCESSOR_SHUTDOWN)) {
810 /*
811 * Changing processor sets, or going off-line.
812 * Release next_thread if there is one. Actual
813 * thread to run is on a runq.
814 */
815 if ((new_thread = *threadp) != THREAD_NULL) {
816 *threadp = THREAD_NULL;
817 thread_sched_lock(new_thread);
818 thread_setrun(new_thread, FALSE);
819 thread_sched_unlock(new_thread);
820 }
821
822 counter(c_idle_thread_block++);
823 thread_block_noreturn(idle_thread_continue);
824 /*NOTREACHED*/
825 }
826 else {
827 panic("Idle thread: bad processor state %d (Cpu %d)\n",
828 state, mycpu);
829 }
830
831 splx(s);
832 }
833 }
834
835 no_return idle_thread(void)
836 {
837 /*
838 * Can only call stack_privilege on the current thread.
839 */
840 stack_privilege(current_thread());
841
842 /*
843 * thread_block() to set the processor idle when we
844 * run next time.
845 */
846 counter(c_idle_thread_block++);
847 thread_block_noreturn(idle_thread_continue);
848 /*NOTREACHED*/
849 }
850
851 /*
852 * Create the idle thread for a CPU.
853 */
854 void idle_thread_create(
855 processor_t processor)
856 {
857 thread_t thread;
858
859 /*
860 * Create the thread.
861 */
862 {
863 thread_t temp;
864 (void) thread_create(kernel_task, &temp);
865 thread = temp;
866 }
867
868 /*
869 * Set it to the background policy,
870 * though it won't really run it.
871 */
872 thread->sched_policy = sched_policy_lookup(POLICY_BACKGROUND);
873 thread->cur_policy = thread->sched_policy;
874 thread->policy_index = thread->sched_policy->rank;
875
876 thread_start(thread, idle_thread); /* start at idle thread */
877 thread_doswapin(thread); /* give it a stack */
878
879 processor->idle_thread = thread; /* make it this processor's
880 idle thread */
881
882 thread->state |= TH_RUN | TH_IDLE; /* mark it as running and
883 idle so that */
884 (void) thread_resume(thread); /* this won't put it on
885 the run queues */
886 }
887
888 #if NCPUS > 1
889 /*
890 * thread_bind:
891 *
892 * Force a thread to execute on the specified processor.
893 * If the thread is currently executing, it may wait until its
894 * time slice is up before switching onto the specified processor.
895 *
896 * A processor of PROCESSOR_NULL causes the thread to be unbound.
897 * xxx - DO NOT export this to users.
898 *
899 * Binding a thread to a particular processor temporarily switches
900 * the thread to running under the BP (bound_processor) scheduling
901 * policy. Unbinding the thread lets it run its normal policy.
902 *
903 * NOTE that this won't work if a bound thread can be depressed,
904 * since depressing a thread also temporarily switches its
905 * scheduling policy. However, on a symmetrical multiprocessor,
906 * the only thread that is ever bound is the action thread.
907 * Fixing this (under MACH_IO_BINDING), as well as dealing with
908 * the case of a thread that must be bound to a processor that
909 * is not in its processor set, is left as an exercise for the
910 * reader.
911 */
912 void thread_bind(
913 register thread_t thread,
914 processor_t processor)
915 {
916 spl_t s;
917
918 s = splsched();
919 thread_sched_lock(thread);
920
921 thread->bound_processor = processor;
922 if (processor != PROCESSOR_NULL) {
923 /*
924 * Temporarily bind to BP policy.
925 */
926 extern struct sched_policy bp_sched_policy;
927
928 thread->cur_policy = &bp_sched_policy;
929 thread->policy_index = bp_sched_policy.rank;
930 }
931 else {
932 /*
933 * Resume normal scheduling.
934 */
935 thread->cur_policy = thread->sched_policy;
936 thread->policy_index = thread->sched_policy->rank;
937 }
938
939 thread_sched_unlock(thread);
940 splx(s);
941 }
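
/*
 * Editorial sketch (not part of the original file): binding and
 * unbinding around work that must run on a particular cpu, as in
 * the action-thread case described above. processor here is
 * hypothetical. Note that a running thread may not migrate until
 * it next blocks or its time slice expires.
 */
#if 0	/* illustration only */
	thread_t th = current_thread();

	thread_bind(th, processor);	/* runs under BP policy until unbound */
	/* ... work that must execute on `processor' ... */
	thread_bind(th, PROCESSOR_NULL); /* resume normal scheduling policy */
#endif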
942 #endif /* NCPUS > 1 */
943