1 /*
2 * Mach Operating System
3 * Copyright (c) 1993 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26 /*
27 * HISTORY
28 * $Log: sched_policy.c,v $
29 * Revision 2.2 93/11/17 17:21:24 dbg
30 * Use runq pointers in processor if NCPUS > 1 - processor_shutdown
31 * does not depend on MACH_HOST.
32 * [93/07/21 dbg]
33 *
34 * Break up thread lock. Change lock ordering to
35 * thread_lock -> pset_lock -> thread_sched_lock.
36 * [93/05/27 dbg]
37 *
38 * machparam.h -> machspl.h
39 * [93/05/21 dbg]
40 *
41 * New interfaces.
42 * [93/05/05 dbg]
43 *
44 * Created.
45 * [93/04/02 dbg]
46 *
47 */
48
49 /*
50 * Scheduling policies and instances.
51 */
52
53 #include <mach_host.h>
54 #include <mach_io_binding.h>
55
56 #include <mach/kern_return.h>
57
58 #include <kern/kern_types.h>
59 #include <kern/sched_policy.h>
60 #include <kern/processor.h>
61 #include <kern/thread.h>
62
63 #include <sched_policy/standard.h> /* BG, TS, FP */
64
65 #include <machine/machspl.h>
66
67 void thread_set_default_policy(thread_t); /* forward */
68 void thread_enforce_policy_limits(thread_t, processor_set_t);
69 /* forward */
70
/*
 *	processor_set_policy_add
 *
 *	Add a new scheduling policy to a processor set,
 *	and supply the limit values for the policy.
 *
 *	Returns KERN_INVALID_ARGUMENT for a null pset or an
 *	unknown policy name, the RUNQ_SET_LIMIT error for bad
 *	limit values, and KERN_SUCCESS otherwise.  Adding a
 *	policy that is already enabled is not an error.
 */
kern_return_t processor_set_policy_add(
	processor_set_t	pset,
	int		policy,
	policy_param_t	limit,
	natural_t	count)
{
	sched_policy_t	sched_policy;
	int		policy_index;	/* policy`s rank: its slot in runqs[] */
	run_queue_t	rq;
	kern_return_t	kr;

	/*
	 *	Make sure we have a processor set.
	 */
	if (pset == PROCESSOR_SET_NULL)
	    return KERN_INVALID_ARGUMENT;

	/*
	 *	Look up the scheduling policy, by name.
	 */
	sched_policy = sched_policy_lookup(policy);
	if (sched_policy == 0)
	    return KERN_INVALID_ARGUMENT;

	/*
	 *	Allocate the run queue (and other policy-specific
	 *	data structures) for the policy.  Done before taking
	 *	the pset lock; a concurrent add of the same policy is
	 *	detected (and this allocation discarded) below.
	 *	NOTE(review): the result is not checked for allocation
	 *	failure -- presumably RUNQ_ALLOC cannot fail or panics;
	 *	confirm against the per-policy implementations.
	 */
	rq = RUNQ_ALLOC(sched_policy);

	/*
	 *	Set the limits for the policy
	 */
	kr = RUNQ_SET_LIMIT(rq, limit, count);
	if (kr != KERN_SUCCESS) {
	    /*
	     *	Limit values were bad
	     */
	    RUNQ_FREE(rq);
	    return kr;
	}

	/*
	 *	Attach the run queue to the processor set`s policy
	 *	list, at its proper rank.
	 */
	policy_index = sched_policy->rank;

	pset_lock(pset);
	if (pset->runq.runqs[policy_index] != 0) {
	    /*
	     *	Someone else added the policy while we
	     *	were unlocked.  Keep theirs, discard ours.
	     */
	    pset_unlock(pset);

	    RUNQ_FREE(rq);

	    return KERN_SUCCESS;	/* already there */
	}

	pset->runq.runqs[policy_index] = rq;
	if (policy_index > pset->runq.last)
	    pset->runq.last = policy_index;	/* 'last' = highest enabled rank */

#if	NCPUS > 1 && !MACH_IO_BINDING
	/*
	 *	And copy the pointer into the per-processor
	 *	structures, so each processor can scan its own
	 *	run queues without dereferencing the pset.
	 */
	{
	    processor_t processor;

	    queue_iterate(&pset->processors, processor, processor_t,
			  processors)
	    {
		processor->runq.runqs[policy_index] = rq;
		if (processor->runq.last < policy_index)
		    processor->runq.last = policy_index;
	    }
	}
#endif	/* NCPUS > 1 && !MACH_IO_BINDING */

	pset_unlock(pset);

	return KERN_SUCCESS;
}
164
/*
 *	Processor_set_policy_remove:
 *
 *	Remove a scheduling policy from a processor set.
 *	Threads running that policy are reset to
 *	timesharing, or to background if timesharing
 *	is not available on the processor set.
 *
 *	The background policy cannot be removed.
 */
kern_return_t processor_set_policy_remove(
	processor_set_t	pset,
	int		policy)
{
	sched_policy_t	sched_policy;
	run_queue_t	rq;
	thread_t	thread;
	int		policy_index;

	/*
	 *	Check that we have a processor set.
	 */
	if (pset == PROCESSOR_SET_NULL)
	    return KERN_INVALID_ARGUMENT;

	/*
	 *	Background policy cannot be removed:
	 *	it is the fallback of last resort for
	 *	thread_set_default_policy.
	 */
	if (policy == POLICY_BACKGROUND)
	    return KERN_INVALID_ARGUMENT;

	/*
	 *	Look up the scheduling policy, by name.
	 */
	sched_policy = sched_policy_lookup(policy);
	if (sched_policy == 0)
	    return KERN_INVALID_ARGUMENT;

	policy_index = sched_policy->rank;

	/*
	 *	Check whether the policy is enabled
	 *	for the processor set.
	 */
	pset_lock(pset);
	if ((rq = pset->runq.runqs[policy_index]) == RUN_QUEUE_NULL) {
	    pset_unlock(pset);
	    return KERN_FAILURE;		/* not enabled */
	}

	/*
	 *	Remove policy from list, and reset 'last'
	 *	(the highest enabled rank) if necessary.
	 *	The backward scan terminates because some
	 *	lower-ranked policy -- presumably at least
	 *	background -- is always enabled; confirm that
	 *	background`s rank is the lowest.
	 */
	pset->runq.runqs[policy_index] = RUN_QUEUE_NULL;
	if (policy_index == pset->runq.last) {
	    int i = policy_index;
	    do {
		--i;
	    } while (pset->runq.runqs[i] == RUN_QUEUE_NULL);
	    pset->runq.last = i;
	}

#if	NCPUS > 1 && !MACH_IO_BINDING
	/*
	 *	And remove the pointer from the per-processor
	 *	structures.
	 */
	{
	    processor_t processor;

	    queue_iterate(&pset->processors, processor, processor_t, processors) {
		processor->runq.runqs[policy_index] = RUN_QUEUE_NULL;
		if (processor->runq.last == policy_index)
		    processor->runq.last = pset->runq.last;
	    }
	}
#endif	/* NCPUS > 1 && !MACH_IO_BINDING */

	/*
	 *	Reassign all threads that run that policy.
	 *	Lock ordering here is pset_lock -> thread_sched_lock,
	 *	per the ordering noted in this file`s HISTORY;
	 *	the per-thread work is done at splsched.
	 */
	queue_iterate(&pset->threads, thread, thread_t, pset_threads) {
	    spl_t s;

	    s = splsched();
	    thread_sched_lock(thread);
	    if (thread->sched_policy == sched_policy)
		thread_set_default_policy(thread);
	    thread_sched_unlock(thread);
	    splx(s);
	}
	pset_unlock(pset);

	/*
	 *	Deallocate the run queues.  Safe without the pset
	 *	lock: the queue is no longer reachable from the pset
	 *	or processors, and every thread that used the policy
	 *	has been reassigned above.
	 */
	RUNQ_FREE(rq);

	return KERN_SUCCESS;
}
266
/*
 *	processor_set_policy_limit
 *
 *	Change the limit values for an existing scheduling policy
 *	on a processor set.  This affects newly created and assigned
 *	threads.  Optionally change existing ones.
 *
 *	Returns KERN_INVALID_ARGUMENT for a null pset or unknown
 *	policy name, KERN_FAILURE if the policy is not enabled on
 *	the pset, otherwise the result of RUNQ_SET_LIMIT.
 */
kern_return_t processor_set_policy_limit(
	processor_set_t	pset,
	int		policy,
	policy_param_t	limit,
	natural_t	count,
	boolean_t	change_threads)
{
	sched_policy_t	sched_policy;
	int		policy_index;
	run_queue_t	rq;
	kern_return_t	kr;

	/*
	 *	Make sure we have a processor set.
	 */
	if (pset == PROCESSOR_SET_NULL)
	    return KERN_INVALID_ARGUMENT;

	/*
	 *	Look up the scheduling policy, by name.
	 */
	sched_policy = sched_policy_lookup(policy);
	if (sched_policy == 0)
	    return KERN_INVALID_ARGUMENT;

	policy_index = sched_policy->rank;

	/*
	 *	Check whether the policy is enabled
	 *	for the processor set.
	 */
	pset_lock(pset);

	if ((rq = pset->runq.runqs[policy_index]) == RUN_QUEUE_NULL) {
	    pset_unlock(pset);
	    return KERN_FAILURE;		/* not enabled */
	}

	/*
	 *	Set the limits for the policy
	 */
	kr = RUNQ_SET_LIMIT(rq, limit, count);
	if (kr == KERN_SUCCESS) {

	    if (change_threads) {
		register queue_head_t	*list;
		register thread_t	thread;

		/*
		 *	Re-check every thread of this pset that runs
		 *	the policy against the new limits.
		 *	Lock ordering: pset_lock -> thread_sched_lock.
		 */
		list = &pset->threads;
		queue_iterate(list, thread, thread_t, pset_threads) {
		    spl_t s;

		    s = splsched();
		    thread_sched_lock(thread);
		    if (thread->sched_policy == sched_policy)
			thread_enforce_policy_limits(thread, pset);
		    thread_sched_unlock(thread);
		    splx(s);
		}
	    }
	}

	pset_unlock(pset);
	return kr;
}
339
340 /*
341 * Set the default policies for a new processor set.
342 *
343 * Defaults are timesharing and background.
344 */
345 void processor_set_default_policies(
346 processor_set_t pset)
347 {
348 (void) processor_set_policy_add(pset, POLICY_TIMESHARE, 0, 0);
349 (void) processor_set_policy_add(pset, POLICY_BACKGROUND, 0, 0);
350 }
351
/*
 *	Thread_set_policy:
 *
 *	Set the scheduling policy for the thread,
 *	and provide the scheduling parameters.
 *	Pset control port must be provided as a key.
 *	If no scheduling parameters are provided,
 *	the default values for the policy will be used.
 */
kern_return_t
thread_set_policy(
	thread_t	thread,
	processor_set_t	pset,
	int		policy,
	policy_param_t	param,
	natural_t	count)
{
	spl_t		s;
	sched_policy_t	old_policy, new_policy;
	int		new_policy_index;
	run_queue_head_t runq;
	kern_return_t	kr;

	if (thread == THREAD_NULL || pset == PROCESSOR_SET_NULL)
	    return KERN_INVALID_ARGUMENT;

	kr = KERN_SUCCESS;

	/*
	 *	Look up the scheduling policy, by name.
	 */
	new_policy = sched_policy_lookup(policy);
	if (new_policy == 0)
	    return KERN_INVALID_ARGUMENT;

	new_policy_index = new_policy->rank;

	/*
	 *	Check whether the policy is enabled
	 *	for the processor set, and whether
	 *	this is indeed the thread`s processor
	 *	set.
	 *
	 *	We only have to hold the pset lock to
	 *	check the thread`s assignment: both the
	 *	pset and thread locks must be held to
	 *	change it.
	 */
	pset_lock(pset);

	if (pset != thread->processor_set) {
	    pset_unlock(pset);
	    return KERN_FAILURE;
	}

	if (pset->runq.runqs[new_policy_index] == RUN_QUEUE_NULL) {
	    pset_unlock(pset);
	    return KERN_FAILURE;		/* not enabled */
	}

	s = splsched();
	thread_sched_lock(thread);

	/*
	 *	Remove thread from its current run queue.
	 *	rem_runq returns the queue the thread was on,
	 *	or RUN_QUEUE_HEAD_NULL if it was not queued;
	 *	that tells us whether to re-queue it below.
	 */
	runq = rem_runq(thread);

	old_policy = thread->sched_policy;
	if (old_policy == new_policy) {
	    /*
	     *	Thread already runs this policy.  Just change
	     *	scheduling parameters.  Limits for policy
	     *	parameters are those already set for the thread.
	     */
	    kr = THREAD_SET_PARAM(thread, param, count,
				  FALSE,	/* not new policy -
						   use thread limits */
				  TRUE);	/* error if over limits */
	}
	else {
	    /*
	     *	Changing policy.  Set the new policy.
	     */
	    thread->sched_policy = new_policy;

	    /*
	     *	Set parameters from input: if they are
	     *	not supplied, use per-policy defaults.
	     *	Set per-thread limits from processor set.
	     */
	    kr = THREAD_SET_PARAM(thread, param, count,
				  TRUE,		/* new policy -
						   use pset limits */
				  TRUE);	/* error if over limits */
	    if (kr != KERN_SUCCESS) {
		/*
		 *	If parameters are invalid, reset thread
		 *	to old policy.  (Parameters were not
		 *	changed by the failed call.)
		 */
		thread->sched_policy = old_policy;
	    }
	    else {
		/*
		 *	Policy parameters are valid.
		 *	Change current policy, if thread
		 *	is not depressed.  (Depression
		 *	presumably leaves cur_policy pointing
		 *	at a policy other than sched_policy --
		 *	confirm with the depression code.)
		 */
		if (thread->cur_policy == 0 ||
		    thread->cur_policy == old_policy)
		{
		    thread->cur_policy = new_policy;
		    thread->policy_index = new_policy_index;
		}
	    }
	}

	/*
	 *	If the thread had been on a run queue,
	 *	put it back on one.
	 */
	if (runq != RUN_QUEUE_HEAD_NULL)
	    thread_setrun(thread, TRUE);

	thread_sched_unlock(thread);
	splx(s);
	pset_unlock(pset);

	return kr;
}
483
/*
 *	Set initial policy for thread, from parent task.
 *	If parent task has set no policy, set to the
 *	default policy.
 *
 *	task->sched_policy is always non-null here:
 *	task_inherit_default_policy gives every task at
 *	least the timesharing policy.
 */
void thread_set_initial_policy(
	thread_t	thread,
	task_t		task)
{
	processor_set_t	pset = thread->processor_set;
	sched_policy_t	new_policy;
	int		new_policy_index;
	spl_t		s;

	new_policy = task->sched_policy;
	new_policy_index = new_policy->rank;

	/*
	 *	If policy is not enabled on processor set,
	 *	use pset`s default policy.
	 *
	 *	NOTE(review): pset->runq is read, and
	 *	thread_set_default_policy called, without the pset
	 *	lock that the latter`s contract requires -- presumably
	 *	safe because the thread is still being created and not
	 *	yet visible; confirm against the thread_create path.
	 */
	if (pset->runq.runqs[new_policy_index] == RUN_QUEUE_NULL) {
	    thread_set_default_policy(thread);
	    return;
	}

	s = splsched();
	thread_sched_lock(thread);

	thread->sched_policy = new_policy;
	thread->cur_policy = new_policy;
	thread->policy_index = new_policy_index;

	/*
	 *	Parameters are inherited from the task; any values
	 *	over the pset limits are clipped silently rather
	 *	than failing thread creation.
	 */
	(void) THREAD_SET_PARAM(thread,
				&task->sched_data.data[0],
				task->sched_data_count,
				TRUE,	/* new policy -
					   use pset limits */
				FALSE);	/* no error if over limits */

	thread_sched_unlock(thread);
	splx(s);
}
527
/*
 *	Set default policy for thread.
 *	Use timesharing if enabled; otherwise use background.
 *
 *	Pset lock and thread_sched_lock must be held.
 */
void thread_set_default_policy(
	thread_t	thread)
{
	processor_set_t	pset = thread->processor_set;
	sched_policy_t	policy;
	run_queue_head_t runq_head;

	/*
	 *	Prefer timesharing; fall back to background,
	 *	which can never be removed from a pset
	 *	(processor_set_policy_remove refuses to).
	 */
	policy = &ts_sched_policy;
	if (pset->runq.runqs[policy->rank] == RUN_QUEUE_NULL) {
	    policy = &bg_sched_policy;
	}

	/*
	 *	Since this is called by thread_create,
	 *	we must check for no scheduling policy:
	 *	a brand-new thread is on no run queue yet.
	 */
	if (thread->sched_policy)
	    runq_head = rem_runq(thread);
	else
	    runq_head = RUN_QUEUE_HEAD_NULL;

	if (thread->cur_policy == 0 ||
	    thread->cur_policy == thread->sched_policy)
	{
	    /*
	     *	Not depressed - set current policy.
	     *	(A depressed thread keeps its current
	     *	policy; only sched_policy changes.)
	     */
	    thread->cur_policy = policy;
	    thread->policy_index = policy->rank;
	}
	thread->sched_policy = policy;

	/*
	 *	Set parameters from per-policy defaults.
	 *	Set limits from processor set.
	 */
	(void) THREAD_SET_PARAM(thread, 0, 0,
				TRUE,	/* new policy -
					   use pset limits */
				FALSE);	/* no error if over limits */

	if (runq_head != RUN_QUEUE_HEAD_NULL) {
	    thread_setrun(thread, TRUE);	/* updates priority */
	}
	else {
	    UPDATE_PRIORITY(thread);
	}
}
582
/*
 *	Check whether the thread`s policy is valid
 *	in the thread`s processor set.  If it is,
 *	enforce the processor set`s limits.  If not,
 *	reassign the thread to the default policy.
 *
 *	Pset lock and thread_sched_lock must be held.
 */
void thread_enforce_policy_limits(
	thread_t	thread,
	processor_set_t	pset)
{
	run_queue_head_t runq_head;

	/*
	 *	Policy no longer enabled on this pset:
	 *	fall back to the pset`s default policy.
	 */
	if (pset->runq.runqs[thread->policy_index] == RUN_QUEUE_NULL) {
	    thread_set_default_policy(thread);
	    return;
	}

	/* Dequeue while parameters are adjusted; re-queue below. */
	runq_head = rem_runq(thread);

	/*
	 *	Check per-thread parameters and limits
	 *	against the pset`s limit values; values
	 *	over the limits are clipped, not rejected.
	 */
	(void) THREAD_SET_PARAM(thread, 0, 0,
				FALSE,	/* same policy -
					   use thread limits */
				FALSE);	/* no error if over limits */

	if (runq_head != RUN_QUEUE_HEAD_NULL) {
	    thread_setrun(thread, TRUE);	/* updates priority */
	}
	else {
	    UPDATE_PRIORITY(thread);
	}
}
620
/*
 *	Set scheduling parameters for a thread.
 *	The parameters must be valid for the current
 *	scheduling policy.
 *
 *	If set_limit is TRUE, the thread`s per-thread
 *	limits are also lowered to the given values.
 */
kern_return_t
thread_set_policy_param(
	thread_t	thread,
	boolean_t	set_limit,
	policy_param_t	param,
	natural_t	count)
{
	run_queue_head_t runq;
	spl_t		s;
	kern_return_t	kr;

	if (thread == THREAD_NULL)
	    return KERN_INVALID_ARGUMENT;

	/*
	 *	Hold the thread lock to prevent the
	 *	thread from being reassigned.  Ordering
	 *	thread_lock -> thread_sched_lock matches the
	 *	file-wide thread_lock -> pset_lock ->
	 *	thread_sched_lock convention.
	 */
	thread_lock(thread);

	s = splsched();
	thread_sched_lock(thread);

	/*
	 *	Remove thread from its current run queue.
	 */
	runq = rem_runq(thread);

	/*
	 *	Set the new per-policy scheduling parameters,
	 *	failing if they exceed the thread`s limits.
	 */
	kr = THREAD_SET_PARAM(thread, param, count,
			      FALSE,	/* not new policy -
					   use thread limits */
			      TRUE);	/* error if over limits */

	/*
	 *	If user asked to lower the limit, set that
	 *	also.  The parameters were just validated
	 *	against the old limits, so setting the limit
	 *	to those same values is expected to succeed --
	 *	hence the assert; confirm per-policy.
	 */
	if (set_limit && kr == KERN_SUCCESS) {
	    kr = THREAD_SET_LIMIT(thread, param, count);
	    assert(kr == KERN_SUCCESS);
	}

	/*
	 *	If the thread had been on a run queue,
	 *	put it back on the run queue.
	 */
	if (runq != RUN_QUEUE_HEAD_NULL)
	    thread_setrun(thread, TRUE);

	thread_sched_unlock(thread);
	splx(s);

	thread_unlock(thread);

	return kr;
}
685
/*
 *	Set scheduling limits for a thread.
 *	The parameters must be valid for the current
 *	scheduling policy.
 *
 *	The pset control port acts as the security key:
 *	the caller must name the thread`s own pset.
 */
kern_return_t
thread_set_policy_limit(
	thread_t	thread,
	processor_set_t	pset,
	policy_param_t	limit,
	natural_t	count)
{
	run_queue_head_t runq;
	spl_t		s;
	kern_return_t	kr;

	if (thread == THREAD_NULL || pset == PROCESSOR_SET_NULL)
	    return KERN_INVALID_ARGUMENT;

	/*
	 *	Holding the pset lock keeps the thread`s
	 *	assignment stable while we work.
	 */
	pset_lock(pset);
	if (pset != thread->processor_set) {
	    pset_unlock(pset);
	    return KERN_FAILURE;
	}

	s = splsched();
	thread_sched_lock(thread);

	/*
	 *	Remove thread from its current run queue.
	 */
	runq = rem_runq(thread);

	/*
	 *	Set the new per-policy scheduling information.
	 */
	kr = THREAD_SET_LIMIT(thread, limit, count);

	/*
	 *	If the thread had been on a run queue,
	 *	put it back on the run queue.
	 */
	if (runq != RUN_QUEUE_HEAD_NULL)
	    thread_setrun(thread, TRUE);

	thread_sched_unlock(thread);
	splx(s);

	pset_unlock(pset);

	return kr;
}
738
/*
 *	Set default scheduling policy and parameters for a
 *	task, to be used when creating new threads in the
 *	task.  Optionally change the policy and parameters
 *	for all threads in the task.
 *
 *	The task must be assigned to the processor set
 *	(security key).
 *
 *	Returns KERN_FAILURE if any per-thread update fails
 *	when assign_threads is TRUE; the task default is
 *	still changed in that case.
 */
kern_return_t
task_set_default_policy(
	task_t		task,
	processor_set_t	pset,
	int		policy,
	policy_param_t	param,
	natural_t	count,
	boolean_t	assign_threads)
{
	sched_policy_t	old_policy, new_policy;
	int		new_policy_index;
	kern_return_t	kr;

	if (task == TASK_NULL || pset == PROCESSOR_SET_NULL)
	    return KERN_INVALID_ARGUMENT;

	kr = KERN_SUCCESS;

	/*
	 *	Look up the scheduling policy, by name.
	 */
	new_policy = sched_policy_lookup(policy);
	if (new_policy == 0)
	    return KERN_INVALID_ARGUMENT;

	new_policy_index = new_policy->rank;

	/*
	 *	Check whether the policy is enabled
	 *	for the processor set, and whether
	 *	this is indeed the task`s processor
	 *	set.
	 *
	 *	We must hold both the task and pset
	 *	lock to do this, since the task lock
	 *	must be acquired first.
	 */

	task_lock(task);
	pset_lock(pset);

	if (pset != task->processor_set) {
	    pset_unlock(pset);
	    task_unlock(task);
	    return KERN_FAILURE;
	}

	if (pset->runq.runqs[new_policy_index] == RUN_QUEUE_NULL) {
	    pset_unlock(pset);
	    task_unlock(task);
	    return KERN_FAILURE;		/* not enabled */
	}

	/*
	 *	Save the old policy, if we are changing
	 *	policy and the new parameters are invalid.
	 */
	old_policy = task->sched_policy;

	/*
	 *	Set the new policy.
	 */
	task->sched_policy = new_policy;

	/*
	 *	Set parameters from input: if they are
	 *	not supplied, use per-policy defaults.
	 */
	kr = TASK_SET_PARAM(task, param, count);
	if (kr != KERN_SUCCESS) {
	    /*
	     *	If parameters are invalid, reset task
	     *	to old policy.  Parameters have not been changed.
	     */
	    task->sched_policy = old_policy;
	    pset_unlock(pset);
	}
	else {
	    /*
	     *	Policy parameters are valid.
	     *	Change policy/parameters for threads,
	     *	if requested.
	     */
	    pset_unlock(pset);	/* so thread_set_policy can lock it */

	    if (assign_threads) {
		thread_t thread;

		/*
		 *	The task lock is still held while the
		 *	thread list is walked.  thread_set_policy
		 *	re-checks each thread`s pset assignment
		 *	itself, under the pset lock.
		 */
		queue_iterate(&task->thread_list, thread, thread_t,
			      thread_list)
		{
		    if (thread_set_policy(thread,
					  pset,
					  policy,
					  param,
					  count) != KERN_SUCCESS)
			kr = KERN_FAILURE;
		}
	    }
	}
	task_unlock(task);

	return kr;
}
852
853 /*
854 * Inherit task default policy from parent task,
855 * or system defaults if none.
856 */
857 void task_inherit_default_policy(
858 task_t parent_task,
859 task_t new_task)
860 {
861 if (parent_task != TASK_NULL) {
862 /*
863 * Copy from parent task
864 */
865 new_task->sched_policy = parent_task->sched_policy;
866 new_task->sched_data = parent_task->sched_data;
867 new_task->sched_data_count = parent_task->sched_data_count;
868 }
869 else {
870 /*
871 * No parent task - default to timesharing
872 */
873 new_task->sched_policy = &ts_sched_policy;
874 (void) TASK_SET_PARAM(new_task, 0, 0); /* use default */
875 }
876 }
877
Cache object: 1a19e7fcafeaaf033b28d839e09d7f7e
|