FreeBSD/Linux Kernel Cross Reference
sys/kern/processor.c
1 /*
2 * Mach Operating System
 3  * Copyright (c) 1988-1993 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26 /*
27 * HISTORY
28 * $Log: processor.c,v $
29 * Revision 2.12 93/01/14 17:35:48 danner
 30  * 	Fixed indirection in cpu_control call.
31 * [93/01/12 danner]
32 * Fixed type of count argument to processor_control.
33 * [93/01/12 danner]
 34  * 	Explicitly include <kern/task.h>
35 * [93/01/12 dbg]
36 * 64bit cleanup.
37 * [92/12/01 af]
38 *
39 * 28-Oct-92 David Golub (dbg) at Carnegie-Mellon University
40 * Added separate pset_ref_lock, only governing reference count, to
41 * fix lock ordering to avoid deadlocks. Converted function declarations
42 * to use prototypes.
43 *
44 * Revision 2.11 92/08/03 17:38:44 jfriedl
45 * removed silly prototypes
46 * [92/08/02 jfriedl]
47 *
48 * Revision 2.10 92/05/21 17:15:08 jfriedl
 49  * 	Added void to functions that still needed it.
50 * [92/05/16 jfriedl]
51 *
52 * Revision 2.9 91/05/14 16:45:27 mrt
53 * Correcting copyright
54 *
55 * Revision 2.8 91/05/08 12:48:02 dbg
56 * Changed pset_sys_init to give each processor a control port,
57 * even when it is not running. Without a control port, there is
58 * no way to start an inactive processor.
59 * [91/04/26 14:42:59 dbg]
60 *
61 * Revision 2.7 91/02/05 17:28:27 mrt
62 * Changed to new Mach copyright
63 * [91/02/01 16:15:59 mrt]
64 *
65 * Revision 2.6 90/09/09 14:32:26 rpd
66 * Removed pset_is_garbage, do_pset_scan.
67 * Changed processor_set_create to return the actual processor set.
68 * [90/08/30 rpd]
69 *
70 * Revision 2.5 90/08/27 22:03:17 dbg
71 * Fixed processor_set_info to return the correct count.
72 * [90/08/23 rpd]
73 *
74 * Revision 2.4 90/08/27 11:52:07 dbg
75 * Fix type mismatch in processor_set_create.
76 * [90/07/18 dbg]
77 *
78 * Revision 2.3 90/06/19 22:59:17 rpd
79 * Fixed bug in processor_set_things.
80 * [90/06/14 rpd]
81 *
82 * Revision 2.2 90/06/02 14:55:32 rpd
83 * Created for new host/processor technology.
84 * [90/03/26 23:49:46 rpd]
85 *
86 * Move includes
87 * [89/08/02 dlb]
88 * Eliminate interrupt protection for pset locks.
89 * Add init for quantum_adj_lock.
90 * [89/06/14 dlb]
91 *
92 * Add processor_set_{tasks,threads}. Use common internals.
93 * Maintain all_psets_count for host_processor_sets.
94 * [89/06/09 dlb]
95 *
96 * Add processor_set_policy, sched flavor of processor_set_info.
97 * [89/05/18 dlb]
98 *
99 * Add processor_set_max_priority.
100 * [89/05/12 dlb]
101 *
102 * Add wait argument to processor_assign call.
103 * [89/05/10 dlb]
104 * Move processor reassignment to processor_set_destroy from
105 * pset_deallocate.
106 * [89/03/13 dlb]
107 *
108 * Fix interrupt protection in processor_set_{create,destroy}.
109 * [89/03/09 dlb]
110 * Remove reference count decrements from pset_remove_{task,thread}.
111 * Callers must explicitly call pset_deallocate().
112 * [89/02/17 dlb]
113 * Add load factor/average inits. Make info available to users.
114 * [89/02/09 dlb]
115 *
116 * 24-Sep-1988 David Black (dlb) at Carnegie-Mellon University
117 *
118 * Revision 2.5.2.2 90/02/22 23:20:24 rpd
119 * Changed to use kalloc/kfree instead of ipc_kernel_map.
120 * Fixed calls to convert_task_to_port/convert_thread_to_port.
121 *
122 * Revision 2.5.2.1 90/02/20 22:21:47 rpd
123 * Revised for new IPC.
124 * [90/02/19 23:36:11 rpd]
125 *
126 * Revision 2.5 89/12/22 15:52:54 rpd
127 * Changes to implement pset garbage collection:
128 * 1. Add pset_is_garbage to detect abandoned processor sets.
129 * Add do_pset_scan to look for them.
130 * 2. Pass back the actual ports from processor_set_create, so they
131 * will always have extra references; this way a newly created
132 * processor set never looks like garbage.
133 *
134 * Also optimized processor_set_destroy.
135 * [89/12/15 dlb]
136 * Put all fixed priority support code under MACH_FIXPRI switch.
137 * Add thread_change_psets for use by thread_assign.
138 * [89/11/10 dlb]
139 * Check for null processor set in pset_deallocate.
140 * [89/11/06 dlb]
141 *
142 * Revision 2.4 89/11/20 11:23:45 mja
143 * Put all fixed priority support code under MACH_FIXPRI switch.
144 * Add thread_change_psets for use by thread_assign.
145 * [89/11/10 dlb]
146 * Check for null processor set in pset_deallocate.
147 * [89/11/06 dlb]
148 *
149 * Revision 2.3 89/10/15 02:05:04 rpd
150 * Minor cleanups.
151 *
152 * Revision 2.2 89/10/11 14:20:11 dlb
153 * Add processor_set_{tasks,threads}. Use common internals.
154 * Maintain all_psets_count for host_processor_sets.
155 * Add processor_set_policy, sched flavor of processor_set_info.
156 * Add processor_set_max_priority.
157 * Remove reference count decrements from pset_remove_{task,thread}.
158 * Callers must explicitly call pset_deallocate().
159 * Add load factor/average inits. Make info available to users.
160 *
161 * Created
162 */
163
164 /*
165 * processor.c: processor and processor_set manipulation routines.
166 */
167
168 #include <cpus.h>
169 #include <mach_fixpri.h>
170 #include <mach_host.h>
171
172 #include <mach/boolean.h>
173 #include <mach/policy.h>
174 #include <mach/processor_info.h>
175 #include <mach/vm_param.h>
176 #include <kern/cpu_number.h>
177 #include <kern/lock.h>
178 #include <kern/host.h>
179 #include <kern/processor.h>
180 #include <kern/sched.h>
181 #include <kern/task.h>
182 #include <kern/thread.h>
183 #include <kern/ipc_host.h>
184 #include <ipc/ipc_port.h>
185
186 #if MACH_HOST
187 #include <kern/zalloc.h>
188 zone_t pset_zone;
189 #endif /* MACH_HOST */
190
191
192 /*
193 * Exported variables.
194 */
195 struct processor_set default_pset;
196 struct processor processor_array[NCPUS];
197
198 queue_head_t all_psets;
199 int all_psets_count;
200 decl_simple_lock_data(, all_psets_lock);
201
202 processor_t master_processor;
203 processor_t processor_ptr[NCPUS];
204
205 /*
206 * Forward declarations.
207 */
208 void quantum_set(processor_set_t);
209 void pset_init(processor_set_t);
210 void processor_init(processor_t, int);
211
212 /*
213 * Bootstrap the processor/pset system so the scheduler can run.
214 */
215 void pset_sys_bootstrap(void)
216 {
217 register int i;
218
219 pset_init(&default_pset);
220 default_pset.empty = FALSE;
221 for (i = 0; i < NCPUS; i++) {
222 /*
223 * Initialize processor data structures.
224 * Note that cpu_to_processor(i) is processor_ptr[i].
225 */
226 processor_ptr[i] = &processor_array[i];
227 processor_init(processor_ptr[i], i);
228 }
229 master_processor = cpu_to_processor(master_cpu);
230 queue_init(&all_psets);
231 simple_lock_init(&all_psets_lock);
232 queue_enter(&all_psets, &default_pset, processor_set_t, all_psets);
233 all_psets_count = 1;
234 default_pset.active = TRUE;
235 default_pset.empty = FALSE;
236
237 /*
238 * Note: the default_pset has a max_priority of BASEPRI_USER.
239 * Internal kernel threads override this in kernel_thread.
240 */
241 }
242
#if	MACH_HOST
/*
 *	pset_sys_init:
 *
 *	Rest of pset system initializations: create the zone that
 *	backs dynamically created processor sets, and hand every
 *	physically present processor an IPC control port.
 */
void pset_sys_init(void)
{
	register int		cpu;
	register processor_t	pr;

	/*
	 *	Allocate the zone for processor sets.
	 */
	pset_zone = zinit(sizeof(struct processor_set), 128*PAGE_SIZE,
			  PAGE_SIZE, FALSE, "processor sets");

	/*
	 *	Give each processor a control port.
	 *	The master processor already has one.
	 */
	for (cpu = 0; cpu < NCPUS; cpu++) {
		pr = cpu_to_processor(cpu);
		if (pr == master_processor)
			continue;		/* already has its port */
		if (!machine_slot[cpu].is_cpu)
			continue;		/* no hardware in this slot */
		ipc_processor_init(pr);
	}
}
#endif	/* MACH_HOST */
272
273 /*
274 * Initialize the given processor_set structure.
275 */
276
277 void pset_init(
278 register processor_set_t pset)
279 {
280 int i;
281
282 simple_lock_init(&pset->runq.lock);
283 pset->runq.low = 0;
284 pset->runq.count = 0;
285 for (i = 0; i < NRQS; i++) {
286 queue_init(&(pset->runq.runq[i]));
287 }
288 queue_init(&pset->idle_queue);
289 pset->idle_count = 0;
290 simple_lock_init(&pset->idle_lock);
291 queue_init(&pset->processors);
292 pset->processor_count = 0;
293 pset->empty = TRUE;
294 queue_init(&pset->tasks);
295 pset->task_count = 0;
296 queue_init(&pset->threads);
297 pset->thread_count = 0;
298 pset->ref_count = 1;
299 simple_lock_init(&pset->ref_lock);
300 queue_init(&pset->all_psets);
301 pset->active = FALSE;
302 simple_lock_init(&pset->lock);
303 pset->pset_self = IP_NULL;
304 pset->pset_name_self = IP_NULL;
305 pset->max_priority = BASEPRI_USER;
306 #if MACH_FIXPRI
307 pset->policies = POLICY_TIMESHARE;
308 #endif /* MACH_FIXPRI */
309 pset->set_quantum = min_quantum;
310 #if NCPUS > 1
311 pset->quantum_adj_index = 0;
312 simple_lock_init(&pset->quantum_adj_lock);
313
314 for (i = 0; i <= NCPUS; i++) {
315 pset->machine_quantum[i] = min_quantum;
316 }
317 #endif /* NCPUS > 1 */
318 pset->mach_factor = 0;
319 pset->load_average = 0;
320 pset->sched_load = SCHED_SCALE; /* i.e. 1 */
321 }
322
323 /*
324 * Initialize the given processor structure for the processor in
325 * the slot specified by slot_num.
326 */
327
328 void processor_init(
329 register processor_t pr,
330 int slot_num)
331 {
332 int i;
333
334 simple_lock_init(&pr->runq.lock);
335 pr->runq.low = 0;
336 pr->runq.count = 0;
337 for (i = 0; i < NRQS; i++) {
338 queue_init(&(pr->runq.runq[i]));
339 }
340 queue_init(&pr->processor_queue);
341 pr->state = PROCESSOR_OFF_LINE;
342 pr->next_thread = THREAD_NULL;
343 pr->idle_thread = THREAD_NULL;
344 pr->quantum = 0;
345 pr->first_quantum = FALSE;
346 pr->last_quantum = 0;
347 pr->processor_set = PROCESSOR_SET_NULL;
348 pr->processor_set_next = PROCESSOR_SET_NULL;
349 queue_init(&pr->processors);
350 simple_lock_init(&pr->lock);
351 pr->processor_self = IP_NULL;
352 pr->slot_num = slot_num;
353 }
354
355 /*
356 * pset_remove_processor() removes a processor from a processor_set.
357 * It can only be called on the current processor. Caller must
358 * hold lock on current processor and processor set.
359 */
360
361 void pset_remove_processor(
362 processor_set_t pset,
363 processor_t processor)
364 {
365 if (pset != processor->processor_set)
366 panic("pset_remove_processor: wrong pset");
367
368 queue_remove(&pset->processors, processor, processor_t, processors);
369 processor->processor_set = PROCESSOR_SET_NULL;
370 pset->processor_count--;
371 quantum_set(pset);
372 }
373
374 /*
375 * pset_add_processor() adds a processor to a processor_set.
376 * It can only be called on the current processor. Caller must
377 * hold lock on curent processor and on pset. No reference counting on
378 * processors. Processor reference to pset is implicit.
379 */
380
381 void pset_add_processor(
382 processor_set_t pset,
383 processor_t processor)
384 {
385 queue_enter(&pset->processors, processor, processor_t, processors);
386 processor->processor_set = pset;
387 pset->processor_count++;
388 quantum_set(pset);
389 }
390
391 /*
392 * pset_remove_task() removes a task from a processor_set.
393 * Caller must hold locks on pset and task. Pset reference count
394 * is not decremented; caller must explicitly pset_deallocate.
395 */
396
397 void pset_remove_task(
398 processor_set_t pset,
399 task_t task)
400 {
401 if (pset != task->processor_set)
402 return;
403
404 queue_remove(&pset->tasks, task, task_t, pset_tasks);
405 task->processor_set = PROCESSOR_SET_NULL;
406 pset->task_count--;
407 }
408
409 /*
410 * pset_add_task() adds a task to a processor_set.
411 * Caller must hold locks on pset and task. Pset references to
412 * tasks are implicit.
413 */
414
415 void pset_add_task(
416 processor_set_t pset,
417 task_t task)
418 {
419 queue_enter(&pset->tasks, task, task_t, pset_tasks);
420 task->processor_set = pset;
421 pset->task_count++;
422 }
423
424 /*
425 * pset_remove_thread() removes a thread from a processor_set.
426 * Caller must hold locks on pset and thread. Pset reference count
427 * is not decremented; caller must explicitly pset_deallocate.
428 */
429
430 void pset_remove_thread(
431 processor_set_t pset,
432 thread_t thread)
433 {
434 queue_remove(&pset->threads, thread, thread_t, pset_threads);
435 thread->processor_set = PROCESSOR_SET_NULL;
436 pset->thread_count--;
437 }
438
439 /*
440 * pset_add_thread() adds a thread to a processor_set.
441 * Caller must hold locks on pset and thread. Pset references to
442 * threads are implicit.
443 */
444
445 void pset_add_thread(
446 processor_set_t pset,
447 thread_t thread)
448 {
449 queue_enter(&pset->threads, thread, thread_t, pset_threads);
450 thread->processor_set = pset;
451 pset->thread_count++;
452 }
453
454 /*
455 * thread_change_psets() changes the pset of a thread. Caller must
456 * hold locks on both psets and thread. The old pset must be
457 * explicitly pset_deallocat()'ed by caller.
458 */
459
460 void thread_change_psets(
461 thread_t thread,
462 processor_set_t old_pset,
463 processor_set_t new_pset)
464 {
465 queue_remove(&old_pset->threads, thread, thread_t, pset_threads);
466 old_pset->thread_count--;
467 queue_enter(&new_pset->threads, thread, thread_t, pset_threads);
468 thread->processor_set = new_pset;
469 new_pset->thread_count++;
470 }
471
/*
 *	pset_deallocate:
 *
 *	Remove one reference to the processor set.  Destroy processor_set
 *	if this was the last reference.
 */
void pset_deallocate(
	processor_set_t	pset)
{
	if (pset == PROCESSOR_SET_NULL)
		return;

	pset_ref_lock(pset);
	if (--pset->ref_count > 0) {
		pset_ref_unlock(pset);
		return;
	}
#if	!MACH_HOST
	/*
	 *	Without MACH_HOST the only pset is default_pset, whose
	 *	reference count must never reach zero.
	 */
	panic("pset_deallocate: default_pset destroyed");
#endif	/* !MACH_HOST */

#if	MACH_HOST
	/*
	 *	Reference count is zero, however the all_psets list
	 *	holds an implicit reference and may make new ones.
	 *	Its lock also dominates the pset lock.  To check for this,
	 *	temporarily restore one reference, and then lock the
	 *	other structures in the right order.
	 */
	pset->ref_count = 1;
	pset_ref_unlock(pset);

	/* all_psets_lock before pset_ref_lock: required lock order */
	simple_lock(&all_psets_lock);
	pset_ref_lock(pset);
	if (--pset->ref_count > 0) {
		/*
		 *	Made an extra reference.
		 */
		pset_ref_unlock(pset);
		simple_unlock(&all_psets_lock);
		return;
	}

	/*
	 *	Ok to destroy pset.  Make a few paranoia checks.
	 */

	if ((pset == &default_pset) || (pset->thread_count > 0) ||
	    (pset->task_count > 0) || pset->processor_count > 0) {
		panic("pset_deallocate: destroy default or active pset");
	}
	/*
	 *	Remove from all_psets queue.
	 */
	queue_remove(&all_psets, pset, processor_set_t, all_psets);
	all_psets_count--;

	pset_ref_unlock(pset);
	simple_unlock(&all_psets_lock);

	/*
	 *	That's it, free data structure.
	 */
	zfree(pset_zone, (vm_offset_t)pset);
#endif	/* MACH_HOST */
}
538
/*
 *	pset_reference:
 *
 *	Add one reference to the processor set.  Caller need not hold
 *	any pset lock; the reference lock alone protects ref_count.
 */
void pset_reference(
	processor_set_t	pset)
{
	pset_ref_lock(pset);
	pset->ref_count++;
	pset_ref_unlock(pset);
}
551
552 kern_return_t
553 processor_info(
554 register processor_t processor,
555 int flavor,
556 host_t *host,
557 processor_info_t info,
558 natural_t *count)
559 {
560 register int slot_num, state;
561 register processor_basic_info_t basic_info;
562
563 if (processor == PROCESSOR_NULL)
564 return KERN_INVALID_ARGUMENT;
565
566 if (flavor != PROCESSOR_BASIC_INFO ||
567 *count < PROCESSOR_BASIC_INFO_COUNT)
568 return KERN_FAILURE;
569
570 basic_info = (processor_basic_info_t) info;
571
572 slot_num = processor->slot_num;
573 basic_info->cpu_type = machine_slot[slot_num].cpu_type;
574 basic_info->cpu_subtype = machine_slot[slot_num].cpu_subtype;
575 state = processor->state;
576 if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
577 basic_info->running = FALSE;
578 else
579 basic_info->running = TRUE;
580 basic_info->slot_num = slot_num;
581 if (processor == master_processor)
582 basic_info->is_master = TRUE;
583 else
584 basic_info->is_master = FALSE;
585
586 *count = PROCESSOR_BASIC_INFO_COUNT;
587 *host = &realhost;
588 return KERN_SUCCESS;
589 }
590
/*
 *	processor_start:
 *
 *	Start (power up / bring on line) the given processor via the
 *	machine-dependent cpu_start.  Always fails on a uniprocessor
 *	configuration, where there is no other cpu to start.
 */
kern_return_t processor_start(
	processor_t	processor)
{
	if (processor == PROCESSOR_NULL)
		return KERN_INVALID_ARGUMENT;
#if	NCPUS > 1
	return cpu_start(processor->slot_num);
#else	/* NCPUS > 1 */
	return KERN_FAILURE;
#endif	/* NCPUS > 1 */
}
602
/*
 *	processor_exit:
 *
 *	Take the given processor off line via processor_shutdown.
 *	Always fails on a uniprocessor configuration, where shutting
 *	down the only cpu makes no sense.
 */
kern_return_t processor_exit(
	processor_t	processor)
{
	if (processor == PROCESSOR_NULL)
		return KERN_INVALID_ARGUMENT;

#if	NCPUS > 1
	return processor_shutdown(processor);
#else	/* NCPUS > 1 */
	return KERN_FAILURE;
#endif	/* NCPUS > 1 */
}
615
/*
 *	processor_control:
 *
 *	Pass machine-dependent control information (count ints worth)
 *	to the cpu occupying this processor's slot.  The info buffer is
 *	interpreted entirely by the machine-dependent cpu_control.
 *	Always fails on a uniprocessor configuration.
 */
kern_return_t
processor_control(
	processor_t	processor,
	processor_info_t	info,
	natural_t	count)
{
	if (processor == PROCESSOR_NULL)
		return KERN_INVALID_ARGUMENT;

#if	NCPUS > 1
	return cpu_control(processor->slot_num, (int *)info, count);
#else	/* NCPUS > 1 */
	return KERN_FAILURE;
#endif	/* NCPUS > 1 */
}
631
/*
 *	Precalculate the appropriate system quanta based on load.  The
 *	index into machine_quantum is the number of threads on the
 *	processor set queue.  It is limited to the number of processors in
 *	the set.
 */

void quantum_set(
	processor_set_t	pset)
{
#if	NCPUS > 1
	register int	i,ncpus;

	ncpus = pset->processor_count;

	/*
	 *	machine_quantum[i] is the quantum to use when i runnable
	 *	threads share ncpus processors: (min_quantum * ncpus) / i,
	 *	with i/2 added before dividing so the result rounds to
	 *	the nearest tick instead of truncating.
	 */
	for ( i=1 ; i <= ncpus ; i++) {
		pset->machine_quantum[i] =
			((min_quantum * ncpus) + (i/2)) / i ;
	}
	/* Index 0 (empty run queue) gets double the 1-thread quantum. */
	pset->machine_quantum[0] = 2 * pset->machine_quantum[1];

	/* Pick the current quantum by run-queue depth, capped at ncpus. */
	i = ((pset->runq.count > pset->processor_count) ?
	      pset->processor_count : pset->runq.count);
	pset->set_quantum = pset->machine_quantum[i];
#else	/* NCPUS > 1 */
	/* Uniprocessor: single fixed quantum for the default set. */
	default_pset.set_quantum = min_quantum;
#endif	/* NCPUS > 1 */
}
660
#if	MACH_HOST
/*
 *	processor_set_create:
 *
 *	Create and return a new processor set.  Both out parameters
 *	(the set itself and its name port alias) receive the same pset,
 *	each holding its own reference.
 */

kern_return_t
processor_set_create(
	host_t	host,
	processor_set_t	*new_set,
	processor_set_t	*new_name)
{
	processor_set_t	pset;

	if (host == HOST_NULL)
		return KERN_INVALID_ARGUMENT;

	/* allocate and initialize; pset_init leaves ref_count == 1 */
	pset = (processor_set_t) zalloc(pset_zone);
	pset_init(pset);
	pset_reference(pset);	/* for new_set out argument */
	pset_reference(pset);	/* for new_name out argument */
	ipc_pset_init(pset);	/* create ports, not yet usable */
	pset->active = TRUE;

	/* publish on the global list before enabling IPC access */
	simple_lock(&all_psets_lock);
	queue_enter(&all_psets, pset, processor_set_t, all_psets);
	all_psets_count++;
	simple_unlock(&all_psets_lock);

	ipc_pset_enable(pset);	/* now requests may arrive */

	*new_set = pset;
	*new_name = pset;
	return KERN_SUCCESS;
}
697
/*
 *	processor_set_destroy:
 *
 *	destroy a processor set.  Any tasks, threads or processors
 *	currently assigned to it are reassigned to the default pset.
 */
kern_return_t processor_set_destroy(
	processor_set_t	pset)
{
	register queue_entry_t	elem;
	register queue_head_t	*list;

	if (pset == PROCESSOR_SET_NULL || pset == &default_pset)
		return KERN_INVALID_ARGUMENT;

	/*
	 *	Handle multiple termination race.  First one through sets
	 *	active to FALSE and disables ipc access.
	 */
	pset_lock(pset);
	if (!(pset->active)) {
		pset_unlock(pset);
		return KERN_FAILURE;
	}

	pset->active = FALSE;
	ipc_pset_disable(pset);


	/*
	 *	Now reassign everything in this set to the default set.
	 *	Each reassignment must drop the pset lock (task_assign /
	 *	thread_assign / processor_assign take their own locks),
	 *	so the loops re-check queue_empty after relocking; a
	 *	reference pins each task/thread across the unlocked window.
	 */

	if (pset->task_count > 0) {
		list = &pset->tasks;
		while (!queue_empty(list)) {
			elem = queue_first(list);
			task_reference((task_t) elem);
			pset_unlock(pset);
			task_assign((task_t) elem, &default_pset, FALSE);
			task_deallocate((task_t) elem);
			pset_lock(pset);
		}
	}

	if (pset->thread_count > 0) {
		list = &pset->threads;
		while (!queue_empty(list)) {
			elem = queue_first(list);
			thread_reference((thread_t) elem);
			pset_unlock(pset);
			thread_assign((thread_t) elem, &default_pset);
			thread_deallocate((thread_t) elem);
			pset_lock(pset);
		}
	}

	if (pset->processor_count > 0) {
		list = &pset->processors;
		while(!queue_empty(list)) {
			elem = queue_first(list);
			pset_unlock(pset);
			/* TRUE: wait for the reassignment to complete */
			processor_assign((processor_t) elem, &default_pset, TRUE);
			pset_lock(pset);
		}
	}

	pset_unlock(pset);

	/*
	 *	Destroy ipc state.
	 */
	ipc_pset_terminate(pset);

	/*
	 *	Deallocate pset's reference to itself.
	 */
	pset_deallocate(pset);
	return KERN_SUCCESS;
}
778
#else	/* MACH_HOST */

/*
 *	processor_set_create (MACH_HOST disabled):
 *
 *	Without MACH_HOST the only pset is default_pset and new sets
 *	cannot be created; always fails.  The lint block merely touches
 *	the otherwise-unused parameters.
 */
kern_return_t
processor_set_create(
	host_t	host,
	processor_set_t	*new_set,
	processor_set_t	*new_name)
{
#ifdef	lint
	host++; new_set++; new_name++;
#endif	/* lint */
	return KERN_FAILURE;
}
792
793 kern_return_t processor_set_destroy(
794 processor_set_t pset)
795 {
796 #ifdef lint
797 pset++;
798 #endif /* lint */
799 return KERN_FAILURE;
800 }
801
802 #endif MACH_HOST
803
804 kern_return_t
805 processor_get_assignment(
806 processor_t processor,
807 processor_set_t *pset)
808 {
809 int state;
810
811 state = processor->state;
812 if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
813 return KERN_FAILURE;
814
815 *pset = processor->processor_set;
816 pset_reference(*pset);
817 return KERN_SUCCESS;
818 }
819
820 kern_return_t
821 processor_set_info(
822 processor_set_t pset,
823 int flavor,
824 host_t *host,
825 processor_set_info_t info,
826 natural_t *count)
827 {
828 if (pset == PROCESSOR_SET_NULL)
829 return KERN_INVALID_ARGUMENT;
830
831 if (flavor == PROCESSOR_SET_BASIC_INFO) {
832 register processor_set_basic_info_t basic_info;
833
834 if (*count < PROCESSOR_SET_BASIC_INFO_COUNT)
835 return KERN_FAILURE;
836
837 basic_info = (processor_set_basic_info_t) info;
838
839 pset_lock(pset);
840 basic_info->processor_count = pset->processor_count;
841 basic_info->task_count = pset->task_count;
842 basic_info->thread_count = pset->thread_count;
843 basic_info->mach_factor = pset->mach_factor;
844 basic_info->load_average = pset->load_average;
845 pset_unlock(pset);
846
847 *count = PROCESSOR_SET_BASIC_INFO_COUNT;
848 *host = &realhost;
849 return KERN_SUCCESS;
850 }
851 else if (flavor == PROCESSOR_SET_SCHED_INFO) {
852 register processor_set_sched_info_t sched_info;
853
854 if (*count < PROCESSOR_SET_SCHED_INFO_COUNT)
855 return KERN_FAILURE;
856
857 sched_info = (processor_set_sched_info_t) info;
858
859 pset_lock(pset);
860 #if MACH_FIXPRI
861 sched_info->policies = pset->policies;
862 #else /* MACH_FIXPRI */
863 sched_info->policies = POLICY_TIMESHARE;
864 #endif /* MACH_FIXPRI */
865 sched_info->max_priority = pset->max_priority;
866 pset_unlock(pset);
867
868 *count = PROCESSOR_SET_SCHED_INFO_COUNT;
869 *host = &realhost;
870 return KERN_SUCCESS;
871 }
872
873 *host = HOST_NULL;
874 return KERN_INVALID_ARGUMENT;
875 }
876
/*
 *	processor_set_max_priority:
 *
 *	Specify max priority permitted on processor set.  This affects
 *	newly created and assigned threads.  Optionally change existing
 *	ones.
 */
kern_return_t
processor_set_max_priority(
	processor_set_t	pset,
	int		max_priority,
	boolean_t	change_threads)
{
	if (pset == PROCESSOR_SET_NULL || invalid_pri(max_priority))
		return KERN_INVALID_ARGUMENT;

	pset_lock(pset);
	pset->max_priority = max_priority;

	if (change_threads) {
		register queue_head_t *list;
		register thread_t	thread;

		/*
		 *	Walk the set's threads under the pset lock.
		 *	Note: "<" here means numerically lower, i.e.
		 *	a BETTER priority than the new maximum; those
		 *	threads are clamped down to max_priority.
		 */
		list = &pset->threads;
		queue_iterate(list, thread, thread_t, pset_threads) {
			if (thread->max_priority < max_priority)
				thread_max_priority(thread, pset, max_priority);
		}
	}

	pset_unlock(pset);

	return KERN_SUCCESS;
}
911
912 /*
913 * processor_set_policy_enable:
914 *
915 * Allow indicated policy on processor set.
916 */
917
918 kern_return_t
919 processor_set_policy_enable(
920 processor_set_t pset,
921 int policy)
922 {
923 if ((pset == PROCESSOR_SET_NULL) || invalid_policy(policy))
924 return KERN_INVALID_ARGUMENT;
925
926 #if MACH_FIXPRI
927 pset_lock(pset);
928 pset->policies |= policy;
929 pset_unlock(pset);
930
931 return KERN_SUCCESS;
932 #else /* MACH_FIXPRI */
933 if (policy == POLICY_TIMESHARE)
934 return KERN_SUCCESS;
935 else
936 return KERN_FAILURE;
937 #endif /* MACH_FIXPRI */
938 }
939
/*
 *	processor_set_policy_disable:
 *
 *	Forbid indicated policy on processor set.  Time sharing cannot
 *	be forbidden.
 */

kern_return_t
processor_set_policy_disable(
	processor_set_t	pset,
	int		policy,
	boolean_t	change_threads)
{
	if ((pset == PROCESSOR_SET_NULL) || policy == POLICY_TIMESHARE ||
	    invalid_policy(policy))
		return KERN_INVALID_ARGUMENT;

#if	MACH_FIXPRI
	pset_lock(pset);

	/*
	 *	Check if policy enabled.  Disable if so, then handle
	 *	change_threads.
	 */
	if (pset->policies & policy) {
		pset->policies &= ~policy;

		if (change_threads) {
			register queue_head_t	*list;
			register thread_t	thread;

			/*
			 *	Demote every thread still using the
			 *	disabled policy back to timesharing.
			 */
			list = &pset->threads;
			queue_iterate(list, thread, thread_t, pset_threads) {
				if (thread->policy == policy)
					thread_policy(thread, POLICY_TIMESHARE, 0);
			}
		}
	}
	pset_unlock(pset);
#endif	/* MACH_FIXPRI */

	/* success even if the policy was not enabled to begin with */
	return KERN_SUCCESS;
}
983
#define THING_TASK	0
#define THING_THREAD	1

/*
 *	processor_set_things:
 *
 *	Common internals for processor_set_{threads,tasks}.
 *	Snapshots the set's task or thread list into a kalloc'd array,
 *	takes a reference on each element, then converts the entries
 *	in place to ports for the MIG reply.  The caller receives the
 *	array in *thing_list and its length in *count.
 */
kern_return_t
processor_set_things(
	processor_set_t	pset,
	mach_port_t	**thing_list,
	natural_t	*count,
	int		type)
{
	unsigned int actual;	/* this many things */
	int i;

	vm_size_t size, size_needed;
	vm_offset_t addr;

	if (pset == PROCESSOR_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	size = 0; addr = 0;

	/*
	 *	The count can change while we allocate (the pset is
	 *	unlocked during kalloc), so loop: size the buffer,
	 *	allocate, relock, and re-check until it is big enough.
	 */
	for (;;) {
		pset_lock(pset);
		if (!pset->active) {
			pset_unlock(pset);
			return KERN_FAILURE;
		}

		if (type == THING_TASK)
			actual = pset->task_count;
		else
			actual = pset->thread_count;

		/* do we have the memory we need? */

		size_needed = actual * sizeof(mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock the pset and allocate more memory */
		pset_unlock(pset);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return KERN_RESOURCE_SHORTAGE;
	}

	/* OK, have memory and the processor_set is locked & active */

	switch (type) {
	    case THING_TASK: {
		task_t *tasks = (task_t *) addr;
		task_t task;

		for (i = 0, task = (task_t) queue_first(&pset->tasks);
		     i < actual;
		     i++, task = (task_t) queue_next(&task->pset_tasks)) {
			/* take ref for convert_task_to_port */
			task_reference(task);
			tasks[i] = task;
		}
		assert(queue_end(&pset->tasks, (queue_entry_t) task));
		break;
	    }

	    case THING_THREAD: {
		thread_t *threads = (thread_t *) addr;
		thread_t thread;

		for (i = 0, thread = (thread_t) queue_first(&pset->threads);
		     i < actual;
		     i++,
		     thread = (thread_t) queue_next(&thread->pset_threads)) {
			/* take ref for convert_thread_to_port */
			thread_reference(thread);
			threads[i] = thread;
		}
		assert(queue_end(&pset->threads, (queue_entry_t) thread));
		break;
	    }
	}

	/* can unlock processor set now that we have the task/thread refs */
	pset_unlock(pset);

	if (actual == 0) {
		/* no things, so return null pointer and deallocate memory */
		*thing_list = 0;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	} else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			vm_offset_t newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				/*
				 *	Out of memory for the right-sized
				 *	buffer: drop every reference taken
				 *	above before failing.
				 */
				switch (type) {
				    case THING_TASK: {
					task_t *tasks = (task_t *) addr;

					for (i = 0; i < actual; i++)
						task_deallocate(tasks[i]);
					break;
				    }

				    case THING_THREAD: {
					thread_t *threads = (thread_t *) addr;

					for (i = 0; i < actual; i++)
						thread_deallocate(threads[i]);
					break;
				    }
				}
				kfree(addr, size);
				return KERN_RESOURCE_SHORTAGE;
			}

			bcopy((char *) addr, (char *) newaddr, size_needed);
			kfree(addr, size);
			addr = newaddr;
		}

		*thing_list = (mach_port_t *) addr;
		*count = actual;

		/* do the conversion that Mig should handle */

		switch (type) {
		    case THING_TASK: {
			task_t *tasks = (task_t *) addr;

			/* convert_task_to_port consumes the task ref */
			for (i = 0; i < actual; i++)
				((mach_port_t *) tasks)[i] =
				    (mach_port_t)convert_task_to_port(tasks[i]);
			break;
		    }

		    case THING_THREAD: {
			thread_t *threads = (thread_t *) addr;

			/* convert_thread_to_port consumes the thread ref */
			for (i = 0; i < actual; i++)
				((mach_port_t *) threads)[i] =
				    (mach_port_t)convert_thread_to_port(threads[i]);
			break;
		    }
		}
	}

	return KERN_SUCCESS;
}
1149
1150
/*
 *	processor_set_tasks:
 *
 *	List all tasks in the processor set.  Thin MIG wrapper around
 *	processor_set_things with type THING_TASK; see that function
 *	for allocation and reference semantics.
 */
kern_return_t
processor_set_tasks(
	processor_set_t	pset,
	task_array_t	*task_list,
	natural_t	*count)
{
	return processor_set_things(pset, task_list, count, THING_TASK);
}
1164
/*
 *	processor_set_threads:
 *
 *	List all threads in the processor set.  Thin MIG wrapper around
 *	processor_set_things with type THING_THREAD; see that function
 *	for allocation and reference semantics.
 */
kern_return_t
processor_set_threads(
	processor_set_t	pset,
	thread_array_t	*thread_list,
	natural_t	*count)
{
	return processor_set_things(pset, thread_list, count, THING_THREAD);
}
Cache object: a009fd759e6bcf8f228287e18a14993e
|