FreeBSD/Linux Kernel Cross Reference
sys/kern/machine.c
1 /*
2 * Mach Operating System
3 * Copyright (c) 1987-1993 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26 /*
27 * HISTORY
28 * $Log: machine.c,v $
29 * Revision 2.19 93/11/17 17:16:54 dbg
30 * Remove thread_freeze and thread_unfreeze (no longer needed).
31 * [93/05/27 dbg]
32 *
33 * Removed include of kern/sched.h. Added ANSI function prototypes.
34 * Added action_thread_init to initialize action queue.
35 * Use no_return type for functions that do not return.
36 * [93/05/21 dbg]
37 *
38 * Revision 2.18 93/05/15 18:54:26 mrt
39 * machparam.h -> machspl.h
40 *
41 * Revision 2.17 93/05/10 21:19:55 rvb
42 * Do not cast to (int) unless you know what that is.
43 * Use (integer_t) when in doubt.
44 * [93/04/09 af]
45 *
46 * Revision 2.16 93/01/14 17:35:17 danner
47 * Events must be of type 'vm_offset_t', not 'int'.
48 * Proper spl typing.
49 * [92/12/01 af]
50 *
51 * Fixed lock ordering problem in processor_doaction by having
52 * processor_assign wait until a previous assignment is completed.
53 * [92/10/29 dbg]
54 *
55 * Revision 2.15 92/08/03 17:38:20 jfriedl
56 * removed silly prototypes
57 * [92/08/02 jfriedl]
58 *
59 * Revision 2.14 92/05/21 17:14:50 jfriedl
60 * Made non-returning fcns volatile under gcc. Added void to fcns
61 * that still needed it.
62 * [92/05/16 jfriedl]
63 *
64 * Revision 2.13 92/03/10 16:26:47 jsb
65 * From durriya@ri.osf.org: added host_get_boot_info.
66 * [92/01/08 16:38:55 jsb]
67 *
68 * Revision 2.12 91/07/31 17:46:12 dbg
69 * Removed interrupt_stack - it's machine-dependent.
70 * [91/07/26 dbg]
71 *
72 * Revision 2.11 91/05/18 14:32:39 rpd
73 * Picked up processor_doaction fix from dlb.
74 * [91/04/08 rpd]
75 *
76 * Revision 2.10 91/05/14 16:44:36 mrt
77 * Correcting copyright
78 *
79 * Revision 2.9 91/05/08 12:47:37 dbg
80 * Add volatile declarations.
81 *
82 * Preserve the control port for a processor when shutting
83 * it down.
84 * [91/04/26 14:42:42 dbg]
85 *
86 * Revision 2.8 91/03/16 14:50:54 rpd
87 * Added action_thread_continue.
88 * [91/01/22 rpd]
89 *
90 * Revision 2.7 91/02/05 17:28:02 mrt
91 * Changed to new Mach copyright
92 * [91/02/01 16:15:17 mrt]
93 *
94 * Revision 2.6 91/01/08 15:16:29 rpd
95 * Added continuation argument to thread_block.
96 * [90/12/08 rpd]
97 *
98 * Revision 2.5 90/08/27 22:02:56 dbg
99 * Correct PMAP_DEACTIVATE calls.
100 * [90/07/18 dbg]
101 *
102 * Revision 2.4 90/06/02 14:55:18 rpd
103 * Updated to new host/processor technology.
104 * [90/03/26 22:12:59 rpd]
105 *
106 * Revision 2.3 90/01/11 11:43:37 dbg
107 * Make host_reboot return SUCCESS if Debugger returns. Remove
108 * lint.
109 * [89/12/06 dbg]
110 *
111 * Revision 2.2 89/09/25 11:00:54 rwd
112 * host_reboot can now enter debugger.
113 * [89/09/20 rwd]
114 *
115 * Revision 2.1 89/08/03 15:49:03 rwd
116 * Created.
117 *
118 * 14-Jan-89 David Golub (dbg) at Carnegie-Mellon University
119 * Changed xxx_port_allocate to port_alloc_internal. Added
120 * host_reboot stub.
121 *
122 * 6-Sep-88 David Golub (dbg) at Carnegie-Mellon University
123 * Replaced old privileged-user check in cpu_control by check for
124 * host_port. Added host_init to allocate the host port.
125 *
126 * 9-Aug-88 David Black (dlb) at Carnegie-Mellon University
127 * Removed next_thread check. Handled by idle_thread now.
128 *
129 * 26-May-88 David Black (dlb) at Carnegie-Mellon University
130 * Add interrupt protection to cpu_doshutdown.
131 *
132 * 20-May-88 David Black (dlb) at Carnegie-Mellon University
133 * Added shutdown thread. This replaces should_exit logic.
134 * Only needed for multiprocessors.
135 *
136 * 24-Mar-88 David Black (dlb) at Carnegie-Mellon University
137 * Maintain cpu state in cpu_up and cpu_down.
138 *
139 * 15-Sep-87 Michael Young (mwyoung) at Carnegie-Mellon University
140 * De-linted.
141 *
142 * 17-Jul-87 David Black (dlb) at Carnegie-Mellon University
143 * Bug fix to cpu_down - update slot structure correctly.
144 *
145 * 28-Feb-87 Avadis Tevanian (avie) at Carnegie-Mellon University
146 * Created.
147 *
148 */
149 /*
150 * File: kern/machine.c
151 * Author: Avadis Tevanian, Jr.
152 * Date: 1987
153 *
154 * Support for machine independent machine abstraction.
155 */
156
157 #include <norma_ether.h>
158 #include <cpus.h>
159 #include <mach_host.h>
160
161 #include <mach/boolean.h>
162 #include <mach/kern_return.h>
163 #include <mach/mach_types.h>
164 #include <mach/machine.h>
165 #include <mach/host_info.h>
166 #include <kern/assert.h>
167 #include <kern/counters.h>
168 #include <kern/ipc_host.h>
169 #include <kern/host.h>
170 #include <kern/lock.h>
171 #include <kern/machine.h>
172 #include <kern/processor.h>
173 #include <kern/queue.h>
174 #include <kern/strings.h>
175 #include <kern/task.h>
176 #include <kern/thread.h>
177 #include <machine/machspl.h> /* for splsched */
178 #include <sys/reboot.h>
179
180
181
182 /*
183 * Exported variables:
184 */
185
186 struct machine_info machine_info;
187 struct machine_slot machine_slot[NCPUS];
188
189 queue_head_t action_queue; /* assign/shutdown queue */
190 decl_simple_lock_data(,action_lock);
191
192 void action_thread_init(void)
193 {
194 queue_init(&action_queue); /* XXX move to machine.c */
195 simple_lock_init(&action_lock);
196 }
197
198 /*
199 * [ obsolete, exported ]
200 * xxx_host_info:
201 *
202 * Return the host_info structure.
203 */
204 kern_return_t xxx_host_info(
205 task_t task,
206 machine_info_t info)
207 {
208 #ifdef lint
209 task++;
210 #endif /* lint */
211 *info = machine_info;
212 return KERN_SUCCESS;
213 }
214
215 /*
216 * [ obsolete, exported ]
217 * xxx_slot_info:
218 *
219 * Return the slot_info structure for the specified slot.
220 */
221 kern_return_t xxx_slot_info(
222 task_t task,
223 int slot,
224 machine_slot_t info)
225 {
226 #ifdef lint
227 task++;
228 #endif /* lint */
229 if (slot < 0 || slot >= NCPUS)
230 return KERN_INVALID_ARGUMENT;
231 *info = machine_slot[slot];
232 return KERN_SUCCESS;
233 }
234
235 /*
236 * [ obsolete, exported, not implemented ]
237 * xxx_cpu_control:
238 *
239 * Support for user control of cpus. The user indicates which cpu
240 * he is interested in, and whether or not that cpu should be running.
241 */
242 kern_return_t xxx_cpu_control(
243 task_t task,
244 int cpu,
245 boolean_t runnable)
246 {
247 #ifdef lint
248 task++; cpu++; runnable++;
249 #endif /* lint */
250 return KERN_FAILURE;
251 }
252
/*
 *	cpu_up:
 *
 *	Flag specified cpu as up and running.  Called when a processor comes
 *	online: marks its machine_slot running, counts it available, adds it
 *	to the default processor set and sets its state to PROCESSOR_RUNNING.
 *
 *	Locking: the pset lock is taken before raising spl and taking the
 *	processor lock — keep this ordering in sync with other pset users.
 */
void cpu_up(
	int	cpu)
{
	register processor_t	processor;
	register spl_t	s;

	processor = cpu_to_processor(cpu);
	pset_lock(&default_pset);
	s = splsched();
	processor_lock(processor);
#if	NCPUS > 1
	/* Set up the mechanism for poking this cpu with ASTs. */
	init_ast_check(processor);
#endif	/* NCPUS > 1 */
	machine_slot[cpu].running = TRUE;
	machine_info.avail_cpus++;
	pset_add_processor(&default_pset, processor);
	processor->state = PROCESSOR_RUNNING;
	processor_unlock(processor);
	splx(s);
	pset_unlock(&default_pset);
}
280
/*
 *	cpu_down:
 *
 *	Flag specified cpu as down.  Called when a processor is about to
 *	go offline: marks its machine_slot not running, decrements the
 *	available-cpu count and sets its state to PROCESSOR_OFF_LINE.
 *
 *	No pset lock is needed here: by this point the processor has
 *	already been removed from its processor set (see comment below).
 */
void cpu_down(
	int	cpu)
{
	register processor_t	processor;
	register spl_t	s;

	s = splsched();
	processor = cpu_to_processor(cpu);
	processor_lock(processor);
	machine_slot[cpu].running = FALSE;
	machine_info.avail_cpus--;
	/*
	 *	processor has already been removed from pset.
	 */
	processor->processor_set_next = PROCESSOR_SET_NULL;
	processor->state = PROCESSOR_OFF_LINE;
	processor_unlock(processor);
	splx(s);
}
306
307 /*
308 * [ exported ]
309 * host_reboot:
310 *
311 * Reboot or halt the system,
312 * or trap into the kernel debugger (for user-level panics).
313 */
314 kern_return_t
315 host_reboot(
316 host_t host,
317 int options)
318 {
319 if (host == HOST_NULL)
320 return KERN_INVALID_HOST;
321
322 if (options & RB_DEBUGGER) {
323 Debugger("Debugger");
324 } else {
325 halt_all_cpus(!(options & RB_HALT));
326 }
327 return KERN_SUCCESS;
328 }
329
330 #if NCPUS > 1
/*
 *	processor_request_action - common internals of processor_assign
 *	and processor_shutdown.  If new_pset is null, this is
 *	a shutdown, else it's an assign and caller must donate
 *	a reference.
 *
 *	Called with the processor locked at splsched (both callers in
 *	this file hold processor_lock when calling here).  Queues the
 *	processor on the action_queue and wakes the action thread,
 *	which performs the actual work via processor_doaction.
 */
void
processor_request_action(
	processor_t	processor,
	processor_set_t	new_pset)
{
	register processor_set_t pset;

	/*
	 * Processor must be in a processor set.  Must lock its idle lock to
	 * get at processor state.
	 */
	pset = processor->processor_set;
	simple_lock(&pset->idle_lock);

	/*
	 * If the processor is dispatching, let it finish - it will set its
	 * state to running very soon.
	 */
	/* Busy-wait; the volatile cast forces a re-read each iteration. */
	while (*(volatile int *)&processor->state == PROCESSOR_DISPATCHING)
		continue;

	/*
	 * Now lock the action queue and do the dirty work.
	 */
	simple_lock(&action_lock);

	/* The cases below intentionally cascade: IDLE also does the
	 * RUNNING and ASSIGN work; RUNNING also does the ASSIGN work. */
	switch (processor->state) {
	case PROCESSOR_IDLE:
		/*
		 * Remove from idle queue.
		 */
		queue_remove(&pset->idle_queue, processor,	processor_t,
			processor_queue);
		pset->idle_count--;

		/* fall through ... */
	case PROCESSOR_RUNNING:
		/*
		 * Put it on the action queue.
		 */
		queue_enter(&action_queue, processor, processor_t,
			processor_queue);

		/* fall through ... */
	case PROCESSOR_ASSIGN:
		/*
		 * And ask the action_thread to do the work.
		 */

		if (new_pset == PROCESSOR_SET_NULL) {
			/* Shutdown request overrides any pending assign. */
			processor->state = PROCESSOR_SHUTDOWN;
		}
		else {
			/* A processor already in ASSIGN state is rejected
			 * upstream (processor_assign waits it out). */
			assert(processor->state != PROCESSOR_ASSIGN);
			processor->state = PROCESSOR_ASSIGN;
			processor->processor_set_next = new_pset;
		}
		break;

	default:
		panic("processor_request_action: bad state: %d",
			processor->state);
	}
	simple_unlock(&action_lock);
	simple_unlock(&pset->idle_lock);

	/* Kick the action thread sleeping in action_thread_continue. */
	thread_wakeup((event_t)&action_queue);
}
405
406 #if MACH_HOST
/*
 *	processor_assign() changes the processor set that a processor is
 *	assigned to.  Any previous assignment in progress is overridden.
 *	Synchronizes with assignment completion if wait is TRUE.
 *
 *	A reference on new_pset is taken here and donated to
 *	processor_request_action (or released on every failure/no-op
 *	path before returning).
 *
 *	Returns:
 *		KERN_INVALID_ARGUMENT	null args or master processor
 *		KERN_FAILURE		processor off line / shutting down
 *		KERN_SUCCESS		assignment queued (or already there)
 */
kern_return_t
processor_assign(
	processor_t	processor,
	processor_set_t	new_pset,
	boolean_t	wait)
{
	spl_t		s;

	/*
	 * Check for null arguments.
	 * XXX Can't assign master processor.
	 */
	if (processor == PROCESSOR_NULL || new_pset == PROCESSOR_SET_NULL ||
		processor == master_processor) {
			return KERN_INVALID_ARGUMENT;
	}

	/*
	 * Get pset reference to donate to processor_request_action.
	 */
	pset_reference(new_pset);

	/*
	 * Check processor status.
	 * If shutdown or being shutdown, can`t reassign.
	 * If being assigned, wait for assignment to finish.
	 */
	s = splsched();
	processor_lock(processor);

	/* Serialize behind any in-progress assignment; the action
	 * thread wakes us (thread_wakeup on the processor event) when
	 * the previous assignment completes. */
	while (processor->state == PROCESSOR_ASSIGN) {
		assert_wait((event_t) processor, TRUE);
		processor_unlock(processor);
		thread_block(CONTINUE_NULL);
		processor_lock(processor);
	}

	if (processor->state == PROCESSOR_OFF_LINE ||
		processor->state == PROCESSOR_SHUTDOWN) {
			/*
			 * Already shutdown or being shutdown -- Can't reassign.
			 */
			processor_unlock(processor);
			splx(s);
			pset_deallocate(new_pset);
			return KERN_FAILURE;
	}

	/*
	 * Avoid work if processor is already in this processor set.
	 */
	if (processor->processor_set == new_pset) {
		processor_unlock(processor);
		splx(s);
		/* clean up dangling ref */
		pset_deallocate(new_pset);
		return KERN_SUCCESS;
	}

	/*
	 * OK to start processor assignment.
	 */
	processor_request_action(processor, new_pset);

	/*
	 * Synchronization with completion.
	 */
	if (wait) {
		/* SHUTDOWN is also waited on: a shutdown request can
		 * supersede this assignment while it is in flight. */
		while (processor->state == PROCESSOR_ASSIGN ||
		    processor->state == PROCESSOR_SHUTDOWN) {
			assert_wait((event_t)processor, TRUE);
			processor_unlock(processor);
			splx(s);
			thread_block(CONTINUE_NULL);
			s = splsched();
			processor_lock(processor);
		}
	}
	processor_unlock(processor);
	splx(s);

	return KERN_SUCCESS;
}
495
496 #else /* MACH_HOST */
497
498 kern_return_t
499 processor_assign(
500 processor_t processor,
501 processor_set_t new_pset,
502 boolean_t wait)
503 {
504 #ifdef lint
505 processor++; new_pset++; wait++;
506 #endif
507 return KERN_FAILURE;
508 }
509
510 #endif /* MACH_HOST */
511
512 /*
513 * processor_shutdown() queues a processor up for shutdown.
514 * Any assignment in progress is overriden. It does not synchronize
515 * with the shutdown (can be called from interrupt level).
516 */
517 kern_return_t
518 processor_shutdown(
519 processor_t processor)
520 {
521 spl_t s;
522
523 if (processor == PROCESSOR_NULL)
524 return KERN_INVALID_ARGUMENT;
525
526 s = splsched();
527 processor_lock(processor);
528 if (processor->state == PROCESSOR_OFF_LINE ||
529 processor->state == PROCESSOR_SHUTDOWN) {
530 /*
531 * Already shutdown or being shutdown -- nothing to do.
532 */
533 processor_unlock(processor);
534 splx(s);
535 return KERN_SUCCESS;
536 }
537
538 processor_request_action(processor, PROCESSOR_SET_NULL);
539 processor_unlock(processor);
540 splx(s);
541
542 return KERN_SUCCESS;
543 }
544
/*
 *	action_thread() shuts down processors or changes their assignment.
 */
void	processor_doaction(processor_t);	/* forward */

/*
 *	action_thread_continue:
 *
 *	Body of the action thread.  Drains the action_queue, calling
 *	processor_doaction on each queued processor with the action
 *	lock dropped (processor_doaction blocks), then sleeps on the
 *	queue.  This function is passed to thread_block as its own
 *	continuation, so each wakeup restarts it from the top.
 */
no_return action_thread_continue(void)
{
	register processor_t	processor;
	register spl_t		s;

	while (TRUE) {
		s = splsched();
		simple_lock(&action_lock);
		while ( !queue_empty(&action_queue)) {
			queue_remove_first(&action_queue, processor,
				processor_t, processor_queue);
			/* Drop lock and spl: doaction binds and blocks. */
			simple_unlock(&action_lock);
			splx(s);

			processor_doaction(processor);

			s = splsched();
			simple_lock(&action_lock);
		}

		/* Queue empty: sleep until processor_request_action
		 * wakes us on &action_queue. */
		assert_wait((event_t) &action_queue, FALSE);
		simple_unlock(&action_lock);
		splx(s);
		counter(c_action_thread_block++);
		thread_block(action_thread_continue);
	}
}
577
/*
 *	action_thread:
 *
 *	Entry point for the kernel action thread; simply runs the
 *	restartable loop body, which never returns.
 */
no_return action_thread(void)
{
	action_thread_continue();
	/*NOTREACHED*/
}
583
/*
 *	processor_doaction actually does the shutdown.  The trick here
 *	is to schedule ourselves onto a cpu and then save our
 *	context back into the runqs before taking out the cpu.
 *
 *	Runs in the action thread.  Handles both reassignment
 *	(processor->state == PROCESSOR_ASSIGN, processor_set_next set)
 *	and shutdown (state == PROCESSOR_SHUTDOWN), including an assign
 *	that is overridden by a shutdown while in progress.
 */
no_return processor_doshutdown(processor_t);	/* forward */

void processor_doaction(
	register processor_t	processor)
{
	thread_t			this_thread;
	spl_t				s;
	register processor_set_t	pset;
#if	MACH_HOST
	register processor_set_t	new_pset;
	register thread_t		thread;
	register thread_t		prev_thread = THREAD_NULL;
	boolean_t			have_pset_ref = FALSE;
#endif	/* MACH_HOST */

	/*
	 *	Get onto the processor to shutdown
	 */
	this_thread = current_thread();
	thread_bind(this_thread, processor);
	thread_block(CONTINUE_NULL);

	pset = processor->processor_set;
#if	MACH_HOST
	/*
	 *	If this is the last processor in the processor_set,
	 *	stop all the threads first.
	 */
	pset_lock(pset);
	if (pset->processor_count == 1) {
		/*
		 *	First suspend all of them.
		 */
		queue_iterate(&pset->threads, thread, thread_t, pset_threads) {
			thread_hold(thread);
		}
		pset->empty = TRUE;
		/*
		 *	Now actually stop them.  Need a pset reference.
		 */
		pset->ref_count++;
		have_pset_ref = TRUE;

	    Restart_thread:
		prev_thread = THREAD_NULL;
		/* Walk the pset's threads, dropping the pset lock to
		 * wait on each one; restart if a thread escapes. */
		queue_iterate(&pset->threads, thread, thread_t, pset_threads) {
			thread_reference(thread);
			pset_unlock(pset);
			if (prev_thread != THREAD_NULL)
				thread_deallocate(prev_thread);

			/*
			 *	Only wait for threads still in the pset.
			 */
			if (thread->processor_set != pset) {
				/*
				 *	It got away - start over.
				 */
				thread_deallocate(thread);
				pset_lock(pset);
				goto Restart_thread;
			}

			(void) thread_dowait(thread, TRUE);
			prev_thread = thread;
			pset_lock(pset);
		}
	}
	pset_unlock(pset);

	/*
	 *	At this point, it is ok to remove the processor from the pset.
	 *	We can use processor->processor_set_next without locking the
	 *	processor, since it cannot change while processor->state is
	 *	PROCESSOR_ASSIGN or PROCESSOR_SHUTDOWN.
	 */

	new_pset = processor->processor_set_next;

    Restart_pset:
	if (new_pset) {
	    /*
	     *	Reassigning processor.
	     */

	    /* Take both pset locks in address order to avoid deadlock
	     * against another thread locking the same pair. */
	    if ((vm_offset_t) pset < (vm_offset_t) new_pset) {
		pset_lock(pset);
		pset_lock(new_pset);
	    }
	    else {
		pset_lock(new_pset);
		pset_lock(pset);
	    }
	    /* Target pset was destroyed: fall back to default_pset. */
	    if (!(new_pset->active)) {
		pset_unlock(new_pset);
		pset_unlock(pset);
		pset_deallocate(new_pset);
		new_pset = &default_pset;
		pset_reference(new_pset);
		goto Restart_pset;
	    }

	    /*
	     *	Handle remove last / assign first race.
	     *	Only happens if there is more than one action thread.
	     */
	    while (new_pset->empty && new_pset->processor_count > 0) {
		pset_unlock(new_pset);
		pset_unlock(pset);
		while (*(volatile boolean_t *)&new_pset->empty &&
		       *(volatile int *)&new_pset->processor_count > 0)
			/* spin */;
		goto Restart_pset;
	    }

	    /*
	     *	Lock the processor.  new_pset should not have changed.
	     */
	    s = splsched();
	    processor_lock(processor);
	    assert(processor->processor_set_next == new_pset);

	    /*
	     *	Shutdown may have been requested while this assignment
	     *	was in progress.
	     */
	    if (processor->state == PROCESSOR_SHUTDOWN) {
		processor->processor_set_next = PROCESSOR_SET_NULL;
		pset_unlock(new_pset);
		goto shutdown;	/* releases pset reference */
	    }

	    /*
	     *	Do assignment, then wakeup anyone waiting for it.
	     */
	    pset_remove_processor(pset, processor);
	    pset_unlock(pset);

	    pset_add_processor(new_pset, processor);
	    if (new_pset->empty) {
		/*
		 *	Set all the threads loose.
		 *
		 *	NOTE: this appears to violate the locking
		 *	order, since the processor lock should
		 *	be taken AFTER a thread lock.  However,
		 *	thread_setrun (called by thread_release)
		 *	only takes the processor lock if the
		 *	processor is idle.  The processor is
		 *	not idle here.
		 */
		queue_iterate(&new_pset->threads, thread, thread_t,
			      pset_threads) {
		    thread_release(thread);
		}
		new_pset->empty = FALSE;
	    }
	    processor->processor_set_next = PROCESSOR_SET_NULL;
	    processor->state = PROCESSOR_RUNNING;
	    /* Wake any processor_assign caller waiting for completion. */
	    thread_wakeup((event_t)processor);
	    processor_unlock(processor);
	    splx(s);
	    pset_unlock(new_pset);

	    /*
	     *	Clean up dangling references, and release our binding.
	     */
	    pset_deallocate(new_pset);
	    if (have_pset_ref)
		pset_deallocate(pset);
	    if (prev_thread != THREAD_NULL)
		thread_deallocate(prev_thread);

	    thread_bind(this_thread, PROCESSOR_NULL);
	    thread_block(CONTINUE_NULL);
	    return;
	}

#endif	/* MACH_HOST */

	/*
	 *	Do shutdown, make sure we live when processor dies.
	 */
	if (processor->state != PROCESSOR_SHUTDOWN) {
		panic("action_thread -- bad processor state: %d",
			processor->state);
	}

	s = splsched();
	processor_lock(processor);

    shutdown:
	pset_remove_processor(pset, processor);
	processor_unlock(processor);
	pset_unlock(pset);
	splx(s);

	/*
	 *	Clean up dangling references, and release our binding.
	 */
#if	MACH_HOST
	if (new_pset != PROCESSOR_SET_NULL)
		pset_deallocate(new_pset);
	if (have_pset_ref)
		pset_deallocate(pset);
	if (prev_thread != THREAD_NULL)
		thread_deallocate(prev_thread);
#endif	/* MACH_HOST */

	/* Unbind, then run the actual shutdown on the processor's
	 * shutdown stack; our context is saved back into the runqs. */
	thread_bind(this_thread, PROCESSOR_NULL);
	switch_to_shutdown_context(this_thread,
				   processor_doshutdown,
				   processor);

}
804
/*
 *	Actually do the processor shutdown.  This is called at splsched,
 *	running on the processor's shutdown stack.
 *
 *	Deactivates the kernel pmap for this cpu, clears its active
 *	thread, marks the cpu down, wakes anyone sleeping on the
 *	processor event (e.g. processor_assign waiters) and halts.
 */
no_return processor_doshutdown(
	register processor_t	processor)
{
	register int	cpu = processor->slot_num;

	/* Charge subsequent time to the kernel timer for this cpu. */
	timer_switch(&kernel_timer[cpu]);

	/*
	 *	Ok, now exit this cpu.
	 */
	PMAP_DEACTIVATE_KERNEL(cpu);
	active_threads[cpu] = THREAD_NULL;
	cpu_down(cpu);
	thread_wakeup((event_t)processor);
	halt_cpu();
	/*
	 *	The action thread returns to life after the call to
	 *	switch_to_shutdown_context above, on some other cpu.
	 */

	/*NOTREACHED*/
}
831 #else /* NCPUS > 1 */
832
833 kern_return_t
834 processor_assign(
835 processor_t processor,
836 processor_set_t new_pset,
837 boolean_t wait)
838 {
839 #ifdef lint
840 processor++; new_pset++; wait++;
841 #endif lint
842 return KERN_FAILURE;
843 }
844
845 #endif /* NCPUS > 1 */
846
847 /*
848 * [ exported ]
849 *
850 * Returns the boot environment string provided
851 * by the bootstrap loader, if there is one.
852 */
853 kern_return_t
854 host_get_boot_info(
855 host_t priv_host,
856 kernel_boot_info_t boot_info)
857 {
858 char *src = "";
859
860 if (priv_host == HOST_NULL) {
861 return KERN_INVALID_HOST;
862 }
863
864 #if NORMA_ETHER
865 {
866 extern char *norma_ether_boot_info(void);
867 src = norma_ether_boot_info();
868 }
869 #endif /* NORMA_ETHER */
870 #if defined(iPSC386) || defined(iPSC860)
871 {
872 extern char *ipsc_boot_environ(void);
873 src = ipsc_boot_environ();
874 }
875 #endif /* defined(iPSC386) || defined(iPSC860) */
876
877 (void) strncpy(boot_info, src, KERNEL_BOOT_INFO_MAX);
878 return KERN_SUCCESS;
879 }
Cache object: 111f381969ee4319c96df07ba328869a
|