The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/kern/machine.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /* 
    2  * Mach Operating System
    3  * Copyright (c) 1993-1987 Carnegie Mellon University
    4  * All Rights Reserved.
    5  * 
    6  * Permission to use, copy, modify and distribute this software and its
    7  * documentation is hereby granted, provided that both the copyright
    8  * notice and this permission notice appear in all copies of the
    9  * software, derivative works or modified versions, and any portions
   10  * thereof, and that both notices appear in supporting documentation.
   11  * 
   12  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
   13  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
   14  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
   15  * 
   16  * Carnegie Mellon requests users of this software to return to
   17  * 
   18  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
   19  *  School of Computer Science
   20  *  Carnegie Mellon University
   21  *  Pittsburgh PA 15213-3890
   22  * 
   23  * any improvements or extensions that they make and grant Carnegie Mellon
   24  * the rights to redistribute these changes.
   25  */
   26 /*
   27  * HISTORY
   28  * $Log:        machine.c,v $
   29  * Revision 2.18  93/05/15  18:54:26  mrt
   30  *      machparam.h -> machspl.h
   31  * 
   32  * Revision 2.17  93/05/10  21:19:55  rvb
   33  *      Do not cast to (int) unless you know what that is.
   34  *      Use (integer_t) when in doubt.
   35  *      [93/04/09            af]
   36  * 
   37  * Revision 2.16  93/01/14  17:35:17  danner
   38  *      Fixed lock ordering problem in processor_doaction by having
   39  *      processor_assign wait until a previous assignment is completed.
   40  *      [92/10/29            dbg]
   41  *      Events must be of type 'vm_offset_t', not 'int'.
   42  *      Proper spl typing.
   43  *      [92/12/01            af]
   44  * 
   45  *      Fixed lock ordering problem in processor_doaction by having
   46  *      processor_assign wait until a previous assignment is completed.
   47  *      [92/10/29            dbg]
   48  * 
   49  * Revision 2.15  92/08/03  17:38:20  jfriedl
   50  *      removed silly prototypes
   51  *      [92/08/02            jfriedl]
   52  * 
   53  * Revision 2.14  92/05/21  17:14:50  jfriedl
   54  *      Made non-returning fcns volatile under gcc. Added void to fcns
   55  *      that still needed it.
   56  *      [92/05/16            jfriedl]
   57  * 
   58  * Revision 2.13  92/03/10  16:26:47  jsb
   59  *      From durriya@ri.osf.org: added host_get_boot_info.
   60  *      [92/01/08  16:38:55  jsb]
   61  * 
   62  * Revision 2.12  91/07/31  17:46:12  dbg
   63  *      Removed interrupt_stack - it's machine-dependent.
   64  *      [91/07/26            dbg]
   65  * 
   66  * Revision 2.11  91/05/18  14:32:39  rpd
   67  *      Picked up processor_doaction fix from dlb.
   68  *      [91/04/08            rpd]
   69  * 
   70  * Revision 2.10  91/05/14  16:44:36  mrt
   71  *      Correcting copyright
   72  * 
   73  * Revision 2.9  91/05/08  12:47:37  dbg
   74  *      Add volatile declarations.
   75  * 
   76  *      Preserve the control port for a processor when shutting
   77  *      it down.
   78  *      [91/04/26  14:42:42  dbg]
   79  * 
   80  * Revision 2.8  91/03/16  14:50:54  rpd
   81  *      Added action_thread_continue.
   82  *      [91/01/22            rpd]
   83  * 
   84  * Revision 2.7  91/02/05  17:28:02  mrt
   85  *      Changed to new Mach copyright
   86  *      [91/02/01  16:15:17  mrt]
   87  * 
   88  * Revision 2.6  91/01/08  15:16:29  rpd
   89  *      Added continuation argument to thread_block.
   90  *      [90/12/08            rpd]
   91  * 
   92  * Revision 2.5  90/08/27  22:02:56  dbg
   93  *      Correct PMAP_DEACTIVATE calls.
   94  *      [90/07/18            dbg]
   95  * 
   96  * Revision 2.4  90/06/02  14:55:18  rpd
   97  *      Updated to new host/processor technology.
   98  *      [90/03/26  22:12:59  rpd]
   99  * 
  100  * Revision 2.3  90/01/11  11:43:37  dbg
  101  *      Make host_reboot return SUCCESS if Debugger returns.  Remove
  102  *      lint.
  103  *      [89/12/06            dbg]
  104  * 
  105  * Revision 2.2  89/09/25  11:00:54  rwd
  106  *      host_reboot can now enter debugger.
  107  *      [89/09/20            rwd]
  108  * 
  109  * Revision 2.1  89/08/03  15:49:03  rwd
  110  * Created.
  111  * 
  112  * 14-Jan-89  David Golub (dbg) at Carnegie-Mellon University
  113  *      Changed xxx_port_allocate to port_alloc_internal.  Added
  114  *      host_reboot stub.
  115  *
  116  *  6-Sep-88  David Golub (dbg) at Carnegie-Mellon University
  117  *      Replaced old privileged-user check in cpu_control by check for
  118  *      host_port.  Added host_init to allocate the host port.
  119  *
  120  *  9-Aug-88  David Black (dlb) at Carnegie-Mellon University
  121  *      Removed next_thread check.  Handled by idle_thread now.
  122  *
  123  * 26-May-88  David Black (dlb) at Carnegie-Mellon University
  124  *      Add interrupt protection to cpu_doshutdown.
  125  *
  126  * 20-May-88  David Black (dlb) at Carnegie-Mellon University
  127  *      Added shutdown thread.  This replaces should_exit logic.
  128  *      Only needed for multiprocessors.
  129  *
  130  * 24-Mar-88  David Black (dlb) at Carnegie-Mellon University
  131  *      Maintain cpu state in cpu_up and cpu_down.
  132  *
  133  * 15-Sep-87  Michael Young (mwyoung) at Carnegie-Mellon University
  134  *      De-linted.
  135  *
  136  * 17-Jul-87  David Black (dlb) at Carnegie-Mellon University
  137  *      Bug fix to cpu_down - update slot structure correctly.
  138  *
  139  * 28-Feb-87  Avadis Tevanian (avie) at Carnegie-Mellon University
  140  *      Created.
  141  *
  142  */
  143 /*
  144  *      File:   kern/machine.c
  145  *      Author: Avadis Tevanian, Jr.
  146  *      Date:   1987
  147  *
  148  *      Support for machine independent machine abstraction.
  149  */
  150 
  151 #include <norma_ether.h>
  152 #include <cpus.h>
  153 #include <mach_host.h>
  154 
  155 #include <mach/boolean.h>
  156 #include <mach/kern_return.h>
  157 #include <mach/mach_types.h>
  158 #include <mach/machine.h>
  159 #include <mach/host_info.h>
  160 #include <kern/counters.h>
  161 #include <kern/ipc_host.h>
  162 #include <kern/host.h>
  163 #include <kern/lock.h>
  164 #include <kern/processor.h>
  165 #include <kern/queue.h>
  166 #include <kern/sched.h>
  167 #include <kern/task.h>
  168 #include <kern/thread.h>
  169 #include <machine/machspl.h>    /* for splsched */
  170 #include <sys/reboot.h>
  171 
  172 
  173 
/*
 *	Exported variables:
 */

struct machine_info	machine_info;		/* machine-wide cpu info */
struct machine_slot	machine_slot[NCPUS];	/* per-cpu slot state, indexed by cpu number */

queue_head_t	action_queue;	/* assign/shutdown queue */
decl_simple_lock_data(,action_lock);	/* protects action_queue */
  183 
  184 /*
  185  *      xxx_host_info:
  186  *
  187  *      Return the host_info structure.  This routine is exported to the
  188  *      user level.
  189  */
  190 kern_return_t xxx_host_info(task, info)
  191         task_t          task;
  192         machine_info_t  info;
  193 {
  194 #ifdef  lint
  195         task++;
  196 #endif  /* lint */
  197         *info = machine_info;
  198         return(KERN_SUCCESS);
  199 }
  200 
  201 /*
  202  *      xxx_slot_info:
  203  *
  204  *      Return the slot_info structure for the specified slot.  This routine
  205  *      is exported to the user level.
  206  */
  207 kern_return_t xxx_slot_info(task, slot, info)
  208         task_t          task;
  209         int             slot;
  210         machine_slot_t  info;
  211 {
  212 #ifdef  lint
  213         task++;
  214 #endif  /* lint */
  215         if ((slot < 0) || (slot >= NCPUS))
  216                 return(KERN_INVALID_ARGUMENT);
  217         *info = machine_slot[slot];
  218         return(KERN_SUCCESS);
  219 }
  220 
  221 /*
  222  *      xxx_cpu_control:
  223  *
  224  *      Support for user control of cpus.  The user indicates which cpu
  225  *      he is interested in, and whether or not that cpu should be running.
  226  */
  227 kern_return_t xxx_cpu_control(task, cpu, runnable)
  228         task_t          task;
  229         int             cpu;
  230         boolean_t       runnable;
  231 {
  232 #ifdef  lint
  233         task++; cpu++; runnable++;
  234 #endif  /* lint */
  235         return(KERN_FAILURE);
  236 }
  237 
/*
 *	cpu_up:
 *
 *	Flag specified cpu as up and running.  Called when a processor comes
 *	online.
 */
void cpu_up(cpu)
	int	cpu;
{
	register struct machine_slot	*ms;
	register processor_t	processor;
	register spl_t s;

	processor = cpu_to_processor(cpu);
	/* Lock ordering: pset lock, then spl, then processor lock. */
	pset_lock(&default_pset);
	s = splsched();
	processor_lock(processor);
#if	NCPUS > 1
	init_ast_check(processor);
#endif	/* NCPUS > 1 */
	ms = &machine_slot[cpu];
	ms->running = TRUE;
	machine_info.avail_cpus++;
	/* Make the processor schedulable as a member of the default pset. */
	pset_add_processor(&default_pset, processor);
	processor->state = PROCESSOR_RUNNING;
	processor_unlock(processor);
	splx(s);
	pset_unlock(&default_pset);
}
  267 
/*
 *	cpu_down:
 *
 *	Flag specified cpu as down.  Called when a processor is about to
 *	go offline.
 */
void cpu_down(cpu)
	int	cpu;
{
	register struct machine_slot	*ms;
	register processor_t	processor;
	register spl_t	s;

	/* Block interrupts while the processor is marked off-line. */
	s = splsched();
	processor = cpu_to_processor(cpu);
	processor_lock(processor);
	ms = &machine_slot[cpu];
	ms->running = FALSE;
	machine_info.avail_cpus--;
	/*
	 *	processor has already been removed from pset.
	 */
	processor->processor_set_next = PROCESSOR_SET_NULL;
	processor->state = PROCESSOR_OFF_LINE;
	processor_unlock(processor);
	splx(s);
}
  295 
  296 kern_return_t
  297 host_reboot(host, options)
  298         host_t  host;
  299         int     options;
  300 {
  301         if (host == HOST_NULL)
  302                 return (KERN_INVALID_HOST);
  303 
  304         if (options & RB_DEBUGGER) {
  305                 extern void Debugger();
  306                 Debugger("Debugger");
  307         } else {
  308                 halt_all_cpus(!(options & RB_HALT));
  309         }
  310         return (KERN_SUCCESS);
  311 }
  312 
  313 #if     NCPUS > 1
/*
 *	processor_request_action - common internals of processor_assign
 *		and processor_shutdown.  If new_pset is null, this is
 *		a shutdown, else it's an assign and caller must donate
 *		a reference.
 */
void
processor_request_action(processor, new_pset)
processor_t	processor;
processor_set_t	new_pset;
{
    register processor_set_t pset;

    /*
     *	Processor must be in a processor set.  Must lock its idle lock to
     *	get at processor state.
     */
    pset = processor->processor_set;
    simple_lock(&pset->idle_lock);

    /*
     *	If the processor is dispatching, let it finish - it will set its
     *	state to running very soon.
     *	(Busy-wait; the volatile cast forces a fresh read each pass.)
     */
    while (*(volatile int *)&processor->state == PROCESSOR_DISPATCHING)
	continue;

    /*
     *	Now lock the action queue and do the dirty work.
     */
    simple_lock(&action_lock);

    switch (processor->state) {
	case PROCESSOR_IDLE:
	    /*
	     *	Remove from idle queue.
	     */
	    queue_remove(&pset->idle_queue, processor,	processor_t,
		processor_queue);
	    pset->idle_count--;

	    /* fall through ... */
	case PROCESSOR_RUNNING:
	    /*
	     *	Put it on the action queue.
	     */
	    queue_enter(&action_queue, processor, processor_t,
		processor_queue);

	    /* fall through ... */
	case PROCESSOR_ASSIGN:
	    /*
	     * And ask the action_thread to do the work.
	     */

	    if (new_pset == PROCESSOR_SET_NULL) {
		processor->state = PROCESSOR_SHUTDOWN;
	    }
	    else {
		/* Caller (processor_assign) guarantees no assign is pending. */
		assert(processor->state != PROCESSOR_ASSIGN);
		processor->state = PROCESSOR_ASSIGN;
		processor->processor_set_next = new_pset;
	    }
	    break;

	default:
	    printf("state: %d\n", processor->state);
	    panic("processor_request_action: bad state");
    }
    simple_unlock(&action_lock);
    simple_unlock(&pset->idle_lock);

    /* Kick the action thread to service the queued request. */
    thread_wakeup((event_t)&action_queue);
}
  388 
  389 #if     MACH_HOST
/*
 *	processor_assign() changes the processor set that a processor is
 *	assigned to.  Any previous assignment in progress is overridden.
 *	Synchronizes with assignment completion if wait is TRUE.
 */
kern_return_t
processor_assign(processor, new_pset, wait)
processor_t	processor;
processor_set_t	new_pset;
boolean_t	wait;
{
    spl_t		s;

    /*
     *	Check for null arguments.
     *  XXX Can't assign master processor.
     */
    if (processor == PROCESSOR_NULL || new_pset == PROCESSOR_SET_NULL ||
	processor == master_processor) {
	    return(KERN_INVALID_ARGUMENT);
    }

    /*
     *	Get pset reference to donate to processor_request_action.
     */
    pset_reference(new_pset);

    /*
     * Check processor status.
     * If shutdown or being shutdown, can`t reassign.
     * If being assigned, wait for assignment to finish.
     */
Retry:
    s = splsched();
    processor_lock(processor);
    if(processor->state == PROCESSOR_OFF_LINE ||
	processor->state == PROCESSOR_SHUTDOWN) {
	    /*
	     *	Already shutdown or being shutdown -- Can't reassign.
	     */
	    processor_unlock(processor);
	    (void) splx(s);
	    pset_deallocate(new_pset);	/* return the donated reference */
	    return(KERN_FAILURE);
    }

    if (processor->state == PROCESSOR_ASSIGN) {
	/*
	 *	A previous assignment is still in progress: sleep until
	 *	the action thread finishes it, then re-check the state.
	 *	(This ordering avoids a lock-order problem in
	 *	processor_doaction.)
	 */
	assert_wait((event_t) processor, TRUE);
	processor_unlock(processor);
	splx(s);
	thread_block((void(*)()) 0);
	goto Retry;
    }
	 
    /*
     *	Avoid work if processor is already in this processor set.
     */
    if (processor->processor_set == new_pset)  {
	processor_unlock(processor);
	(void) splx(s);
	/* clean up dangling ref */
	pset_deallocate(new_pset);
	return(KERN_SUCCESS);
    }

    /*
     * OK to start processor assignment.
     */
    processor_request_action(processor, new_pset);

    /*
     *	Synchronization with completion.
     */
    if (wait) {
	while (processor->state == PROCESSOR_ASSIGN ||
	    processor->state == PROCESSOR_SHUTDOWN) {
		assert_wait((event_t)processor, TRUE);
		processor_unlock(processor);
		splx(s);
		thread_block((void (*)()) 0);
		s = splsched();
		processor_lock(processor);
	}
    }
    processor_unlock(processor);
    splx(s);
    
    return(KERN_SUCCESS);
}
  479 
  480 #else   /* MACH_HOST */
  481 
/*
 *	processor_assign:
 *
 *	Without MACH_HOST there are no configurable processor sets,
 *	so reassignment always fails.
 */
kern_return_t
processor_assign(processor, new_pset, wait)
processor_t	processor;
processor_set_t	new_pset;
boolean_t	wait;
{
#ifdef	lint
	processor++; new_pset++; wait++;
#endif
	return KERN_FAILURE;
}
  493 
  494 #endif  /* MACH_HOST */
  495 
/*
 *	processor_shutdown() queues a processor up for shutdown.
 *	Any assignment in progress is overridden.  It does not synchronize
 *	with the shutdown (can be called from interrupt level).
 */
kern_return_t
processor_shutdown(processor)
processor_t	processor;
{
    spl_t		s;

    if (processor == PROCESSOR_NULL)
	return KERN_INVALID_ARGUMENT;

    s = splsched();
    processor_lock(processor);
    if(processor->state == PROCESSOR_OFF_LINE ||
	processor->state == PROCESSOR_SHUTDOWN) {
	    /*
	     *	Already shutdown or being shutdown -- nothing to do.
	     */
	    processor_unlock(processor);
	    splx(s);
	    return(KERN_SUCCESS);
    }

    /* Hand the processor to the action thread; it does the real work. */
    processor_request_action(processor, PROCESSOR_SET_NULL);
    processor_unlock(processor);
    splx(s);

    return(KERN_SUCCESS);
}
  528 
/*
 *	action_thread() shuts down processors or changes their assignment.
 */
void	processor_doaction();	/* forward */

/*
 *	Body of the action thread: drain the action queue, then sleep
 *	on it.  Used as a continuation so the thread's stack can be
 *	discarded while blocked.
 */
void action_thread_continue()
{
	register processor_t	processor;
	register spl_t		s;

	while (TRUE) {
		s = splsched();
		simple_lock(&action_lock);
		while ( !queue_empty(&action_queue)) {
			processor = (processor_t) queue_first(&action_queue);
			queue_remove(&action_queue, processor, processor_t,
				     processor_queue);
			simple_unlock(&action_lock);
			(void) splx(s);

			/* Locks dropped: processor_doaction may block. */
			processor_doaction(processor);

			s = splsched();
			simple_lock(&action_lock);
		}

		assert_wait((event_t) &action_queue, FALSE);
		simple_unlock(&action_lock);
		(void) splx(s);
		counter(c_action_thread_block++);
		/* On wakeup, restart at the top of this function. */
		thread_block(action_thread_continue);
	}
}
  562 
/* Kernel-thread entry point for the action thread; never returns. */
void action_thread()
{
	action_thread_continue();
	/*NOTREACHED*/
}
  568 
/*
 *	processor_doaction actually does the shutdown.  The trick here
 *	is to schedule ourselves onto a cpu and then save our
 *	context back into the runqs before taking out the cpu.
 *
 *	Called from the action thread.  Handles both assignment
 *	(processor->processor_set_next != NULL) and shutdown.
 */
#ifdef __GNUC__
__volatile__
#endif
void	processor_doshutdown();	/* forward */

void processor_doaction(processor)
register processor_t	processor;
{
	thread_t			this_thread;
	spl_t				s;
	register processor_set_t	pset;
#if	MACH_HOST
	register processor_set_t	new_pset;
	register thread_t		thread;
	register thread_t		prev_thread = THREAD_NULL;
	boolean_t			have_pset_ref = FALSE;
#endif	/* MACH_HOST */

	/*
	 *	Get onto the processor to shutdown
	 */
	this_thread = current_thread();
	thread_bind(this_thread, processor);
	thread_block((void (*)()) 0);

	pset = processor->processor_set;
#if	MACH_HOST
	/*
	 *	If this is the last processor in the processor_set,
	 *	stop all the threads first.
	 */
	pset_lock(pset);
	if (pset->processor_count == 1) {
		/*
		 *	First suspend all of them.
		 */
		queue_iterate(&pset->threads, thread, thread_t, pset_threads) {
			thread_hold(thread);
		}
		pset->empty = TRUE;
		/*
		 *	Now actually stop them.  Need a pset reference.
		 */
		pset->ref_count++;
		have_pset_ref = TRUE;

Restart_thread:
		prev_thread = THREAD_NULL;
		queue_iterate(&pset->threads, thread, thread_t, pset_threads) {
			/* Hold a thread ref so the thread can't vanish
			 * while the pset lock is dropped below. */
			thread_reference(thread);
			pset_unlock(pset);
			if (prev_thread != THREAD_NULL)
				thread_deallocate(prev_thread);

			/*
			 *	Only wait for threads still in the pset.
			 */
			thread_freeze(thread);
			if (thread->processor_set != pset) {
				/*
				 *	It got away - start over.
				 */
				thread_unfreeze(thread);
				thread_deallocate(thread);
				pset_lock(pset);
				goto Restart_thread;
			}

			(void) thread_dowait(thread, TRUE);
			prev_thread = thread;
			pset_lock(pset);
			thread_unfreeze(prev_thread);
		}
	}
	pset_unlock(pset);

	/*
	 *	At this point, it is ok to remove the processor from the pset.
	 *	We can use processor->processor_set_next without locking the
	 *	processor, since it cannot change while processor->state is
	 *	PROCESSOR_ASSIGN or PROCESSOR_SHUTDOWN.
	 */

	new_pset = processor->processor_set_next;

Restart_pset:
	if (new_pset) {
	    /*
	     *	Reassigning processor.
	     */

	    /* Lock both psets in address order to avoid deadlock. */
	    if ((integer_t) pset < (integer_t) new_pset) {
		pset_lock(pset);
		pset_lock(new_pset);
	    }
	    else {
		pset_lock(new_pset);
		pset_lock(pset);
	    }
	    if (!(new_pset->active)) {
		/* Target pset was destroyed; fall back to default_pset. */
		pset_unlock(new_pset);
		pset_unlock(pset);
		pset_deallocate(new_pset);
		new_pset = &default_pset;
		pset_reference(new_pset);
		goto Restart_pset;
	    }

	    /*
	     *	Handle remove last / assign first race.
	     *	Only happens if there is more than one action thread.
	     */
	    while (new_pset->empty && new_pset->processor_count > 0) {
		pset_unlock(new_pset);
		pset_unlock(pset);
		while (*(volatile boolean_t *)&new_pset->empty &&
		       *(volatile int *)&new_pset->processor_count > 0)
			/* spin */;
		goto Restart_pset;
	    }

	    /*
	     *	Lock the processor.  new_pset should not have changed.
	     */
	    s = splsched();
	    processor_lock(processor);
	    assert(processor->processor_set_next == new_pset);

	    /*
	     *	Shutdown may have been requested while this assignment
	     *	was in progress.
	     */
	    if (processor->state == PROCESSOR_SHUTDOWN) {
		processor->processor_set_next = PROCESSOR_SET_NULL;
		pset_unlock(new_pset);
		goto shutdown;	/* releases pset reference */
	    }

	    /*
	     *	Do assignment, then wakeup anyone waiting for it.
	     */
	    pset_remove_processor(pset, processor);
	    pset_unlock(pset);

	    pset_add_processor(new_pset, processor);
	    if (new_pset->empty) {
		/*
		 *	Set all the threads loose.
		 *
		 *	NOTE: this appears to violate the locking
		 *	order, since the processor lock should
		 *	be taken AFTER a thread lock.  However,
		 *	thread_setrun (called by thread_release)
		 *	only takes the processor lock if the
		 *	processor is idle.  The processor is
		 *	not idle here.
		 */
		queue_iterate(&new_pset->threads, thread, thread_t,
			      pset_threads) {
		    thread_release(thread);
		}
		new_pset->empty = FALSE;
	    }
	    processor->processor_set_next = PROCESSOR_SET_NULL;
	    processor->state = PROCESSOR_RUNNING;
	    /* Wake waiters in processor_assign (wait == TRUE). */
	    thread_wakeup((event_t)processor);
	    processor_unlock(processor);
	    splx(s);
	    pset_unlock(new_pset);

	    /*
	     *	Clean up dangling references, and release our binding.
	     */
	    pset_deallocate(new_pset);
	    if (have_pset_ref)
		pset_deallocate(pset);
	    if (prev_thread != THREAD_NULL)
		thread_deallocate(prev_thread);
	    thread_bind(this_thread, PROCESSOR_NULL);

	    thread_block((void (*)()) 0);
	    return;
	}

#endif	/* MACH_HOST */
	
	/*
	 *	Do shutdown, make sure we live when processor dies.
	 */
	if (processor->state != PROCESSOR_SHUTDOWN) {
		printf("state: %d\n", processor->state);
		panic("action_thread -- bad processor state");
	}

	s = splsched();
	processor_lock(processor);

    shutdown:
	pset_remove_processor(pset, processor);
	processor_unlock(processor);
	pset_unlock(pset);
	splx(s);

	/*
	 *	Clean up dangling references, and release our binding.
	 */
#if	MACH_HOST
	if (new_pset != PROCESSOR_SET_NULL)
		pset_deallocate(new_pset);
	if (have_pset_ref)
		pset_deallocate(pset);
	if (prev_thread != THREAD_NULL)
		thread_deallocate(prev_thread);
#endif	/* MACH_HOST */

	thread_bind(this_thread, PROCESSOR_NULL);
	/*
	 *	Switch onto the processor's shutdown stack;
	 *	processor_doshutdown halts the cpu and this thread
	 *	resumes on some other cpu.
	 */
	switch_to_shutdown_context(this_thread,
				   processor_doshutdown,
				   processor);

}
  795 
/*
 *	Actually do the processor shutdown.  This is called at splsched,
 *	running on the processor's shutdown stack.
 */

#ifdef __GNUC__
extern __volatile__ void halt_cpu();
#endif

#ifdef __GNUC__
__volatile__
#endif
void processor_doshutdown(processor)
register processor_t	processor;
{
	register int		cpu = processor->slot_num;

	/* Stop charging cpu time against the departing thread. */
	timer_switch(&kernel_timer[cpu]);

	/*
	 *	Ok, now exit this cpu.
	 */
	PMAP_DEACTIVATE_KERNEL(cpu);
	active_threads[cpu] = THREAD_NULL;
	cpu_down(cpu);
	/* Wake waiters synchronizing on this processor's state change. */
	thread_wakeup((event_t)processor);
	halt_cpu();
	/*
	 *	The action thread returns to life after the call to
	 *	switch_to_shutdown_context above, on some other cpu.
	 */

	/*NOTREACHED*/
}
  830 #else   /* NCPUS > 1 */
  831 
  832 kern_return_t
  833 processor_assign(processor, new_pset, wait)
  834 processor_t     processor;
  835 processor_set_t new_pset;
  836 boolean_t       wait;
  837 {
  838 #ifdef  lint
  839         processor++; new_pset++; wait++;
  840 #endif  lint
  841         return(KERN_FAILURE);
  842 }
  843 
  844 #endif /* NCPUS > 1 */
  845 
  846 kern_return_t
  847 host_get_boot_info(priv_host, boot_info)
  848         host_t              priv_host;
  849         kernel_boot_info_t  boot_info;
  850 {
  851         char *src = "";
  852 
  853         if (priv_host == HOST_NULL) {
  854                 return KERN_INVALID_HOST;
  855         }
  856 
  857 #if     NORMA_ETHER
  858 {
  859         extern char *norma_ether_boot_info();
  860         src = norma_ether_boot_info();
  861 }
  862 #endif  /* NORMA_ETHER */
  863 #if     defined(iPSC386) || defined(iPSC860)
  864 {
  865         extern char *ipsc_boot_environ();
  866         src = ipsc_boot_environ();
  867 }
  868 #endif  /* defined(iPSC386) || defined(iPSC860) */
  869 
  870         (void) strncpy(boot_info, src, KERNEL_BOOT_INFO_MAX);
  871         return KERN_SUCCESS;
  872 }

Cache object: 5820c9b37f412f277edc84aa5b39f65a


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.