FreeBSD/Linux Kernel Cross Reference
sys/osfmk/i386/mp.c


    1 /*
    2  * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
    3  *
    4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
    5  * 
    6  * This file contains Original Code and/or Modifications of Original Code
    7  * as defined in and that are subject to the Apple Public Source License
    8  * Version 2.0 (the 'License'). You may not use this file except in
    9  * compliance with the License. The rights granted to you under the License
   10  * may not be used to create, or enable the creation or redistribution of,
   11  * unlawful or unlicensed copies of an Apple operating system, or to
   12  * circumvent, violate, or enable the circumvention or violation of, any
   13  * terms of an Apple operating system software license agreement.
   14  * 
   15  * Please obtain a copy of the License at
   16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
   17  * 
   18  * The Original Code and all software distributed under the License are
   19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
   20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
   21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
   22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
   23  * Please see the License for the specific language governing rights and
   24  * limitations under the License.
   25  * 
   26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
   27  */
   28 /*
   29  * @OSF_COPYRIGHT@
   30  */
   31 
   32 #include <mach_rt.h>
   33 #include <mach_kdb.h>
   34 #include <mach_kdp.h>
   35 #include <mach_ldebug.h>
   36 #include <gprof.h>
   37 
   38 #include <mach/mach_types.h>
   39 #include <mach/kern_return.h>
   40 
   41 #include <kern/kern_types.h>
   42 #include <kern/startup.h>
   43 #include <kern/timer_queue.h>
   44 #include <kern/processor.h>
   45 #include <kern/cpu_number.h>
   46 #include <kern/cpu_data.h>
   47 #include <kern/assert.h>
   48 #include <kern/machine.h>
   49 #include <kern/pms.h>
   50 #include <kern/misc_protos.h>
   51 
   52 #include <vm/vm_map.h>
   53 #include <vm/vm_kern.h>
   54 
   55 #include <profiling/profile-mk.h>
   56 
   57 #include <i386/proc_reg.h>
   58 #include <i386/cpu_threads.h>
   59 #include <i386/mp_desc.h>
   60 #include <i386/misc_protos.h>
   61 #include <i386/trap.h>
   62 #include <i386/postcode.h>
   63 #include <i386/machine_routines.h>
   64 #include <i386/mp.h>
   65 #include <i386/mp_events.h>
   66 #include <i386/lapic.h>
   67 #include <i386/ipl.h>
   68 #include <i386/cpuid.h>
   69 #include <i386/fpu.h>
   70 #include <i386/machine_cpu.h>
   71 #include <i386/mtrr.h>
   72 #include <i386/pmCPU.h>
   73 #if CONFIG_MCA
   74 #include <i386/machine_check.h>
   75 #endif
   76 #include <i386/acpi.h>
   77 
   78 #include <chud/chud_xnu.h>
   79 #include <chud/chud_xnu_private.h>
   80 
   81 #include <sys/kdebug.h>
   82 #if MACH_KDB
   83 #include <machine/db_machdep.h>
   84 #include <ddb/db_aout.h>
   85 #include <ddb/db_access.h>
   86 #include <ddb/db_sym.h>
   87 #include <ddb/db_variables.h>
   88 #include <ddb/db_command.h>
   89 #include <ddb/db_output.h>
   90 #include <ddb/db_expr.h>
   91 #endif
   92 
   93 #if     MP_DEBUG
   94 #define PAUSE           delay(1000000)
   95 #define DBG(x...)       kprintf(x)
   96 #else
   97 #define DBG(x...)
   98 #define PAUSE
   99 #endif  /* MP_DEBUG */
  100 
  101 
  102 void            slave_boot_init(void);
  103 
  104 #if MACH_KDB
  105 static void     mp_kdb_wait(void);
  106 volatile boolean_t      mp_kdb_trap = FALSE;
  107 volatile long   mp_kdb_ncpus = 0;
  108 #endif
  109 
  110 static void     mp_kdp_wait(boolean_t flush, boolean_t isNMI);
  111 static void     mp_rendezvous_action(void);
  112 static void     mp_broadcast_action(void);
  113 
  114 static boolean_t        cpu_signal_pending(int cpu, mp_event_t event);
  115 static int              cpu_signal_handler(x86_saved_state_t *regs);
  116 static int              NMIInterruptHandler(x86_saved_state_t *regs);
  117 
  118 boolean_t               smp_initialized = FALSE;
  119 volatile boolean_t      force_immediate_debugger_NMI = FALSE;
  120 volatile boolean_t      pmap_tlb_flush_timeout = FALSE;
  121 decl_simple_lock_data(,mp_kdp_lock);
  122 
  123 decl_lck_mtx_data(static, mp_cpu_boot_lock);
  124 lck_mtx_ext_t   mp_cpu_boot_lock_ext;
  125 
  126 /* Variables needed for MP rendezvous. */
  127 decl_simple_lock_data(,mp_rv_lock);
  128 static void     (*mp_rv_setup_func)(void *arg);
  129 static void     (*mp_rv_action_func)(void *arg);
  130 static void     (*mp_rv_teardown_func)(void *arg);
  131 static void     *mp_rv_func_arg;
  132 static volatile int     mp_rv_ncpus;
  133                         /* Cache-aligned barriers: */
  134 static volatile long    mp_rv_entry    __attribute__((aligned(64)));
  135 static volatile long    mp_rv_exit     __attribute__((aligned(64)));
  136 static volatile long    mp_rv_complete __attribute__((aligned(64)));
  137 
  138 volatile        uint64_t        debugger_entry_time;
  139 volatile        uint64_t        debugger_exit_time;
  140 #if MACH_KDP
  141 
  142 static struct _kdp_xcpu_call_func {
  143         kdp_x86_xcpu_func_t func;
  144         void     *arg0, *arg1;
  145         volatile long     ret;
  146         volatile uint16_t cpu;
  147 } kdp_xcpu_call_func = {
  148         .cpu  = KDP_XCPU_NONE
  149 };
  150 
  151 #endif
  152 
  153 /* Variables needed for MP broadcast. */
  154 static void        (*mp_bc_action_func)(void *arg);
  155 static void        *mp_bc_func_arg;
  156 static int      mp_bc_ncpus;
  157 static volatile long   mp_bc_count;
  158 decl_lck_mtx_data(static, mp_bc_lock);
  159 lck_mtx_ext_t   mp_bc_lock_ext;
  160 static  volatile int    debugger_cpu = -1;
  161 
  162 static void     mp_cpus_call_action(void); 
  163 static void     mp_call_PM(void);
  164 
  165 char            mp_slave_stack[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE))); // Temp stack for slave init
  166 
  167 
  168 #if GPROF
  169 /*
  170  * Initialize dummy structs for profiling. These aren't used but
  171  * allow hertz_tick() to be built with GPROF defined.
  172  */
  173 struct profile_vars _profile_vars;
  174 struct profile_vars *_profile_vars_cpus[MAX_CPUS] = { &_profile_vars };
  175 #define GPROF_INIT()                                                    \
  176 {                                                                       \
  177         int     i;                                                      \
  178                                                                         \
  179         /* Hack to initialize pointers to unused profiling structs */   \
  180         for (i = 1; i < MAX_CPUS; i++)                          \
  181                 _profile_vars_cpus[i] = &_profile_vars;                 \
  182 }
  183 #else
  184 #define GPROF_INIT()
  185 #endif /* GPROF */
  186 
  187 static lck_grp_t        smp_lck_grp;
  188 static lck_grp_attr_t   smp_lck_grp_attr;
  189 
  190 extern void     slave_pstart(void);
  191 
  192 void
  193 smp_init(void)
  194 {
  195         simple_lock_init(&mp_kdp_lock, 0);
  196         simple_lock_init(&mp_rv_lock, 0);
  197         lck_grp_attr_setdefault(&smp_lck_grp_attr);
  198         lck_grp_init(&smp_lck_grp, "i386_smp", &smp_lck_grp_attr);
  199         lck_mtx_init_ext(&mp_cpu_boot_lock, &mp_cpu_boot_lock_ext, &smp_lck_grp, LCK_ATTR_NULL);
  200         lck_mtx_init_ext(&mp_bc_lock, &mp_bc_lock_ext, &smp_lck_grp, LCK_ATTR_NULL);
  201         console_init();
  202 
  203         /* Local APIC? */
  204         if (!lapic_probe())
  205                 return;
  206 
  207         lapic_init();
  208         lapic_configure();
  209         lapic_set_intr_func(LAPIC_NMI_INTERRUPT,  NMIInterruptHandler);
  210         lapic_set_intr_func(LAPIC_VECTOR(INTERPROCESSOR), cpu_signal_handler);
  211 
  212         cpu_thread_init();
  213 
  214         GPROF_INIT();
  215         DBGLOG_CPU_INIT(master_cpu);
  216 
  217         install_real_mode_bootstrap(slave_pstart);
  218 
  219         smp_initialized = TRUE;
  220 
  221         return;
  222 }
  223 
  224 /*
  225  * Poll a CPU to see when it has marked itself as running.
  226  */
  227 static void
  228 mp_wait_for_cpu_up(int slot_num, unsigned int iters, unsigned int usecdelay)
  229 {
  230         while (iters-- > 0) {
  231                 if (cpu_datap(slot_num)->cpu_running)
  232                         break;
  233                 delay(usecdelay);
  234         }
  235 }
  236 
  237 /*
  238  * Quickly bring a CPU back online which has been halted.
  239  */
  240 kern_return_t
  241 intel_startCPU_fast(int slot_num)
  242 {
  243         kern_return_t   rc;
  244 
  245         /*
  246          * Try to perform a fast restart
  247          */
  248         rc = pmCPUExitHalt(slot_num);
  249         if (rc != KERN_SUCCESS)
  250                 /*
  251                  * The CPU was not eligible for a fast restart.
  252                  */
  253                 return(rc);
  254 
  255         /*
  256          * Wait until the CPU is back online.
  257          */
  258         mp_disable_preemption();
  259     
  260         /*
  261          * We use short pauses (1us) for low latency.  30,000 iterations is
  262          * longer than a full restart would require so it should be more
  263          * than long enough.
  264          */
  265         mp_wait_for_cpu_up(slot_num, 30000, 1);
  266         mp_enable_preemption();
  267 
  268         /*
  269          * Check to make sure that the CPU is really running.  If not,
  270          * go through the slow path.
  271          */
  272         if (cpu_datap(slot_num)->cpu_running)
  273                 return(KERN_SUCCESS);
  274         else
  275                 return(KERN_FAILURE);
  276 }
  277 
  278 typedef struct {
  279         int     target_cpu;
  280         int     target_lapic;
  281         int     starter_cpu;
  282 } processor_start_info_t;
  283 
  284 static processor_start_info_t start_info;
  285 
  286 static void
  287 start_cpu(void *arg)
  288 {
  289         int                     i = 1000;
  290         processor_start_info_t  *psip = (processor_start_info_t *) arg;
  291 
  292         /* Ignore this if the current processor is not the starter */
  293         if (cpu_number() != psip->starter_cpu)
  294                 return;
  295 
  296         LAPIC_WRITE(ICRD, psip->target_lapic << LAPIC_ICRD_DEST_SHIFT);
  297         LAPIC_WRITE(ICR, LAPIC_ICR_DM_INIT);
  298         delay(100);
  299 
  300         LAPIC_WRITE(ICRD, psip->target_lapic << LAPIC_ICRD_DEST_SHIFT);
  301         LAPIC_WRITE(ICR, LAPIC_ICR_DM_STARTUP|(REAL_MODE_BOOTSTRAP_OFFSET>>12));
  302 
  303 #ifdef  POSTCODE_DELAY
  304         /* Wait much longer if postcodes are displayed for a delay period. */
  305         i *= 10000;
  306 #endif
  307         mp_wait_for_cpu_up(psip->target_cpu, i*100, 100);
  308 }
  309 
  310 extern char     prot_mode_gdt[];
  311 extern char     slave_boot_base[];
  312 extern char real_mode_bootstrap_base[];
  313 extern char real_mode_bootstrap_end[];
  314 extern char     slave_boot_end[];
  315 
  316 kern_return_t
  317 intel_startCPU(
  318         int     slot_num)
  319 {
  320         int             lapic = cpu_to_lapic[slot_num];
  321         boolean_t       istate;
  322 
  323         assert(lapic != -1);
  324 
  325         DBGLOG_CPU_INIT(slot_num);
  326 
  327         DBG("intel_startCPU(%d) lapic_id=%d\n", slot_num, lapic);
  328         DBG("IdlePTD(%p): 0x%x\n", &IdlePTD, (int) IdlePTD);
  329 
  330         /*
  331          * Initialize (or re-initialize) the descriptor tables for this cpu.
  332          * Propagate processor mode to slave.
  333          */
  334         if (cpu_mode_is64bit())
  335                 cpu_desc_init64(cpu_datap(slot_num));
  336         else
  337                 cpu_desc_init(cpu_datap(slot_num));
  338 
  339         /* Serialize use of the slave boot stack, etc. */
  340         lck_mtx_lock(&mp_cpu_boot_lock);
  341 
  342         istate = ml_set_interrupts_enabled(FALSE);
  343         if (slot_num == get_cpu_number()) {
  344                 ml_set_interrupts_enabled(istate);
  345                 lck_mtx_unlock(&mp_cpu_boot_lock);
  346                 return KERN_SUCCESS;
  347         }
  348 
  349         start_info.starter_cpu  = cpu_number();
  350         start_info.target_cpu   = slot_num;
  351         start_info.target_lapic = lapic;
  352 
  353         /*
  354          * Perform the processor startup sequence with all running
  355          * processors rendezvous'ed. This is required during periods when
  356          * the cache-disable bit is set for MTRR/PAT initialization.
  357          */
  358         mp_rendezvous_no_intrs(start_cpu, (void *) &start_info);
  359 
  360         ml_set_interrupts_enabled(istate);
  361         lck_mtx_unlock(&mp_cpu_boot_lock);
  362 
  363         if (!cpu_datap(slot_num)->cpu_running) {
  364                 kprintf("Failed to start CPU %02d\n", slot_num);
  365                 printf("Failed to start CPU %02d, rebooting...\n", slot_num);
  366                 delay(1000000);
  367                 halt_cpu();
  368                 return KERN_SUCCESS;
  369         } else {
  370                 kprintf("Started cpu %d (lapic id %08x)\n", slot_num, lapic);
  371                 return KERN_SUCCESS;
  372         }
  373 }
  374 
  375 #if     MP_DEBUG
  376 cpu_signal_event_log_t  *cpu_signal[MAX_CPUS];
  377 cpu_signal_event_log_t  *cpu_handle[MAX_CPUS];
  378 
  379 MP_EVENT_NAME_DECL();
  380 
  381 #endif  /* MP_DEBUG */
  382 
  383 int
  384 cpu_signal_handler(x86_saved_state_t *regs)
  385 {
  386         int             my_cpu;
  387         volatile int    *my_word;
  388 #if     MACH_KDB && MACH_ASSERT
  389         int             i=100;
  390 #endif  /* MACH_KDB && MACH_ASSERT */
  391 
  392         mp_disable_preemption();
  393 
  394         my_cpu = cpu_number();
  395         my_word = &current_cpu_datap()->cpu_signals;
  396 
  397         do {
  398 #if     MACH_KDB && MACH_ASSERT
  399                 if (i-- <= 0)
  400                     Debugger("cpu_signal_handler: signals did not clear");
  401 #endif  /* MACH_KDB && MACH_ASSERT */
  402 #if     MACH_KDP
  403                 if (i_bit(MP_KDP, my_word)) {
  404                         DBGLOG(cpu_handle,my_cpu,MP_KDP);
  405                         i_bit_clear(MP_KDP, my_word);
  406 /* Ensure that the i386_kernel_state at the base of the
  407  * current thread's stack (if any) is synchronized with the
  408  * context at the moment of the interrupt, to facilitate
  409  * access through the debugger.
  410  */
  411                         sync_iss_to_iks(regs);
  412                         mp_kdp_wait(TRUE, FALSE);
  413                 } else
  414 #endif  /* MACH_KDP */
  415                 if (i_bit(MP_TLB_FLUSH, my_word)) {
  416                         DBGLOG(cpu_handle,my_cpu,MP_TLB_FLUSH);
  417                         i_bit_clear(MP_TLB_FLUSH, my_word);
  418                         pmap_update_interrupt();
  419                 } else if (i_bit(MP_AST, my_word)) {
  420                         DBGLOG(cpu_handle,my_cpu,MP_AST);
  421                         i_bit_clear(MP_AST, my_word);
  422                         ast_check(cpu_to_processor(my_cpu));
  423 #if     MACH_KDB
  424                 } else if (i_bit(MP_KDB, my_word)) {
  425 
  426                         i_bit_clear(MP_KDB, my_word);
  427                         current_cpu_datap()->cpu_kdb_is_slave++;
  428                         mp_kdb_wait();
  429                         current_cpu_datap()->cpu_kdb_is_slave--;
  430 #endif  /* MACH_KDB */
  431                 } else if (i_bit(MP_RENDEZVOUS, my_word)) {
  432                         DBGLOG(cpu_handle,my_cpu,MP_RENDEZVOUS);
  433                         i_bit_clear(MP_RENDEZVOUS, my_word);
  434                         mp_rendezvous_action();
  435                 } else if (i_bit(MP_BROADCAST, my_word)) {
  436                         DBGLOG(cpu_handle,my_cpu,MP_BROADCAST);
  437                         i_bit_clear(MP_BROADCAST, my_word);
  438                         mp_broadcast_action();
  439                 } else if (i_bit(MP_CHUD, my_word)) {
  440                         DBGLOG(cpu_handle,my_cpu,MP_CHUD);
  441                         i_bit_clear(MP_CHUD, my_word);
  442                         chudxnu_cpu_signal_handler();
  443                 } else if (i_bit(MP_CALL, my_word)) {
  444                         DBGLOG(cpu_handle,my_cpu,MP_CALL);
  445                         i_bit_clear(MP_CALL, my_word);
  446                         mp_cpus_call_action();
  447                 } else if (i_bit(MP_CALL_PM, my_word)) {
  448                         DBGLOG(cpu_handle,my_cpu,MP_CALL_PM);
  449                         i_bit_clear(MP_CALL_PM, my_word);
  450                         mp_call_PM();
  451                 }
  452         } while (*my_word);
  453 
  454         mp_enable_preemption();
  455 
  456         return 0;
  457 }
  458 
  459 static int
  460 NMIInterruptHandler(x86_saved_state_t *regs)
  461 {
  462         void    *stackptr;
  463         
  464         sync_iss_to_iks_unconditionally(regs);
  465 #if defined (__i386__)
  466         __asm__ volatile("movl %%ebp, %0" : "=m" (stackptr));
  467 #elif defined (__x86_64__)
  468         __asm__ volatile("movq %%rbp, %0" : "=m" (stackptr));
  469 #endif
  470 
  471         if (cpu_number() == debugger_cpu)
  472                         goto NMExit;
  473 
  474         if (pmap_tlb_flush_timeout == TRUE && current_cpu_datap()->cpu_tlb_invalid) {
  475                 char pstr[128];
  476                 snprintf(&pstr[0], sizeof(pstr), "Panic(CPU %d): Unresponsive processor\n", cpu_number());
  477                 panic_i386_backtrace(stackptr, 16, &pstr[0], TRUE, regs);
  478         }
  479 
  480 #if MACH_KDP
  481         mp_kdp_wait(FALSE, pmap_tlb_flush_timeout);
  482 #endif
  483 NMExit: 
  484         return 1;
  485 }
  486 
  487 #ifdef  MP_DEBUG
  488 int     max_lock_loops = 100000000;
  489 int             trappedalready = 0;     /* (BRINGUP) */
  490 #endif  /* MP_DEBUG */
  491 
  492 static void
  493 i386_cpu_IPI(int cpu)
  494 {
  495         boolean_t       state;
  496         
  497 #ifdef  MP_DEBUG
  498         if(cpu_datap(cpu)->cpu_signals & 6) {   /* (BRINGUP) */
  499                 kprintf("i386_cpu_IPI: sending enter debugger signal (%08X) to cpu %d\n", cpu_datap(cpu)->cpu_signals, cpu);
  500         }
  501 #endif  /* MP_DEBUG */
  502 
  503 #if MACH_KDB
  504 #ifdef  MP_DEBUG
  505         if(!trappedalready && (cpu_datap(cpu)->cpu_signals & 6)) {      /* (BRINGUP) */
  506                 if(kdb_cpu != cpu_number()) {
  507                         trappedalready = 1;
  508                         panic("i386_cpu_IPI: sending enter debugger signal (%08X) to cpu %d and I do not own debugger, owner = %08X\n", 
  509                                 cpu_datap(cpu)->cpu_signals, cpu, kdb_cpu);
  510                 }
  511         }
  512 #endif  /* MP_DEBUG */
  513 #endif
  514 
  515         /* Wait for previous interrupt to be delivered... */
  516 #ifdef  MP_DEBUG
  517         int     pending_busy_count = 0;
  518         while (LAPIC_READ(ICR) & LAPIC_ICR_DS_PENDING) {
  519                 if (++pending_busy_count > max_lock_loops)
  520                         panic("i386_cpu_IPI() deadlock\n");
  521 #else
  522         while (LAPIC_READ(ICR) & LAPIC_ICR_DS_PENDING) {
  523 #endif  /* MP_DEBUG */
  524                 cpu_pause();
  525         }
  526 
  527         state = ml_set_interrupts_enabled(FALSE);
  528         LAPIC_WRITE(ICRD, cpu_to_lapic[cpu] << LAPIC_ICRD_DEST_SHIFT);
  529         LAPIC_WRITE(ICR, LAPIC_VECTOR(INTERPROCESSOR) | LAPIC_ICR_DM_FIXED);
  530         (void) ml_set_interrupts_enabled(state);
  531 }
  532 
  533 /*
  534  * cpu_interrupt() is really just to be used by the scheduler to
  535  * get a CPU's attention; it may not always issue an IPI.  If an
  536  * IPI is always needed then use i386_cpu_IPI().
  537  */
  538 void
  539 cpu_interrupt(int cpu)
  540 {
  541         if (smp_initialized
  542             && pmCPUExitIdle(cpu_datap(cpu))) {
  543                 i386_cpu_IPI(cpu);
  544         }
  545 }
  546 
  547 /*
  548  * Send a true NMI via the local APIC to the specified CPU.
  549  */
  550 void
  551 cpu_NMI_interrupt(int cpu)
  552 {
  553         boolean_t       state;
  554 
  555         if (smp_initialized) {
  556                 state = ml_set_interrupts_enabled(FALSE);
  557 /* Program the interrupt command register */
  558                 LAPIC_WRITE(ICRD, cpu_to_lapic[cpu] << LAPIC_ICRD_DEST_SHIFT);
  559 /* The vector is ignored in this case--the target CPU will enter on the
  560  * NMI vector.
  561  */
  562                 LAPIC_WRITE(ICR, LAPIC_VECTOR(INTERPROCESSOR)|LAPIC_ICR_DM_NMI);
  563                 (void) ml_set_interrupts_enabled(state);
  564         }
  565 }
  566 
  567 static void     (* volatile mp_PM_func)(void) = NULL;
  568 
  569 static void
  570 mp_call_PM(void)
  571 {
  572         assert(!ml_get_interrupts_enabled());
  573 
  574         if (mp_PM_func != NULL)
  575                 mp_PM_func();
  576 }
  577 
  578 void
  579 cpu_PM_interrupt(int cpu)
  580 {
  581         assert(!ml_get_interrupts_enabled());
  582 
  583         if (mp_PM_func != NULL) {
  584                 if (cpu == cpu_number())
  585                         mp_PM_func();
  586                 else
  587                         i386_signal_cpu(cpu, MP_CALL_PM, ASYNC);
  588         }
  589 }
  590 
  591 void
  592 PM_interrupt_register(void (*fn)(void))
  593 {
  594         mp_PM_func = fn;
  595 }
  596 
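Illustrative sketch (not part of the original file): the power-management plumbing above is used by registering a callback once with PM_interrupt_register() and then poking a CPU with cpu_PM_interrupt(), which runs the handler locally or sends MP_CALL_PM. The handler and caller names below are invented for the example.

/* Hypothetical PM callback (example only); runs with interrupts disabled. */
static void
example_pm_handler(void)
{
        kprintf("PM event on cpu %d\n", cpu_number());
}

static void
example_pm_poke(int target_cpu)
{
        boolean_t istate;

        PM_interrupt_register(example_pm_handler);

        /* cpu_PM_interrupt() asserts that interrupts are disabled. */
        istate = ml_set_interrupts_enabled(FALSE);
        cpu_PM_interrupt(target_cpu);
        (void) ml_set_interrupts_enabled(istate);
}
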
  597 void
  598 i386_signal_cpu(int cpu, mp_event_t event, mp_sync_t mode)
  599 {
  600         volatile int    *signals = &cpu_datap(cpu)->cpu_signals;
  601         uint64_t        tsc_timeout;
  602 
  603         
  604         if (!cpu_datap(cpu)->cpu_running)
  605                 return;
  606 
  607         if (event == MP_TLB_FLUSH)
  608                 KERNEL_DEBUG(0xef800020 | DBG_FUNC_START, cpu, 0, 0, 0, 0);
  609 
  610         DBGLOG(cpu_signal, cpu, event);
  611         
  612         i_bit_set(event, signals);
  613         i386_cpu_IPI(cpu);
  614         if (mode == SYNC) {
  615            again:
  616                 tsc_timeout = rdtsc64() + (1000*1000*1000);
  617                 while (i_bit(event, signals) && rdtsc64() < tsc_timeout) {
  618                         cpu_pause();
  619                 }
  620                 if (i_bit(event, signals)) {
  621                         DBG("i386_signal_cpu(%d, 0x%x, SYNC) timed out\n",
  622                                 cpu, event);
  623                         goto again;
  624                 }
  625         }
  626         if (event == MP_TLB_FLUSH)
  627                 KERNEL_DEBUG(0xef800020 | DBG_FUNC_END, cpu, 0, 0, 0, 0);
  628 }
  629 
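As an illustration only (the real senders of these events are the scheduler, pmap, and debugger code in this file and elsewhere), a caller could post events to another CPU as sketched below; target_cpu is an assumed, already-running processor and the helper name is invented.

static void
example_signal(int target_cpu)
{
        /* Fire-and-forget: post an AST check and return immediately. */
        i386_signal_cpu(target_cpu, MP_AST, ASYNC);

        /* Synchronous: spin (with the TSC timeout/retry above) until the
         * target clears the MP_TLB_FLUSH bit from its signal word. */
        i386_signal_cpu(target_cpu, MP_TLB_FLUSH, SYNC);
}
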
  630 /*
  631  * Send event to all running cpus.
  632  * Called with the topology locked.
  633  */
  634 void
  635 i386_signal_cpus(mp_event_t event, mp_sync_t mode)
  636 {
  637         unsigned int    cpu;
  638         unsigned int    my_cpu = cpu_number();
  639 
  640         assert(hw_lock_held((hw_lock_t)&x86_topo_lock));
  641 
  642         for (cpu = 0; cpu < real_ncpus; cpu++) {
  643                 if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
  644                         continue;
  645                 i386_signal_cpu(cpu, event, mode);
  646         }
  647 }
  648 
  649 /*
  650  * Return the number of running cpus.
  651  * Called with the topology locked.
  652  */
  653 int
  654 i386_active_cpus(void)
  655 {
  656         unsigned int    cpu;
  657         unsigned int    ncpus = 0;
  658 
  659         assert(hw_lock_held((hw_lock_t)&x86_topo_lock));
  660 
  661         for (cpu = 0; cpu < real_ncpus; cpu++) {
  662                 if (cpu_datap(cpu)->cpu_running)
  663                         ncpus++;
  664         }
  665         return(ncpus);
  666 }
  667 
  668 /*
  669  * All-CPU rendezvous:
  670  *      - CPUs are signalled,
  671  *      - all execute the setup function (if specified),
  672  *      - rendezvous (i.e. all cpus reach a barrier),
  673  *      - all execute the action function (if specified),
  674  *      - rendezvous again,
  675  *      - execute the teardown function (if specified), and then
  676  *      - resume.
  677  *
  678  * Note that the supplied external functions _must_ be reentrant and aware
  679  * that they are running in parallel and in an unknown lock context.
  680  */
  681 
  682 static void
  683 mp_rendezvous_action(void)
  684 {
  685         boolean_t intrs_enabled;
  686 
  687         /* setup function */
  688         if (mp_rv_setup_func != NULL)
  689                 mp_rv_setup_func(mp_rv_func_arg);
  690 
  691         intrs_enabled = ml_get_interrupts_enabled();
  692 
  693 
  694         /* spin on entry rendezvous */
  695         atomic_incl(&mp_rv_entry, 1);
  696         while (mp_rv_entry < mp_rv_ncpus) {
  697                 /* poll for pesky tlb flushes if interrupts disabled */
  698                 if (!intrs_enabled)
  699                         handle_pending_TLB_flushes();
  700                 cpu_pause();
  701         }
  702         /* action function */
  703         if (mp_rv_action_func != NULL)
  704                 mp_rv_action_func(mp_rv_func_arg);
  705         /* spin on exit rendezvous */
  706         atomic_incl(&mp_rv_exit, 1);
  707         while (mp_rv_exit < mp_rv_ncpus) {
  708                 if (!intrs_enabled)
  709                         handle_pending_TLB_flushes();
  710                 cpu_pause();
  711         }
  712         /* teardown function */
  713         if (mp_rv_teardown_func != NULL)
  714                 mp_rv_teardown_func(mp_rv_func_arg);
  715 
  716         /* Bump completion count */
  717         atomic_incl(&mp_rv_complete, 1);
  718 }
  719 
  720 void
  721 mp_rendezvous(void (*setup_func)(void *), 
  722               void (*action_func)(void *),
  723               void (*teardown_func)(void *),
  724               void *arg)
  725 {
  726 
  727         if (!smp_initialized) {
  728                 if (setup_func != NULL)
  729                         setup_func(arg);
  730                 if (action_func != NULL)
  731                         action_func(arg);
  732                 if (teardown_func != NULL)
  733                         teardown_func(arg);
  734                 return;
  735         }
  736                 
  737         /* obtain rendezvous lock */
  738         simple_lock(&mp_rv_lock);
  739 
  740         /* set static function pointers */
  741         mp_rv_setup_func = setup_func;
  742         mp_rv_action_func = action_func;
  743         mp_rv_teardown_func = teardown_func;
  744         mp_rv_func_arg = arg;
  745 
  746         mp_rv_entry    = 0;
  747         mp_rv_exit     = 0;
  748         mp_rv_complete = 0;
  749 
  750         /*
  751          * signal other processors, which will call mp_rendezvous_action()
  752          * with interrupts disabled
  753          */
  754         simple_lock(&x86_topo_lock);
  755         mp_rv_ncpus = i386_active_cpus();
  756         i386_signal_cpus(MP_RENDEZVOUS, ASYNC);
  757         simple_unlock(&x86_topo_lock);
  758 
  759         /* call executor function on this cpu */
  760         mp_rendezvous_action();
  761 
  762         /*
  763          * Spin for everyone to complete.
  764          * This is necessary to ensure that all processors have proceeded
  765          * from the exit barrier before we release the rendezvous structure.
  766          */
  767         while (mp_rv_complete < mp_rv_ncpus) {
  768                 cpu_pause();
  769         }
  770         
  771         /* Tidy up */
  772         mp_rv_setup_func = NULL;
  773         mp_rv_action_func = NULL;
  774         mp_rv_teardown_func = NULL;
  775         mp_rv_func_arg = NULL;
  776 
  777         /* release lock */
  778         simple_unlock(&mp_rv_lock);
  779 }
  780 
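A hedged sketch of the three-phase protocol described above: every CPU runs the setup function, meets at the entry barrier, runs the action, meets at the exit barrier, then runs the teardown. The per-CPU arrays and function names are invented for illustration; MAX_CPUS, rdtsc64(), ml_set_interrupts_enabled() and cpu_number() are assumed to come from the headers already included here.

/* Example only: capture each CPU's TSC at roughly the same instant. */
static uint64_t         example_tsc[MAX_CPUS];
static boolean_t        example_iflag[MAX_CPUS];

static void
example_tsc_setup(__unused void *arg)
{
        /* Before the entry barrier: quiesce interrupts on this CPU. */
        example_iflag[cpu_number()] = ml_set_interrupts_enabled(FALSE);
}

static void
example_tsc_action(__unused void *arg)
{
        /* Between the two barriers: all CPUs execute this in parallel. */
        example_tsc[cpu_number()] = rdtsc64();
}

static void
example_tsc_teardown(__unused void *arg)
{
        /* After the exit barrier: restore the saved interrupt state. */
        (void) ml_set_interrupts_enabled(example_iflag[cpu_number()]);
}

static void
example_capture_tsc_all_cpus(void)
{
        mp_rendezvous(example_tsc_setup, example_tsc_action,
                      example_tsc_teardown, NULL);
}
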
  781 void
  782 mp_rendezvous_break_lock(void)
  783 {
  784         simple_lock_init(&mp_rv_lock, 0);
  785 }
  786 
  787 static void
  788 setup_disable_intrs(__unused void * param_not_used)
  789 {
  790         /* disable interrupts before the first barrier */
  791         boolean_t intr = ml_set_interrupts_enabled(FALSE);
  792 
  793         current_cpu_datap()->cpu_iflag = intr;
  794         DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
  795 }
  796 
  797 static void
  798 teardown_restore_intrs(__unused void * param_not_used)
  799 {
  800         /* restore interrupt flag following MTRR changes */
  801         ml_set_interrupts_enabled(current_cpu_datap()->cpu_iflag);
  802         DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
  803 }
  804 
  805 /*
  806  * A wrapper to mp_rendezvous() to call action_func() with interrupts disabled.
  807  * This is exported for use by kexts.
  808  */
  809 void
  810 mp_rendezvous_no_intrs(
  811               void (*action_func)(void *),
  812               void *arg)
  813 {
  814         mp_rendezvous(setup_disable_intrs,
  815                       action_func,
  816                       teardown_restore_intrs,
  817                       arg);     
  818 }
  819 
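For the kext-facing wrapper just above, a driver that must program an MSR identically on every CPU might do something like the following sketch; the argument type, helper names, and the use of wrmsr64() (assumed available from proc_reg.h) are illustrative, not part of this file.

typedef struct {
        uint32_t        msr;
        uint64_t        value;
} example_msr_arg_t;

static void
example_msr_action(void *arg)
{
        example_msr_arg_t *a = (example_msr_arg_t *) arg;

        /* Runs on every running CPU, in parallel, interrupts disabled. */
        wrmsr64(a->msr, a->value);
}

static void
example_msr_write_all(uint32_t msr, uint64_t value)
{
        example_msr_arg_t arg = { .msr = msr, .value = value };

        mp_rendezvous_no_intrs(example_msr_action, (void *) &arg);
}
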
  820 void
  821 handle_pending_TLB_flushes(void)
  822 {
  823         volatile int    *my_word = &current_cpu_datap()->cpu_signals;
  824 
  825         if (i_bit(MP_TLB_FLUSH, my_word)) {
  826                 DBGLOG(cpu_handle, cpu_number(), MP_TLB_FLUSH);
  827                 i_bit_clear(MP_TLB_FLUSH, my_word);
  828                 pmap_update_interrupt();
  829         }
  830 }
  831 
  832 /*
  833  * This is called from cpu_signal_handler() to process an MP_CALL signal.
  834  */
  835 static void
  836 mp_cpus_call_action(void)
  837 {
  838         if (mp_rv_action_func != NULL)
  839                 mp_rv_action_func(mp_rv_func_arg);
  840         atomic_incl(&mp_rv_complete, 1);
  841 }
  842 
  843 /*
  844  * mp_cpus_call() runs a given function on cpus specified in a given cpu mask.
  845  * If the mode is SYNC, the function is called serially on the target cpus
  846  * in logical cpu order. If the mode is ASYNC, the function is called in
  847  * parallel over the specified cpus.
  848  * The action function may be NULL.
  849  * The cpu mask may include the local cpu. Offline cpus are ignored.
  850  * Return does not occur until the function has completed on all cpus.
  851  * The return value is the number of cpus on which the function was called.
  852  */
  853 cpu_t
  854 mp_cpus_call(
  855         cpumask_t       cpus,
  856         mp_sync_t       mode,
  857         void            (*action_func)(void *),
  858         void            *arg)
  859 {
  860         cpu_t           cpu;
  861         boolean_t       intrs_enabled = ml_get_interrupts_enabled();
  862         boolean_t       call_self = FALSE;
  863 
  864         if (!smp_initialized) {
  865                 if ((cpus & CPUMASK_SELF) == 0)
  866                         return 0;
  867                 if (action_func != NULL) {
  868                         (void) ml_set_interrupts_enabled(FALSE);
  869                         action_func(arg);
  870                         ml_set_interrupts_enabled(intrs_enabled);
  871                 }
  872                 return 1;
  873         }
  874                 
  875         /* obtain rendezvous lock */
  876         simple_lock(&mp_rv_lock);
  877 
  878         /* Use the rendezvous data structures for this call */
  879         mp_rv_action_func = action_func;
  880         mp_rv_func_arg = arg;
  881         mp_rv_ncpus = 0;
  882         mp_rv_complete = 0;
  883 
  884         simple_lock(&x86_topo_lock);
  885         for (cpu = 0; cpu < (cpu_t) real_ncpus; cpu++) {
  886                 if (((cpu_to_cpumask(cpu) & cpus) == 0) ||
  887                     !cpu_datap(cpu)->cpu_running)
  888                         continue;
  889                 if (cpu == (cpu_t) cpu_number()) {
  890                         /*
  891                          * We don't IPI ourselves; if calling asynchronously,
  892                          * we defer our call until we have signalled all others.
  893                          */
  894                         call_self = TRUE;
  895                         if (mode == SYNC && action_func != NULL) {
  896                                 (void) ml_set_interrupts_enabled(FALSE);
  897                                 action_func(arg);
  898                                 ml_set_interrupts_enabled(intrs_enabled);
  899                         }
  900                 } else {
  901                         /*
  902                          * Bump count of other cpus called and signal this cpu.
  903                          * Note: we signal asynchronously regardless of mode
  904                          * because we wait on mp_rv_complete either here
  905                          * (if mode == SYNC) or later (if mode == ASYNC).
  906                          * While spinning, poll for TLB flushes if interrupts
  907                          * are disabled.
  908                          */
  909                         mp_rv_ncpus++;
  910                         i386_signal_cpu(cpu, MP_CALL, ASYNC);
  911                         if (mode == SYNC) {
  912                                 simple_unlock(&x86_topo_lock);
  913                                 while (mp_rv_complete < mp_rv_ncpus) {
  914                                         if (!intrs_enabled)
  915                                                 handle_pending_TLB_flushes();
  916                                         cpu_pause();
  917                                 }
  918                                 simple_lock(&x86_topo_lock);
  919                         }
  920                 }
  921         }
  922         simple_unlock(&x86_topo_lock);
  923 
  924         /*
  925          * If calls are being made asynchronously,
  926          * make the local call now if needed, and then
  927          * wait for all other cpus to finish their calls.
  928          */
  929         if (mode == ASYNC) {
  930                 if (call_self && action_func != NULL) {
  931                         (void) ml_set_interrupts_enabled(FALSE);
  932                         action_func(arg);
  933                         ml_set_interrupts_enabled(intrs_enabled);
  934                 }
  935                 while (mp_rv_complete < mp_rv_ncpus) {
  936                         if (!intrs_enabled)
  937                                 handle_pending_TLB_flushes();
  938                         cpu_pause();
  939                 }
  940         }
  941         
  942         /* Determine the number of cpus called */
  943         cpu = mp_rv_ncpus + (call_self ? 1 : 0);
  944 
  945         simple_unlock(&mp_rv_lock);
  946 
  947         return cpu;
  948 }
  949 
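A usage sketch for mp_cpus_call(), with invented helper names: run a counting action on CPUs 0 and 1 plus the caller, synchronously, and report how many processors actually ran it (offline CPUs are skipped, as noted above).

static void
example_count_action(void *arg)
{
        atomic_incl((volatile long *) arg, 1);
}

static void
example_call_some_cpus(void)
{
        volatile long   ran = 0;
        cpumask_t       targets = CPUMASK_SELF |
                                  cpu_to_cpumask(0) | cpu_to_cpumask(1);
        cpu_t           ncalled;

        /* SYNC: the action runs serially on each target, in cpu order,
         * and mp_cpus_call() returns only when all calls are complete. */
        ncalled = mp_cpus_call(targets, SYNC, example_count_action,
                               (void *) &ran);
        kprintf("example: %d cpu(s) ran the action, counter=%ld\n",
                (int) ncalled, (long) ran);
}
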
  950 static void
  951 mp_broadcast_action(void)
  952 {
  953    /* call action function */
  954    if (mp_bc_action_func != NULL)
  955        mp_bc_action_func(mp_bc_func_arg);
  956 
  957    /* if we're the last one through, wake up the instigator */
  958    if (atomic_decl_and_test(&mp_bc_count, 1))
  959        thread_wakeup(((event_t)(uintptr_t) &mp_bc_count));
  960 }
  961 
  962 /*
  963  * mp_broadcast() runs a given function on all active cpus.
  964  * The caller blocks until the function has run on all cpus.
  965  * The caller will also block if there is another pending broadcast.
  966  */
  967 void
  968 mp_broadcast(
  969          void (*action_func)(void *),
  970          void *arg)
  971 {
  972    if (!smp_initialized) {
  973        if (action_func != NULL)
  974                    action_func(arg);
  975        return;
  976    }
  977        
  978    /* obtain broadcast lock */
  979    lck_mtx_lock(&mp_bc_lock);
  980 
  981    /* set static function pointers */
  982    mp_bc_action_func = action_func;
  983    mp_bc_func_arg = arg;
  984 
  985    assert_wait((event_t)(uintptr_t)&mp_bc_count, THREAD_UNINT);
  986 
  987    /*
  988     * signal other processors, which will call mp_broadcast_action()
  989     */
  990    simple_lock(&x86_topo_lock);
  991    mp_bc_ncpus = i386_active_cpus();   /* total including this cpu */
  992    mp_bc_count = mp_bc_ncpus;
  993    i386_signal_cpus(MP_BROADCAST, ASYNC);
  994 
  995    /* call executor function on this cpu */
  996    mp_broadcast_action();
  997    simple_unlock(&x86_topo_lock);
  998 
  999    /* block for all cpus to have run action_func */
 1000    if (mp_bc_ncpus > 1)
 1001        thread_block(THREAD_CONTINUE_NULL);
 1002    else
 1003        clear_wait(current_thread(), THREAD_AWAKENED);
 1004        
 1005    /* release lock */
 1006    lck_mtx_unlock(&mp_bc_lock);
 1007 }
 1008 
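Unlike mp_cpus_call(), mp_broadcast() can block the calling thread (it waits on an event), so it is only usable from thread context. A minimal illustrative caller follows; the action and wrapper names are invented.

static void
example_wbinvd_action(__unused void *arg)
{
        /* Runs once on every active CPU. */
        __asm__ volatile("wbinvd");
}

static void
example_wbinvd_all(void)
{
        /* Returns only after every active CPU has run the action. */
        mp_broadcast(example_wbinvd_action, NULL);
}
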
 1009 void
 1010 i386_activate_cpu(void)
 1011 {
 1012         cpu_data_t      *cdp = current_cpu_datap();
 1013 
 1014         assert(!ml_get_interrupts_enabled());
 1015 
 1016         if (!smp_initialized) {
 1017                 cdp->cpu_running = TRUE;
 1018                 return;
 1019         }
 1020 
 1021         simple_lock(&x86_topo_lock);
 1022         cdp->cpu_running = TRUE;
 1023         simple_unlock(&x86_topo_lock);
 1024 }
 1025 
 1026 extern void etimer_timer_expire(void    *arg);
 1027 
 1028 void
 1029 i386_deactivate_cpu(void)
 1030 {
 1031         cpu_data_t      *cdp = current_cpu_datap();
 1032 
 1033         assert(!ml_get_interrupts_enabled());
 1034 
 1035         simple_lock(&x86_topo_lock);
 1036         cdp->cpu_running = FALSE;
 1037         simple_unlock(&x86_topo_lock);
 1038 
 1039         timer_queue_shutdown(&cdp->rtclock_timer.queue);
 1040         cdp->rtclock_timer.deadline = EndOfAllTime;
 1041         mp_cpus_call(cpu_to_cpumask(master_cpu), ASYNC, etimer_timer_expire, NULL);
 1042 
 1043         /*
 1044          * In case a rendezvous/broadcast/call was initiated to this cpu
 1045          * before we cleared cpu_running, we must perform any actions due.
 1046          */
 1047         if (i_bit(MP_RENDEZVOUS, &cdp->cpu_signals))
 1048                 mp_rendezvous_action();
 1049         if (i_bit(MP_BROADCAST, &cdp->cpu_signals))
 1050                 mp_broadcast_action();
 1051         if (i_bit(MP_CALL, &cdp->cpu_signals))
 1052                 mp_cpus_call_action();
 1053         cdp->cpu_signals = 0;                   /* all clear */
 1054 }
 1055 
 1056 int     pmsafe_debug    = 1;
 1057 
 1058 #if     MACH_KDP
 1059 volatile boolean_t      mp_kdp_trap = FALSE;
 1060 volatile unsigned long  mp_kdp_ncpus;
 1061 boolean_t               mp_kdp_state;
 1062 
 1063 
 1064 void
 1065 mp_kdp_enter(void)
 1066 {
 1067         unsigned int    cpu;
 1068         unsigned int    ncpus;
 1069         unsigned int    my_cpu;
 1070         uint64_t        tsc_timeout;
 1071 
 1072         DBG("mp_kdp_enter()\n");
 1073 
 1074         /*
 1075          * Here to enter the debugger.
 1076          * In case of races, only one cpu is allowed to enter kdp after
 1077          * stopping others.
 1078          */
 1079         mp_kdp_state = ml_set_interrupts_enabled(FALSE);
 1080         simple_lock(&mp_kdp_lock);
 1081         debugger_entry_time = mach_absolute_time();
 1082         if (pmsafe_debug)
 1083             pmSafeMode(&current_cpu_datap()->lcpu, PM_SAFE_FL_SAFE);
 1084 
 1085         while (mp_kdp_trap) {
 1086                 simple_unlock(&mp_kdp_lock);
 1087                 DBG("mp_kdp_enter() race lost\n");
 1088 #if MACH_KDP
 1089                 mp_kdp_wait(TRUE, FALSE);
 1090 #endif
 1091                 simple_lock(&mp_kdp_lock);
 1092         }
 1093         my_cpu = cpu_number();
 1094         debugger_cpu = my_cpu;
 1095         mp_kdp_ncpus = 1;       /* self */
 1096         mp_kdp_trap = TRUE;
 1097         simple_unlock(&mp_kdp_lock);
 1098 
 1099         /*
 1100          * Deliver a nudge to other cpus, counting how many
 1101          */
 1102         DBG("mp_kdp_enter() signaling other processors\n");
 1103         if (force_immediate_debugger_NMI == FALSE) {
 1104                 for (ncpus = 1, cpu = 0; cpu < real_ncpus; cpu++) {
 1105                         if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
 1106                                 continue;
 1107                         ncpus++;
 1108                         i386_signal_cpu(cpu, MP_KDP, ASYNC);
 1109                 }
 1110                 /*
 1111                  * Wait for other processors to synchronize
 1112                  */
 1113                 DBG("mp_kdp_enter() waiting for (%d) processors to suspend\n", ncpus);
 1114 
 1115                 /*
 1116                  * This timeout is rather arbitrary; we don't want to NMI
 1117                  * processors that are executing at potentially
 1118                  * "unsafe-to-interrupt" points such as the trampolines,
 1119                  * but neither do we want to lose state by waiting too long.
 1120                  */
 1121                 tsc_timeout = rdtsc64() + (ncpus * 1000 * 1000);
 1122 
 1123                 while (mp_kdp_ncpus != ncpus && rdtsc64() < tsc_timeout) {
 1124                         /*
 1125                          * A TLB shootdown request may be pending--this would
 1126                          * result in the requesting processor waiting in
 1127                          * PMAP_UPDATE_TLBS() until this processor deals with it.
 1128                          * Process it, so it can now enter mp_kdp_wait()
 1129                          */
 1130                         handle_pending_TLB_flushes();
 1131                         cpu_pause();
 1132                 }
 1133                 /* If we've timed out, and some processor(s) are still unresponsive,
 1134                  * interrupt them with an NMI via the local APIC.
 1135                  */
 1136                 if (mp_kdp_ncpus != ncpus) {
 1137                         for (cpu = 0; cpu < real_ncpus; cpu++) {
 1138                                 if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
 1139                                         continue;
 1140                                 if (cpu_signal_pending(cpu, MP_KDP))
 1141                                         cpu_NMI_interrupt(cpu);
 1142                         }
 1143                 }
 1144         }
 1145         else
 1146                 for (cpu = 0; cpu < real_ncpus; cpu++) {
 1147                         if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
 1148                                 continue;
 1149                         cpu_NMI_interrupt(cpu);
 1150                 }
 1151 
 1152         DBG("mp_kdp_enter() %lu processors done %s\n",
 1153             mp_kdp_ncpus, (mp_kdp_ncpus == ncpus) ? "OK" : "timed out");
 1154         
 1155         postcode(MP_KDP_ENTER);
 1156 }
 1157 
 1158 static boolean_t
 1159 cpu_signal_pending(int cpu, mp_event_t event)
 1160 {
 1161         volatile int    *signals = &cpu_datap(cpu)->cpu_signals;
 1162         boolean_t retval = FALSE;
 1163 
 1164         if (i_bit(event, signals))
 1165                 retval = TRUE;
 1166         return retval;
 1167 }
 1168 
 1169 long kdp_x86_xcpu_invoke(const uint16_t lcpu, kdp_x86_xcpu_func_t func,
 1170                          void *arg0, void *arg1)
 1171 {
 1172         if (lcpu > (real_ncpus - 1))
 1173                 return -1;
 1174 
 1175         if (func == NULL)
 1176                 return -1;
 1177 
 1178         kdp_xcpu_call_func.func = func;
 1179         kdp_xcpu_call_func.ret  = -1;
 1180         kdp_xcpu_call_func.arg0 = arg0;
 1181         kdp_xcpu_call_func.arg1 = arg1;
 1182         kdp_xcpu_call_func.cpu  = lcpu;
 1183         DBG("Invoking function %p on CPU %d\n", func, (int32_t)lcpu);
 1184         while (kdp_xcpu_call_func.cpu != KDP_XCPU_NONE)
 1185                 cpu_pause();
 1186         return kdp_xcpu_call_func.ret;
 1187 }
 1188 
 1189 static void
 1190 kdp_x86_xcpu_poll(void)
 1191 {
 1192         if ((uint16_t)cpu_number() == kdp_xcpu_call_func.cpu) {
 1193             kdp_xcpu_call_func.ret = 
 1194                     kdp_xcpu_call_func.func(kdp_xcpu_call_func.arg0,
 1195                                             kdp_xcpu_call_func.arg1,
 1196                                             cpu_number());
 1197                 kdp_xcpu_call_func.cpu = KDP_XCPU_NONE;
 1198         }
 1199 }
 1200 
 1201 static void
 1202 mp_kdp_wait(boolean_t flush, boolean_t isNMI)
 1203 {
 1204         DBG("mp_kdp_wait()\n");
 1205         /* If an I/O port has been specified as a debugging aid, issue a read */
 1206         panic_io_port_read();
 1207 
 1208 #if CONFIG_MCA
 1209         /* If we've trapped due to a machine-check, save MCA registers */
 1210         mca_check_save();
 1211 #endif
 1212 
 1213         if (pmsafe_debug)
 1214             pmSafeMode(&current_cpu_datap()->lcpu, PM_SAFE_FL_SAFE);
 1215 
 1216         atomic_incl((volatile long *)&mp_kdp_ncpus, 1);
 1217         while (mp_kdp_trap || (isNMI == TRUE)) {
 1218                 /*
 1219                  * A TLB shootdown request may be pending--this would result
 1220                  * in the requesting processor waiting in PMAP_UPDATE_TLBS()
 1221                  * until this processor handles it.
 1222                  * Process it, so it can now enter mp_kdp_wait()
 1223                  */
 1224                 if (flush)
 1225                         handle_pending_TLB_flushes();
 1226 
 1227                 kdp_x86_xcpu_poll();
 1228                 cpu_pause();
 1229         }
 1230 
 1231         if (pmsafe_debug)
 1232             pmSafeMode(&current_cpu_datap()->lcpu, PM_SAFE_FL_NORMAL);
 1233 
 1234         atomic_decl((volatile long *)&mp_kdp_ncpus, 1);
 1235         DBG("mp_kdp_wait() done\n");
 1236 }
 1237 
 1238 void
 1239 mp_kdp_exit(void)
 1240 {
 1241         DBG("mp_kdp_exit()\n");
 1242         debugger_cpu = -1;
 1243         atomic_decl((volatile long *)&mp_kdp_ncpus, 1);
 1244 
 1245         debugger_exit_time = mach_absolute_time();
 1246 
 1247         mp_kdp_trap = FALSE;
 1248         __asm__ volatile("mfence");
 1249 
 1250         /* Wait for other processors to stop spinning. XXX needs timeout */
 1251         DBG("mp_kdp_exit() waiting for processors to resume\n");
 1252         while (mp_kdp_ncpus > 0) {
 1253                 /*
 1254                  * a TLB shootdown request may be pending... this would result in the requesting
 1255                  * processor waiting in PMAP_UPDATE_TLBS() until this processor deals with it.
 1256                  * Process it, so it can now enter mp_kdp_wait()
 1257                  */
 1258                 handle_pending_TLB_flushes();
 1259 
 1260                 cpu_pause();
 1261         }
 1262 
 1263         if (pmsafe_debug)
 1264             pmSafeMode(&current_cpu_datap()->lcpu, PM_SAFE_FL_NORMAL);
 1265 
 1266         DBG("mp_kdp_exit() done\n");
 1267         (void) ml_set_interrupts_enabled(mp_kdp_state);
 1268         postcode(0);
 1269 }
 1270 #endif  /* MACH_KDP */
 1271 
 1272 boolean_t
 1273 mp_recent_debugger_activity(void) {
 1274         return (((mach_absolute_time() - debugger_entry_time) < LastDebuggerEntryAllowance) ||
 1275             ((mach_absolute_time() - debugger_exit_time) < LastDebuggerEntryAllowance));
 1276 }
 1277 
 1278 /*ARGSUSED*/
 1279 void
 1280 init_ast_check(
 1281         __unused processor_t    processor)
 1282 {
 1283 }
 1284 
 1285 void
 1286 cause_ast_check(
 1287         processor_t     processor)
 1288 {
 1289         int     cpu = processor->cpu_id;
 1290 
 1291         if (cpu != cpu_number()) {
 1292                 i386_signal_cpu(cpu, MP_AST, ASYNC);
 1293         }
 1294 }
 1295 
 1296 #if MACH_KDB
 1297 /*
 1298  * invoke kdb on slave processors 
 1299  */
 1300 
 1301 void
 1302 remote_kdb(void)
 1303 {
 1304         unsigned int    my_cpu = cpu_number();
 1305         unsigned int    cpu;
 1306         int kdb_ncpus;
 1307         uint64_t tsc_timeout = 0;
 1308         
 1309         mp_kdb_trap = TRUE;
 1310         mp_kdb_ncpus = 1;
 1311         for (kdb_ncpus = 1, cpu = 0; cpu < real_ncpus; cpu++) {
 1312                 if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
 1313                         continue;
 1314                 kdb_ncpus++;
 1315                 i386_signal_cpu(cpu, MP_KDB, ASYNC);
 1316         }
 1317         DBG("remote_kdb() waiting for (%d) processors to suspend\n",kdb_ncpus);
 1318 
 1319         tsc_timeout = rdtsc64() + (kdb_ncpus * 100 * 1000 * 1000);
 1320 
 1321         while (mp_kdb_ncpus != kdb_ncpus && rdtsc64() < tsc_timeout) {
 1322                 /*
 1323                  * a TLB shootdown request may be pending... this would result in the requesting
 1324                  * processor waiting in PMAP_UPDATE_TLBS() until this processor deals with it.
 1325                  * Process it, so it can now enter mp_kdb_wait()
 1326                  */
 1327                 handle_pending_TLB_flushes();
 1328 
 1329                 cpu_pause();
 1330         }
 1331         DBG("remote_kdb() %ld processors done %s\n",
 1332                 mp_kdb_ncpus, (mp_kdb_ncpus == kdb_ncpus) ? "OK" : "timed out");
 1333 }
 1334 
 1335 static void
 1336 mp_kdb_wait(void)
 1337 {
 1338         DBG("mp_kdb_wait()\n");
 1339 
 1340         /* If an I/O port has been specified as a debugging aid, issue a read */
 1341         panic_io_port_read();
 1342 
 1343         atomic_incl(&mp_kdb_ncpus, 1);
 1344         while (mp_kdb_trap) {
 1345                 /*
 1346                  * a TLB shootdown request may be pending... this would result in the requesting
 1347                  * processor waiting in PMAP_UPDATE_TLBS() until this processor deals with it.
 1348                  * Process it, so it can now enter mp_kdb_wait()
 1349                  */
 1350                 handle_pending_TLB_flushes();
 1351 
 1352                 cpu_pause();
 1353         }
 1354         atomic_decl((volatile long *)&mp_kdb_ncpus, 1);
 1355         DBG("mp_kdb_wait() done\n");
 1356 }
 1357 
 1358 /*
 1359  * Clear kdb interrupt
 1360  */
 1361 
 1362 void
 1363 clear_kdb_intr(void)
 1364 {
 1365         mp_disable_preemption();
 1366         i_bit_clear(MP_KDB, &current_cpu_datap()->cpu_signals);
 1367         mp_enable_preemption();
 1368 }
 1369 
 1370 void
 1371 mp_kdb_exit(void)
 1372 {
 1373         DBG("mp_kdb_exit()\n");
 1374         atomic_decl((volatile long *)&mp_kdb_ncpus, 1);
 1375         mp_kdb_trap = FALSE;
 1376         __asm__ volatile("mfence");
 1377 
 1378         while (mp_kdb_ncpus > 0) {
 1379                 /*
 1380                  * a TLB shootdown request may be pending... this would result in the requesting
 1381                  * processor waiting in PMAP_UPDATE_TLBS() until this processor deals with it.
 1382                  * Process it, so it can now enter mp_kdb_wait()
 1383                  */
 1384                 handle_pending_TLB_flushes();
 1385 
 1386                 cpu_pause();
 1387         }
 1388 
 1389         DBG("mp_kdb_exit() done\n");
 1390 }
 1391 
 1392 #endif /* MACH_KDB */
 1393 
 1394 void
 1395 slave_machine_init(void *param)
 1396 {
 1397         /*
 1398          * Here in process context, but with interrupts disabled.
 1399          */
 1400         DBG("slave_machine_init() CPU%d\n", get_cpu_number());
 1401 
 1402         if (param == FULL_SLAVE_INIT) {
 1403                 /*
 1404                  * Cold start
 1405                  */
 1406                 clock_init();
 1407 
 1408                 cpu_machine_init();     /* Interrupts enabled hereafter */
 1409         }
 1410 }
 1411 
 1412 #undef cpu_number
 1413 int cpu_number(void)
 1414 {
 1415         return get_cpu_number();
 1416 }
 1417 
 1418 #if     MACH_KDB
 1419 #include <ddb/db_output.h>
 1420 
 1421 #define TRAP_DEBUG 0 /* Must match interrupt.s and spl.s */
 1422 
 1423 
 1424 #if     TRAP_DEBUG
 1425 #define MTRAPS 100
 1426 struct mp_trap_hist_struct {
 1427         unsigned char type;
 1428         unsigned char data[5];
 1429 } trap_hist[MTRAPS], *cur_trap_hist = trap_hist,
 1430     *max_trap_hist = &trap_hist[MTRAPS];
 1431 
 1432 void db_trap_hist(void);
 1433 
 1434 /*
 1435  * SPL:
 1436  *      1: new spl
 1437  *      2: old spl
 1438  *      3: new tpr
 1439  *      4: old tpr
 1440  * INT:
 1441  *      1: int vec
 1442  *      2: old spl
 1443  *      3: new spl
 1444  *      4: post eoi tpr
 1445  *      5: exit tpr
 1446  */
 1447 
 1448 void
 1449 db_trap_hist(void)
 1450 {
 1451         int i,j;
 1452         for(i=0;i<MTRAPS;i++)
 1453             if (trap_hist[i].type == 1 || trap_hist[i].type == 2) {
 1454                     db_printf("%s%s",
 1455                               (&trap_hist[i]>=cur_trap_hist)?"*":" ",
 1456                               (trap_hist[i].type == 1)?"SPL":"INT");
 1457                     for(j=0;j<5;j++)
 1458                         db_printf(" %02x", trap_hist[i].data[j]);
 1459                     db_printf("\n");
 1460             }
 1461                 
 1462 }
 1463 #endif  /* TRAP_DEBUG */
 1464 #endif  /* MACH_KDB */
 1465 

This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.