The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/osfmk/i386/mp.c

Version: -  FREEBSD  -  FREEBSD11  -  FREEBSD10  -  FREEBSD9  -  FREEBSD92  -  FREEBSD91  -  FREEBSD90  -  FREEBSD8  -  FREEBSD82  -  FREEBSD81  -  FREEBSD80  -  FREEBSD7  -  FREEBSD74  -  FREEBSD73  -  FREEBSD72  -  FREEBSD71  -  FREEBSD70  -  FREEBSD6  -  FREEBSD64  -  FREEBSD63  -  FREEBSD62  -  FREEBSD61  -  FREEBSD60  -  FREEBSD5  -  FREEBSD55  -  FREEBSD54  -  FREEBSD53  -  FREEBSD52  -  FREEBSD51  -  FREEBSD50  -  FREEBSD4  -  FREEBSD3  -  FREEBSD22  -  linux-2.6  -  linux-2.4.22  -  MK83  -  MK84  -  PLAN9  -  DFBSD  -  NETBSD  -  NETBSD5  -  NETBSD4  -  NETBSD3  -  NETBSD20  -  OPENBSD  -  xnu-517  -  xnu-792  -  xnu-792.6.70  -  xnu-1228  -  xnu-1456.1.26  -  xnu-1699.24.8  -  xnu-2050.18.24  -  OPENSOLARIS  -  minix-3-1-1 
SearchContext: -  none  -  3  -  10 

    1 /*
    2  * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
    3  *
    4  * @APPLE_LICENSE_HEADER_START@
    5  * 
    6  * The contents of this file constitute Original Code as defined in and
    7  * are subject to the Apple Public Source License Version 1.1 (the
    8  * "License").  You may not use this file except in compliance with the
    9  * License.  Please obtain a copy of the License at
   10  * http://www.apple.com/publicsource and read it before using this file.
   11  * 
   12  * This Original Code and all software distributed under the License are
   13  * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
   14  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
   15  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
   16  * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
   17  * License for the specific language governing rights and limitations
   18  * under the License.
   19  * 
   20  * @APPLE_LICENSE_HEADER_END@
   21  */
   22 /*
   23  * @OSF_COPYRIGHT@
   24  */
   25 
   26 #include <mach_rt.h>
   27 #include <mach_kdb.h>
   28 #include <mach_kdp.h>
   29 #include <mach_ldebug.h>
   30 #include <gprof.h>
   31 
   32 #include <mach/mach_types.h>
   33 #include <mach/kern_return.h>
   34 
   35 #include <kern/kern_types.h>
   36 #include <kern/startup.h>
   37 #include <kern/processor.h>
   38 #include <kern/cpu_number.h>
   39 #include <kern/cpu_data.h>
   40 #include <kern/assert.h>
   41 #include <kern/machine.h>
   42 
   43 #include <vm/vm_map.h>
   44 #include <vm/vm_kern.h>
   45 
   46 #include <profiling/profile-mk.h>
   47 
   48 #include <i386/mp.h>
   49 #include <i386/mp_events.h>
   50 #include <i386/mp_slave_boot.h>
   51 #include <i386/apic.h>
   52 #include <i386/ipl.h>
   53 #include <i386/fpu.h>
   54 #include <i386/pio.h>
   55 #include <i386/cpuid.h>
   56 #include <i386/proc_reg.h>
   57 #include <i386/machine_cpu.h>
   58 #include <i386/misc_protos.h>
   59 #include <i386/mtrr.h>
   60 #include <i386/postcode.h>
   61 #include <i386/perfmon.h>
   62 #include <i386/cpu_threads.h>
   63 #include <i386/mp_desc.h>
   64 
   65 #if     MP_DEBUG
   66 #define PAUSE           delay(1000000)
   67 #define DBG(x...)       kprintf(x)
   68 #else
   69 #define DBG(x...)
   70 #define PAUSE
   71 #endif  /* MP_DEBUG */
   72 
   73 /*
   74  * By default, use high vectors to leave vector space for systems
   75  * with multiple I/O APIC's. However some systems that boot with
   76  * local APIC disabled will hang in SMM when vectors greater than
   77  * 0x5F are used. Those systems are not expected to have I/O APIC
   78  * so 16 (0x50 - 0x40) vectors for legacy PIC support is perfect.
   79  */
   80 #define LAPIC_DEFAULT_INTERRUPT_BASE    0xD0
   81 #define LAPIC_REDUCED_INTERRUPT_BASE    0x50
   82 /*
   83  * Specific lapic interrupts are relative to this base:
   84  */ 
   85 #define LAPIC_PERFCNT_INTERRUPT         0xB
   86 #define LAPIC_TIMER_INTERRUPT           0xC
   87 #define LAPIC_SPURIOUS_INTERRUPT        0xD     
   88 #define LAPIC_INTERPROCESSOR_INTERRUPT  0xE
   89 #define LAPIC_ERROR_INTERRUPT           0xF
   90 
   91 /* Initialize lapic_id so cpu_number() works on non SMP systems */
   92 unsigned long   lapic_id_initdata = 0;
   93 unsigned long   lapic_id = (unsigned long)&lapic_id_initdata;
   94 vm_offset_t     lapic_start;
   95 
   96 static i386_intr_func_t lapic_timer_func;
   97 static i386_intr_func_t lapic_pmi_func;
   98 
   99 /* TRUE if local APIC was enabled by the OS not by the BIOS */
  100 static boolean_t lapic_os_enabled = FALSE;
  101 
  102 /* Base vector for local APIC interrupt sources */
  103 int lapic_interrupt_base = LAPIC_DEFAULT_INTERRUPT_BASE;
  104 
  105 void            slave_boot_init(void);
  106 
  107 static void     mp_kdp_wait(void);
  108 static void     mp_rendezvous_action(void);
  109 
  110 boolean_t       smp_initialized = FALSE;
  111 
  112 decl_simple_lock_data(,mp_kdp_lock);
  113 
  114 decl_mutex_data(static, mp_cpu_boot_lock);
  115 
  116 /* Variables needed for MP rendezvous. */
  117 static void             (*mp_rv_setup_func)(void *arg);
  118 static void             (*mp_rv_action_func)(void *arg);
  119 static void             (*mp_rv_teardown_func)(void *arg);
  120 static void             *mp_rv_func_arg;
  121 static int              mp_rv_ncpus;
  122 static long             mp_rv_waiters[2];
  123 decl_simple_lock_data(,mp_rv_lock);
  124 
  125 int             lapic_to_cpu[MAX_CPUS];
  126 int             cpu_to_lapic[MAX_CPUS];
  127 
  128 static void
  129 lapic_cpu_map_init(void)
  130 {
  131         int     i;
  132 
  133         for (i = 0; i < MAX_CPUS; i++) {
  134                 lapic_to_cpu[i] = -1;
  135                 cpu_to_lapic[i] = -1;
  136         }
  137 }
  138 
  139 void
  140 lapic_cpu_map(int apic_id, int cpu)
  141 {
  142         cpu_to_lapic[cpu] = apic_id;
  143         lapic_to_cpu[apic_id] = cpu;
  144 }
  145 
  146 #ifdef MP_DEBUG
  147 static void
  148 lapic_cpu_map_dump(void)
  149 {
  150         int     i;
  151 
  152         for (i = 0; i < MAX_CPUS; i++) {
  153                 if (cpu_to_lapic[i] == -1)
  154                         continue;
  155                 kprintf("cpu_to_lapic[%d]: %d\n",
  156                         i, cpu_to_lapic[i]);
  157         }
  158         for (i = 0; i < MAX_CPUS; i++) {
  159                 if (lapic_to_cpu[i] == -1)
  160                         continue;
  161                 kprintf("lapic_to_cpu[%d]: %d\n",
  162                         i, lapic_to_cpu[i]);
  163         }
  164 }
  165 #define LAPIC_CPU_MAP_DUMP()    lapic_cpu_map_dump()
  166 #define LAPIC_DUMP()            lapic_dump()
  167 #else
  168 #define LAPIC_CPU_MAP_DUMP()
  169 #define LAPIC_DUMP()
  170 #endif /* MP_DEBUG */
  171 
  172 #define LAPIC_REG(reg) \
  173         (*((volatile int *)(lapic_start + LAPIC_##reg)))
  174 #define LAPIC_REG_OFFSET(reg,off) \
  175         (*((volatile int *)(lapic_start + LAPIC_##reg + (off))))
  176 
  177 #define LAPIC_VECTOR(src) \
  178         (lapic_interrupt_base + LAPIC_##src##_INTERRUPT)
  179 
  180 #define LAPIC_ISR_IS_SET(base,src) \
  181         (LAPIC_REG_OFFSET(ISR_BASE,((base+LAPIC_##src##_INTERRUPT)/32)*0x10) & \
  182                 (1 <<((base + LAPIC_##src##_INTERRUPT)%32)))
  183 
  184 #if GPROF
  185 /*
  186  * Initialize dummy structs for profiling. These aren't used but
  187  * allows hertz_tick() to be built with GPROF defined.
  188  */
  189 struct profile_vars _profile_vars;
  190 struct profile_vars *_profile_vars_cpus[MAX_CPUS] = { &_profile_vars };
  191 #define GPROF_INIT()                                                    \
  192 {                                                                       \
  193         int     i;                                                      \
  194                                                                         \
  195         /* Hack to initialize pointers to unused profiling structs */   \
  196         for (i = 1; i < MAX_CPUS; i++)                          \
  197                 _profile_vars_cpus[i] = &_profile_vars;                 \
  198 }
  199 #else
  200 #define GPROF_INIT()
  201 #endif /* GPROF */
  202 
  203 extern void     master_up(void);
  204 
/*
 * smp_init() - one-time SMP/local-APIC initialization, run on the
 * boot processor.
 *
 * Probes for a local APIC (returning early, i.e. staying
 * uniprocessor, if none is found), verifies via MSR_IA32_APIC_BASE
 * that we are the enabled BSP, maps the APIC register page into the
 * kernel map, seeds the lapic<->cpu map with this cpu, then
 * initializes the APIC, cpu threads, performance counters and the
 * slave-boot trampoline before setting smp_initialized.
 */
void
smp_init(void)
{
        int             result;
        vm_map_entry_t  entry;
        uint32_t        lo;
        uint32_t        hi;
        boolean_t       is_boot_processor;
        boolean_t       is_lapic_enabled;
        vm_offset_t     lapic_base;

        simple_lock_init(&mp_kdp_lock, 0);
        simple_lock_init(&mp_rv_lock, 0);
        mutex_init(&mp_cpu_boot_lock, 0);
        console_init();

        /* Local APIC? */
        if (!lapic_probe())
                return;

        /* Examine the local APIC state */
        rdmsr(MSR_IA32_APIC_BASE, lo, hi);
        is_boot_processor = (lo & MSR_IA32_APIC_BASE_BSP) != 0;
        is_lapic_enabled  = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0;
        lapic_base = (lo &  MSR_IA32_APIC_BASE_BASE);
        kprintf("MSR_IA32_APIC_BASE 0x%x %s %s\n", lapic_base,
                is_lapic_enabled ? "enabled" : "disabled",
                is_boot_processor ? "BSP" : "AP");
        if (!is_boot_processor || !is_lapic_enabled)
                panic("Unexpected local APIC state\n");

        /* Establish a map to the local apic */
        lapic_start = vm_map_min(kernel_map);
        result = vm_map_find_space(kernel_map, &lapic_start,
                                   round_page(LAPIC_SIZE), 0, &entry);
        if (result != KERN_SUCCESS) {
                panic("smp_init: vm_map_find_entry FAILED (err=%d)", result);
        }
        /* NOTE(review): vm_map_find_space() apparently returns with the
         * map locked — hence the explicit unlock here; confirm. */
        vm_map_unlock(kernel_map);
        pmap_enter(pmap_kernel(),
                        lapic_start,
                        (ppnum_t) i386_btop(lapic_base),
                        VM_PROT_READ|VM_PROT_WRITE,
                        VM_WIMG_USE_DEFAULT,
                        TRUE);
        /* Re-point lapic_id (used by cpu_number()) at the mapped ID reg */
        lapic_id = (unsigned long)(lapic_start + LAPIC_ID);

        if ((LAPIC_REG(VERSION)&LAPIC_VERSION_MASK) != 0x14) {
                printf("Local APIC version not 0x14 as expected\n");
        }

        /* Set up the lapic_id <-> cpu_number map and add this boot processor */
        lapic_cpu_map_init();
        lapic_cpu_map((LAPIC_REG(ID)>>LAPIC_ID_SHIFT)&LAPIC_ID_MASK, 0);

        lapic_init();

        cpu_thread_init();

        if (pmc_init() != KERN_SUCCESS)
                printf("Performance counters not available\n");

        GPROF_INIT();
        DBGLOG_CPU_INIT(master_cpu);

        slave_boot_init();
        master_up();

        smp_initialized = TRUE;

        return;
}
  277 
  278 
/*
 * Read the local APIC Error Status Register.  The ESR is a
 * write-read register: writing it latches the accumulated error
 * bits, which the subsequent read then returns.
 */
static int
lapic_esr_read(void)
{
        /* write-read register */
        LAPIC_REG(ERROR_STATUS) = 0;
        return LAPIC_REG(ERROR_STATUS);
}
  286 
/*
 * Clear the local APIC Error Status Register.  Two back-to-back
 * writes: the first latches any pending errors, the second clears
 * the latched value.
 */
static void 
lapic_esr_clear(void)
{
        LAPIC_REG(ERROR_STATUS) = 0;
        LAPIC_REG(ERROR_STATUS) = 0;
}
  293 
/*
 * Printable names for the 3-bit LVT delivery-mode field, indexed
 * by the field's value; used by lapic_dump() below.
 */
static const char *DM[8] = {
        "Fixed",
        "Lowest Priority",
        "Invalid",
        "Invalid",
        "NMI",
        "Reset",
        "Invalid",
        "ExtINT"};
  303 
/*
 * lapic_dump() - print the state of this cpu's local APIC to the
 * kernel console: id/version, priority registers, SVR, every LVT
 * entry, timer counts, ESR, and the raw TMR/IRR/ISR bit arrays.
 * Debug aid only; reads registers but modifies nothing except the
 * ESR (via the latching read in lapic_esr_read()).
 */
void
lapic_dump(void)
{
        int     i;

/* Prefix a flag's name with '!' when the bit is clear */
#define BOOL(a) ((a)?' ':'!')

        kprintf("LAPIC %d at 0x%x version 0x%x\n", 
                (LAPIC_REG(ID)>>LAPIC_ID_SHIFT)&LAPIC_ID_MASK,
                lapic_start,
                LAPIC_REG(VERSION)&LAPIC_VERSION_MASK);
        kprintf("Priorities: Task 0x%x  Arbitration 0x%x  Processor 0x%x\n",
                LAPIC_REG(TPR)&LAPIC_TPR_MASK,
                LAPIC_REG(APR)&LAPIC_APR_MASK,
                LAPIC_REG(PPR)&LAPIC_PPR_MASK);
        kprintf("Destination Format 0x%x Logical Destination 0x%x\n",
                LAPIC_REG(DFR)>>LAPIC_DFR_SHIFT,
                LAPIC_REG(LDR)>>LAPIC_LDR_SHIFT);
        kprintf("%cEnabled %cFocusChecking SV 0x%x\n",
                BOOL(LAPIC_REG(SVR)&LAPIC_SVR_ENABLE),
                BOOL(!(LAPIC_REG(SVR)&LAPIC_SVR_FOCUS_OFF)),
                LAPIC_REG(SVR) & LAPIC_SVR_MASK);
        kprintf("LVT_TIMER:   Vector 0x%02x %s %cmasked %s\n",
                LAPIC_REG(LVT_TIMER)&LAPIC_LVT_VECTOR_MASK,
                (LAPIC_REG(LVT_TIMER)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
                BOOL(LAPIC_REG(LVT_TIMER)&LAPIC_LVT_MASKED),
                (LAPIC_REG(LVT_TIMER)&LAPIC_LVT_PERIODIC)?"Periodic":"OneShot");
        kprintf("  Initial Count: 0x%08x \n", LAPIC_REG(TIMER_INITIAL_COUNT));
        kprintf("  Current Count: 0x%08x \n", LAPIC_REG(TIMER_CURRENT_COUNT));
        kprintf("  Divide Config: 0x%08x \n", LAPIC_REG(TIMER_DIVIDE_CONFIG));
        kprintf("LVT_PERFCNT: Vector 0x%02x [%s] %s %cmasked\n",
                LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_VECTOR_MASK,
                DM[(LAPIC_REG(LVT_PERFCNT)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK],
                (LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
                BOOL(LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_MASKED));
        kprintf("LVT_LINT0:   Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
                LAPIC_REG(LVT_LINT0)&LAPIC_LVT_VECTOR_MASK,
                DM[(LAPIC_REG(LVT_LINT0)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK],
                (LAPIC_REG(LVT_LINT0)&LAPIC_LVT_TM_LEVEL)?"Level":"Edge ",
                (LAPIC_REG(LVT_LINT0)&LAPIC_LVT_IP_PLRITY_LOW)?"Low ":"High",
                (LAPIC_REG(LVT_LINT0)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
                BOOL(LAPIC_REG(LVT_LINT0)&LAPIC_LVT_MASKED));
        kprintf("LVT_LINT1:   Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
                LAPIC_REG(LVT_LINT1)&LAPIC_LVT_VECTOR_MASK,
                DM[(LAPIC_REG(LVT_LINT1)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK],
                (LAPIC_REG(LVT_LINT1)&LAPIC_LVT_TM_LEVEL)?"Level":"Edge ",
                (LAPIC_REG(LVT_LINT1)&LAPIC_LVT_IP_PLRITY_LOW)?"Low ":"High",
                (LAPIC_REG(LVT_LINT1)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
                BOOL(LAPIC_REG(LVT_LINT1)&LAPIC_LVT_MASKED));
        kprintf("LVT_ERROR:   Vector 0x%02x %s %cmasked\n",
                LAPIC_REG(LVT_ERROR)&LAPIC_LVT_VECTOR_MASK,
                (LAPIC_REG(LVT_ERROR)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
                BOOL(LAPIC_REG(LVT_ERROR)&LAPIC_LVT_MASKED));
        kprintf("ESR: %08x \n", lapic_esr_read());
        /* Column-header ruler for the 256-bit register dumps below */
        kprintf("       ");
        for(i=0xf; i>=0; i--)
                kprintf("%x%x%x%x",i,i,i,i);
        kprintf("\n");
        /* Each of TMR/IRR/ISR is 8 x 32-bit words spaced 0x10 apart */
        kprintf("TMR: 0x");
        for(i=7; i>=0; i--)
                kprintf("%08x",LAPIC_REG_OFFSET(TMR_BASE, i*0x10));
        kprintf("\n");
        kprintf("IRR: 0x");
        for(i=7; i>=0; i--)
                kprintf("%08x",LAPIC_REG_OFFSET(IRR_BASE, i*0x10));
        kprintf("\n");
        kprintf("ISR: 0x");
        for(i=7; i >= 0; i--)
                kprintf("%08x",LAPIC_REG_OFFSET(ISR_BASE, i*0x10));
        kprintf("\n");
}
  375 
  376 boolean_t
  377 lapic_probe(void)
  378 {
  379         uint32_t        lo;
  380         uint32_t        hi;
  381 
  382         if (cpuid_features() & CPUID_FEATURE_APIC)
  383                 return TRUE;
  384 
  385         if (cpuid_family() == 6 || cpuid_family() == 15) {
  386                 /*
  387                  * Mobile Pentiums:
  388                  * There may be a local APIC which wasn't enabled by BIOS.
  389                  * So we try to enable it explicitly.
  390                  */
  391                 rdmsr(MSR_IA32_APIC_BASE, lo, hi);
  392                 lo &= ~MSR_IA32_APIC_BASE_BASE;
  393                 lo |= MSR_IA32_APIC_BASE_ENABLE | LAPIC_START;
  394                 lo |= MSR_IA32_APIC_BASE_ENABLE;
  395                 wrmsr(MSR_IA32_APIC_BASE, lo, hi);
  396 
  397                 /*
  398                  * Re-initialize cpu features info and re-check.
  399                  */
  400                 set_cpu_model();
  401                 if (cpuid_features() & CPUID_FEATURE_APIC) {
  402                         printf("Local APIC discovered and enabled\n");
  403                         lapic_os_enabled = TRUE;
  404                         lapic_interrupt_base = LAPIC_REDUCED_INTERRUPT_BASE;
  405                         return TRUE;
  406                 }
  407         }
  408 
  409         return FALSE;
  410 }
  411 
/*
 * lapic_shutdown() - disable the local APIC, but only if it was the
 * OS (lapic_probe()) rather than the BIOS that enabled it.
 *
 * Masks all LVT sources, clears the software-enable bit in the SVR,
 * then clears the hardware enable bit in MSR_IA32_APIC_BASE and
 * refreshes the cached cpu feature info.  Runs with preemption
 * disabled so all register writes hit this cpu's APIC.
 */
void
lapic_shutdown(void)
{
        uint32_t lo;
        uint32_t hi;
        uint32_t value;

        /* Shutdown if local APIC was enabled by OS */
        if (lapic_os_enabled == FALSE)
                return;

        mp_disable_preemption();

        /* ExtINT: masked (only the master cpu routes ExtINT via LINT0) */
        if (get_cpu_number() == master_cpu) {
                value = LAPIC_REG(LVT_LINT0);
                value |= LAPIC_LVT_MASKED;
                LAPIC_REG(LVT_LINT0) = value;
        }

        /* Timer: masked */
        LAPIC_REG(LVT_TIMER) |= LAPIC_LVT_MASKED;

        /* Perfmon: masked */
        LAPIC_REG(LVT_PERFCNT) |= LAPIC_LVT_MASKED;

        /* Error: masked */
        LAPIC_REG(LVT_ERROR) |= LAPIC_LVT_MASKED;

        /* APIC software disabled */
        LAPIC_REG(SVR) &= ~LAPIC_SVR_ENABLE;

        /* Bypass the APIC completely and update cpu features */
        rdmsr(MSR_IA32_APIC_BASE, lo, hi);
        lo &= ~MSR_IA32_APIC_BASE_ENABLE;
        wrmsr(MSR_IA32_APIC_BASE, lo, hi);
        set_cpu_model();

        mp_enable_preemption();
}
  452 
/*
 * lapic_init() - program this cpu's local APIC for normal operation:
 * flat logical destination model, open TPR, software-enabled with
 * the spurious vector, ExtINT unmasked on the master cpu only, and
 * timer/perfmon/error LVTs pointed at our vectors (unmasked).
 */
void
lapic_init(void)
{
        int     value;

        /* Set flat delivery model, logical processor id */
        LAPIC_REG(DFR) = LAPIC_DFR_FLAT;
        LAPIC_REG(LDR) = (get_cpu_number()) << LAPIC_LDR_SHIFT;

        /* Accept all (task priority 0 blocks nothing) */
        LAPIC_REG(TPR) =  0;

        /* Software-enable the APIC and set the spurious vector */
        LAPIC_REG(SVR) = LAPIC_VECTOR(SPURIOUS) | LAPIC_SVR_ENABLE;

        /* ExtINT: only the master cpu takes legacy PIC interrupts */
        if (get_cpu_number() == master_cpu) {
                value = LAPIC_REG(LVT_LINT0);
                value &= ~LAPIC_LVT_MASKED;
                value |= LAPIC_LVT_DM_EXTINT;
                LAPIC_REG(LVT_LINT0) = value;
        }

        /* Timer: unmasked, one-shot */
        LAPIC_REG(LVT_TIMER) = LAPIC_VECTOR(TIMER);

        /* Perfmon: unmasked */
        LAPIC_REG(LVT_PERFCNT) = LAPIC_VECTOR(PERFCNT);

        /* Clear any stale error status before unmasking the error LVT */
        lapic_esr_clear();

        LAPIC_REG(LVT_ERROR) = LAPIC_VECTOR(ERROR);

}
  486 
/*
 * Register the callout invoked by lapic_interrupt() when the local
 * APIC timer fires.
 */
void
lapic_set_timer_func(i386_intr_func_t func)
{
        lapic_timer_func = func;
}
  492 
  493 void
  494 lapic_set_timer(
  495         boolean_t               interrupt,
  496         lapic_timer_mode_t      mode,
  497         lapic_timer_divide_t    divisor,
  498         lapic_timer_count_t     initial_count)
  499 {
  500         boolean_t       state;
  501         uint32_t        timer_vector;
  502 
  503         state = ml_set_interrupts_enabled(FALSE);
  504         timer_vector = LAPIC_REG(LVT_TIMER);
  505         timer_vector &= ~(LAPIC_LVT_MASKED|LAPIC_LVT_PERIODIC);;
  506         timer_vector |= interrupt ? 0 : LAPIC_LVT_MASKED;
  507         timer_vector |= (mode == periodic) ? LAPIC_LVT_PERIODIC : 0;
  508         LAPIC_REG(LVT_TIMER) = timer_vector;
  509         LAPIC_REG(TIMER_DIVIDE_CONFIG) = divisor;
  510         LAPIC_REG(TIMER_INITIAL_COUNT) = initial_count;
  511         ml_set_interrupts_enabled(state);
  512 }
  513 
/*
 * lapic_get_timer() - read back the local APIC timer configuration.
 * Any of the out-parameters may be NULL to skip that value.  Reads
 * are done with interrupts disabled for a consistent snapshot.
 */
void
lapic_get_timer(
        lapic_timer_mode_t      *mode,
        lapic_timer_divide_t    *divisor,
        lapic_timer_count_t     *initial_count,
        lapic_timer_count_t     *current_count)
{
        boolean_t       state;

        state = ml_set_interrupts_enabled(FALSE);
        if (mode)
                *mode = (LAPIC_REG(LVT_TIMER) & LAPIC_LVT_PERIODIC) ?
                                periodic : one_shot;
        if (divisor)
                *divisor = LAPIC_REG(TIMER_DIVIDE_CONFIG) & LAPIC_TIMER_DIVIDE_MASK;
        if (initial_count)
                *initial_count = LAPIC_REG(TIMER_INITIAL_COUNT);
        if (current_count)
                *current_count = LAPIC_REG(TIMER_CURRENT_COUNT);
        ml_set_interrupts_enabled(state);
} 
  535 
/*
 * Register the callout invoked by lapic_interrupt() on a
 * performance-monitor (PERFCNT) interrupt.
 */
void
lapic_set_pmi_func(i386_intr_func_t func)
{
        lapic_pmi_func = func;
}
  541 
/*
 * Issue an end-of-interrupt to the local APIC (any write to the
 * EOI register acknowledges the in-service interrupt).
 */
static inline void
_lapic_end_of_interrupt(void)
{
        LAPIC_REG(EOI) = 0;
}
  547 
/*
 * Externally-visible wrapper around the inline EOI helper.
 */
void
lapic_end_of_interrupt(void)
{
        _lapic_end_of_interrupt();
}
  553 
/*
 * lapic_interrupt() - dispatch a local APIC interrupt source.
 *
 * interrupt: absolute vector number; rebased to an offset from
 *            lapic_interrupt_base before dispatch.
 * state:     saved interrupt frame, passed through to callouts.
 *
 * Returns 1 if the vector belonged to the local APIC and was
 * handled, 0 otherwise (the caller should treat it as a device
 * interrupt).
 */
int
lapic_interrupt(int interrupt, void *state)
{
        interrupt -= lapic_interrupt_base;
        if (interrupt < 0)
                return 0;

        switch(interrupt) {
        case LAPIC_PERFCNT_INTERRUPT:
                if (lapic_pmi_func != NULL)
                        (*lapic_pmi_func)(
                                (struct i386_interrupt_state *) state);
                /* Clear interrupt masked */
                LAPIC_REG(LVT_PERFCNT) = LAPIC_VECTOR(PERFCNT);
                _lapic_end_of_interrupt();
                return 1;
        case LAPIC_TIMER_INTERRUPT:
                /* EOI issued before the callout (unlike the perfcnt case) */
                _lapic_end_of_interrupt();
                if (lapic_timer_func != NULL)
                        (*lapic_timer_func)(
                                (struct i386_interrupt_state *) state);
                return 1;
        case LAPIC_ERROR_INTERRUPT:
                lapic_dump();
                panic("Local APIC error\n");
                /* NOTE(review): the two statements below are unreachable —
                 * panic() does not return. */
                _lapic_end_of_interrupt();
                return 1;
        case LAPIC_SPURIOUS_INTERRUPT:
                kprintf("SPIV\n");
                /* No EOI required here */
                return 1;
        case LAPIC_INTERPROCESSOR_INTERRUPT:
                cpu_signal_handler((struct i386_interrupt_state *) state);
                _lapic_end_of_interrupt();
                return 1;
        }
        /* Not a local APIC source */
        return 0;
}
  592 
/*
 * lapic_smm_restore() - recover local APIC timer state after SMM.
 *
 * Only relevant when the OS enabled the APIC (lapic_os_enabled): a
 * buggy SMI handler may leave a timer interrupt stuck in-service.
 * If the timer's ISR bit is set, issue the missing EOI and, if the
 * one-shot timer already counted down to zero, re-arm it with a
 * minimal count so the lost tick is regenerated.
 */
void
lapic_smm_restore(void)
{
        boolean_t state;

        if (lapic_os_enabled == FALSE)
                return;

        state = ml_set_interrupts_enabled(FALSE);

        if (LAPIC_ISR_IS_SET(LAPIC_REDUCED_INTERRUPT_BASE, TIMER)) {
                /*
                 * Bogus SMI handler enables interrupts but does not know about
                 * local APIC interrupt sources. When APIC timer counts down to
                 * zero while in SMM, local APIC will end up waiting for an EOI
                 * but no interrupt was delivered to the OS.
                 */
                _lapic_end_of_interrupt();

                /*
                 * timer is one-shot, trigger another quick countdown to trigger
                 * another timer interrupt.
                 */
                if (LAPIC_REG(TIMER_CURRENT_COUNT) == 0) {
                        LAPIC_REG(TIMER_INITIAL_COUNT) = 1;
                }

                kprintf("lapic_smm_restore\n");
        }

        ml_set_interrupts_enabled(state);
}
  625 
/*
 * intel_startCPU() - boot the application processor in the given
 * cpu slot using the INIT / STARTUP-IPI / STARTUP-IPI sequence,
 * then wait for it to mark itself running.
 *
 * The slave boot stack is serialized by mp_cpu_boot_lock.  If the
 * requested slot is the calling cpu, returns immediately.  On
 * timeout the box is rebooted via cpu_shutdown(); the KERN_SUCCESS
 * return on that path is presumably never reached — confirm
 * cpu_shutdown() does not return.
 */
kern_return_t
intel_startCPU(
        int     slot_num)
{

        int     i = 1000;
        int     lapic = cpu_to_lapic[slot_num];

        assert(lapic != -1);

        DBGLOG_CPU_INIT(slot_num);

        DBG("intel_startCPU(%d) lapic_id=%d\n", slot_num, lapic);
        DBG("IdlePTD(%p): 0x%x\n", &IdlePTD, (int) IdlePTD);

        /* Initialize (or re-initialize) the descriptor tables for this cpu. */
        mp_desc_init(cpu_datap(slot_num), FALSE);

        /* Serialize use of the slave boot stack. */
        mutex_lock(&mp_cpu_boot_lock);

        mp_disable_preemption();
        if (slot_num == get_cpu_number()) {
                mp_enable_preemption();
                mutex_unlock(&mp_cpu_boot_lock);
                return KERN_SUCCESS;
        }

        /* INIT IPI to reset the target, then wait 10ms */
        LAPIC_REG(ICRD) = lapic << LAPIC_ICRD_DEST_SHIFT;
        LAPIC_REG(ICR) = LAPIC_ICR_DM_INIT;
        delay(10000);

        /* First STARTUP IPI; vector is the page number of MP_BOOT */
        LAPIC_REG(ICRD) = lapic << LAPIC_ICRD_DEST_SHIFT;
        LAPIC_REG(ICR) = LAPIC_ICR_DM_STARTUP|(MP_BOOT>>12);
        delay(200);

        /* Second STARTUP IPI */
        LAPIC_REG(ICRD) = lapic << LAPIC_ICRD_DEST_SHIFT;
        LAPIC_REG(ICR) = LAPIC_ICR_DM_STARTUP|(MP_BOOT>>12);
        delay(200);

#ifdef  POSTCODE_DELAY
        /* Wait much longer if postcodes are displayed for a delay period. */
        i *= 10000;
#endif
        /* Poll (up to i * 10ms) for the AP to flag itself running */
        while(i-- > 0) {
                if (cpu_datap(slot_num)->cpu_running)
                        break;
                delay(10000);
        }

        mp_enable_preemption();
        mutex_unlock(&mp_cpu_boot_lock);

        if (!cpu_datap(slot_num)->cpu_running) {
                DBG("Failed to start CPU %02d\n", slot_num);
                printf("Failed to start CPU %02d, rebooting...\n", slot_num);
                delay(1000000);
                cpu_shutdown();
                return KERN_SUCCESS;
        } else {
                DBG("Started CPU %02d\n", slot_num);
                printf("Started CPU %02d\n", slot_num);
                return KERN_SUCCESS;
        }
}
  691 
  692 extern char     slave_boot_base[];
  693 extern char     slave_boot_end[];
  694 extern void     pstart(void);
  695 
/*
 * slave_boot_init() - stage the real-mode boot trampoline for
 * application processors: copy the slave boot code to physical page
 * MP_BOOT, zero a stack area above it, plant the physical address
 * of pstart() at the stack base, and flush caches so the APs (which
 * start with caching in an unknown state) see the data.
 */
void
slave_boot_init(void)
{
        DBG("V(slave_boot_base)=%p P(slave_boot_base)=%p MP_BOOT=%p sz=0x%x\n",
                slave_boot_base,
                kvtophys((vm_offset_t) slave_boot_base),
                MP_BOOT,
                slave_boot_end-slave_boot_base);

        /*
         * Copy the boot entry code to the real-mode vector area MP_BOOT.
         * This is in page 1 which has been reserved for this purpose by
         * machine_startup() from the boot processor.
         * The slave boot code is responsible for switching to protected
         * mode and then jumping to the common startup, _start().
         */
        bcopy_phys((addr64_t) kvtophys((vm_offset_t) slave_boot_base),
                   (addr64_t) MP_BOOT,
                   slave_boot_end-slave_boot_base);

        /*
         * Zero a stack area above the boot code.
         */
        DBG("bzero_phys 0x%x sz 0x%x\n",MP_BOOTSTACK+MP_BOOT-0x400, 0x400);
        bzero_phys((addr64_t)MP_BOOTSTACK+MP_BOOT-0x400, 0x400);

        /*
         * Set the location at the base of the stack to point to the
         * common startup entry.
         */
        DBG("writing 0x%x at phys 0x%x\n",
                kvtophys((vm_offset_t) &pstart), MP_MACH_START+MP_BOOT);
        ml_phys_write_word(MP_MACH_START+MP_BOOT,
                           kvtophys((vm_offset_t) &pstart));
        
        /* Flush caches */
        __asm__("wbinvd");
}
  734 
  735 #if     MP_DEBUG
  736 cpu_signal_event_log_t  *cpu_signal[MAX_CPUS];
  737 cpu_signal_event_log_t  *cpu_handle[MAX_CPUS];
  738 
  739 MP_EVENT_NAME_DECL();
  740 
  741 #endif  /* MP_DEBUG */
  742 
/*
 * cpu_signal_handler() - service cross-cpu signals delivered by the
 * interprocessor interrupt.
 *
 * Loops over this cpu's signal word, clearing and acting on each
 * pending event bit (KDP entry, TLB flush, AST check, KDB entry,
 * rendezvous) until the word is empty.  Runs with preemption
 * disabled so cpu_number() and the per-cpu signal word stay stable.
 */
void
cpu_signal_handler(__unused struct i386_interrupt_state *regs)
{
        int             my_cpu;
        volatile int    *my_word;
#if     MACH_KDB && MACH_ASSERT
        int             i=100;
#endif  /* MACH_KDB && MACH_ASSERT */

        mp_disable_preemption();

        my_cpu = cpu_number();
        my_word = &current_cpu_datap()->cpu_signals;

        do {
#if     MACH_KDB && MACH_ASSERT
                /* Debug build: trap to the debugger if we appear stuck */
                if (i-- <= 0)
                    Debugger("cpu_signal_handler");
#endif  /* MACH_KDB && MACH_ASSERT */
#if     MACH_KDP
                if (i_bit(MP_KDP, my_word)) {
                        DBGLOG(cpu_handle,my_cpu,MP_KDP);
                        i_bit_clear(MP_KDP, my_word);
                        /* Park this cpu while the debugger owns the box */
                        mp_kdp_wait();
                } else
#endif  /* MACH_KDP */
                if (i_bit(MP_TLB_FLUSH, my_word)) {
                        DBGLOG(cpu_handle,my_cpu,MP_TLB_FLUSH);
                        i_bit_clear(MP_TLB_FLUSH, my_word);
                        pmap_update_interrupt();
                } else if (i_bit(MP_AST, my_word)) {
                        DBGLOG(cpu_handle,my_cpu,MP_AST);
                        i_bit_clear(MP_AST, my_word);
                        ast_check(cpu_to_processor(my_cpu));
#if     MACH_KDB
                } else if (i_bit(MP_KDB, my_word)) {
                        /* NOTE(review): implicit-int declaration (pre-C99) */
                        extern kdb_is_slave[];

                        i_bit_clear(MP_KDB, my_word);
                        kdb_is_slave[my_cpu]++;
                        kdb_kintr();
#endif  /* MACH_KDB */
                } else if (i_bit(MP_RENDEZVOUS, my_word)) {
                        DBGLOG(cpu_handle,my_cpu,MP_RENDEZVOUS);
                        i_bit_clear(MP_RENDEZVOUS, my_word);
                        mp_rendezvous_action();
                }
        } while (*my_word);

        mp_enable_preemption();

}
  795 
  796 #ifdef  MP_DEBUG
  797 extern int      max_lock_loops;
  798 #endif  /* MP_DEBUG */
/*
 * cpu_interrupt() - send an interprocessor interrupt to a cpu.
 *
 * Spins until any previously issued IPI has been delivered (ICR
 * delivery-status pending bit clears), then, with interrupts
 * disabled around the two-register sequence, writes the target's
 * lapic id to ICRD and the fixed-delivery IPI vector to ICR.
 * No-op until smp_initialized is set.
 *
 * Note the #ifdef splice: the while-loop header appears in both
 * branches so the debug build can count (and panic on) excessive
 * spins — do not reorder.
 */
void
cpu_interrupt(int cpu)
{
        boolean_t       state;

        if (smp_initialized) {

                /* Wait for previous interrupt to be delivered... */
#ifdef  MP_DEBUG
                int     pending_busy_count = 0;
                while (LAPIC_REG(ICR) & LAPIC_ICR_DS_PENDING) {
                        if (++pending_busy_count > max_lock_loops)
                                panic("cpus_interrupt() deadlock\n");
#else
                while (LAPIC_REG(ICR) & LAPIC_ICR_DS_PENDING) {
#endif  /* MP_DEBUG */
                        cpu_pause();
                }

                state = ml_set_interrupts_enabled(FALSE);
                /* Destination first; the ICR write initiates delivery */
                LAPIC_REG(ICRD) =
                        cpu_to_lapic[cpu] << LAPIC_ICRD_DEST_SHIFT;
                LAPIC_REG(ICR)  =
                        LAPIC_VECTOR(INTERPROCESSOR) | LAPIC_ICR_DM_FIXED;
                (void) ml_set_interrupts_enabled(state);
        }

}
  827 
  828 void
  829 i386_signal_cpu(int cpu, mp_event_t event, mp_sync_t mode)
  830 {
  831         volatile int    *signals = &cpu_datap(cpu)->cpu_signals;
  832         uint64_t        tsc_timeout;
  833         
  834 
  835         if (!cpu_datap(cpu)->cpu_running)
  836                 return;
  837 
  838         DBGLOG(cpu_signal, cpu, event);
  839 
  840         i_bit_set(event, signals);
  841         cpu_interrupt(cpu);
  842         if (mode == SYNC) {
  843            again:
  844                 tsc_timeout = rdtsc64() + (1000*1000*1000);
  845                 while (i_bit(event, signals) && rdtsc64() < tsc_timeout) {
  846                         cpu_pause();
  847                 }
  848                 if (i_bit(event, signals)) {
  849                         DBG("i386_signal_cpu(%d, 0x%x, SYNC) timed out\n",
  850                                 cpu, event);
  851                         goto again;
  852                 }
  853         }
  854 }
  855 
  856 void
  857 i386_signal_cpus(mp_event_t event, mp_sync_t mode)
  858 {
  859         unsigned int    cpu;
  860         unsigned int    my_cpu = cpu_number();
  861 
  862         for (cpu = 0; cpu < real_ncpus; cpu++) {
  863                 if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
  864                         continue;
  865                 i386_signal_cpu(cpu, event, mode);
  866         }
  867 }
  868 
  869 int
  870 i386_active_cpus(void)
  871 {
  872         unsigned int    cpu;
  873         unsigned int    ncpus = 0;
  874 
  875         for (cpu = 0; cpu < real_ncpus; cpu++) {
  876                 if (cpu_datap(cpu)->cpu_running)
  877                         ncpus++;
  878         }
  879         return(ncpus);
  880 }
  881 
  882 /*
  883  * All-CPU rendezvous:
  884  *      - CPUs are signalled,
  885  *      - all execute the setup function (if specified),
  886  *      - rendezvous (i.e. all cpus reach a barrier),
  887  *      - all execute the action function (if specified),
  888  *      - rendezvous again,
  889  *      - execute the teardown function (if specified), and then
  890  *      - resume.
  891  *
  892  * Note that the supplied external functions _must_ be reentrant and aware
  893  * that they are running in parallel and in an unknown lock context.
  894  */
  895 
/*
 * Per-cpu body of an all-cpu rendezvous, executed on the initiator and
 * on every signalled cpu alike: setup -> entry barrier -> action ->
 * exit barrier -> teardown.  Reads the mp_rv_* statics published by
 * mp_rendezvous() while it holds mp_rv_lock.
 */
static void
mp_rendezvous_action(void)
{

	/* setup function */
	if (mp_rv_setup_func != NULL)
		mp_rv_setup_func(mp_rv_func_arg);
	/* spin on entry rendezvous: wait until all mp_rv_ncpus arrive */
	atomic_incl(&mp_rv_waiters[0], 1);
	while (*((volatile long *) &mp_rv_waiters[0]) < mp_rv_ncpus)
		cpu_pause();
	/* action function: runs in parallel on every cpu */
	if (mp_rv_action_func != NULL)
		mp_rv_action_func(mp_rv_func_arg);
	/* spin on exit rendezvous: ensure every cpu finished the action */
	atomic_incl(&mp_rv_waiters[1], 1);
	while (*((volatile long *) &mp_rv_waiters[1]) < mp_rv_ncpus)
		cpu_pause();
	/* teardown function */
	if (mp_rv_teardown_func != NULL)
		mp_rv_teardown_func(mp_rv_func_arg);
}
  918 
/*
 * Initiate an all-cpu rendezvous (protocol described above).  Before
 * SMP is initialized there is only this cpu, so the three functions are
 * simply called directly.  Otherwise mp_rv_lock serializes rendezvous
 * initiators; the statics are published, other cpus are signalled, and
 * this cpu participates too via mp_rendezvous_action(), which does not
 * return until all cpus have passed the exit barrier.
 */
void
mp_rendezvous(void (*setup_func)(void *), 
	      void (*action_func)(void *),
	      void (*teardown_func)(void *),
	      void *arg)
{

	if (!smp_initialized) {
		/* Uniprocessor (pre-SMP) fast path: run inline. */
		if (setup_func != NULL)
			setup_func(arg);
		if (action_func != NULL)
			action_func(arg);
		if (teardown_func != NULL)
			teardown_func(arg);
		return;
	}
		
	/* obtain rendezvous lock: one rendezvous at a time */
	simple_lock(&mp_rv_lock);

	/* set static function pointers (read by mp_rendezvous_action) */
	mp_rv_setup_func = setup_func;
	mp_rv_action_func = action_func;
	mp_rv_teardown_func = teardown_func;
	mp_rv_func_arg = arg;

	mp_rv_waiters[0] = 0;		/* entry rendezvous count */
	mp_rv_waiters[1] = 0;		/* exit  rendezvous count */
	mp_rv_ncpus = i386_active_cpus();

	/*
	 * signal other processors, which will call mp_rendezvous_action()
	 * with interrupts disabled
	 */
	i386_signal_cpus(MP_RENDEZVOUS, ASYNC);

	/* call executor function on this cpu */
	mp_rendezvous_action();

	/* release lock */
	simple_unlock(&mp_rv_lock);
}
  961 
  962 #if     MACH_KDP
volatile boolean_t	mp_kdp_trap = FALSE;	/* TRUE while one cpu owns kdp; others spin in mp_kdp_wait() */
long			mp_kdp_ncpus;		/* cpus currently held in the debugger (including the owner) */
boolean_t		mp_kdp_state;		/* owner's saved interrupt state, restored by mp_kdp_exit() */
  966 
  967 
/*
 * Bring all other cpus to a halt so this cpu can enter the kdp
 * debugger.  Loses gracefully if another cpu is already in kdp:
 * waits in mp_kdp_wait() and retries for ownership afterwards.
 */
void
mp_kdp_enter(void)
{
	unsigned int	cpu;
	unsigned int	ncpus;
	unsigned int	my_cpu = cpu_number();
	uint64_t	tsc_timeout;

	DBG("mp_kdp_enter()\n");

	/*
	 * Here to enter the debugger.
	 * In case of races, only one cpu is allowed to enter kdp after
	 * stopping others.
	 */
	mp_kdp_state = ml_set_interrupts_enabled(FALSE);
	simple_lock(&mp_kdp_lock);
	while (mp_kdp_trap) {
		/* Another cpu owns kdp: park until it exits, then retry. */
		simple_unlock(&mp_kdp_lock);
		DBG("mp_kdp_enter() race lost\n");
		mp_kdp_wait();
		simple_lock(&mp_kdp_lock);
	}
	mp_kdp_ncpus = 1;	/* self */
	mp_kdp_trap = TRUE;
	simple_unlock(&mp_kdp_lock);

	/* Deliver a nudge to other cpus, counting how many */
	DBG("mp_kdp_enter() signaling other processors\n");
	for (ncpus = 1, cpu = 0; cpu < real_ncpus; cpu++) {
		if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
			continue;
		ncpus++;
		i386_signal_cpu(cpu, MP_KDP, ASYNC); 
	}

	/* Wait other processors to spin. */
	/* Bounded wait (~10^9 TSC ticks); proceed even if some cpus
	 * never check in, reporting "timed out" below. */
	DBG("mp_kdp_enter() waiting for (%d) processors to suspend\n", ncpus);
	tsc_timeout = rdtsc64() + (1000*1000*1000);
	while (*((volatile unsigned int *) &mp_kdp_ncpus) != ncpus
		&& rdtsc64() < tsc_timeout) {
		cpu_pause();
	}
	DBG("mp_kdp_enter() %d processors done %s\n",
		mp_kdp_ncpus, (mp_kdp_ncpus == ncpus) ? "OK" : "timed out");
	postcode(MP_KDP_ENTER);
}
 1015 
/*
 * Spin here while another cpu owns the kdp debugger.  Called from the
 * MP_KDP signal handler and from the mp_kdp_enter() race-loser path.
 * NOTE(review): interrupts are deliberately enabled (TRUE) while
 * spinning -- presumably so this cpu can still service IPIs such as
 * TLB flushes; confirm before relying on that.
 */
static void
mp_kdp_wait(void)
{
	boolean_t	state;

	state = ml_set_interrupts_enabled(TRUE);
	DBG("mp_kdp_wait()\n");
	atomic_incl(&mp_kdp_ncpus, 1);	/* announce arrival to mp_kdp_enter() */
	while (mp_kdp_trap) {
		cpu_pause();
	}
	atomic_decl(&mp_kdp_ncpus, 1);	/* mp_kdp_exit() waits for this to reach 0 */
	DBG("mp_kdp_wait() done\n");
	(void) ml_set_interrupts_enabled(state);
}
 1031 
/*
 * Release the cpus parked in mp_kdp_wait() and leave the debugger.
 * Drops our own count, clears the trap flag so waiters fall out of
 * their spin loops, then waits for them all to resume.
 */
void
mp_kdp_exit(void)
{
	DBG("mp_kdp_exit()\n");
	atomic_decl(&mp_kdp_ncpus, 1);
	mp_kdp_trap = FALSE;

	/* Wait other processors to stop spinning. XXX needs timeout */
	DBG("mp_kdp_exit() waiting for processors to resume\n");
	while (*((volatile long *) &mp_kdp_ncpus) > 0) {
		cpu_pause();
	}
	DBG("mp_kdp_exit() done\n");
	/* Restore the interrupt state saved in mp_kdp_enter(). */
	(void) ml_set_interrupts_enabled(mp_kdp_state);
	postcode(0);
}
 1048 #endif  /* MACH_KDP */
 1049 
/*ARGSUSED*/
/*
 * Per-processor AST-check initialization: nothing required on i386.
 */
void
init_ast_check(
	__unused processor_t	processor)
{
}
 1056 
 1057 void
 1058 cause_ast_check(
 1059         processor_t     processor)
 1060 {
 1061         int     cpu = PROCESSOR_DATA(processor, slot_num);
 1062 
 1063         if (cpu != cpu_number()) {
 1064                 i386_signal_cpu(cpu, MP_AST, ASYNC);
 1065         }
 1066 }
 1067 
 1068 /*
 1069  * invoke kdb on slave processors 
 1070  */
 1071 
 1072 void
 1073 remote_kdb(void)
 1074 {
 1075         unsigned int    my_cpu = cpu_number();
 1076         unsigned int    cpu;
 1077         
 1078         mp_disable_preemption();
 1079         for (cpu = 0; cpu < real_ncpus; cpu++) {
 1080                 if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
 1081                         continue;
 1082                 i386_signal_cpu(cpu, MP_KDB, SYNC);
 1083         }
 1084         mp_enable_preemption();
 1085 }
 1086 
 1087 /*
 1088  * Clear kdb interrupt
 1089  */
 1090 
void
clear_kdb_intr(void)
{
	/* Preemption is disabled so "current cpu" cannot change while we
	 * clear this cpu's pending MP_KDB signal bit. */
	mp_disable_preemption();
	i_bit_clear(MP_KDB, &current_cpu_datap()->cpu_signals);
	mp_enable_preemption();
}
 1098 
 1099 /*
 1100  * i386_init_slave() is called from pstart.
 1101  * We're in the cpu's interrupt stack with interrupts disabled.
 1102  */
void
i386_init_slave(void)
{
	postcode(I386_INIT_SLAVE);

	/* Ensure that caching and write-through are enabled */
	set_cr0(get_cr0() & ~(CR0_NW|CR0_CD));

	DBG("i386_init_slave() CPU%d: phys (%d) active.\n",
		get_cpu_number(), get_cpu_phys_number());

	/* Bring up this cpu's local APIC before anything that needs IPIs. */
	lapic_init();

	LAPIC_DUMP();
	LAPIC_CPU_MAP_DUMP();

	/* Sync memory-type range registers with the boot processor. */
	mtrr_update_cpu();

	/* Page-attribute table setup. */
	pat_init();

	cpu_init();

	/* Hand off to the scheduler; never returns on success. */
	slave_main();

	panic("i386_init_slave() returned from slave_main()");
}
 1129 
void
slave_machine_init(void)
{
	/*
	 * Here in process context.
	 */
	DBG("slave_machine_init() CPU%d\n", get_cpu_number());

	/* Floating-point unit setup for this cpu. */
	init_fpu();

	cpu_thread_init();

	/* Performance-monitoring counters. */
	pmc_init();

	cpu_machine_init();

	clock_init();
}
 1148 
 1149 #undef cpu_number()
 1150 int cpu_number(void)
 1151 {
 1152         return get_cpu_number();
 1153 }
 1154 
 1155 #if     MACH_KDB
 1156 #include <ddb/db_output.h>
 1157 
 1158 #define TRAP_DEBUG 0 /* Must match interrupt.s and spl.s */
 1159 
 1160 
 1161 #if     TRAP_DEBUG
#define MTRAPS 100	/* capacity of the trap-history ring */
struct mp_trap_hist_struct {
	unsigned char type;	/* 1 = SPL transition, 2 = interrupt (see key below) */
	unsigned char data[5];	/* per-type payload bytes, layout in the key below */
} trap_hist[MTRAPS], *cur_trap_hist = trap_hist,
    *max_trap_hist = &trap_hist[MTRAPS];

void db_trap_hist(void);
 1170 
 1171 /*
 1172  * SPL:
 1173  *      1: new spl
 1174  *      2: old spl
 1175  *      3: new tpr
 1176  *      4: old tpr
 1177  * INT:
 1178  *      1: int vec
 1179  *      2: old spl
 1180  *      3: new spl
 1181  *      4: post eoi tpr
 1182  *      5: exit tpr
 1183  */
 1184 
 1185 void
 1186 db_trap_hist(void)
 1187 {
 1188         int i,j;
 1189         for(i=0;i<MTRAPS;i++)
 1190             if (trap_hist[i].type == 1 || trap_hist[i].type == 2) {
 1191                     db_printf("%s%s",
 1192                               (&trap_hist[i]>=cur_trap_hist)?"*":" ",
 1193                               (trap_hist[i].type == 1)?"SPL":"INT");
 1194                     for(j=0;j<5;j++)
 1195                         db_printf(" %02x", trap_hist[i].data[j]);
 1196                     db_printf("\n");
 1197             }
 1198                 
 1199 }
 1200 #endif  /* TRAP_DEBUG */
 1201 
 1202 void db_lapic(int cpu);
 1203 unsigned int db_remote_read(int cpu, int reg);
 1204 void db_ioapic(unsigned int);
 1205 void kdb_console(void);
 1206 
void
kdb_console(void)
{
	/* Stub: intentionally empty on this platform. */
}
 1211 
 1212 #define BOOLP(a) ((a)?' ':'!')
 1213 
 1214 static char *DM[8] = {
 1215         "Fixed",
 1216         "Lowest Priority",
 1217         "Invalid",
 1218         "Invalid",
 1219         "NMI",
 1220         "Reset",
 1221         "Invalid",
 1222         "ExtINT"};
 1223 
 1224 unsigned int
 1225 db_remote_read(int cpu, int reg)
 1226 {
 1227         return -1;
 1228 }
 1229 
void
db_lapic(int cpu)
{
	/* Stub: lapic dump from ddb not implemented. */
}
 1234 
void
db_ioapic(unsigned int ind)
{
	/* Stub: ioapic dump from ddb not implemented. */
}
 1239 
 1240 #endif  /* MACH_KDB */
 1241 

Cache object: bc593401b06506c2390d86576a7e53f3


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.