The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/osfmk/i386/mp.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*
    2  * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
    3  *
    4  * @APPLE_LICENSE_HEADER_START@
    5  * 
    6  * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
    7  * 
    8  * This file contains Original Code and/or Modifications of Original Code
    9  * as defined in and that are subject to the Apple Public Source License
   10  * Version 2.0 (the 'License'). You may not use this file except in
   11  * compliance with the License. Please obtain a copy of the License at
   12  * http://www.opensource.apple.com/apsl/ and read it before using this
   13  * file.
   14  * 
   15  * The Original Code and all software distributed under the License are
   16  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
   17  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
   18  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
   19  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
   20  * Please see the License for the specific language governing rights and
   21  * limitations under the License.
   22  * 
   23  * @APPLE_LICENSE_HEADER_END@
   24  */
   25 /*
   26  * @OSF_COPYRIGHT@
   27  */
   28 
   29 #include <cpus.h>
   30 #include <mach_rt.h>
   31 #include <mach_kdb.h>
   32 #include <mach_kdp.h>
   33 #include <mach_ldebug.h>
   34 
   35 #include <i386/mp.h>
   36 #include <i386/mp_events.h>
   37 #include <i386/mp_slave_boot.h>
   38 #include <i386/apic.h>
   39 #include <i386/ipl.h>
   40 #include <i386/fpu.h>
   41 #include <i386/pio.h>
   42 #include <i386/cpuid.h>
   43 #include <i386/proc_reg.h>
   44 #include <i386/machine_cpu.h>
   45 #include <i386/misc_protos.h>
   46 #include <vm/vm_kern.h>
   47 #include <mach/mach_types.h>
   48 #include <mach/kern_return.h>
   49 #include <kern/startup.h>
   50 #include <kern/processor.h>
   51 #include <kern/cpu_number.h>
   52 #include <kern/cpu_data.h>
   53 #include <kern/assert.h>
   54 
#if     MP_DEBUG
/* Debug builds: PAUSE spins for ~1s via delay(); DBG logs via kprintf. */
#define PAUSE           delay(1000000)
#define DBG(x...)       kprintf(x)
#else
/* Production builds: both macros compile away to nothing. */
#define DBG(x...)
#define PAUSE
#endif  /* MP_DEBUG */
   62 
/* Initialize lapic_id so cpu_number() works on non SMP systems */
unsigned long   lapic_id_initdata = 0;
unsigned long   lapic_id = (unsigned long)&lapic_id_initdata;
vm_offset_t     lapic_start;            /* kernel VA of the memory-mapped local APIC */

/* Forward declarations for routines defined later in this file. */
void            lapic_init(void);
void            slave_boot_init(void);

static void     mp_kdp_wait(void);
static void     mp_rendezvous_action(void);

/* TRUE once smp_init() has mapped the local APIC and booted slaves. */
boolean_t       smp_initialized = FALSE;

decl_simple_lock_data(,mp_kdp_lock);    /* serializes entry into the kdp debugger */
decl_simple_lock_data(,mp_putc_lock);

/* Variables needed for MP rendezvous. */
static void             (*mp_rv_setup_func)(void *arg);
static void             (*mp_rv_action_func)(void *arg);
static void             (*mp_rv_teardown_func)(void *arg);
static void             *mp_rv_func_arg;
static int              mp_rv_ncpus;            /* cpus expected at the barriers */
static volatile long    mp_rv_waiters[2];       /* [0] entry barrier, [1] exit barrier */
decl_simple_lock_data(,mp_rv_lock);             /* one rendezvous at a time */

/* Bidirectional lapic id <-> logical cpu number maps; -1 means unassigned. */
int             lapic_to_cpu[LAPIC_ID_MAX+1];
int             cpu_to_lapic[NCPUS];
   90 
   91 static void
   92 lapic_cpu_map_init(void)
   93 {
   94         int     i;
   95 
   96         for (i = 0; i < NCPUS; i++)
   97                 cpu_to_lapic[i] = -1;
   98         for (i = 0; i <= LAPIC_ID_MAX; i++)
   99                 lapic_to_cpu[i] = -1;
  100 }
  101 
  102 void
  103 lapic_cpu_map(int apic_id, int cpu_number)
  104 {
  105         cpu_to_lapic[cpu_number] = apic_id;
  106         lapic_to_cpu[apic_id] = cpu_number;
  107 }
  108 
#ifdef MP_DEBUG
/* Debug aid: print every assigned entry of both translation tables. */
static void
lapic_cpu_map_dump(void)
{
        int     i;

        for (i = 0; i < NCPUS; i++) {
                if (cpu_to_lapic[i] == -1)
                        continue;       /* skip unassigned slots */
                kprintf("cpu_to_lapic[%d]: %d\n",
                        i, cpu_to_lapic[i]);
        }
        for (i = 0; i <= LAPIC_ID_MAX; i++) {
                if (lapic_to_cpu[i] == -1)
                        continue;       /* skip unassigned slots */
                kprintf("lapic_to_cpu[%d]: %d\n",
                        i, lapic_to_cpu[i]);
        }
}
#endif /* MP_DEBUG */
  129 
/* Access a 32-bit local APIC register, memory-mapped at lapic_start. */
#define LAPIC_REG(reg) \
        (*((volatile int *)(lapic_start + LAPIC_##reg)))
/* Same, with an extra byte offset (for register arrays: TMR/IRR/ISR). */
#define LAPIC_REG_OFFSET(reg,off) \
        (*((volatile int *)(lapic_start + LAPIC_##reg + (off))))
  134 
  135 
/*
 * One-time SMP bring-up, run on the boot processor:
 * verify a local APIC exists, map its registers into the kernel map,
 * initialize it, stage the real-mode slave boot code, and mark SMP up.
 * Returns silently (uniprocessor operation) if any step fails.
 */
void
smp_init(void)

{
        int             result;
        vm_map_entry_t  entry;
        uint32_t        lo;
        uint32_t        hi;
        boolean_t       is_boot_processor;
        boolean_t       is_lapic_enabled;

        /* Local APIC? */
        if ((cpuid_features() & CPUID_FEATURE_APIC) == 0)
                return;

        simple_lock_init(&mp_kdp_lock, ETAP_MISC_PRINTF);
        simple_lock_init(&mp_rv_lock, ETAP_MISC_PRINTF);
        simple_lock_init(&mp_putc_lock, ETAP_MISC_PRINTF);

        /* Examine the local APIC state */
        rdmsr(MSR_IA32_APIC_BASE, lo, hi);
        is_boot_processor = (lo & MSR_IA32_APIC_BASE_BSP) != 0;
        is_lapic_enabled  = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0;
        DBG("MSR_IA32_APIC_BASE 0x%x:0x%x %s %s\n", hi, lo,
                is_lapic_enabled ? "enabled" : "disabled",
                is_boot_processor ? "BSP" : "AP");
        /* smp_init must run on the BSP with the APIC already enabled */
        assert(is_boot_processor);
        assert(is_lapic_enabled);

        /* Establish a map to the local apic */
        lapic_start = vm_map_min(kernel_map);
        result = vm_map_find_space(kernel_map, &lapic_start,
                                   round_page(LAPIC_SIZE), 0, &entry);
        if (result != KERN_SUCCESS) {
                printf("smp_init: vm_map_find_entry FAILED (err=%d). "
                        "Only supporting ONE cpu.\n", result);
                return;
        }
        /* vm_map_find_space returns with the map locked */
        vm_map_unlock(kernel_map);
        pmap_enter(pmap_kernel(),
                        lapic_start,
                        (ppnum_t) i386_btop(i386_trunc_page(LAPIC_START)),
                        VM_PROT_READ|VM_PROT_WRITE,
                        VM_WIMG_USE_DEFAULT,
                        TRUE);
        /* Point lapic_id at the mapped ID register (cpu_number() reads it) */
        lapic_id = (unsigned long)(lapic_start + LAPIC_ID);

        /* Set up the lapic_id <-> cpu_number map and add this boot processor */
        lapic_cpu_map_init();
        lapic_cpu_map((LAPIC_REG(ID)>>LAPIC_ID_SHIFT)&LAPIC_ID_MASK, 0);

        lapic_init();

        /* Stage real-mode startup code for the application processors */
        slave_boot_init();
        master_up();

        smp_initialized = TRUE;

        return;
}
  196 
  197 
/*
 * Return the local APIC Error Status Register.
 * The ESR is a write-read register: a write latches the currently
 * pending errors so the subsequent read returns them.
 */
int
lapic_esr_read(void)
{
        /* write-read register */
        LAPIC_REG(ERROR_STATUS) = 0;
        return LAPIC_REG(ERROR_STATUS);
}
  205 
/*
 * Clear the local APIC Error Status Register.
 * Two back-to-back writes: the first latches pending errors,
 * the second clears the latched value.
 */
void 
lapic_esr_clear(void)
{
        LAPIC_REG(ERROR_STATUS) = 0;
        LAPIC_REG(ERROR_STATUS) = 0;
}
  212 
/* APIC LVT delivery-mode names, indexed by the 3-bit DM field. */
static char *DM[8] = {
        "Fixed",
        "Lowest Priority",
        "Invalid",
        "Invalid",
        "NMI",
        "Reset",
        "Invalid",
        "ExtINT"};
  222 
  223 void
  224 lapic_dump(void)
  225 {
  226         int     i;
  227         char    buf[128];
  228 
  229 #define BOOL(a) ((a)?' ':'!')
  230 
  231         kprintf("LAPIC %d at 0x%x version 0x%x\n", 
  232                 (LAPIC_REG(ID)>>LAPIC_ID_SHIFT)&LAPIC_ID_MASK,
  233                 lapic_start,
  234                 LAPIC_REG(VERSION)&LAPIC_VERSION_MASK);
  235         kprintf("Priorities: Task 0x%x  Arbitration 0x%x  Processor 0x%x\n",
  236                 LAPIC_REG(TPR)&LAPIC_TPR_MASK,
  237                 LAPIC_REG(APR)&LAPIC_APR_MASK,
  238                 LAPIC_REG(PPR)&LAPIC_PPR_MASK);
  239         kprintf("Destination Format 0x%x Logical Destination 0x%x\n",
  240                 LAPIC_REG(DFR)>>LAPIC_DFR_SHIFT,
  241                 LAPIC_REG(LDR)>>LAPIC_LDR_SHIFT);
  242         kprintf("%cEnabled %cFocusChecking SV 0x%x\n",
  243                 BOOL(LAPIC_REG(SVR)&LAPIC_SVR_ENABLE),
  244                 BOOL(!(LAPIC_REG(SVR)&LAPIC_SVR_FOCUS_OFF)),
  245                 LAPIC_REG(SVR) & LAPIC_SVR_MASK);
  246         kprintf("LVT_TIMER:   Vector 0x%02x %s %cmasked %s\n",
  247                 LAPIC_REG(LVT_TIMER)&LAPIC_LVT_VECTOR_MASK,
  248                 (LAPIC_REG(LVT_TIMER)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
  249                 BOOL(LAPIC_REG(LVT_TIMER)&LAPIC_LVT_MASKED),
  250                 (LAPIC_REG(LVT_TIMER)&LAPIC_LVT_PERIODIC)?"Periodic":"OneShot");
  251         kprintf("LVT_PERFCNT: Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
  252                 LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_VECTOR_MASK,
  253                 DM[(LAPIC_REG(LVT_PERFCNT)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK],
  254                 (LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_TM_LEVEL)?"Level":"Edge ",
  255                 (LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_IP_PLRITY_LOW)?"Low ":"High",
  256                 (LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
  257                 BOOL(LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_MASKED));
  258         kprintf("LVT_LINT0:   Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
  259                 LAPIC_REG(LVT_LINT0)&LAPIC_LVT_VECTOR_MASK,
  260                 DM[(LAPIC_REG(LVT_LINT0)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK],
  261                 (LAPIC_REG(LVT_LINT0)&LAPIC_LVT_TM_LEVEL)?"Level":"Edge ",
  262                 (LAPIC_REG(LVT_LINT0)&LAPIC_LVT_IP_PLRITY_LOW)?"Low ":"High",
  263                 (LAPIC_REG(LVT_LINT0)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
  264                 BOOL(LAPIC_REG(LVT_LINT0)&LAPIC_LVT_MASKED));
  265         kprintf("LVT_LINT1:   Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
  266                 LAPIC_REG(LVT_LINT1)&LAPIC_LVT_VECTOR_MASK,
  267                 DM[(LAPIC_REG(LVT_LINT1)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK],
  268                 (LAPIC_REG(LVT_LINT1)&LAPIC_LVT_TM_LEVEL)?"Level":"Edge ",
  269                 (LAPIC_REG(LVT_LINT1)&LAPIC_LVT_IP_PLRITY_LOW)?"Low ":"High",
  270                 (LAPIC_REG(LVT_LINT1)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
  271                 BOOL(LAPIC_REG(LVT_LINT1)&LAPIC_LVT_MASKED));
  272         kprintf("LVT_ERROR:   Vector 0x%02x %s %cmasked\n",
  273                 LAPIC_REG(LVT_ERROR)&LAPIC_LVT_VECTOR_MASK,
  274                 (LAPIC_REG(LVT_ERROR)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
  275                 BOOL(LAPIC_REG(LVT_ERROR)&LAPIC_LVT_MASKED));
  276         kprintf("ESR: %08x \n", lapic_esr_read());
  277         kprintf("       ");
  278         for(i=0xf; i>=0; i--)
  279                 kprintf("%x%x%x%x",i,i,i,i);
  280         kprintf("\n");
  281         kprintf("TMR: 0x");
  282         for(i=7; i>=0; i--)
  283                 kprintf("%08x",LAPIC_REG_OFFSET(TMR_BASE, i*0x10));
  284         kprintf("\n");
  285         kprintf("IRR: 0x");
  286         for(i=7; i>=0; i--)
  287                 kprintf("%08x",LAPIC_REG_OFFSET(IRR_BASE, i*0x10));
  288         kprintf("\n");
  289         kprintf("ISR: 0x");
  290         for(i=7; i >= 0; i--)
  291                 kprintf("%08x",LAPIC_REG_OFFSET(ISR_BASE, i*0x10));
  292         kprintf("\n");
  293 }
  294 
/*
 * Program this cpu's local APIC for normal operation:
 * flat logical addressing, accept-all task priority, software-enable
 * via the spurious vector register, ExtINT passthrough on the master's
 * LINT0, and an error-interrupt vector.  Runs on each cpu at startup.
 */
void
lapic_init(void)
{
        int     value;

        mp_disable_preemption();

        /* Set flat delivery model, logical processor id */
        LAPIC_REG(DFR) = LAPIC_DFR_FLAT;
        LAPIC_REG(LDR) = (get_cpu_number()) << LAPIC_LDR_SHIFT;

        /* Accept all */
        LAPIC_REG(TPR) =  0;

        /* Software-enable the APIC and set the spurious vector */
        LAPIC_REG(SVR) = SPURIOUS_INTERRUPT | LAPIC_SVR_ENABLE;

        /* ExtINT */
        if (get_cpu_number() == master_cpu) {
                /* Only the boot cpu routes legacy PIC interrupts via LINT0 */
                value = LAPIC_REG(LVT_LINT0);
                value |= LAPIC_LVT_DM_EXTINT;
                LAPIC_REG(LVT_LINT0) = value;
        }

        /* Discard any stale error state before arming the error vector */
        lapic_esr_clear();

        LAPIC_REG(LVT_ERROR) = APIC_ERROR_INTERRUPT;

        mp_enable_preemption();
}
  324 
  325 
/* Signal end-of-interrupt to the local APIC (any write to EOI). */
void
lapic_end_of_interrupt(void)
{
        LAPIC_REG(EOI) = 0;
}
  331 
  332 void
  333 lapic_interrupt(int interrupt, void *state)
  334 {
  335 
  336         switch(interrupt) {
  337         case APIC_ERROR_INTERRUPT:
  338                 panic("Local APIC error\n");
  339                 break;
  340         case SPURIOUS_INTERRUPT:
  341                 kprintf("SPIV\n");
  342                 break;
  343         case INTERPROCESS_INTERRUPT:
  344                 cpu_signal_handler((struct i386_interrupt_state *) state);
  345                 break;
  346         }
  347         lapic_end_of_interrupt();
  348 }
  349 
/*
 * Start the application processor in the given slot: issue the
 * INIT then STARTUP IPI sequence, then poll (up to 1000 x 10ms)
 * for the slave to mark its machine_slot running.
 */
kern_return_t
intel_startCPU(
        int     slot_num)
{

        int     i = 1000;               /* poll iterations, 10ms each */
        /* NOTE(review): this local shadows the global `lapic_id` above */
        int     lapic_id = cpu_to_lapic[slot_num];

        /* Starting ourselves is trivially done */
        if (slot_num == get_cpu_number())
                return KERN_SUCCESS;

        assert(lapic_id != -1);

        DBG("intel_startCPU(%d) lapic_id=%d\n", slot_num, lapic_id);

        mp_disable_preemption();

        /* INIT IPI to the target, then let it settle */
        LAPIC_REG(ICRD) = lapic_id << LAPIC_ICRD_DEST_SHIFT;
        LAPIC_REG(ICR) = LAPIC_ICR_DM_INIT;
        delay(10000);

        /* STARTUP IPI with the real-mode boot vector (MP_BOOT page) */
        LAPIC_REG(ICRD) = lapic_id << LAPIC_ICRD_DEST_SHIFT;
        LAPIC_REG(ICR) = LAPIC_ICR_DM_STARTUP|(MP_BOOT>>12);
        delay(200);

        while(i-- > 0) {
                delay(10000);
                if (machine_slot[slot_num].running)
                        break;
        }

        mp_enable_preemption();

        if (!machine_slot[slot_num].running) {
                DBG("Failed to start CPU %02d\n", slot_num);
                printf("Failed to start CPU %02d\n", slot_num);
                /* NOTE(review): KERN_SUCCESS is returned even on failure,
                 * apparently so boot continues uniprocessor -- confirm */
                return KERN_SUCCESS;
        } else {
                DBG("Started CPU %02d\n", slot_num);
                printf("Started CPU %02d\n", slot_num);
                return KERN_SUCCESS;
        }
}
  393 
/*
 * Stage the real-mode boot trampoline for application processors:
 * copy the slave boot code to physical page MP_BOOT, zero its stack,
 * plant the protected-mode entry address, and flush caches so the
 * APs (starting with caches in an unknown state) see the code.
 */
void
slave_boot_init(void)
{
        extern char     slave_boot_base[];
        extern char     slave_boot_end[];
        extern void     pstart(void);

        DBG("slave_base=%p slave_end=%p MP_BOOT P=%p V=%p\n",
                slave_boot_base, slave_boot_end, MP_BOOT, phystokv(MP_BOOT));

        /*
         * Copy the boot entry code to the real-mode vector area MP_BOOT.
         * This is in page 1 which has been reserved for this purpose by
         * machine_startup() from the boot processor.
         * The slave boot code is responsible for switching to protected
         * mode and then jumping to the common startup, pstart().
         */
        bcopy(slave_boot_base,
              (char *)phystokv(MP_BOOT),
              slave_boot_end-slave_boot_base);

        /*
         * Zero a stack area above the boot code.
         */
        bzero((char *)(phystokv(MP_BOOTSTACK+MP_BOOT)-0x400), 0x400);

        /*
         * Set the location at the base of the stack to point to the
         * common startup entry.
         */
        *((vm_offset_t *) phystokv(MP_MACH_START+MP_BOOT)) =
                                                kvtophys((vm_offset_t)&pstart);
        
        /* Flush caches */
        __asm__("wbinvd");
}
  430 
#if     MP_DEBUG
/* Per-cpu debug rings: IPIs sent (cpu_signal) and handled (cpu_handle). */
cpu_signal_event_log_t  cpu_signal[NCPUS] = { 0, 0, 0 };
cpu_signal_event_log_t  cpu_handle[NCPUS] = { 0, 0, 0 };

/* Expands to the mp_event_name[] string table used by the dumpers. */
MP_EVENT_NAME_DECL();
  436 
  437 void
  438 cpu_signal_dump_last(int cpu)
  439 {
  440         cpu_signal_event_log_t  *logp = &cpu_signal[cpu];
  441         int                     last;
  442         cpu_signal_event_t      *eventp;
  443 
  444         last = (logp->next_entry == 0) ? 
  445                         LOG_NENTRIES - 1 : logp->next_entry - 1;
  446         
  447         eventp = &logp->entry[last];
  448 
  449         kprintf("cpu%d: tsc=%lld cpu_signal(%d,%s)\n",
  450                 cpu, eventp->time, eventp->cpu, mp_event_name[eventp->event]);
  451 }
  452 
  453 void
  454 cpu_handle_dump_last(int cpu)
  455 {
  456         cpu_signal_event_log_t  *logp = &cpu_handle[cpu];
  457         int                     last;
  458         cpu_signal_event_t      *eventp;
  459 
  460         last = (logp->next_entry == 0) ? 
  461                         LOG_NENTRIES - 1 : logp->next_entry - 1;
  462         
  463         eventp = &logp->entry[last];
  464 
  465         kprintf("cpu%d: tsc=%lld cpu_signal_handle%s\n",
  466                 cpu, eventp->time, mp_event_name[eventp->event]);
  467 }
  468 #endif  /* MP_DEBUG */
  469 
  470 void
  471 cpu_signal_handler(struct i386_interrupt_state *regs)
  472 {
  473         register        my_cpu;
  474         volatile int    *my_word;
  475 #if     MACH_KDB && MACH_ASSERT
  476         int             i=100;
  477 #endif  /* MACH_KDB && MACH_ASSERT */
  478 
  479         mp_disable_preemption();
  480 
  481         my_cpu = cpu_number();
  482         my_word = &cpu_data[my_cpu].cpu_signals;
  483 
  484         do {
  485 #if     MACH_KDB && MACH_ASSERT
  486                 if (i-- <= 0)
  487                     Debugger("cpu_signal_handler");
  488 #endif  /* MACH_KDB && MACH_ASSERT */
  489 #if     MACH_KDP
  490                 if (i_bit(MP_KDP, my_word)) {
  491                         DBGLOG(cpu_handle,my_cpu,MP_KDP);
  492                         i_bit_clear(MP_KDP, my_word);
  493                         mp_kdp_wait();
  494                 } else
  495 #endif  /* MACH_KDP */
  496                 if (i_bit(MP_CLOCK, my_word)) {
  497                         DBGLOG(cpu_handle,my_cpu,MP_CLOCK);
  498                         i_bit_clear(MP_CLOCK, my_word);
  499                         hardclock(regs);
  500                 } else if (i_bit(MP_TLB_FLUSH, my_word)) {
  501                         DBGLOG(cpu_handle,my_cpu,MP_TLB_FLUSH);
  502                         i_bit_clear(MP_TLB_FLUSH, my_word);
  503                         pmap_update_interrupt();
  504                 } else if (i_bit(MP_AST, my_word)) {
  505                         DBGLOG(cpu_handle,my_cpu,MP_AST);
  506                         i_bit_clear(MP_AST, my_word);
  507                         ast_check(cpu_to_processor(my_cpu));
  508 #if     MACH_KDB
  509                 } else if (i_bit(MP_KDB, my_word)) {
  510                         extern kdb_is_slave[];
  511 
  512                         i_bit_clear(MP_KDB, my_word);
  513                         kdb_is_slave[my_cpu]++;
  514                         kdb_kintr();
  515 #endif  /* MACH_KDB */
  516                 } else if (i_bit(MP_RENDEZVOUS, my_word)) {
  517                         DBGLOG(cpu_handle,my_cpu,MP_RENDEZVOUS);
  518                         i_bit_clear(MP_RENDEZVOUS, my_word);
  519                         mp_rendezvous_action();
  520                 }
  521         } while (*my_word);
  522 
  523         mp_enable_preemption();
  524 
  525 }
  526 
/*
 * Send an interprocessor interrupt to the given cpu via the local
 * APIC ICR.  No-op until smp_init() has mapped the APIC.  Waits for
 * any previously-issued IPI to be delivered before writing the ICR.
 */
void
cpu_interrupt(int cpu)
{
        boolean_t       state;

        if (smp_initialized) {

                /* Wait for previous interrupt to be delivered... */
                while (LAPIC_REG(ICR) & LAPIC_ICR_DS_PENDING)
                        cpu_pause();

                /* ICRD then ICR: writing ICR triggers the send */
                state = ml_set_interrupts_enabled(FALSE);
                LAPIC_REG(ICRD) =
                        cpu_to_lapic[cpu] << LAPIC_ICRD_DEST_SHIFT;
                LAPIC_REG(ICR)  =
                        INTERPROCESS_INTERRUPT | LAPIC_ICR_DM_FIXED;
                (void) ml_set_interrupts_enabled(state);
        }

}
  547 
  548 void
  549 slave_clock(void)
  550 {
  551         int     cpu;
  552 
  553         /*
  554          * Clock interrupts are chained from the boot processor
  555          * to the next logical processor that is running and from
  556          * there on to any further running processor etc.
  557          */
  558         mp_disable_preemption();
  559         for (cpu=cpu_number()+1; cpu<NCPUS; cpu++)
  560                 if (machine_slot[cpu].running) {
  561                         i386_signal_cpu(cpu, MP_CLOCK, ASYNC);
  562                         mp_enable_preemption();
  563                         return;
  564                 }
  565         mp_enable_preemption();
  566 
  567 }
  568 
/*
 * Post an event to the target cpu's signal word and send it an IPI.
 * No-op if the target cpu is not up (cpu_status clear).  In SYNC
 * mode, spin until the target clears the event bit, re-logging and
 * retrying after each timeout window (10^9 tsc ticks) indefinitely.
 */
void
i386_signal_cpu(int cpu, mp_event_t event, mp_sync_t mode)
{
        volatile int    *signals = &cpu_data[cpu].cpu_signals;
        uint64_t        timeout;
        

        if (!cpu_data[cpu].cpu_status)
                return;

        DBGLOG(cpu_signal, cpu, event);

        i_bit_set(event, signals);
        cpu_interrupt(cpu);
        if (mode == SYNC) {
           again:
                timeout = rdtsc64() + (1000*1000*1000);
                while (i_bit(event, signals) && rdtsc64() < timeout) {
                        cpu_pause();
                }
                if (i_bit(event, signals)) {
                        /* Target has not acked within the window; keep waiting */
                        DBG("i386_signal_cpu(%d, 0x%x, SYNC) timed out\n",
                                cpu, event);
                        goto again;
                }
        }
}
  596 
  597 void
  598 i386_signal_cpus(mp_event_t event, mp_sync_t mode)
  599 {
  600         int     cpu;
  601         int     my_cpu = cpu_number();
  602 
  603         for (cpu = 0; cpu < NCPUS; cpu++) {
  604                 if (cpu == my_cpu || !machine_slot[cpu].running)
  605                         continue;
  606                 i386_signal_cpu(cpu, event, mode);
  607         }
  608 }
  609 
  610 int
  611 i386_active_cpus(void)
  612 {
  613         int     cpu;
  614         int     ncpus = 0;
  615 
  616         for (cpu = 0; cpu < NCPUS; cpu++) {
  617                 if (machine_slot[cpu].running)
  618                         ncpus++;
  619         }
  620         return(ncpus);
  621 }
  622 
  623 /*
  624  * All-CPU rendezvous:
  625  *      - CPUs are signalled,
  626  *      - all execute the setup function (if specified),
  627  *      - rendezvous (i.e. all cpus reach a barrier),
  628  *      - all execute the action function (if specified),
  629  *      - rendezvous again,
  630  *      - execute the teardown function (if specified), and then
  631  *      - resume.
  632  *
  633  * Note that the supplied external functions _must_ be reentrant and aware
  634  * that they are running in parallel and in an unknown lock context.
  635  */
  636 
/*
 * Per-cpu body of the rendezvous protocol (see block comment above):
 * run setup, spin at the entry barrier until all mp_rv_ncpus arrive,
 * run the action, spin at the exit barrier, then run teardown.
 * Executed on the initiator directly and on other cpus from the
 * MP_RENDEZVOUS IPI with interrupts disabled.
 */
static void
mp_rendezvous_action(void)
{

        /* setup function */
        if (mp_rv_setup_func != NULL)
                mp_rv_setup_func(mp_rv_func_arg);
        /* spin on entry rendezvous */
        atomic_incl(&mp_rv_waiters[0], 1);
        while (mp_rv_waiters[0] < mp_rv_ncpus)
                cpu_pause();
        /* action function */
        if (mp_rv_action_func != NULL)
                mp_rv_action_func(mp_rv_func_arg);
        /* spin on exit rendezvous */
        atomic_incl(&mp_rv_waiters[1], 1);
        while (mp_rv_waiters[1] < mp_rv_ncpus)
                cpu_pause();
        /* teardown function */
        if (mp_rv_teardown_func != NULL)
                mp_rv_teardown_func(mp_rv_func_arg);
}
  659 
/*
 * Initiate an all-cpu rendezvous (protocol in the block comment
 * above).  Before SMP is up, just run the three functions locally.
 * mp_rv_lock serializes concurrent rendezvous requests; the static
 * mp_rv_* variables carry the parameters to the other cpus.
 */
void
mp_rendezvous(void (*setup_func)(void *), 
              void (*action_func)(void *),
              void (*teardown_func)(void *),
              void *arg)
{

        if (!smp_initialized) {
                /* Uniprocessor: no barriers needed, call straight through */
                if (setup_func != NULL)
                        setup_func(arg);
                if (action_func != NULL)
                        action_func(arg);
                if (teardown_func != NULL)
                        teardown_func(arg);
                return;
        }
                
        /* obtain rendezvous lock */
        simple_lock(&mp_rv_lock);

        /* set static function pointers */
        mp_rv_setup_func = setup_func;
        mp_rv_action_func = action_func;
        mp_rv_teardown_func = teardown_func;
        mp_rv_func_arg = arg;

        mp_rv_waiters[0] = 0;           /* entry rendezvous count */
        mp_rv_waiters[1] = 0;           /* exit  rendezvous count */
        mp_rv_ncpus = i386_active_cpus();

        /*
         * signal other processors, which will call mp_rendezvous_action()
         * with interrupts disabled
         */
        i386_signal_cpus(MP_RENDEZVOUS, ASYNC);

        /* call executor function on this cpu */
        mp_rendezvous_action();

        /* release lock */
        simple_unlock(&mp_rv_lock);
}
  702 
  703 #if     MACH_KDP
volatile boolean_t      mp_kdp_trap = FALSE;    /* TRUE while a cpu owns kdp */
long                    mp_kdp_ncpus;           /* cpus parked in mp_kdp_wait, + owner */
  706 
/*
 * Enter the kdp kernel debugger: win the race for ownership of the
 * trap (losers park in mp_kdp_wait), then IPI every other running
 * cpu and wait -- with a tsc-based timeout -- for them to suspend.
 */
void
mp_kdp_enter(void)
{
        int             cpu;
        int             ncpus;
        int             my_cpu = cpu_number();
        boolean_t       state;
        uint64_t        timeout;

        DBG("mp_kdp_enter()\n");

        /*
         * Here to enter the debugger.
         * In case of races, only one cpu is allowed to enter kdp after
         * stopping others.
         */
        state = ml_set_interrupts_enabled(FALSE);
        simple_lock(&mp_kdp_lock);
        while (mp_kdp_trap) {
                /* Someone else owns kdp: park until released, then retry */
                simple_unlock(&mp_kdp_lock);
                DBG("mp_kdp_enter() race lost\n");
                mp_kdp_wait();
                simple_lock(&mp_kdp_lock);
        }
        mp_kdp_ncpus = 1;       /* self */
        mp_kdp_trap = TRUE;
        simple_unlock(&mp_kdp_lock);
        (void) ml_set_interrupts_enabled(state);

        /* Deliver a nudge to other cpus, counting how many */
        DBG("mp_kdp_enter() signaling other processors\n");
        for (ncpus = 1, cpu = 0; cpu < NCPUS; cpu++) {
                if (cpu == my_cpu || !machine_slot[cpu].running)
                        continue;
                ncpus++;
                i386_signal_cpu(cpu, MP_KDP, ASYNC); 
        }

        /* Wait other processors to spin. */
        DBG("mp_kdp_enter() waiting for (%d) processors to suspend\n", ncpus);
        timeout = rdtsc64() + (1000*1000*1000);
        while (*((volatile long *) &mp_kdp_ncpus) != ncpus
                && rdtsc64() < timeout) {
                cpu_pause();
        }
        DBG("mp_kdp_enter() %d processors done %s\n",
                mp_kdp_ncpus, (mp_kdp_ncpus == ncpus) ? "OK" : "timed out");
}
  755 
/*
 * Park this cpu while another cpu owns kdp: count ourselves in via
 * mp_kdp_ncpus, spin until mp_kdp_trap clears, then count out.
 */
static void
mp_kdp_wait(void)
{
        DBG("mp_kdp_wait()\n");
        atomic_incl(&mp_kdp_ncpus, 1);
        while (mp_kdp_trap) {
                cpu_pause();
        }
        atomic_decl(&mp_kdp_ncpus, 1);
        DBG("mp_kdp_wait() done\n");
}
  767 
/*
 * Leave the kdp debugger: drop our own count, release the trap flag
 * so parked cpus resume, and wait for them all to leave mp_kdp_wait.
 */
void
mp_kdp_exit(void)
{
        DBG("mp_kdp_exit()\n");
        atomic_decl(&mp_kdp_ncpus, 1);
        mp_kdp_trap = FALSE;

        /* Wait other processors to stop spinning. XXX needs timeout */
        DBG("mp_kdp_exit() waiting for processors to resume\n");
        while (*((volatile long *) &mp_kdp_ncpus) > 0) {
                cpu_pause();
        }
        DBG("mp_kdp_exit() done\n");
}
  782 #endif  /* MACH_KDP */
  783 
/*
 * Debug aid: dump this cpu's local APIC state, then exercise the
 * IPI path by posting a signal bit to cpu 1 and interrupting it.
 */
void
lapic_test(void)
{
        int     cpu = 1;

        lapic_dump();
        i_bit_set(0, &cpu_data[cpu].cpu_signals);
        cpu_interrupt(1);
}
  793 
/*ARGSUSED*/
/* Per-processor AST-check initialization hook; nothing to do on i386. */
void
init_ast_check(
        processor_t     processor)
{
}
  800 
  801 void
  802 cause_ast_check(
  803         processor_t     processor)
  804 {
  805         int     cpu = processor->slot_num;
  806 
  807         if (cpu != cpu_number()) {
  808                 i386_signal_cpu(cpu, MP_AST, ASYNC);
  809         }
  810 }
  811 
  812 /*
  813  * invoke kdb on slave processors 
  814  */
  815 
  816 void
  817 remote_kdb(void)
  818 {
  819         int     my_cpu = cpu_number();
  820         int     cpu;
  821         
  822         mp_disable_preemption();
  823         for (cpu = 0; cpu < NCPUS; cpu++) {
  824                 if (cpu == my_cpu || !machine_slot[cpu].running)
  825                         continue;
  826                 i386_signal_cpu(cpu, MP_KDB, SYNC);
  827         }
  828         mp_enable_preemption();
  829 }
  830 
  831 /*
  832  * Clear kdb interrupt
  833  */
  834 
/* Clear any pending MP_KDB signal bit for the current processor. */
void
clear_kdb_intr(void)
{
        mp_disable_preemption();
        i_bit_clear(MP_KDB, &cpu_data[cpu_number()].cpu_signals);
        mp_enable_preemption();
}
  842 
/*
 * Per-cpu initialization run by each application processor once it
 * reaches protected mode: enable caching, program its local APIC,
 * initialize the FPU, and finish machine-dependent cpu setup.
 */
void
slave_machine_init(void)
{
        int     my_cpu;

        /* Ensure that caching and write-through are enabled */
        set_cr0(get_cr0() & ~(CR0_NW|CR0_CD));

        mp_disable_preemption();
        my_cpu = get_cpu_number();

        DBG("slave_machine_init() CPU%d: phys (%d) active.\n",
                my_cpu, get_cpu_phys_number());

        lapic_init();

        init_fpu();

        cpu_machine_init();

        mp_enable_preemption();

#ifdef MP_DEBUG
        lapic_dump();
        lapic_cpu_map_dump();
#endif /* MP_DEBUG */

}
  871 
/*
 * Out-of-line cpu_number() for callers that need a real function
 * rather than the macro.
 * NOTE(review): "#undef cpu_number()" with a parameter list is
 * nonstandard -- #undef takes a bare identifier; confirm the build
 * tolerates the trailing tokens.
 */
#undef cpu_number()
int cpu_number(void)
{
        return get_cpu_number();
}
  877 
  878 #if     MACH_KDB
  879 #include <ddb/db_output.h>
  880 
#define TRAP_DEBUG 0 /* Must match interrupt.s and spl.s */


#if     TRAP_DEBUG
/* Circular history of the last MTRAPS spl/interrupt transitions. */
#define MTRAPS 100
struct mp_trap_hist_struct {
        unsigned char type;     /* 1 = SPL change, 2 = interrupt */
        unsigned char data[5];  /* per-type payload, see legend below */
} trap_hist[MTRAPS], *cur_trap_hist = trap_hist,
    *max_trap_hist = &trap_hist[MTRAPS];

void db_trap_hist(void);

/*
 * SPL:
 *      1: new spl
 *      2: old spl
 *      3: new tpr
 *      4: old tpr
 * INT:
 *      1: int vec
 *      2: old spl
 *      3: new spl
 *      4: post eoi tpr
 *      5: exit tpr
 */

/* ddb command: print the trap history ring; '*' marks not-yet-overwritten entries. */
void
db_trap_hist(void)
{
        int i,j;
        for(i=0;i<MTRAPS;i++)
            if (trap_hist[i].type == 1 || trap_hist[i].type == 2) {
                    db_printf("%s%s",
                              (&trap_hist[i]>=cur_trap_hist)?"*":" ",
                              (trap_hist[i].type == 1)?"SPL":"INT");
                    for(j=0;j<5;j++)
                        db_printf(" %02x", trap_hist[i].data[j]);
                    db_printf("\n");
            }
                
}
  923 #endif  /* TRAP_DEBUG */
  924 
/* Prototypes for the ddb helper commands defined below. */
void db_lapic(int cpu);
unsigned int db_remote_read(int cpu, int reg);
void db_ioapic(unsigned int);
void kdb_console(void);

/* Hook for switching the console into the debugger; no-op here. */
void
kdb_console(void)
{
}

/* Prefix a '!' when the predicate is false (for one-char flag dumps). */
#define BOOLP(a) ((a)?' ':'!')
  936 
  937 static char *DM[8] = {
  938         "Fixed",
  939         "Lowest Priority",
  940         "Invalid",
  941         "Invalid",
  942         "NMI",
  943         "Reset",
  944         "Invalid",
  945         "ExtINT"};
  946 
  947 unsigned int
  948 db_remote_read(int cpu, int reg)
  949 {
  950         return -1;
  951 }
  952 
/* ddb command: dump the local APIC of the given cpu; not implemented. */
void
db_lapic(int cpu)
{
}
  957 
/* ddb command: dump an I/O APIC register; not implemented. */
void
db_ioapic(unsigned int ind)
{
}
  962 
  963 #endif  /* MACH_KDB */
  964 

Cache object: fb80bf00862b9fb78e72f426ea10504b


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.