FreeBSD/Linux Kernel Cross Reference
sys/i386/i386/mp_machdep.c


    1 /*-
    2  * Copyright (c) 1996, by Steve Passe
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. The name of the developer may NOT be used to endorse or promote products
   11  *    derived from this software without specific prior written permission.
   12  *
   13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   16  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   23  * SUCH DAMAGE.
   24  */
   25 
   26 #include <sys/cdefs.h>
   27 __FBSDID("$FreeBSD$");
   28 
   29 #include "opt_apic.h"
   30 #include "opt_cpu.h"
   31 #include "opt_kdb.h"
   32 #include "opt_kstack_pages.h"
   33 #include "opt_mp_watchdog.h"
   34 #include "opt_sched.h"
   35 
   36 #if !defined(lint)
   37 #if !defined(SMP)
   38 #error How did you get here?
   39 #endif
   40 
   41 #ifndef DEV_APIC
   42 #error The apic device is required for SMP, add "device apic" to your config file.
   43 #endif
   44 #if defined(CPU_DISABLE_CMPXCHG) && !defined(COMPILING_LINT)
   45 #error SMP not supported with CPU_DISABLE_CMPXCHG
   46 #endif
   47 #endif /* not lint */
   48 
   49 #include <sys/param.h>
   50 #include <sys/systm.h>
   51 #include <sys/bus.h>
   52 #include <sys/cons.h>   /* cngetc() */
   53 #ifdef GPROF 
   54 #include <sys/gmon.h>
   55 #endif
   56 #include <sys/kernel.h>
   57 #include <sys/ktr.h>
   58 #include <sys/lock.h>
   59 #include <sys/malloc.h>
   60 #include <sys/memrange.h>
   61 #include <sys/mutex.h>
   62 #include <sys/pcpu.h>
   63 #include <sys/proc.h>
   64 #include <sys/smp.h>
   65 #include <sys/sysctl.h>
   66 
   67 #include <vm/vm.h>
   68 #include <vm/vm_param.h>
   69 #include <vm/pmap.h>
   70 #include <vm/vm_kern.h>
   71 #include <vm/vm_extern.h>
   72 
   73 #include <machine/apicreg.h>
   74 #include <machine/clock.h>
   75 #include <machine/md_var.h>
   76 #include <machine/mp_watchdog.h>
   77 #include <machine/pcb.h>
   78 #include <machine/psl.h>
   79 #include <machine/smp.h>
   80 #include <machine/smptests.h>   /** COUNT_XINVLTLB_HITS */
   81 #include <machine/specialreg.h>
   82 
   83 #define WARMBOOT_TARGET         0
   84 #define WARMBOOT_OFF            (KERNBASE + 0x0467)
   85 #define WARMBOOT_SEG            (KERNBASE + 0x0469)
   86 
   87 #define CMOS_REG                (0x70)
   88 #define CMOS_DATA               (0x71)
   89 #define BIOS_RESET              (0x0f)
   90 #define BIOS_WARM               (0x0a)
   91 
   92 /*
   93  * This code MUST be enabled both here and in mpboot.s.
   94  * It tracks the very early stages of AP boot by placing values in CMOS RAM.
   95  * It is NORMALLY never needed, hence the primitive method for enabling it.
   96  *
   97 #define CHECK_POINTS
   98  */
   99 
  100 #if defined(CHECK_POINTS) && !defined(PC98)
  101 #define CHECK_READ(A)    (outb(CMOS_REG, (A)), inb(CMOS_DATA))
  102 #define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D)))
  103 
  104 #define CHECK_INIT(D)                           \
  105         CHECK_WRITE(0x34, (D));                 \
  106         CHECK_WRITE(0x35, (D));                 \
  107         CHECK_WRITE(0x36, (D));                 \
  108         CHECK_WRITE(0x37, (D));                 \
  109         CHECK_WRITE(0x38, (D));                 \
  110         CHECK_WRITE(0x39, (D));
  111 
  112 #define CHECK_PRINT(S)                          \
  113         printf("%s: %d, %d, %d, %d, %d, %d\n",  \
  114            (S),                                 \
  115            CHECK_READ(0x34),                    \
  116            CHECK_READ(0x35),                    \
  117            CHECK_READ(0x36),                    \
  118            CHECK_READ(0x37),                    \
  119            CHECK_READ(0x38),                    \
  120            CHECK_READ(0x39));
  121 
  122 #else                           /* CHECK_POINTS */
  123 
  124 #define CHECK_INIT(D)
  125 #define CHECK_PRINT(S)
  126 #define CHECK_WRITE(A, D)
  127 
  128 #endif                          /* CHECK_POINTS */
  129 
  130 /*
  131  * Values to send to the POST hardware.
  132  */
  133 #define MP_BOOTADDRESS_POST     0x10
  134 #define MP_PROBE_POST           0x11
  135 #define MPTABLE_PASS1_POST      0x12
  136 
  137 #define MP_START_POST           0x13
  138 #define MP_ENABLE_POST          0x14
  139 #define MPTABLE_PASS2_POST      0x15
  140 
  141 #define START_ALL_APS_POST      0x16
  142 #define INSTALL_AP_TRAMP_POST   0x17
  143 #define START_AP_POST           0x18
  144 
  145 #define MP_ANNOUNCE_POST        0x19
  146 
  147 /* lock region used by kernel profiling */
  148 int     mcount_lock;
  149 
  150 /** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
  151 int     current_postcode;
  152 
  153 int     mp_naps;                /* # of application processors */
  154 int     boot_cpu_id = -1;       /* designated BSP */
  155 extern  int nkpt;
  156 
  157 extern  struct pcpu __pcpu[];
  158 
  159 /*
  160  * CPU topology map data structures for HTT.
  161  */
  162 static struct cpu_group mp_groups[MAXCPU];
  163 static struct cpu_top mp_top;
  164 
  165 /* AP uses this during bootstrap.  Do not staticize.  */
  166 char *bootSTK;
  167 static int bootAP;
  168 
  169 /* Free these after use */
  170 void *bootstacks[MAXCPU];
  171 
  172 /* Hotwire a 0->4MB V==P mapping */
  173 extern pt_entry_t *KPTphys;
  174 
  175 struct pcb stoppcbs[MAXCPU];
  176 
  177 /* Variables needed for SMP tlb shootdown. */
  178 vm_offset_t smp_tlb_addr1;
  179 vm_offset_t smp_tlb_addr2;
  180 volatile int smp_tlb_wait;
  181 
  182 #ifdef KDB_STOP_NMI
  183 volatile cpumask_t ipi_nmi_pending;
  184 #endif 
  185 
  186 #ifdef COUNT_IPIS
  187 /* Interrupt counts. */
  188 #ifdef IPI_PREEMPTION
  189 static u_long *ipi_preempt_counts[MAXCPU];
  190 #endif
  191 static u_long *ipi_ast_counts[MAXCPU];
  192 u_long *ipi_invltlb_counts[MAXCPU];
  193 u_long *ipi_invlrng_counts[MAXCPU];
  194 u_long *ipi_invlpg_counts[MAXCPU];
  195 u_long *ipi_invlcache_counts[MAXCPU];
  196 u_long *ipi_rendezvous_counts[MAXCPU];
  197 u_long *ipi_lazypmap_counts[MAXCPU];
  198 #endif
  199 
  200 /*
  201  * Local data and functions.
  202  */
  203 
  204 static u_int logical_cpus;
  205 
  206 /* Used to hold the APs until we are ready to release them. */
  207 static struct mtx ap_boot_mtx;
  208 
  209 /* Set to 1 once we're ready to let the APs out of the pen. */
  210 static volatile int aps_ready = 0;
  211 
  212 /*
  213  * Store data from cpu_add() until later in the boot when we actually setup
  214  * the APs.
  215  */
  216 struct cpu_info {
  217         int     cpu_present:1;
  218         int     cpu_bsp:1;
  219         int     cpu_disabled:1;
  220         int     cpu_hyperthread:1;
  221 } static cpu_info[MAX_APIC_ID + 1];
  222 static int cpu_apic_ids[MAXCPU];
  223 
  224 /* Holds pending bitmap based IPIs per CPU */
  225 static volatile u_int cpu_ipi_pending[MAXCPU];
  226 
  227 static u_int boot_address;
  228 
  229 static void     assign_cpu_ids(void);
  230 static void     install_ap_tramp(void);
  231 static void     set_interrupt_apic_ids(void);
  232 static int      start_all_aps(void);
  233 static int      start_ap(int apic_id);
  234 static void     release_aps(void *dummy);
  235 
  236 static int      hlt_logical_cpus;
  237 static u_int    hyperthreading_cpus;
  238 static cpumask_t        hyperthreading_cpus_mask;
  239 static int      hyperthreading_allowed;
  240 static struct   sysctl_ctx_list logical_cpu_clist;
  241 
  242 static void
  243 mem_range_AP_init(void)
  244 {
  245         if (mem_range_softc.mr_op && mem_range_softc.mr_op->initAP)
  246                 mem_range_softc.mr_op->initAP(&mem_range_softc);
  247 }
  248 
  249 void
  250 mp_topology(void)
  251 {
  252         struct cpu_group *group;
  253         int logical_cpus;
  254         int apic_id;
  255         int groups;
  256         int cpu;
  257 
  258         /* Build the smp_topology map. */
  259         /* Nothing to do if there is no HTT support. */
  260         if ((cpu_feature & CPUID_HTT) == 0)
  261                 return;
  262         logical_cpus = (cpu_procinfo & CPUID_HTT_CORES) >> 16;
  263         if (logical_cpus <= 1)
  264                 return;
  265         group = &mp_groups[0];
  266         groups = 1;
  267         for (cpu = 0, apic_id = 0; apic_id <= MAX_APIC_ID; apic_id++) {
  268                 if (!cpu_info[apic_id].cpu_present)
  269                         continue;
  270                 /*
  271                  * If the current group has members and we're not a logical
  272                  * cpu, create a new group.
  273                  */
  274                 if (group->cg_count != 0 && (apic_id % logical_cpus) == 0) {
  275                         group++;
  276                         groups++;
  277                 }
  278                 group->cg_count++;
  279                 group->cg_mask |= 1 << cpu;
  280                 cpu++;
  281         }
  282 
  283         mp_top.ct_count = groups;
  284         mp_top.ct_group = mp_groups;
  285         smp_topology = &mp_top;
  286 }
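
/*
 * Worked example (illustrative only, not from a real machine): with
 * logical_cpus == 2 and APIC IDs 0-3 all present, the loop above
 * builds mp_groups[0] = { cg_count 2, cg_mask 0x3 } for CPUs 0 and 1
 * and mp_groups[1] = { cg_count 2, cg_mask 0xc } for CPUs 2 and 3,
 * so smp_topology describes two groups of two logical CPUs each.
 */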
  287 
  288 
  289 /*
  290  * Calculate usable address in base memory for AP trampoline code.
  291  */
  292 u_int
  293 mp_bootaddress(u_int basemem)
  294 {
  295         POSTCODE(MP_BOOTADDRESS_POST);
  296 
  297         boot_address = trunc_page(basemem);     /* round down to 4k boundary */
  298         if ((basemem - boot_address) < bootMP_size)
  299                 boot_address -= PAGE_SIZE;      /* not enough, lower by 4k */
  300 
  301         return boot_address;
  302 }
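
/*
 * Worked example (hypothetical numbers): with basemem == 0x9fc00 and
 * bootMP_size == 0x1000, trunc_page() yields 0x9f000; the 0xc00 bytes
 * between 0x9f000 and basemem cannot hold the trampoline, so the
 * address is lowered one more page to a final boot_address of 0x9e000.
 */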
  303 
  304 void
  305 cpu_add(u_int apic_id, char boot_cpu)
  306 {
  307 
  308         if (apic_id > MAX_APIC_ID) {
  309                 panic("SMP: APIC ID %d too high", apic_id);
  310                 return;
  311         }
  312         KASSERT(cpu_info[apic_id].cpu_present == 0, ("CPU %d added twice",
  313             apic_id));
  314         cpu_info[apic_id].cpu_present = 1;
  315         if (boot_cpu) {
  316                 KASSERT(boot_cpu_id == -1,
  317                     ("CPU %d claims to be BSP, but CPU %d already is", apic_id,
  318                     boot_cpu_id));
  319                 boot_cpu_id = apic_id;
  320                 cpu_info[apic_id].cpu_bsp = 1;
  321         }
  322         if (mp_ncpus < MAXCPU)
  323                 mp_ncpus++;
  324         if (bootverbose)
  325                 printf("SMP: Added CPU %d (%s)\n", apic_id, boot_cpu ? "BSP" :
  326                     "AP");
  327 }
  328 
  329 void
  330 cpu_mp_setmaxid(void)
  331 {
  332 
  333         mp_maxid = MAXCPU - 1;
  334 }
  335 
  336 int
  337 cpu_mp_probe(void)
  338 {
  339 
  340         /*
  341          * Always record BSP in CPU map so that the mbuf init code works
  342          * correctly.
  343          */
  344         all_cpus = 1;
  345         if (mp_ncpus == 0) {
  346                 /*
  347                  * No CPUs were found, so this must be a UP system.  Setup
  348                  * the variables to represent a system with a single CPU
  349                  * with an id of 0.
  350                  */
  351                 mp_ncpus = 1;
  352                 return (0);
  353         }
  354 
  355         /* At least one CPU was found. */
  356         if (mp_ncpus == 1) {
  357                 /*
  358                  * One CPU was found, so this must be a UP system with
  359                  * an I/O APIC.
  360                  */
  361                 return (0);
  362         }
  363 
  364         /* At least two CPUs were found. */
  365         return (1);
  366 }
  367 
  368 /*
  369  * Initialize the IPI handlers and start up the APs.
  370  */
  371 void
  372 cpu_mp_start(void)
  373 {
  374         int i;
  375         u_int threads_per_cache, p[4];
  376 
  377         POSTCODE(MP_START_POST);
  378 
  379         /* Initialize the logical ID to APIC ID table. */
  380         for (i = 0; i < MAXCPU; i++) {
  381                 cpu_apic_ids[i] = -1;
  382                 cpu_ipi_pending[i] = 0;
  383         }
  384 
  385         /* Install an inter-CPU IPI for TLB invalidation */
  386         setidt(IPI_INVLTLB, IDTVEC(invltlb),
  387                SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
  388         setidt(IPI_INVLPG, IDTVEC(invlpg),
  389                SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
  390         setidt(IPI_INVLRNG, IDTVEC(invlrng),
  391                SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
  392 
  393         /* Install an inter-CPU IPI for cache invalidation. */
  394         setidt(IPI_INVLCACHE, IDTVEC(invlcache),
  395                SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
  396 
  397         /* Install an inter-CPU IPI for lazy pmap release */
  398         setidt(IPI_LAZYPMAP, IDTVEC(lazypmap),
  399                SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
  400 
  401         /* Install an inter-CPU IPI for all-CPU rendezvous */
  402         setidt(IPI_RENDEZVOUS, IDTVEC(rendezvous),
  403                SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
  404 
  405         /* Install generic inter-CPU IPI handler */
  406         setidt(IPI_BITMAP_VECTOR, IDTVEC(ipi_intr_bitmap_handler),
  407                SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
  408 
  409         /* Install an inter-CPU IPI for CPU stop/restart */
  410         setidt(IPI_STOP, IDTVEC(cpustop),
  411                SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
  412 
  413 
  414         /* Set boot_cpu_id if needed. */
  415         if (boot_cpu_id == -1) {
  416                 boot_cpu_id = PCPU_GET(apic_id);
  417                 cpu_info[boot_cpu_id].cpu_bsp = 1;
  418         } else
  419                 KASSERT(boot_cpu_id == PCPU_GET(apic_id),
  420                     ("BSP's APIC ID doesn't match boot_cpu_id"));
  421 
  422         /* Setup the initial logical CPUs info. */
  423         logical_cpus = logical_cpus_mask = 0;
  424         if (cpu_feature & CPUID_HTT)
  425                 logical_cpus = (cpu_procinfo & CPUID_HTT_CORES) >> 16;
  426 
  427         /*
  428          * Work out if hyperthreading is *really* enabled.  This
  429          * is made really ugly by the fact that processors lie: Dual
  430          * core processors claim to be hyperthreaded even when they're
  431          * not, presumably because they want to be treated the same
  432          * way as HTT with respect to per-cpu software licensing.
  433          * At the time of writing (May 12, 2005) the only hyperthreaded
  434          * cpus are from Intel, and Intel's dual-core processors can be
  435          * identified via the "deterministic cache parameters" cpuid
  436          * calls.
  437          */
  438         /*
  439          * First determine if this is an Intel processor which claims
  440          * to have hyperthreading support.
  441          */
  442         if ((cpu_feature & CPUID_HTT) &&
  443             (strcmp(cpu_vendor, "GenuineIntel") == 0)) {
  444                 /*
  445                  * If the "deterministic cache parameters" cpuid calls
  446                  * are available, use them.
  447                  */
  448                 if (cpu_high >= 4) {
  449                         /* Ask the processor about the L1 cache. */
  450                         for (i = 0; i < 1; i++) {
  451                                 cpuid_count(4, i, p);
  452                                 threads_per_cache = ((p[0] & 0x3ffc000) >> 14) + 1;
  453                                 if (hyperthreading_cpus < threads_per_cache)
  454                                         hyperthreading_cpus = threads_per_cache;
  455                                 if ((p[0] & 0x1f) == 0)
  456                                         break;
  457                         }
  458                 }
  459 
  460                 /*
  461                  * If the deterministic cache parameters are not
  462                  * available, or if no caches were reported to exist,
  463                  * just accept what the HTT flag indicated.
  464                  */
  465                 if (hyperthreading_cpus == 0)
  466                         hyperthreading_cpus = logical_cpus;
  467         }
  468 
  469         assign_cpu_ids();
  470 
  471         /* Start each Application Processor */
  472         start_all_aps();
  473 
  474         set_interrupt_apic_ids();
  475 }
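
/*
 * Note on the leaf-4 decoding in cpu_mp_start(): for CPUID.(EAX=4,
 * ECX=i), EAX bits 25:14 hold the maximum number of addressable IDs
 * for logical processors sharing the cache, minus one, hence the
 * ((p[0] & 0x3ffc000) >> 14) + 1 computation; EAX bits 4:0 are the
 * cache type, where 0 means "no more caches".  The loop as written
 * only queries index 0, matching its "L1 cache" comment.  Worked
 * example (hypothetical value): EAX == 0x04004121 has bits 25:14 == 1,
 * so threads_per_cache == 2 and hyperthreading_cpus becomes 2.
 */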
  476 
  477 
  478 /*
  479  * Print various information about the SMP system hardware and setup.
  480  */
  481 void
  482 cpu_mp_announce(void)
  483 {
  484         const char *hyperthread;
  485         int i;
  486 
  487         POSTCODE(MP_ANNOUNCE_POST);
  488 
  489         /* List Active CPUs first. */
  490         printf(" cpu0 (BSP): APIC ID: %2d\n", boot_cpu_id);
  491         for (i = 1; i < mp_ncpus; i++) {
  492                 if (cpu_info[cpu_apic_ids[i]].cpu_hyperthread)
  493                         hyperthread = "/HT";
  494                 else
  495                         hyperthread = "";
  496                 printf(" cpu%d (AP%s): APIC ID: %2d\n", i, hyperthread,
  497                                 cpu_apic_ids[i]);
  498         }
  499 
  500         /* List disabled CPUs last. */
  501         for (i = 0; i <= MAX_APIC_ID; i++) {
  502                 if (!cpu_info[i].cpu_present || !cpu_info[i].cpu_disabled)
  503                         continue;
  504                 if (cpu_info[i].cpu_hyperthread)
  505                         hyperthread = "/HT";
  506                 else
  507                         hyperthread = "";
  508                 printf("  cpu (AP%s): APIC ID: %2d (disabled)\n", hyperthread,
  509                         i);
  510         }
  511 }
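
/*
 * Example output (illustrative) on a two-package HTT system:
 *
 *  cpu0 (BSP): APIC ID:  0
 *  cpu1 (AP/HT): APIC ID:  1
 *  cpu2 (AP): APIC ID:  6
 *  cpu3 (AP/HT): APIC ID:  7
 */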
  512 
  513 /*
  514  * AP CPUs call this to initialize themselves.
  515  */
  516 void
  517 init_secondary(void)
  518 {
  519         struct pcpu *pc;
  520         vm_offset_t addr;
  521         int     gsel_tss;
  522         int     x, myid;
  523         u_int   cr0;
  524 
  525         /* bootAP is set in start_ap() to our ID. */
  526         myid = bootAP;
  527 
  528         /* Get per-cpu data */
  529         pc = &__pcpu[myid];
  530 
  531         /* prime data page for it to use */
  532         pcpu_init(pc, myid, sizeof(struct pcpu));
  533         pc->pc_apic_id = cpu_apic_ids[myid];
  534         pc->pc_prvspace = pc;
  535         pc->pc_curthread = 0;
  536 
  537         gdt_segs[GPRIV_SEL].ssd_base = (int) pc;
  538         gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss;
  539 
  540         for (x = 0; x < NGDT; x++) {
  541                 ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x].sd);
  542         }
  543 
  544         r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
  545         r_gdt.rd_base = (int) &gdt[myid * NGDT];
  546         lgdt(&r_gdt);                   /* does magic intra-segment return */
  547 
  548         lidt(&r_idt);
  549 
  550         lldt(_default_ldt);
  551         PCPU_SET(currentldt, _default_ldt);
  552 
  553         gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
  554         gdt[myid * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;
  555         PCPU_SET(common_tss.tss_esp0, 0); /* not used until after switch */
  556         PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
  557         PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
  558         PCPU_SET(tss_gdt, &gdt[myid * NGDT + GPROC0_SEL].sd);
  559         PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
  560         ltr(gsel_tss);
  561 
  562         PCPU_SET(fsgs_gdt, &gdt[myid * NGDT + GUFS_SEL].sd);
  563 
  564         /*
  565          * Set to a known state:
  566          * Set by mpboot.s: CR0_PG, CR0_PE
  567          * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
  568          */
  569         cr0 = rcr0();
  570         cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
  571         load_cr0(cr0);
  572         CHECK_WRITE(0x38, 5);
  573         
  574         /* Disable local APIC just to be sure. */
  575         lapic_disable();
  576 
  577         /* signal our startup to the BSP. */
  578         mp_naps++;
  579         CHECK_WRITE(0x39, 6);
  580 
  581         /* Spin until the BSP releases the APs. */
  582         while (!aps_ready)
  583                 ia32_pause();
  584 
  585         /* BSP may have changed PTD while we were waiting */
  586         invltlb();
  587         for (addr = 0; addr < NKPT * NBPDR - 1; addr += PAGE_SIZE)
  588                 invlpg(addr);
  589 
  590 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
  591         lidt(&r_idt);
  592 #endif
  593 
  594         /* Initialize the PAT MSR if present. */
  595         pmap_init_pat();
  596 
  597         /* set up CPU registers and state */
  598         cpu_setregs();
  599 
  600         /* set up FPU state on the AP */
  601         npxinit(__INITIAL_NPXCW__);
  602 
  603         /* set up SSE registers */
  604         enable_sse();
  605 
  606         /* A quick check from sanity claus */
  607         if (PCPU_GET(apic_id) != lapic_id()) {
  608                 printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
  609                 printf("SMP: actual apic_id = %d\n", lapic_id());
  610                 printf("SMP: correct apic_id = %d\n", PCPU_GET(apic_id));
  611                 panic("cpuid mismatch! boom!!");
  612         }
  613 
  614         /* Initialize curthread. */
  615         KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
  616         PCPU_SET(curthread, PCPU_GET(idlethread));
  617 
  618         mtx_lock_spin(&ap_boot_mtx);
  619 
  620         /* Init local apic for irq's */
  621         lapic_setup(1);
  622 
  623         /* Set memory range attributes for this CPU to match the BSP */
  624         mem_range_AP_init();
  625 
  626         smp_cpus++;
  627 
  628         CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
  629         printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));
  630 
  631         /* Determine if we are a logical CPU. */
  632         if (logical_cpus > 1 && PCPU_GET(apic_id) % logical_cpus != 0)
  633                 logical_cpus_mask |= PCPU_GET(cpumask);
  634         
  635         /* Determine if we are a hyperthread. */
  636         if (hyperthreading_cpus > 1 &&
  637             PCPU_GET(apic_id) % hyperthreading_cpus != 0)
  638                 hyperthreading_cpus_mask |= PCPU_GET(cpumask);
  639 
  640         /* Build our map of 'other' CPUs. */
  641         PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));
  642 
  643         if (bootverbose)
  644                 lapic_dump("AP");
  645 
  646         if (smp_cpus == mp_ncpus) {
  647                 /* enable IPI's, tlb shootdown, freezes etc */
  648                 atomic_store_rel_int(&smp_started, 1);
  649                 smp_active = 1;  /* historic */
  650         }
  651 
  652         mtx_unlock_spin(&ap_boot_mtx);
  653 
  654         /* Wait until all the APs are up. */
  655         while (smp_started == 0)
  656                 ia32_pause();
  657 
  658         /* ok, now grab sched_lock and enter the scheduler */
  659         mtx_lock_spin(&sched_lock);
  660 
  661         /*
  662          * Correct spinlock nesting.  The idle thread context that we are
  663          * borrowing was created so that it would start out with a single
  664          * spin lock (sched_lock) held in fork_trampoline().  Since we've
  665          * explicitly acquired locks in this function, the nesting count
  666          * is now 2 rather than 1.  Since we are nested, calling
  667          * spinlock_exit() will simply adjust the counts without allowing
  668          * spin lock using code to interrupt us.
  669          */
  670         spinlock_exit();
  671         KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
  672 
  673         binuptime(PCPU_PTR(switchtime));
  674         PCPU_SET(switchticks, ticks);
  675 
  676         cpu_throw(NULL, choosethread());        /* doesn't return */
  677 
  678         panic("scheduler returned us to %s", __func__);
  679         /* NOTREACHED */
  680 }
  681 
  682 /*******************************************************************
  683  * local functions and data
  684  */
  685 
  686 /*
  687  * We tell the I/O APIC code about all the CPUs that we want to
  688  * receive interrupts.  If we don't want certain CPUs to receive
  689  * IRQs, we simply don't tell the I/O APIC code about them here.
  690  * We also do not tell it about the BSP, since it adds the BSP
  691  * internally so as to work with UP kernels and on UP machines.
  692  */
  693 static void
  694 set_interrupt_apic_ids(void)
  695 {
  696         u_int apic_id;
  697 
  698         for (apic_id = 0; apic_id <= MAX_APIC_ID; apic_id++) {
  699                 if (!cpu_info[apic_id].cpu_present)
  700                         continue;
  701                 if (cpu_info[apic_id].cpu_bsp)
  702                         continue;
  703                 if (cpu_info[apic_id].cpu_disabled)
  704                         continue;
  705 
  706                 /* Don't let hyperthreads service interrupts. */
  707                 if (hyperthreading_cpus > 1 &&
  708                     apic_id % hyperthreading_cpus != 0)
  709                         continue;
  710 
  711                 intr_add_cpu(apic_id);
  712         }
  713 }
  714 
  715 /*
  716  * Assign logical CPU IDs to local APICs.
  717  */
  718 static void
  719 assign_cpu_ids(void)
  720 {
  721         u_int i;
  722 
  723         /* Check for explicitly disabled CPUs. */
  724         for (i = 0; i <= MAX_APIC_ID; i++) {
  725                 if (!cpu_info[i].cpu_present || cpu_info[i].cpu_bsp)
  726                         continue;
  727 
  728                 if (hyperthreading_cpus > 1 && i % hyperthreading_cpus != 0)
  729                         cpu_info[i].cpu_hyperthread = 1;
  730 
  731                 /* Don't use this CPU if it has been disabled by a tunable. */
  732                 if (resource_disabled("lapic", i)) {
  733                         cpu_info[i].cpu_disabled = 1;
  734                         continue;
  735                 }
  736         }
  737 
  738         /*
  739          * Assign CPU IDs to local APIC IDs and disable any CPUs
  740          * beyond MAXCPU.  CPU 0 is always assigned to the BSP.
  741          *
  742          * To minimize confusion for userland, we attempt to number
  743          * CPUs such that all the threads and cores in a package are
  744          * grouped together. For now we assume that the BSP is always
  745          * the first thread in a package and just start adding APs
  746          * starting with the BSP's APIC ID.
  747          */
  748         mp_ncpus = 1;
  749         cpu_apic_ids[0] = boot_cpu_id;
  750         for (i = boot_cpu_id + 1; i != boot_cpu_id;
  751                  i == MAX_APIC_ID ? i = 0 : i++) {
  752                 if (!cpu_info[i].cpu_present || cpu_info[i].cpu_bsp ||
  753                     cpu_info[i].cpu_disabled)
  754                         continue;
  755 
  756                 if (mp_ncpus < MAXCPU) {
  757                         cpu_apic_ids[mp_ncpus] = i;
  758                         mp_ncpus++;
  759                 } else
  760                         cpu_info[i].cpu_disabled = 1;
  761         }
  762         KASSERT(mp_maxid >= mp_ncpus - 1,
  763             ("%s: counters out of sync: max %d, count %d", __func__, mp_maxid,
  764             mp_ncpus));         
  765 }
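
/*
 * Worked example of the wrap-around walk above (hypothetical IDs):
 * with boot_cpu_id == 6 and APIC IDs 0, 1, 6 and 7 present, the loop
 * starts at 7, wraps from MAX_APIC_ID back to 0, and assigns
 * cpu_apic_ids[] = { 6, 7, 0, 1 }, keeping the BSP's package (APIC
 * IDs 6 and 7) numbered first.
 */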
  766 
  767 /*
  768  * start each AP in our list
  769  */
  770 static int
  771 start_all_aps(void)
  772 {
  773 #ifndef PC98
  774         u_char mpbiosreason;
  775 #endif
  776         uintptr_t kptbase;
  777         u_int32_t mpbioswarmvec;
  778         int apic_id, cpu, i;
  779 
  780         POSTCODE(START_ALL_APS_POST);
  781 
  782         mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);
  783 
  784         /* install the AP 1st level boot code */
  785         install_ap_tramp();
  786 
  787         /* save the current value of the warm-start vector */
  788         mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
  789 #ifndef PC98
  790         outb(CMOS_REG, BIOS_RESET);
  791         mpbiosreason = inb(CMOS_DATA);
  792 #endif
  793 
  794         /* set up temporary P==V mapping for AP boot */
  795         /* XXX this is a hack, we should boot the AP on its own stack/PTD */
  796         kptbase = (uintptr_t)(void *)KPTphys;
  797         for (i = 0; i < NKPT; i++)
  798                 PTD[i] = (pd_entry_t)(PG_V | PG_RW |
  799                     ((kptbase + i * PAGE_SIZE) & PG_FRAME));
  800         invltlb();
  801 
  802         /* start each AP */
  803         for (cpu = 1; cpu < mp_ncpus; cpu++) {
  804                 apic_id = cpu_apic_ids[cpu];
  805 
  806                 /* allocate and set up a boot stack data page */
  807                 bootstacks[cpu] = (char *)kmem_alloc(kernel_map, KSTACK_PAGES * PAGE_SIZE);
  808 
  809                 /* setup a vector to our boot code */
  810                 *((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
  811                 *((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
  812 #ifndef PC98
  813                 outb(CMOS_REG, BIOS_RESET);
  814                 outb(CMOS_DATA, BIOS_WARM);     /* 'warm-start' */
  815 #endif
  816 
  817                 bootSTK = (char *)bootstacks[cpu] + KSTACK_PAGES * PAGE_SIZE - 4;
  818                 bootAP = cpu;
  819 
  820                 /* attempt to start the Application Processor */
  821                 CHECK_INIT(99); /* setup checkpoints */
  822                 if (!start_ap(apic_id)) {
  823                         printf("AP #%d (PHY# %d) failed!\n", cpu, apic_id);
  824                         CHECK_PRINT("trace");   /* show checkpoints */
  825                         /* better panic as the AP may be running loose */
  826                         printf("panic y/n? [y] ");
  827                         if (cngetc() != 'n')
  828                                 panic("bye-bye");
  829                 }
  830                 CHECK_PRINT("trace");           /* show checkpoints */
  831 
  832                 all_cpus |= (1 << cpu);         /* record AP in CPU map */
  833         }
  834 
  835         /* build our map of 'other' CPUs */
  836         PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));
  837 
  838         /* restore the warmstart vector */
  839         *(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;
  840 
  841 #ifndef PC98
  842         outb(CMOS_REG, BIOS_RESET);
  843         outb(CMOS_DATA, mpbiosreason);
  844 #endif
  845 
  846         /* Undo V==P hack from above */
  847         for (i = 0; i < NKPT; i++)
  848                 PTD[i] = 0;
  849         pmap_invalidate_range(kernel_pmap, 0, NKPT * NBPDR - 1);
  850 
  851         /* number of APs actually started */
  852         return mp_naps;
  853 }
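
/*
 * Warm-boot note: BIOS_WARM (0x0a) in CMOS shutdown status register
 * 0x0f (BIOS_RESET) asks the BIOS to skip POST after a reset and jump
 * through the real-mode warm-start vector at physical 0x467 (offset
 * word) / 0x469 (segment word).  start_all_aps() points that vector
 * at the trampoline so an AP coming out of reset via the BIOS lands
 * in the boot code; APs that honor the STARTUP IPI enter the same
 * trampoline directly.  Vector and CMOS byte are restored afterwards
 * so a normal reboot still performs its self-test.
 */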
  854 
  855 /*
  856  * load the 1st level AP boot code into base memory.
  857  */
  858 
  859 /* targets for relocation */
  860 extern void bigJump(void);
  861 extern void bootCodeSeg(void);
  862 extern void bootDataSeg(void);
  863 extern void MPentry(void);
  864 extern u_int MP_GDT;
  865 extern u_int mp_gdtbase;
  866 
  867 static void
  868 install_ap_tramp(void)
  869 {
  870         int     x;
  871         int     size = *(int *) ((u_long) & bootMP_size);
  872         vm_offset_t va = boot_address + KERNBASE;
  873         u_char *src = (u_char *) ((u_long) bootMP);
  874         u_char *dst = (u_char *) va;
  875         u_int   boot_base = (u_int) bootMP;
  876         u_int8_t *dst8;
  877         u_int16_t *dst16;
  878         u_int32_t *dst32;
  879 
  880         POSTCODE(INSTALL_AP_TRAMP_POST);
  881 
  882         KASSERT(size <= PAGE_SIZE,
  883             ("'size' does not fit into PAGE_SIZE, as expected"));
  884         pmap_kenter(va, boot_address);
  885         pmap_invalidate_page(kernel_pmap, va);
  886         for (x = 0; x < size; ++x)
  887                 *dst++ = *src++;
  888 
  889         /*
  890          * Modify addresses in the code we just moved to basemem.
  891          * Unfortunately we need fairly detailed info about mpboot.s for
  892          * this to work; changes to mpboot.s might require changes here.
  893          */
  894 
  895         /* boot code is located in KERNEL space */
  896         dst = (u_char *) va;
  897 
  898         /* modify the lgdt arg */
  899         dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base));
  900         *dst32 = boot_address + ((u_int) & MP_GDT - boot_base);
  901 
  902         /* modify the ljmp target for MPentry() */
  903         dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1);
  904         *dst32 = ((u_int) MPentry - KERNBASE);
  905 
  906         /* modify the target for boot code segment */
  907         dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base));
  908         dst8 = (u_int8_t *) (dst16 + 1);
  909         *dst16 = (u_int) boot_address & 0xffff;
  910         *dst8 = ((u_int) boot_address >> 16) & 0xff;
  911 
  912         /* modify the target for boot data segment */
  913         dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base));
  914         dst8 = (u_int8_t *) (dst16 + 1);
  915         *dst16 = (u_int) boot_address & 0xffff;
  916         *dst8 = ((u_int) boot_address >> 16) & 0xff;
  917 }
  918 
  919 /*
  920  * This function starts the AP (application processor) identified
  921  * by the APIC ID 'apic_id'.  It does quite a "song and dance"
  922  * to accomplish this.  This is necessary because of the nuances
  923  * of the different hardware we might encounter.  It isn't pretty,
  924  * but it seems to work.
  925  */
  926 static int
  927 start_ap(int apic_id)
  928 {
  929         int vector, ms;
  930         int cpus;
  931 
  932         POSTCODE(START_AP_POST);
  933 
  934         /* calculate the vector */
  935         vector = (boot_address >> 12) & 0xff;
  936 
  937         /* used as a watchpoint to signal AP startup */
  938         cpus = mp_naps;
  939 
  940         /*
  941          * First we do an INIT IPI.  This IPI might run, resetting and
  942          * restarting the target CPU, OR it might be latched (the P5
  943          * bug), leaving the CPU waiting for a STARTUP IPI, OR it might
  944          * simply be ignored.
  945          */
  946 
  947         /* do an INIT IPI: assert RESET */
  948         lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
  949             APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, apic_id);
  950 
  951         /* wait for pending status end */
  952         lapic_ipi_wait(-1);
  953 
  954         /* do an INIT IPI: deassert RESET */
  955         lapic_ipi_raw(APIC_DEST_ALLESELF | APIC_TRIGMOD_LEVEL |
  956             APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, 0);
  957 
  958         /* wait for pending status end */
  959         DELAY(10000);           /* wait ~10mS */
  960         lapic_ipi_wait(-1);
  961 
  962         /*
  963          * Next we do a STARTUP IPI.  If the previous INIT IPI is still
  964          * latched (the P5 bug), this first STARTUP terminates
  965          * immediately and the previously latched INIT IPI continues to
  966          * run on the target CPU.  Otherwise, the previous INIT IPI has
  967          * either already run or was ignored, and in both cases this
  968          * STARTUP IPI will run.
  969          */
  970 
  971         /* do a STARTUP IPI */
  972         lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
  973             APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
  974             vector, apic_id);
  975         lapic_ipi_wait(-1);
  976         DELAY(200);             /* wait ~200uS */
  977 
  978         /*
  979          * Finally we do a second STARTUP IPI.  It runs IF the previous
  980          * STARTUP IPI was cancelled by a latched INIT IPI; otherwise it
  981          * is ignored, as only ONE STARTUP IPI is recognized after a
  982          * hardware RESET or INIT IPI.
  983          */
  984 
  985         lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
  986             APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
  987             vector, apic_id);
  988         lapic_ipi_wait(-1);
  989         DELAY(200);             /* wait ~200uS */
  990 
  991         /* Wait up to 5 seconds for it to start. */
  992         for (ms = 0; ms < 5000; ms++) {
  993                 if (mp_naps > cpus)
  994                         return 1;       /* return SUCCESS */
  995                 DELAY(1000);
  996         }
  997         return 0;               /* return FAILURE */
  998 }
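
/*
 * Note on the vector computed in start_ap(): a STARTUP IPI makes the
 * target begin real-mode execution at physical address vector * 0x1000
 * (CS = vector << 8, IP = 0), which is why the trampoline must sit on
 * a page boundary below 1MB.  Worked example (hypothetical): with
 * boot_address == 0x9f000 the vector is 0x9f and the AP starts
 * executing at 9f00:0000.
 */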
  999 
 1000 #ifdef COUNT_XINVLTLB_HITS
 1001 u_int xhits_gbl[MAXCPU];
 1002 u_int xhits_pg[MAXCPU];
 1003 u_int xhits_rng[MAXCPU];
 1004 SYSCTL_NODE(_debug, OID_AUTO, xhits, CTLFLAG_RW, 0, "");
 1005 SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, global, CTLFLAG_RW, &xhits_gbl,
 1006     sizeof(xhits_gbl), "IU", "");
 1007 SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, page, CTLFLAG_RW, &xhits_pg,
 1008     sizeof(xhits_pg), "IU", "");
 1009 SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, range, CTLFLAG_RW, &xhits_rng,
 1010     sizeof(xhits_rng), "IU", "");
 1011 
 1012 u_int ipi_global;
 1013 u_int ipi_page;
 1014 u_int ipi_range;
 1015 u_int ipi_range_size;
 1016 SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_global, CTLFLAG_RW, &ipi_global, 0, "");
 1017 SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_page, CTLFLAG_RW, &ipi_page, 0, "");
 1018 SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_range, CTLFLAG_RW, &ipi_range, 0, "");
 1019 SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_range_size, CTLFLAG_RW, &ipi_range_size,
 1020     0, "");
 1021 
 1022 u_int ipi_masked_global;
 1023 u_int ipi_masked_page;
 1024 u_int ipi_masked_range;
 1025 u_int ipi_masked_range_size;
 1026 SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_global, CTLFLAG_RW,
 1027     &ipi_masked_global, 0, "");
 1028 SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_page, CTLFLAG_RW,
 1029     &ipi_masked_page, 0, "");
 1030 SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_range, CTLFLAG_RW,
 1031     &ipi_masked_range, 0, "");
 1032 SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_range_size, CTLFLAG_RW,
 1033     &ipi_masked_range_size, 0, "");
 1034 #endif /* COUNT_XINVLTLB_HITS */
 1035 
 1036 /*
 1037  * Flush the TLB on all other CPU's
 1038  */
 1039 static void
 1040 smp_tlb_shootdown(u_int vector, vm_offset_t addr1, vm_offset_t addr2)
 1041 {
 1042         u_int ncpu;
 1043 
 1044         ncpu = mp_ncpus - 1;    /* does not shoot down self */
 1045         if (ncpu < 1)
 1046                 return;         /* no other cpus */
 1047         if (!(read_eflags() & PSL_I))
 1048                 panic("%s: interrupts disabled", __func__);
 1049         mtx_lock_spin(&smp_ipi_mtx);
 1050         smp_tlb_addr1 = addr1;
 1051         smp_tlb_addr2 = addr2;
 1052         atomic_store_rel_int(&smp_tlb_wait, 0);
 1053         ipi_all_but_self(vector);
 1054         while (smp_tlb_wait < ncpu)
 1055                 ia32_pause();
 1056         mtx_unlock_spin(&smp_ipi_mtx);
 1057 }
 1058 
 1059 static void
 1060 smp_targeted_tlb_shootdown(u_int mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
 1061 {
 1062         int ncpu, othercpus;
 1063 
 1064         othercpus = mp_ncpus - 1;
 1065         if (mask == (u_int)-1) {
 1066                 ncpu = othercpus;
 1067                 if (ncpu < 1)
 1068                         return;
 1069         } else {
 1070                 mask &= ~PCPU_GET(cpumask);
 1071                 if (mask == 0)
 1072                         return;
 1073                 ncpu = bitcount32(mask);
 1074                 if (ncpu > othercpus) {
 1075                         /* XXX this should be a panic offence */
 1076                         printf("SMP: tlb shootdown to %d other cpus (only have %d)\n",
 1077                             ncpu, othercpus);
 1078                         ncpu = othercpus;
 1079                 }
 1080                 /* XXX should be a panic, implied by mask == 0 above */
 1081                 if (ncpu < 1)
 1082                         return;
 1083         }
 1084         if (!(read_eflags() & PSL_I))
 1085                 panic("%s: interrupts disabled", __func__);
 1086         mtx_lock_spin(&smp_ipi_mtx);
 1087         smp_tlb_addr1 = addr1;
 1088         smp_tlb_addr2 = addr2;
 1089         atomic_store_rel_int(&smp_tlb_wait, 0);
 1090         if (mask == (u_int)-1)
 1091                 ipi_all_but_self(vector);
 1092         else
 1093                 ipi_selected(mask, vector);
 1094         while (smp_tlb_wait < ncpu)
 1095                 ia32_pause();
 1096         mtx_unlock_spin(&smp_ipi_mtx);
 1097 }
 1098 
 1099 void
 1100 smp_cache_flush(void)
 1101 {
 1102 
 1103         if (smp_started)
 1104                 smp_tlb_shootdown(IPI_INVLCACHE, 0, 0);
 1105 }
 1106 
 1107 void
 1108 smp_invltlb(void)
 1109 {
 1110 
 1111         if (smp_started) {
 1112                 smp_tlb_shootdown(IPI_INVLTLB, 0, 0);
 1113 #ifdef COUNT_XINVLTLB_HITS
 1114                 ipi_global++;
 1115 #endif
 1116         }
 1117 }
 1118 
 1119 void
 1120 smp_invlpg(vm_offset_t addr)
 1121 {
 1122 
 1123         if (smp_started) {
 1124                 smp_tlb_shootdown(IPI_INVLPG, addr, 0);
 1125 #ifdef COUNT_XINVLTLB_HITS
 1126                 ipi_page++;
 1127 #endif
 1128         }
 1129 }
 1130 
 1131 void
 1132 smp_invlpg_range(vm_offset_t addr1, vm_offset_t addr2)
 1133 {
 1134 
 1135         if (smp_started) {
 1136                 smp_tlb_shootdown(IPI_INVLRNG, addr1, addr2);
 1137 #ifdef COUNT_XINVLTLB_HITS
 1138                 ipi_range++;
 1139                 ipi_range_size += (addr2 - addr1) / PAGE_SIZE;
 1140 #endif
 1141         }
 1142 }
 1143 
 1144 void
 1145 smp_masked_invltlb(u_int mask)
 1146 {
 1147 
 1148         if (smp_started) {
 1149                 smp_targeted_tlb_shootdown(mask, IPI_INVLTLB, 0, 0);
 1150 #ifdef COUNT_XINVLTLB_HITS
 1151                 ipi_masked_global++;
 1152 #endif
 1153         }
 1154 }
 1155 
 1156 void
 1157 smp_masked_invlpg(u_int mask, vm_offset_t addr)
 1158 {
 1159 
 1160         if (smp_started) {
 1161                 smp_targeted_tlb_shootdown(mask, IPI_INVLPG, addr, 0);
 1162 #ifdef COUNT_XINVLTLB_HITS
 1163                 ipi_masked_page++;
 1164 #endif
 1165         }
 1166 }
 1167 
 1168 void
 1169 smp_masked_invlpg_range(u_int mask, vm_offset_t addr1, vm_offset_t addr2)
 1170 {
 1171 
 1172         if (smp_started) {
 1173                 smp_targeted_tlb_shootdown(mask, IPI_INVLRNG, addr1, addr2);
 1174 #ifdef COUNT_XINVLTLB_HITS
 1175                 ipi_masked_range++;
 1176                 ipi_masked_range_size += (addr2 - addr1) / PAGE_SIZE;
 1177 #endif
 1178         }
 1179 }
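
/*
 * A minimal sketch (not compiled, hence the #if 0; the function name
 * is hypothetical, though pm_active and the wrappers are real) of how
 * a pmap-level caller typically drives the shootdown wrappers above:
 * invalidate the local TLB, then ship the same request to the other
 * CPUs on which the pmap is active.
 */
#if 0
static void
example_invalidate_page(pmap_t pmap, vm_offset_t va)
{

	if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
		invlpg(va);		/* flush the local entry */
		smp_invlpg(va);		/* everyone else, via IPI_INVLPG */
	} else {
		if (pmap->pm_active & PCPU_GET(cpumask))
			invlpg(va);
		smp_masked_invlpg(pmap->pm_active & PCPU_GET(other_cpus), va);
	}
}
#endif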
 1180 
 1181 
 1182 void
 1183 ipi_bitmap_handler(struct clockframe frame)
 1184 {
 1185         int cpu = PCPU_GET(cpuid);
 1186         u_int ipi_bitmap;
 1187 
 1188         ipi_bitmap = atomic_readandclear_int(&cpu_ipi_pending[cpu]);
 1189 
 1190 #ifdef IPI_PREEMPTION
 1191         if (ipi_bitmap & (1 << IPI_PREEMPT)) {
 1192 #ifdef COUNT_IPIS
 1193                 (*ipi_preempt_counts[cpu])++;
 1194 #endif
 1195                 mtx_lock_spin(&sched_lock);
 1196                 /* Don't preempt the idle thread */
 1197                 if (curthread != PCPU_GET(idlethread)) {
 1198                         struct thread *running_thread = curthread;
 1199                         if (running_thread->td_critnest > 1) 
 1200                                 running_thread->td_owepreempt = 1;
 1201                         else            
 1202                                 mi_switch(SW_INVOL | SW_PREEMPT, NULL);
 1203                 }
 1204                 mtx_unlock_spin(&sched_lock);
 1205         }
 1206 #endif
 1207 
 1208         if (ipi_bitmap & (1 << IPI_AST)) {
 1209 #ifdef COUNT_IPIS
 1210                 (*ipi_ast_counts[cpu])++;
 1211 #endif
 1212                 /* Nothing to do for AST */
 1213         }
 1214 }
 1215 
 1216 /*
 1217  * Send an IPI to a set of CPUs.
 1218  */
 1219 void
 1220 ipi_selected(u_int32_t cpus, u_int ipi)
 1221 {
 1222         int cpu;
 1223         u_int bitmap = 0;
 1224         u_int old_pending;
 1225         u_int new_pending;
 1226 
 1227         if (IPI_IS_BITMAPED(ipi)) { 
 1228                 bitmap = 1 << ipi;
 1229                 ipi = IPI_BITMAP_VECTOR;
 1230         }
 1231 
 1232         CTR3(KTR_SMP, "%s: cpus: %x ipi: %x", __func__, cpus, ipi);
 1233         while ((cpu = ffs(cpus)) != 0) {
 1234                 cpu--;
 1235                 cpus &= ~(1 << cpu);
 1236 
 1237                 KASSERT(cpu_apic_ids[cpu] != -1,
 1238                     ("IPI to non-existent CPU %d", cpu));
 1239 
 1240                 if (bitmap) {
 1241                         do {
 1242                                 old_pending = cpu_ipi_pending[cpu];
 1243                                 new_pending = old_pending | bitmap;
 1244                         } while (!atomic_cmpset_int(&cpu_ipi_pending[cpu], old_pending, new_pending));
 1245 
 1246                         if (old_pending)
 1247                                 continue;
 1248                 }
 1249 
 1250                 lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]);
 1251         }
 1252 
 1253 }
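
/*
 * Note on the atomic_cmpset_int() loop above: bitmapped IPIs are
 * coalesced.  The compare-and-set retries until this sender's bit is
 * merged into cpu_ipi_pending[cpu]; if old_pending was already
 * non-zero, the target still has an unserviced IPI_BITMAP_VECTOR
 * interrupt on the way, so no additional hardware IPI is sent.
 */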
 1254 
 1255 /*
 1256  * Send an IPI containing 'ipi' to all CPUs, including myself.
 1257  */
 1258 void
 1259 ipi_all(u_int ipi)
 1260 {
 1261 
 1262         CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
 1263         lapic_ipi_vectored(ipi, APIC_IPI_DEST_ALL);
 1264 }
 1265 
 1266 /*
 1267  * Send an IPI to all CPUs EXCEPT myself.
 1268  */
 1269 void
 1270 ipi_all_but_self(u_int ipi)
 1271 {
 1272 
 1273         CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
 1274         lapic_ipi_vectored(ipi, APIC_IPI_DEST_OTHERS);
 1275 }
 1276 
 1277 /*
 1278  * Send an IPI to myself.
 1279  */
 1280 void
 1281 ipi_self(u_int ipi)
 1282 {
 1283 
 1284         CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
 1285         lapic_ipi_vectored(ipi, APIC_IPI_DEST_SELF);
 1286 }
 1287 
 1288 #ifdef KDB_STOP_NMI
 1289 /*
 1290  * send NMI IPI to selected CPUs
 1291  */
 1292 
 1293 #define BEFORE_SPIN     1000000
 1294 
 1295 void
 1296 ipi_nmi_selected(u_int32_t cpus)
 1297 {
 1298 
 1299         int cpu;
 1300         register_t icrlo;
 1301 
 1302         icrlo = APIC_DELMODE_NMI | APIC_DESTMODE_PHY | APIC_LEVEL_ASSERT |
 1303             APIC_TRIGMOD_EDGE;
 1304         
 1305         CTR2(KTR_SMP, "%s: cpus: %x nmi", __func__, cpus);
 1306 
 1307 
 1308         atomic_set_int(&ipi_nmi_pending, cpus);
 1309 
 1310 
 1311         while ((cpu = ffs(cpus)) != 0) {
 1312                 cpu--;
 1313                 cpus &= ~(1 << cpu);
 1314 
 1315                 KASSERT(cpu_apic_ids[cpu] != -1,
 1316                     ("IPI NMI to non-existent CPU %d", cpu));
 1317                 
 1318                 /* Wait for an earlier IPI to finish. */
 1319                 if (!lapic_ipi_wait(BEFORE_SPIN))
 1320                         panic("ipi_nmi_selected: previous IPI has not cleared");
 1321 
 1322                 lapic_ipi_raw(icrlo, cpu_apic_ids[cpu]);
 1323         }
 1324 }
 1325 
 1326 
 1327 int
 1328 ipi_nmi_handler(void)
 1329 {
 1330         int cpu = PCPU_GET(cpuid);
 1331 
 1332         if (!(atomic_load_acq_int(&ipi_nmi_pending) & (1 << cpu)))
 1333                 return 1;
 1334 
 1335         atomic_clear_int(&ipi_nmi_pending, 1 << cpu);
 1336 
 1337         savectx(&stoppcbs[cpu]);
 1338 
 1339         /* Indicate that we are stopped */
 1340         atomic_set_int(&stopped_cpus, 1 << cpu);
 1341 
 1342 
 1343         /* Wait for restart */
 1344         while (!(atomic_load_acq_int(&started_cpus) & (1 << cpu)))
 1345                 ia32_pause();
 1346 
 1347         atomic_clear_int(&started_cpus, 1 << cpu);
 1348         atomic_clear_int(&stopped_cpus, 1 << cpu);
 1349 
 1350         if (cpu == 0 && cpustop_restartfunc != NULL)
 1351                 cpustop_restartfunc();
 1352 
 1353         return 0;
 1354 }
 1355      
 1356 #endif /* KDB_STOP_NMI */
 1357 
 1358 /*
 1359  * This is called once the rest of the system is up and running and we're
 1360  * ready to let the APs out of the pen.
 1361  */
 1362 static void
 1363 release_aps(void *dummy __unused)
 1364 {
 1365 
 1366         if (mp_ncpus == 1) 
 1367                 return;
 1368         mtx_lock_spin(&sched_lock);
 1369         atomic_store_rel_int(&aps_ready, 1);
 1370         while (smp_started == 0)
 1371                 ia32_pause();
 1372         mtx_unlock_spin(&sched_lock);
 1373 }
 1374 SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
 1375 
 1376 static int
 1377 sysctl_hlt_cpus(SYSCTL_HANDLER_ARGS)
 1378 {
 1379         u_int mask;
 1380         int error;
 1381 
 1382         mask = hlt_cpus_mask;
 1383         error = sysctl_handle_int(oidp, &mask, 0, req);
 1384         if (error || !req->newptr)
 1385                 return (error);
 1386 
 1387         if (logical_cpus_mask != 0 &&
 1388             (mask & logical_cpus_mask) == logical_cpus_mask)
 1389                 hlt_logical_cpus = 1;
 1390         else
 1391                 hlt_logical_cpus = 0;
 1392 
 1393         if (! hyperthreading_allowed)
 1394                 mask |= hyperthreading_cpus_mask;
 1395 
 1396         if ((mask & all_cpus) == all_cpus)
 1397                 mask &= ~(1<<0);
 1398         hlt_cpus_mask = mask;
 1399         return (error);
 1400 }
 1401 SYSCTL_PROC(_machdep, OID_AUTO, hlt_cpus, CTLTYPE_INT|CTLFLAG_RW,
 1402     0, 0, sysctl_hlt_cpus, "IU",
 1403     "Bitmap of CPUs to halt.  101 (binary) will halt CPUs 0 and 2.");
 1404 
 1405 static int
 1406 sysctl_hlt_logical_cpus(SYSCTL_HANDLER_ARGS)
 1407 {
 1408         int disable, error;
 1409 
 1410         disable = hlt_logical_cpus;
 1411         error = sysctl_handle_int(oidp, &disable, 0, req);
 1412         if (error || !req->newptr)
 1413                 return (error);
 1414 
 1415         if (disable)
 1416                 hlt_cpus_mask |= logical_cpus_mask;
 1417         else
 1418                 hlt_cpus_mask &= ~logical_cpus_mask;
 1419 
 1420         if (! hyperthreading_allowed)
 1421                 hlt_cpus_mask |= hyperthreading_cpus_mask;
 1422 
 1423         if ((hlt_cpus_mask & all_cpus) == all_cpus)
 1424                 hlt_cpus_mask &= ~(1<<0);
 1425 
 1426         hlt_logical_cpus = disable;
 1427         return (error);
 1428 }
 1429 
 1430 static int
 1431 sysctl_hyperthreading_allowed(SYSCTL_HANDLER_ARGS)
 1432 {
 1433         int allowed, error;
 1434 
 1435         allowed = hyperthreading_allowed;
 1436         error = sysctl_handle_int(oidp, &allowed, 0, req);
 1437         if (error || !req->newptr)
 1438                 return (error);
 1439 
 1440         if (allowed)
 1441                 hlt_cpus_mask &= ~hyperthreading_cpus_mask;
 1442         else
 1443                 hlt_cpus_mask |= hyperthreading_cpus_mask;
 1444 
 1445         if (logical_cpus_mask != 0 &&
 1446             (hlt_cpus_mask & logical_cpus_mask) == logical_cpus_mask)
 1447                 hlt_logical_cpus = 1;
 1448         else
 1449                 hlt_logical_cpus = 0;
 1450 
 1451         if ((hlt_cpus_mask & all_cpus) == all_cpus)
 1452                 hlt_cpus_mask &= ~(1<<0);
 1453 
 1454         hyperthreading_allowed = allowed;
 1455         return (error);
 1456 }
 1457 
 1458 static void
 1459 cpu_hlt_setup(void *dummy __unused)
 1460 {
 1461 
 1462         if (logical_cpus_mask != 0) {
 1463                 TUNABLE_INT_FETCH("machdep.hlt_logical_cpus",
 1464                     &hlt_logical_cpus);
 1465                 sysctl_ctx_init(&logical_cpu_clist);
 1466                 SYSCTL_ADD_PROC(&logical_cpu_clist,
 1467                     SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
 1468                     "hlt_logical_cpus", CTLTYPE_INT|CTLFLAG_RW, 0, 0,
 1469                     sysctl_hlt_logical_cpus, "IU", "");
 1470                 SYSCTL_ADD_UINT(&logical_cpu_clist,
 1471                     SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
 1472                     "logical_cpus_mask", CTLTYPE_INT|CTLFLAG_RD,
 1473                     &logical_cpus_mask, 0, "");
 1474 
 1475                 if (hlt_logical_cpus)
 1476                         hlt_cpus_mask |= logical_cpus_mask;
 1477 
 1478                 /*
 1479                  * If necessary for security purposes, force
 1480                  * hyperthreading off, regardless of the value
 1481                  * of hlt_logical_cpus.
 1482                  */
 1483                 if (hyperthreading_cpus_mask) {
 1484                         TUNABLE_INT_FETCH("machdep.hyperthreading_allowed",
 1485                             &hyperthreading_allowed);
 1486                         SYSCTL_ADD_PROC(&logical_cpu_clist,
 1487                             SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
 1488                             "hyperthreading_allowed", CTLTYPE_INT|CTLFLAG_RW,
 1489                             0, 0, sysctl_hyperthreading_allowed, "IU", "");
 1490                         if (! hyperthreading_allowed)
 1491                                 hlt_cpus_mask |= hyperthreading_cpus_mask;
 1492                 }
 1493         }
 1494 }
 1495 SYSINIT(cpu_hlt, SI_SUB_SMP, SI_ORDER_ANY, cpu_hlt_setup, NULL);
 1496 
 1497 int
 1498 mp_grab_cpu_hlt(void)
 1499 {
 1500         u_int mask = PCPU_GET(cpumask);
 1501 #ifdef MP_WATCHDOG
 1502         u_int cpuid = PCPU_GET(cpuid);
 1503 #endif
 1504         int retval;
 1505 
 1506 #ifdef MP_WATCHDOG
 1507         ap_watchdog(cpuid);
 1508 #endif
 1509 
 1510         retval = mask & hlt_cpus_mask;
 1511         while (mask & hlt_cpus_mask)
 1512                 __asm __volatile("sti; hlt" : : : "memory");
 1513         return (retval);
 1514 }
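
/*
 * The "sti; hlt" pair above is deliberate: STI enables interrupts
 * only after the following instruction completes, so a wakeup IPI
 * cannot be delivered between the two instructions and leave the CPU
 * halted after its wakeup has already arrived.
 */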
 1515 
 1516 #ifdef COUNT_IPIS
 1517 /*
 1518  * Setup interrupt counters for IPI handlers.
 1519  */
 1520 static void
 1521 mp_ipi_intrcnt(void *dummy)
 1522 {
 1523         char buf[64];
 1524         int i;
 1525 
 1526         for (i = 0; i <= mp_maxid; i++) {
 1527                 if (CPU_ABSENT(i))
 1528                         continue;
 1529                 snprintf(buf, sizeof(buf), "cpu%d: invltlb", i);
 1530                 intrcnt_add(buf, &ipi_invltlb_counts[i]);
 1531                 snprintf(buf, sizeof(buf), "cpu%d: invlrng", i);
 1532                 intrcnt_add(buf, &ipi_invlrng_counts[i]);
 1533                 snprintf(buf, sizeof(buf), "cpu%d: invlpg", i);
 1534                 intrcnt_add(buf, &ipi_invlpg_counts[i]);
 1535 #ifdef IPI_PREEMPTION
 1536                 snprintf(buf, sizeof(buf), "cpu%d: preempt", i);
 1537                 intrcnt_add(buf, &ipi_preempt_counts[i]);
 1538 #endif
 1539                 snprintf(buf, sizeof(buf), "cpu%d: ast", i);
 1540                 intrcnt_add(buf, &ipi_ast_counts[i]);
 1541                 snprintf(buf, sizeof(buf), "cpu%d: rendezvous", i);
 1542                 intrcnt_add(buf, &ipi_rendezvous_counts[i]);
 1543                 snprintf(buf, sizeof(buf), "cpu%d: lazypmap", i);
 1544                 intrcnt_add(buf, &ipi_lazypmap_counts[i]);
 1545         }               
 1546 }
 1547 SYSINIT(mp_ipi_intrcnt, SI_SUB_INTR, SI_ORDER_MIDDLE, mp_ipi_intrcnt, NULL);
 1548 #endif
