/*-
 * Copyright (c) 1996, by Steve Passe
 * Copyright (c) 2003, by Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/8.0/sys/amd64/amd64/mp_machdep.c 196198 2009-08-13 17:54:11Z attilio $");

#include "opt_cpu.h"
#include "opt_kstack_pages.h"
#include "opt_mp_watchdog.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#ifdef GPROF
#include <sys/gmon.h>
#endif
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <machine/apicreg.h>
#include <machine/clock.h>
#include <machine/cputypes.h>
#include <machine/cpufunc.h>
#include <machine/mca.h>
#include <machine/md_var.h>
#include <machine/mp_watchdog.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/tss.h>

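/*
 * Warm-boot plumbing: when the CMOS shutdown status byte (register
 * 0x0f) holds 0x0a ("warm start, jump through 40:67 without EOI"),
 * the BIOS re-enters through the real-mode vector stored in the BIOS
 * data area at physical 0x467 (offset) and 0x469 (segment).
 */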
#define WARMBOOT_TARGET		0
#define WARMBOOT_OFF		(KERNBASE + 0x0467)
#define WARMBOOT_SEG		(KERNBASE + 0x0469)

#define CMOS_REG		(0x70)
#define CMOS_DATA		(0x71)
#define BIOS_RESET		(0x0f)
#define BIOS_WARM		(0x0a)

/* lock region used by kernel profiling */
int	mcount_lock;

int	mp_naps;		/* # of Application Processors */
int	boot_cpu_id = -1;	/* designated BSP */

extern struct pcpu __pcpu[];

/* AP uses this during bootstrap.  Do not staticize. */
char *bootSTK;
static int bootAP;

/* Free these after use */
void *bootstacks[MAXCPU];

/* Temporary variables for init_secondary() */
char *doublefault_stack;
char *nmi_stack;
void *dpcpu;

/* Hotwire a 0->4MB V==P mapping */
extern pt_entry_t *KPTphys;

/* SMP page table page */
extern pt_entry_t *SMPpt;

struct pcb stoppcbs[MAXCPU];
struct xpcb *stopxpcbs = NULL;

/* Variables needed for SMP TLB shootdown. */
vm_offset_t smp_tlb_addr1;
vm_offset_t smp_tlb_addr2;
volatile int smp_tlb_wait;

extern inthand_t IDTVEC(fast_syscall), IDTVEC(fast_syscall32);

/*
 * Local data and functions.
 */

static u_int logical_cpus;
static volatile cpumask_t ipi_nmi_pending;

/* used to hold the APs until we are ready to release them */
static struct mtx ap_boot_mtx;

/* Set to 1 once we're ready to let the APs out of the pen. */
static volatile int aps_ready = 0;

/*
 * Store data from cpu_add() until later in the boot when we actually set up
 * the APs.
 */
struct cpu_info {
	int	cpu_present:1;
	int	cpu_bsp:1;
	int	cpu_disabled:1;
	int	cpu_hyperthread:1;
} static cpu_info[MAX_APIC_ID + 1];
int cpu_apic_ids[MAXCPU];
int apic_cpuids[MAX_APIC_ID + 1];

/* Holds pending bitmap-based IPIs per CPU */
static volatile u_int cpu_ipi_pending[MAXCPU];

static u_int boot_address;
static int cpu_logical;
static int cpu_cores;

static void	assign_cpu_ids(void);
static void	set_interrupt_apic_ids(void);
static int	start_all_aps(void);
static int	start_ap(int apic_id);
static void	release_aps(void *dummy);

static int hlt_logical_cpus;
static u_int hyperthreading_cpus;
static cpumask_t hyperthreading_cpus_mask;
static int hyperthreading_allowed = 1;
static struct sysctl_ctx_list logical_cpu_clist;
static u_int bootMP_size;

static void
mem_range_AP_init(void)
{
	if (mem_range_softc.mr_op && mem_range_softc.mr_op->initAP)
		mem_range_softc.mr_op->initAP(&mem_range_softc);
}

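/*
 * CPUID leaf 0xB (extended topology enumeration) describes the APIC ID
 * layout one level at a time: EAX[4:0] is the number of APIC ID bits
 * to shift away at this level, EBX[15:0] the number of logical
 * processors at it, and ECX[15:8] the level type (SMT or core).  The
 * probe below counts the usable CPUs whose upper APIC ID bits match
 * the BSP's at each level.
 */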
static void
topo_probe_0xb(void)
{
	int logical;
	int p[4];
	int bits;
	int type;
	int cnt;
	int i;
	int x;

	/* We only support two levels for now. */
	for (i = 0; i < 3; i++) {
		cpuid_count(0x0B, i, p);
		bits = p[0] & 0x1f;
		logical = p[1] &= 0xffff;
		type = (p[2] >> 8) & 0xff;
		if (type == 0 || logical == 0)
			break;
		for (cnt = 0, x = 0; x <= MAX_APIC_ID; x++) {
			if (!cpu_info[x].cpu_present ||
			    cpu_info[x].cpu_disabled)
				continue;
			if (x >> bits == boot_cpu_id >> bits)
				cnt++;
		}
		if (type == CPUID_TYPE_SMT)
			cpu_logical = cnt;
		else if (type == CPUID_TYPE_CORE)
			cpu_cores = cnt;
	}
	if (cpu_logical == 0)
		cpu_logical = 1;
	cpu_cores /= cpu_logical;
}

static void
topo_probe_0x4(void)
{
	u_int threads_per_cache, p[4];
	u_int htt, cmp;
	int i;

	htt = cmp = 1;
	/*
	 * If this CPU supports HTT or CMP then determine the
	 * number of physical/logical cores it contains.
	 */
	if (cpu_feature & CPUID_HTT)
		htt = (cpu_procinfo & CPUID_HTT_CORES) >> 16;
	if (cpu_vendor_id == CPU_VENDOR_AMD && (amd_feature2 & AMDID2_CMP))
		cmp = (cpu_procinfo2 & AMDID_CMP_CORES) + 1;
	else if (cpu_vendor_id == CPU_VENDOR_INTEL && (cpu_high >= 4)) {
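		/*
		 * CPUID leaf 4 (deterministic cache parameters):
		 * EAX[31:26] is the maximum number of processor cores
		 * per package, minus one.
		 */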
		cpuid_count(4, 0, p);
		if ((p[0] & 0x1f) != 0)
			cmp = ((p[0] >> 26) & 0x3f) + 1;
	}
	cpu_cores = cmp;
	cpu_logical = htt / cmp;

	/* Set up the initial logical CPUs info. */
	if (cpu_feature & CPUID_HTT)
		logical_cpus = (cpu_procinfo & CPUID_HTT_CORES) >> 16;

	/*
	 * Work out if hyperthreading is *really* enabled.  This
	 * is made really ugly by the fact that processors lie: Dual
	 * core processors claim to be hyperthreaded even when they're
	 * not, presumably because they want to be treated the same
	 * way as HTT with respect to per-cpu software licensing.
	 * At the time of writing (May 12, 2005) the only hyperthreaded
	 * cpus are from Intel, and Intel's dual-core processors can be
	 * identified via the "deterministic cache parameters" cpuid
	 * calls.
	 */
	/*
	 * First determine if this is an Intel processor which claims
	 * to have hyperthreading support.
	 */
	if ((cpu_feature & CPUID_HTT) && cpu_vendor_id == CPU_VENDOR_INTEL) {
		/*
		 * If the "deterministic cache parameters" cpuid calls
		 * are available, use them.
		 */
		if (cpu_high >= 4) {
			/* Ask the processor about the L1 cache. */
			for (i = 0; i < 1; i++) {
				cpuid_count(4, i, p);
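				/*
				 * EAX[25:14] is the maximum number of
				 * threads sharing this cache, minus one.
				 */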
				threads_per_cache = ((p[0] & 0x3ffc000) >> 14) + 1;
				if (hyperthreading_cpus < threads_per_cache)
					hyperthreading_cpus = threads_per_cache;
				if ((p[0] & 0x1f) == 0)
					break;
			}
		}

		/*
		 * If the deterministic cache parameters are not
		 * available, or if no caches were reported to exist,
		 * just accept what the HTT flag indicated.
		 */
		if (hyperthreading_cpus == 0)
			hyperthreading_cpus = logical_cpus;
	}
}

static void
topo_probe(void)
{
	static int cpu_topo_probed = 0;

	if (cpu_topo_probed)
		return;

	logical_cpus = logical_cpus_mask = 0;
	if (cpu_high >= 0xb)
		topo_probe_0xb();
	else if (cpu_high)
		topo_probe_0x4();
	if (cpu_cores == 0)
		cpu_cores = mp_ncpus > 0 ? mp_ncpus : 1;
	if (cpu_logical == 0)
		cpu_logical = 1;
	cpu_topo_probed = 1;
}

struct cpu_group *
cpu_topo(void)
{
	int cg_flags;

	/*
	 * Determine whether any threading flags are
	 * necessary.
	 */
	topo_probe();
	if (cpu_logical > 1 && hyperthreading_cpus)
		cg_flags = CG_FLAG_HTT;
	else if (cpu_logical > 1)
		cg_flags = CG_FLAG_SMT;
	else
		cg_flags = 0;
	if (mp_ncpus % (cpu_cores * cpu_logical) != 0) {
		printf("WARNING: Non-uniform processors.\n");
		printf("WARNING: Using suboptimal topology.\n");
		return (smp_topo_none());
	}
	/*
	 * No multi-core and no hyper-threading.
	 */
	if (cpu_logical * cpu_cores == 1)
		return (smp_topo_none());
	/*
	 * Only HTT, no multi-core.
	 */
	if (cpu_logical > 1 && cpu_cores == 1)
		return (smp_topo_1level(CG_SHARE_L1, cpu_logical, cg_flags));
	/*
	 * Only multi-core, no HTT.
	 */
	if (cpu_cores > 1 && cpu_logical == 1)
		return (smp_topo_1level(CG_SHARE_L2, cpu_cores, cg_flags));
	/*
	 * Both HTT and multi-core.
	 */
	return (smp_topo_2level(CG_SHARE_L2, cpu_cores,
	    CG_SHARE_L1, cpu_logical, cg_flags));
}

/*
 * Calculate usable address in base memory for AP trampoline code.
 */
u_int
mp_bootaddress(u_int basemem)
{

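	/*
	 * The trampoline runs in real mode, so it must live below 1MB:
	 * take the highest page of base memory, backing off one page
	 * if the tail of base memory is too small, and reserve the
	 * three page-table pages immediately below it.
	 */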
	bootMP_size = mptramp_end - mptramp_start;
	boot_address = trunc_page(basemem * 1024); /* round down to 4k boundary */
	if (((basemem * 1024) - boot_address) < bootMP_size)
		boot_address -= PAGE_SIZE;	/* not enough, lower by 4k */
	/* 3 levels of page table pages */
	mptramp_pagetables = boot_address - (PAGE_SIZE * 3);

	return mptramp_pagetables;
}

void
cpu_add(u_int apic_id, char boot_cpu)
{

	if (apic_id > MAX_APIC_ID) {
		panic("SMP: APIC ID %d too high", apic_id);
		return;
	}
	KASSERT(cpu_info[apic_id].cpu_present == 0, ("CPU %d added twice",
	    apic_id));
	cpu_info[apic_id].cpu_present = 1;
	if (boot_cpu) {
		KASSERT(boot_cpu_id == -1,
		    ("CPU %d claims to be BSP, but CPU %d already is", apic_id,
		    boot_cpu_id));
		boot_cpu_id = apic_id;
		cpu_info[apic_id].cpu_bsp = 1;
	}
	if (mp_ncpus < MAXCPU) {
		mp_ncpus++;
		mp_maxid = mp_ncpus - 1;
	}
	if (bootverbose)
		printf("SMP: Added CPU %d (%s)\n", apic_id, boot_cpu ? "BSP" :
		    "AP");
}

void
cpu_mp_setmaxid(void)
{

	/*
	 * mp_maxid should be already set by calls to cpu_add().
	 * Just sanity check its value here.
	 */
	if (mp_ncpus == 0)
		KASSERT(mp_maxid == 0,
		    ("%s: mp_ncpus is zero, but mp_maxid is not", __func__));
	else if (mp_ncpus == 1)
		mp_maxid = 0;
	else
		KASSERT(mp_maxid >= mp_ncpus - 1,
		    ("%s: counters out of sync: max %d, count %d", __func__,
		    mp_maxid, mp_ncpus));
}

int
cpu_mp_probe(void)
{

	/*
	 * Always record BSP in CPU map so that the mbuf init code works
	 * correctly.
	 */
	all_cpus = 1;
	if (mp_ncpus == 0) {
		/*
		 * No CPUs were found, so this must be a UP system.  Set up
		 * the variables to represent a system with a single CPU
		 * with an id of 0.
		 */
		mp_ncpus = 1;
		return (0);
	}

	/* At least one CPU was found. */
	if (mp_ncpus == 1) {
		/*
		 * One CPU was found, so this must be a UP system with
		 * an I/O APIC.
		 */
		mp_maxid = 0;
		return (0);
	}

	/* At least two CPUs were found. */
	return (1);
}

/*
 * Initialize the IPI handlers and start up the APs.
 */
void
cpu_mp_start(void)
{
	int i;

	/* Initialize the logical ID to APIC ID table. */
	for (i = 0; i < MAXCPU; i++) {
		cpu_apic_ids[i] = -1;
		cpu_ipi_pending[i] = 0;
	}

	/* Install an inter-CPU IPI for TLB invalidation */
	setidt(IPI_INVLTLB, IDTVEC(invltlb), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IPI_INVLPG, IDTVEC(invlpg), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IPI_INVLRNG, IDTVEC(invlrng), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for cache invalidation. */
	setidt(IPI_INVLCACHE, IDTVEC(invlcache), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for all-CPU rendezvous */
	setidt(IPI_RENDEZVOUS, IDTVEC(rendezvous), SDT_SYSIGT, SEL_KPL, 0);

	/* Install generic inter-CPU IPI handler */
	setidt(IPI_BITMAP_VECTOR, IDTVEC(ipi_intr_bitmap_handler),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for CPU stop/restart */
	setidt(IPI_STOP, IDTVEC(cpustop), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for CPU suspend/resume */
	setidt(IPI_SUSPEND, IDTVEC(cpususpend), SDT_SYSIGT, SEL_KPL, 0);

	/* Set boot_cpu_id if needed. */
	if (boot_cpu_id == -1) {
		boot_cpu_id = PCPU_GET(apic_id);
		cpu_info[boot_cpu_id].cpu_bsp = 1;
	} else
		KASSERT(boot_cpu_id == PCPU_GET(apic_id),
		    ("BSP's APIC ID doesn't match boot_cpu_id"));

	/* Probe logical/physical core configuration. */
	topo_probe();

	assign_cpu_ids();

	/* Start each Application Processor */
	start_all_aps();

	set_interrupt_apic_ids();
}

/*
 * Print various information about the SMP system hardware and setup.
 */
void
cpu_mp_announce(void)
{
	const char *hyperthread;
	int i;

	printf("FreeBSD/SMP: %d package(s) x %d core(s)",
	    mp_ncpus / (cpu_cores * cpu_logical), cpu_cores);
	if (hyperthreading_cpus > 1)
		printf(" x %d HTT threads", cpu_logical);
	else if (cpu_logical > 1)
		printf(" x %d SMT threads", cpu_logical);
	printf("\n");

	/* List active CPUs first. */
	printf(" cpu0 (BSP): APIC ID: %2d\n", boot_cpu_id);
	for (i = 1; i < mp_ncpus; i++) {
		if (cpu_info[cpu_apic_ids[i]].cpu_hyperthread)
			hyperthread = "/HT";
		else
			hyperthread = "";
		printf(" cpu%d (AP%s): APIC ID: %2d\n", i, hyperthread,
		    cpu_apic_ids[i]);
	}

	/* List disabled CPUs last. */
	for (i = 0; i <= MAX_APIC_ID; i++) {
		if (!cpu_info[i].cpu_present || !cpu_info[i].cpu_disabled)
			continue;
		if (cpu_info[i].cpu_hyperthread)
			hyperthread = "/HT";
		else
			hyperthread = "";
		printf(" cpu (AP%s): APIC ID: %2d (disabled)\n", hyperthread,
		    i);
	}
}

/*
 * AP CPUs call this to initialize themselves.
 */
void
init_secondary(void)
{
	struct pcpu *pc;
	struct nmi_pcpu *np;
	u_int64_t msr, cr0;
	int cpu, gsel_tss, x;
	struct region_descriptor ap_gdt;

	/* Set by the startup code for us to use */
	cpu = bootAP;

	/* Init tss */
	common_tss[cpu] = common_tss[0];
	common_tss[cpu].tss_rsp0 = 0; /* not used until after switch */
	common_tss[cpu].tss_iobase = sizeof(struct amd64tss) +
	    IOPAGES * PAGE_SIZE;
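
	/* The double fault stack runs on IST1. */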
	common_tss[cpu].tss_ist1 = (long)&doublefault_stack[PAGE_SIZE];

	/* The NMI stack runs on IST2. */
	np = ((struct nmi_pcpu *) &nmi_stack[PAGE_SIZE]) - 1;
	common_tss[cpu].tss_ist2 = (long) np;

	/* Prepare private GDT */
	gdt_segs[GPROC0_SEL].ssd_base = (long) &common_tss[cpu];
	for (x = 0; x < NGDT; x++) {
		if (x != GPROC0_SEL && x != (GPROC0_SEL + 1) &&
		    x != GUSERLDT_SEL && x != (GUSERLDT_SEL + 1))
			ssdtosd(&gdt_segs[x], &gdt[NGDT * cpu + x]);
	}
	ssdtosyssd(&gdt_segs[GPROC0_SEL],
	    (struct system_segment_descriptor *)&gdt[NGDT * cpu + GPROC0_SEL]);
	ap_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	ap_gdt.rd_base = (long) &gdt[NGDT * cpu];
	lgdt(&ap_gdt);			/* does magic intra-segment return */

	/* Get per-cpu data */
	pc = &__pcpu[cpu];

	/* prime data page for it to use */
	pcpu_init(pc, cpu, sizeof(struct pcpu));
	dpcpu_init(dpcpu, cpu);
	pc->pc_apic_id = cpu_apic_ids[cpu];
	pc->pc_prvspace = pc;
	pc->pc_curthread = 0;
	pc->pc_tssp = &common_tss[cpu];
	pc->pc_commontssp = &common_tss[cpu];
	pc->pc_rsp0 = 0;
	pc->pc_tss = (struct system_segment_descriptor *)&gdt[NGDT * cpu +
	    GPROC0_SEL];
	pc->pc_fs32p = &gdt[NGDT * cpu + GUFS32_SEL];
	pc->pc_gs32p = &gdt[NGDT * cpu + GUGS32_SEL];
	pc->pc_ldt = (struct system_segment_descriptor *)&gdt[NGDT * cpu +
	    GUSERLDT_SEL];

	/* Save the per-cpu pointer for use by the NMI handler. */
	np->np_pcpu = (register_t) pc;

	wrmsr(MSR_FSBASE, 0);		/* User value */
	wrmsr(MSR_GSBASE, (u_int64_t)pc);
	wrmsr(MSR_KGSBASE, (u_int64_t)pc); /* XXX User value while we're in the kernel */

	lidt(&r_idt);

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	ltr(gsel_tss);

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);

	/* Set up the fast syscall stuff */
	msr = rdmsr(MSR_EFER) | EFER_SCE;
	wrmsr(MSR_EFER, msr);
	wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall));
	wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
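	/*
	 * MSR_STAR: bits 47:32 give the selector pair loaded by SYSCALL
	 * (kernel CS, with SS at +8); bits 63:48 give the base for the
	 * pair loaded by SYSRET (32-bit user CS, SS at +8, 64-bit CS at
	 * +16).
	 */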
	msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
	    ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
	wrmsr(MSR_STAR, msr);
	wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D);

	/* Disable local APIC just to be sure. */
	lapic_disable();

	/* signal our startup to the BSP. */
	mp_naps++;

	/* Spin until the BSP releases the APs. */
	while (!aps_ready)
		ia32_pause();

	/* Initialize the PAT MSR. */
	pmap_init_pat();

	/* set up CPU registers and state */
	cpu_setregs();

	/* set up SSE/NX registers */
	initializecpu();

	/* set up FPU state on the AP */
	fpuinit();

	/* A quick check from sanity claus */
	if (PCPU_GET(apic_id) != lapic_id()) {
		printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
		printf("SMP: actual apic_id = %d\n", lapic_id());
		printf("SMP: correct apic_id = %d\n", PCPU_GET(apic_id));
		panic("cpuid mismatch! boom!!");
	}

	/* Initialize curthread. */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	PCPU_SET(curthread, PCPU_GET(idlethread));

	mca_init();

	mtx_lock_spin(&ap_boot_mtx);

	/* Init local apic for irq's */
	lapic_setup(1);

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();

	smp_cpus++;

	CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
	printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));

	/* Determine if we are a logical CPU. */
	if (logical_cpus > 1 && PCPU_GET(apic_id) % logical_cpus != 0)
		logical_cpus_mask |= PCPU_GET(cpumask);

	/* Determine if we are a hyperthread. */
	if (hyperthreading_cpus > 1 &&
	    PCPU_GET(apic_id) % hyperthreading_cpus != 0)
		hyperthreading_cpus_mask |= PCPU_GET(cpumask);

	/* Build our map of 'other' CPUs. */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

	if (bootverbose)
		lapic_dump("AP");

	if (smp_cpus == mp_ncpus) {
		/* enable IPIs, TLB shootdown, freezes etc */
		atomic_store_rel_int(&smp_started, 1);
		smp_active = 1;	 /* historic */
	}

	/*
	 * Enable the global pages TLB extension.
	 * This also implicitly flushes the TLB.
	 */
	load_cr4(rcr4() | CR4_PGE);
	load_ds(_udatasel);
	load_es(_udatasel);
	load_fs(_ufssel);
	mtx_unlock_spin(&ap_boot_mtx);

	/* wait until all the APs are up */
	while (smp_started == 0)
		ia32_pause();

	sched_throw(NULL);

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}

/*******************************************************************
 * local functions and data
 */

/*
 * We tell the I/O APIC code about all the CPUs we want to receive
 * interrupts.  If we don't want certain CPUs to receive IRQs we
 * simply don't tell the I/O APIC code about them in this function.
 * We also don't tell it about the BSP, since it tells itself about
 * the BSP internally so that it works with UP kernels and on UP
 * machines.
 */
static void
set_interrupt_apic_ids(void)
{
	u_int i, apic_id;

	for (i = 0; i < MAXCPU; i++) {
		apic_id = cpu_apic_ids[i];
		if (apic_id == -1)
			continue;
		if (cpu_info[apic_id].cpu_bsp)
			continue;
		if (cpu_info[apic_id].cpu_disabled)
			continue;

		/* Don't let hyperthreads service interrupts. */
		if (hyperthreading_cpus > 1 &&
		    apic_id % hyperthreading_cpus != 0)
			continue;

		intr_add_cpu(i);
	}
}

/*
 * Assign logical CPU IDs to local APICs.
 */
static void
assign_cpu_ids(void)
{
	u_int i;

	TUNABLE_INT_FETCH("machdep.hyperthreading_allowed",
	    &hyperthreading_allowed);

	/* Check for explicitly disabled CPUs. */
	for (i = 0; i <= MAX_APIC_ID; i++) {
		if (!cpu_info[i].cpu_present || cpu_info[i].cpu_bsp)
			continue;

		if (hyperthreading_cpus > 1 && i % hyperthreading_cpus != 0) {
			cpu_info[i].cpu_hyperthread = 1;
#if defined(SCHED_ULE)
			/*
			 * Don't use HT CPU if it has been disabled by a
			 * tunable.
			 */
			if (hyperthreading_allowed == 0) {
				cpu_info[i].cpu_disabled = 1;
				continue;
			}
#endif
		}

		/* Don't use this CPU if it has been disabled by a tunable. */
		if (resource_disabled("lapic", i)) {
			cpu_info[i].cpu_disabled = 1;
			continue;
		}
	}

	/*
	 * Assign CPU IDs to local APIC IDs and disable any CPUs
	 * beyond MAXCPU.  CPU 0 is always assigned to the BSP.
	 *
	 * To minimize confusion for userland, we attempt to number
	 * CPUs such that all threads and cores in a package are
	 * grouped together.  For now we assume that the BSP is always
	 * the first thread in a package and just start adding APs
	 * starting with the BSP's APIC ID.
	 */
	mp_ncpus = 1;
	cpu_apic_ids[0] = boot_cpu_id;
	apic_cpuids[boot_cpu_id] = 0;
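	/* Walk the APIC ID space circularly, starting just above the BSP. */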
	for (i = boot_cpu_id + 1; i != boot_cpu_id;
	    i == MAX_APIC_ID ? i = 0 : i++) {
		if (!cpu_info[i].cpu_present || cpu_info[i].cpu_bsp ||
		    cpu_info[i].cpu_disabled)
			continue;

		if (mp_ncpus < MAXCPU) {
			cpu_apic_ids[mp_ncpus] = i;
			apic_cpuids[i] = mp_ncpus;
			mp_ncpus++;
		} else
			cpu_info[i].cpu_disabled = 1;
	}
	KASSERT(mp_maxid >= mp_ncpus - 1,
	    ("%s: counters out of sync: max %d, count %d", __func__, mp_maxid,
	    mp_ncpus));
}

/*
 * start each AP in our list
 */
static int
start_all_aps(void)
{
	vm_offset_t va = boot_address + KERNBASE;
	u_int64_t *pt4, *pt3, *pt2;
	u_int32_t mpbioswarmvec;
	int apic_id, cpu, i;
	u_char mpbiosreason;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	/* install the AP 1st level boot code */
	pmap_kenter(va, boot_address);
	pmap_invalidate_page(kernel_pmap, va);
	bcopy(mptramp_start, (void *)va, bootMP_size);

	/* Locate the page tables; they'll be below the trampoline */
	pt4 = (u_int64_t *)(uintptr_t)(mptramp_pagetables + KERNBASE);
	pt3 = pt4 + (PAGE_SIZE) / sizeof(u_int64_t);
	pt2 = pt3 + (PAGE_SIZE) / sizeof(u_int64_t);

	/* Create the initial 1GB replicated page tables */
	for (i = 0; i < 512; i++) {
		/* Each slot of the level 4 pages points to the same level 3 page */
		pt4[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + PAGE_SIZE);
		pt4[i] |= PG_V | PG_RW | PG_U;

		/* Each slot of the level 3 pages points to the same level 2 page */
		pt3[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + (2 * PAGE_SIZE));
		pt3[i] |= PG_V | PG_RW | PG_U;

		/* The level 2 page slots are mapped with 2MB pages for 1GB. */
		pt2[i] = i * (2 * 1024 * 1024);
		pt2[i] |= PG_V | PG_RW | PG_PS | PG_U;
	}
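
	/*
	 * Because every level 4 and level 3 slot points at the same
	 * next-level page, this 1GB identity window repeats throughout
	 * the address space, so the trampoline keeps running at its
	 * physical address both before and after paging is enabled.
	 */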

	/* save the current value of the warm-start vector */
	mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);

	/* setup a vector to our boot code */
	*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
	*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

	/* start each AP */
	for (cpu = 1; cpu < mp_ncpus; cpu++) {
		apic_id = cpu_apic_ids[cpu];

		/* allocate and set up an idle stack data page */
		bootstacks[cpu] = (void *)kmem_alloc(kernel_map, KSTACK_PAGES * PAGE_SIZE);
		doublefault_stack = (char *)kmem_alloc(kernel_map, PAGE_SIZE);
		nmi_stack = (char *)kmem_alloc(kernel_map, PAGE_SIZE);
		dpcpu = (void *)kmem_alloc(kernel_map, DPCPU_SIZE);

		bootSTK = (char *)bootstacks[cpu] + KSTACK_PAGES * PAGE_SIZE - 8;
		bootAP = cpu;

		/* attempt to start the Application Processor */
		if (!start_ap(apic_id)) {
			/* restore the warmstart vector */
			*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;
			panic("AP #%d (PHY# %d) failed!", cpu, apic_id);
		}

		all_cpus |= (1 << cpu);		/* record AP in CPU map */
	}

	/* build our map of 'other' CPUs */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

	/* restore the warmstart vector */
	*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;

	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);

	/* number of APs actually started */
	return mp_naps;
}

/*
 * This function starts the AP (application processor) identified
 * by the APIC ID 'physicalCpu'.  It does quite a "song and dance"
 * to accomplish this.  This is necessary because of the nuances
 * of the different hardware we might encounter.  It isn't pretty,
 * but it seems to work.
 */
static int
start_ap(int apic_id)
{
	int vector, ms;
	int cpus;

	/* calculate the vector */
	vector = (boot_address >> 12) & 0xff;
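	/* The AP begins execution in real mode at physical address vector << 12. */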

	/* used as a watchpoint to signal AP startup */
	cpus = mp_naps;

	/*
	 * first we do an INIT IPI: this INIT IPI might be run, resetting
	 * and running the target CPU.  OR this INIT IPI might be latched
	 * (P5 bug), with the CPU waiting for the STARTUP IPI.  OR this
	 * INIT IPI might be ignored.
	 */

	/* do an INIT IPI: assert RESET */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, apic_id);

	/* wait for pending status end */
	lapic_ipi_wait(-1);

	/* do an INIT IPI: deassert RESET */
	lapic_ipi_raw(APIC_DEST_ALLESELF | APIC_TRIGMOD_LEVEL |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, 0);

	/* wait for pending status end */
	DELAY(10000);		/* wait ~10ms */
	lapic_ipi_wait(-1);

	/*
	 * next we do a STARTUP IPI: the previous INIT IPI might still be
	 * latched (P5 bug); this 1st STARTUP would then terminate
	 * immediately, and the previously started INIT IPI would continue.
	 * OR the previous INIT IPI has already run, and this STARTUP IPI
	 * will run.  OR the previous INIT IPI was ignored, and this
	 * STARTUP IPI will run.
	 */

	/* do a STARTUP IPI */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
	    vector, apic_id);
	lapic_ipi_wait(-1);
	DELAY(200);		/* wait ~200us */

	/*
	 * finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run
	 * IF the previous STARTUP IPI was cancelled by a latched INIT IPI.
	 * OR this STARTUP IPI will be ignored, as only ONE STARTUP IPI is
	 * recognized after hardware RESET or INIT IPI.
	 */

	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
	    vector, apic_id);
	lapic_ipi_wait(-1);
	DELAY(200);		/* wait ~200us */

	/* Wait up to 5 seconds for it to start. */
	for (ms = 0; ms < 5000; ms++) {
		if (mp_naps > cpus)
			return 1;	/* return SUCCESS */
		DELAY(1000);
	}
	return 0;		/* return FAILURE */
}

/*
 * Flush the TLB on all other CPUs
 */
static void
smp_tlb_shootdown(u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
	u_int ncpu;

	ncpu = mp_ncpus - 1;	/* does not shoot down self */
	if (ncpu < 1)
		return;		/* no other cpus */
	if (!(read_rflags() & PSL_I))
		panic("%s: interrupts disabled", __func__);
	mtx_lock_spin(&smp_ipi_mtx);
	smp_tlb_addr1 = addr1;
	smp_tlb_addr2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	ipi_all_but_self(vector);
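	/*
	 * Each target CPU's low-level shootdown handler is expected to
	 * bump smp_tlb_wait as its acknowledgement; spin until every
	 * other CPU has checked in.
	 */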
	while (smp_tlb_wait < ncpu)
		ia32_pause();
	mtx_unlock_spin(&smp_ipi_mtx);
}

static void
smp_targeted_tlb_shootdown(cpumask_t mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
	int ncpu, othercpus;

	othercpus = mp_ncpus - 1;
	if (mask == (u_int)-1) {
		ncpu = othercpus;
		if (ncpu < 1)
			return;
	} else {
		mask &= ~PCPU_GET(cpumask);
		if (mask == 0)
			return;
		ncpu = bitcount32(mask);
		if (ncpu > othercpus) {
			/* XXX this should be a panic offence */
			printf("SMP: tlb shootdown to %d other cpus (only have %d)\n",
			    ncpu, othercpus);
			ncpu = othercpus;
		}
		/* XXX should be a panic, implied by mask == 0 above */
		if (ncpu < 1)
			return;
	}
	if (!(read_rflags() & PSL_I))
		panic("%s: interrupts disabled", __func__);
	mtx_lock_spin(&smp_ipi_mtx);
	smp_tlb_addr1 = addr1;
	smp_tlb_addr2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	if (mask == (u_int)-1)
		ipi_all_but_self(vector);
	else
		ipi_selected(mask, vector);
	while (smp_tlb_wait < ncpu)
		ia32_pause();
	mtx_unlock_spin(&smp_ipi_mtx);
}

void
smp_cache_flush(void)
{

	if (smp_started)
		smp_tlb_shootdown(IPI_INVLCACHE, 0, 0);
}

void
smp_invltlb(void)
{

	if (smp_started) {
		smp_tlb_shootdown(IPI_INVLTLB, 0, 0);
	}
}

void
smp_invlpg(vm_offset_t addr)
{

	if (smp_started)
		smp_tlb_shootdown(IPI_INVLPG, addr, 0);
}

void
smp_invlpg_range(vm_offset_t addr1, vm_offset_t addr2)
{

	if (smp_started) {
		smp_tlb_shootdown(IPI_INVLRNG, addr1, addr2);
	}
}

void
smp_masked_invltlb(cpumask_t mask)
{

	if (smp_started) {
		smp_targeted_tlb_shootdown(mask, IPI_INVLTLB, 0, 0);
	}
}

void
smp_masked_invlpg(cpumask_t mask, vm_offset_t addr)
{

	if (smp_started) {
		smp_targeted_tlb_shootdown(mask, IPI_INVLPG, addr, 0);
	}
}

void
smp_masked_invlpg_range(cpumask_t mask, vm_offset_t addr1, vm_offset_t addr2)
{

	if (smp_started) {
		smp_targeted_tlb_shootdown(mask, IPI_INVLRNG, addr1, addr2);
	}
}

void
ipi_bitmap_handler(struct trapframe frame)
{
	int cpu = PCPU_GET(cpuid);
	u_int ipi_bitmap;

	ipi_bitmap = atomic_readandclear_int(&cpu_ipi_pending[cpu]);

	if (ipi_bitmap & (1 << IPI_PREEMPT))
		sched_preempt(curthread);

	/* Nothing to do for AST */

	if (ipi_bitmap & (1 << IPI_HARDCLOCK))
		hardclockintr(&frame);

	if (ipi_bitmap & (1 << IPI_STATCLOCK))
		statclockintr(&frame);

	if (ipi_bitmap & (1 << IPI_PROFCLOCK))
		profclockintr(&frame);
}

/*
 * Send an IPI to a set of CPUs.
 */
void
ipi_selected(cpumask_t cpus, u_int ipi)
{
	int cpu;
	u_int bitmap = 0;
	u_int old_pending;
	u_int new_pending;

	if (IPI_IS_BITMAPED(ipi)) {
		bitmap = 1 << ipi;
		ipi = IPI_BITMAP_VECTOR;
	}

	/*
	 * IPI_STOP_HARD maps to an NMI and the trap handler needs a bit
	 * of help in order to understand what the source is.  Set the
	 * mask of receiving CPUs for this purpose.
	 */
	if (ipi == IPI_STOP_HARD)
		atomic_set_int(&ipi_nmi_pending, cpus);

	CTR3(KTR_SMP, "%s: cpus: %x ipi: %x", __func__, cpus, ipi);
	while ((cpu = ffs(cpus)) != 0) {
		cpu--;
		cpus &= ~(1 << cpu);

		KASSERT(cpu_apic_ids[cpu] != -1,
		    ("IPI to non-existent CPU %d", cpu));

		if (bitmap) {
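			/*
			 * Atomically OR our bit into the pending word;
			 * if bits were already pending, a bitmap IPI is
			 * already in flight to this CPU and we need not
			 * send another vector.
			 */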
			do {
				old_pending = cpu_ipi_pending[cpu];
				new_pending = old_pending | bitmap;
			} while (!atomic_cmpset_int(&cpu_ipi_pending[cpu],
			    old_pending, new_pending));

			if (old_pending)
				continue;
		}

		lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]);
	}
}

/*
 * Send an IPI to all CPUs EXCEPT myself.
 */
void
ipi_all_but_self(u_int ipi)
{

	if (IPI_IS_BITMAPED(ipi)) {
		ipi_selected(PCPU_GET(other_cpus), ipi);
		return;
	}

	/*
	 * IPI_STOP_HARD maps to an NMI and the trap handler needs a bit
	 * of help in order to understand what the source is.  Set the
	 * mask of receiving CPUs for this purpose.
	 */
	if (ipi == IPI_STOP_HARD)
		atomic_set_int(&ipi_nmi_pending, PCPU_GET(other_cpus));

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_OTHERS);
}

int
ipi_nmi_handler(void)
{
	cpumask_t cpumask;

	/*
	 * As long as there is not a simple way to know about an NMI's
	 * source, if the bitmask for the current CPU is present in
	 * the global pending bitword an IPI_STOP_HARD has been issued
	 * and should be handled.
	 */
	cpumask = PCPU_GET(cpumask);
	if ((ipi_nmi_pending & cpumask) == 0)
		return (1);

	atomic_clear_int(&ipi_nmi_pending, cpumask);
	cpustop_handler();
	return (0);
}

/*
 * Handle an IPI_STOP by saving our current context and spinning until we
 * are resumed.
 */
void
cpustop_handler(void)
{
	int cpu = PCPU_GET(cpuid);
	int cpumask = PCPU_GET(cpumask);

	savectx(&stoppcbs[cpu]);

	/* Indicate that we are stopped */
	atomic_set_int(&stopped_cpus, cpumask);

	/* Wait for restart */
	while (!(started_cpus & cpumask))
		ia32_pause();

	atomic_clear_int(&started_cpus, cpumask);
	atomic_clear_int(&stopped_cpus, cpumask);

	if (cpu == 0 && cpustop_restartfunc != NULL) {
		cpustop_restartfunc();
		cpustop_restartfunc = NULL;
	}
}

/*
 * Handle an IPI_SUSPEND by saving our current context and spinning until we
 * are resumed.
 */
void
cpususpend_handler(void)
{
	struct savefpu *stopfpu;
	register_t cr3, rf;
	int cpu = PCPU_GET(cpuid);
	int cpumask = PCPU_GET(cpumask);

	rf = intr_disable();
	cr3 = rcr3();
	stopfpu = &stopxpcbs[cpu].xpcb_pcb.pcb_save;
	if (savectx2(&stopxpcbs[cpu])) {
		fpugetregs(curthread, stopfpu);
		wbinvd();
		atomic_set_int(&stopped_cpus, cpumask);
	} else
		fpusetregs(curthread, stopfpu);

	/* Wait for resume */
	while (!(started_cpus & cpumask))
		ia32_pause();

	atomic_clear_int(&started_cpus, cpumask);
	atomic_clear_int(&stopped_cpus, cpumask);

	/* Restore CR3 and enable interrupts */
	load_cr3(cr3);
	lapic_setup(0);
	intr_restore(rf);
}

/*
 * This is called once the rest of the system is up and running and we're
 * ready to let the APs out of the pen.
 */
static void
release_aps(void *dummy __unused)
{

	if (mp_ncpus == 1)
		return;
	atomic_store_rel_int(&aps_ready, 1);
	while (smp_started == 0)
		ia32_pause();
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);

static int
sysctl_hlt_cpus(SYSCTL_HANDLER_ARGS)
{
	cpumask_t mask;
	int error;

	mask = hlt_cpus_mask;
	error = sysctl_handle_int(oidp, &mask, 0, req);
	if (error || !req->newptr)
		return (error);

	if (logical_cpus_mask != 0 &&
	    (mask & logical_cpus_mask) == logical_cpus_mask)
		hlt_logical_cpus = 1;
	else
		hlt_logical_cpus = 0;

	if (! hyperthreading_allowed)
		mask |= hyperthreading_cpus_mask;

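	/* Never halt every CPU; always leave CPU 0 (the BSP) running. */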
	if ((mask & all_cpus) == all_cpus)
		mask &= ~(1<<0);
	hlt_cpus_mask = mask;
	return (error);
}
SYSCTL_PROC(_machdep, OID_AUTO, hlt_cpus, CTLTYPE_INT|CTLFLAG_RW,
    0, 0, sysctl_hlt_cpus, "IU",
    "Bitmap of CPUs to halt.  101 (binary) will halt CPUs 0 and 2.");

static int
sysctl_hlt_logical_cpus(SYSCTL_HANDLER_ARGS)
{
	int disable, error;

	disable = hlt_logical_cpus;
	error = sysctl_handle_int(oidp, &disable, 0, req);
	if (error || !req->newptr)
		return (error);

	if (disable)
		hlt_cpus_mask |= logical_cpus_mask;
	else
		hlt_cpus_mask &= ~logical_cpus_mask;

	if (! hyperthreading_allowed)
		hlt_cpus_mask |= hyperthreading_cpus_mask;

	if ((hlt_cpus_mask & all_cpus) == all_cpus)
		hlt_cpus_mask &= ~(1<<0);

	hlt_logical_cpus = disable;
	return (error);
}

static int
sysctl_hyperthreading_allowed(SYSCTL_HANDLER_ARGS)
{
	int allowed, error;

	allowed = hyperthreading_allowed;
	error = sysctl_handle_int(oidp, &allowed, 0, req);
	if (error || !req->newptr)
		return (error);

#ifdef SCHED_ULE
	/*
	 * SCHED_ULE doesn't allow enabling/disabling HT cores at
	 * run-time.
	 */
	if (allowed != hyperthreading_allowed)
		return (ENOTSUP);
	return (error);
#endif

	if (allowed)
		hlt_cpus_mask &= ~hyperthreading_cpus_mask;
	else
		hlt_cpus_mask |= hyperthreading_cpus_mask;

	if (logical_cpus_mask != 0 &&
	    (hlt_cpus_mask & logical_cpus_mask) == logical_cpus_mask)
		hlt_logical_cpus = 1;
	else
		hlt_logical_cpus = 0;

	if ((hlt_cpus_mask & all_cpus) == all_cpus)
		hlt_cpus_mask &= ~(1<<0);

	hyperthreading_allowed = allowed;
	return (error);
}

static void
cpu_hlt_setup(void *dummy __unused)
{

	if (logical_cpus_mask != 0) {
		TUNABLE_INT_FETCH("machdep.hlt_logical_cpus",
		    &hlt_logical_cpus);
		sysctl_ctx_init(&logical_cpu_clist);
		SYSCTL_ADD_PROC(&logical_cpu_clist,
		    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
		    "hlt_logical_cpus", CTLTYPE_INT|CTLFLAG_RW, 0, 0,
		    sysctl_hlt_logical_cpus, "IU", "");
		SYSCTL_ADD_UINT(&logical_cpu_clist,
		    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
		    "logical_cpus_mask", CTLTYPE_INT|CTLFLAG_RD,
		    &logical_cpus_mask, 0, "");

		if (hlt_logical_cpus)
			hlt_cpus_mask |= logical_cpus_mask;

		/*
		 * If necessary for security purposes, force
		 * hyperthreading off, regardless of the value
		 * of hlt_logical_cpus.
		 */
		if (hyperthreading_cpus_mask) {
			SYSCTL_ADD_PROC(&logical_cpu_clist,
			    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
			    "hyperthreading_allowed", CTLTYPE_INT|CTLFLAG_RW,
			    0, 0, sysctl_hyperthreading_allowed, "IU", "");
			if (! hyperthreading_allowed)
				hlt_cpus_mask |= hyperthreading_cpus_mask;
		}
	}
}
SYSINIT(cpu_hlt, SI_SUB_SMP, SI_ORDER_ANY, cpu_hlt_setup, NULL);

int
mp_grab_cpu_hlt(void)
{
	u_int mask = PCPU_GET(cpumask);
#ifdef MP_WATCHDOG
	u_int cpuid = PCPU_GET(cpuid);
#endif
	int retval;

#ifdef MP_WATCHDOG
	ap_watchdog(cpuid);
#endif

	retval = mask & hlt_cpus_mask;
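	/*
	 * "sti; hlt" enables interrupts and halts atomically: the
	 * one-instruction interrupt shadow after sti ensures no wakeup
	 * interrupt can slip in between the two instructions.
	 */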
	while (mask & hlt_cpus_mask)
		__asm __volatile("sti; hlt" : : : "memory");
	return (retval);
}