/*-
 * Copyright (c) 1996, by Steve Passe
 * Copyright (c) 2003, by Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/7.3/sys/amd64/amd64/mp_machdep.c 198589 2009-10-29 14:34:02Z jhb $");

#include "opt_cpu.h"
#include "opt_kstack_pages.h"
#include "opt_mp_watchdog.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#ifdef GPROF
#include <sys/gmon.h>
#endif
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <machine/apicreg.h>
#include <machine/cputypes.h>
#include <machine/mca.h>
#include <machine/md_var.h>
#include <machine/mp_watchdog.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/tss.h>

#define WARMBOOT_TARGET		0
#define WARMBOOT_OFF		(KERNBASE + 0x0467)
#define WARMBOOT_SEG		(KERNBASE + 0x0469)

#define CMOS_REG		(0x70)
#define CMOS_DATA		(0x71)
#define BIOS_RESET		(0x0f)
#define BIOS_WARM		(0x0a)
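
/*
 * Sketch of the warm-boot handshake these constants implement (the
 * general PC/AT convention; individual BIOSes may vary): writing
 * BIOS_WARM (0x0a) into CMOS RTC register BIOS_RESET (0x0f) marks the
 * next CPU reset as a warm start, so the BIOS skips POST and jumps
 * through the real-mode vector stored at physical 0x467 (offset) and
 * 0x469 (segment).  start_all_aps() points that vector at the AP
 * trampoline before starting APs and restores the old contents after.
 */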

/* lock region used by kernel profiling */
int	mcount_lock;

int	mp_naps;		/* # of Application Processors */
int	boot_cpu_id = -1;	/* designated BSP */

extern struct pcpu __pcpu[];
/*
 * CPU topology map data structures for HTT.
 */
static struct cpu_group mp_groups[MAXCPU];
static struct cpu_top mp_top;

/* AP uses this during bootstrap.  Do not staticize. */
char *bootSTK;
static int bootAP;

/* Free these after use */
void *bootstacks[MAXCPU];

/* Temporary holder for double fault stack */
char *doublefault_stack;
char *nmi_stack;

/* Hotwire a 0->4MB V==P mapping */
extern pt_entry_t *KPTphys;

/* SMP page table page */
extern pt_entry_t *SMPpt;

struct pcb stoppcbs[MAXCPU];
/* Variables needed for SMP tlb shootdown. */
vm_offset_t smp_tlb_addr1;
vm_offset_t smp_tlb_addr2;
volatile int smp_tlb_wait;

extern inthand_t IDTVEC(fast_syscall), IDTVEC(fast_syscall32);

#ifdef STOP_NMI
volatile cpumask_t ipi_nmi_pending;

static void	ipi_nmi_selected(u_int32_t cpus);
#endif

/*
 * Local data and functions.
 */

#ifdef STOP_NMI
/*
 * Provide an alternate method of stopping other CPUs.  If another CPU has
 * disabled interrupts the conventional STOP IPI will be blocked.  This
 * NMI-based stop should get through in that case.
 */
static int stop_cpus_with_nmi = 1;
SYSCTL_INT(_debug, OID_AUTO, stop_cpus_with_nmi, CTLTYPE_INT | CTLFLAG_RW,
    &stop_cpus_with_nmi, 0, "");
TUNABLE_INT("debug.stop_cpus_with_nmi", &stop_cpus_with_nmi);
#else
#define	stop_cpus_with_nmi	0
#endif

static u_int logical_cpus;

/* used to hold the APs until we are ready to release them */
static struct mtx ap_boot_mtx;

/* Set to 1 once we're ready to let the APs out of the pen. */
static volatile int aps_ready = 0;

/*
 * Store data from cpu_add() until later in the boot when we actually set up
 * the APs.
 */
struct cpu_info {
	int	cpu_present:1;
	int	cpu_bsp:1;
	int	cpu_disabled:1;
	int	cpu_hyperthread:1;
} static cpu_info[MAX_APIC_ID + 1];
int cpu_apic_ids[MAXCPU];

/* Holds pending bitmap based IPIs per CPU */
static volatile u_int cpu_ipi_pending[MAXCPU];

static u_int boot_address;

static void	assign_cpu_ids(void);
static void	set_interrupt_apic_ids(void);
static int	start_all_aps(void);
static int	start_ap(int apic_id);
static void	release_aps(void *dummy);

static int hlt_logical_cpus;
static u_int hyperthreading_cpus;
static cpumask_t hyperthreading_cpus_mask;
static int hyperthreading_allowed = 1;
static struct sysctl_ctx_list logical_cpu_clist;
static u_int bootMP_size;
static void
mem_range_AP_init(void)
{
	if (mem_range_softc.mr_op && mem_range_softc.mr_op->initAP)
		mem_range_softc.mr_op->initAP(&mem_range_softc);
}

void
mp_topology(void)
{
	struct cpu_group *group;
	int apic_id;
	int groups;
	int cpu;

	/* Build the smp_topology map. */
	/* Nothing to do if there is no HTT support. */
	if (hyperthreading_cpus <= 1)
		return;
	group = &mp_groups[0];
	groups = 1;
	for (cpu = 0, apic_id = 0; apic_id <= MAX_APIC_ID; apic_id++) {
		if (!cpu_info[apic_id].cpu_present)
			continue;
		/*
		 * If the current group has members and we're not a logical
		 * cpu, create a new group.
		 */
		if (group->cg_count != 0 &&
		    (apic_id % hyperthreading_cpus) == 0) {
			group++;
			groups++;
		}
		group->cg_count++;
		group->cg_mask |= 1 << cpu;
		cpu++;
	}

	mp_top.ct_count = groups;
	mp_top.ct_group = mp_groups;
	smp_topology = &mp_top;
}
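
/*
 * Worked example for the loop above (hypothetical topology): with
 * hyperthreading_cpus == 2 and four present logical CPUs at APIC IDs
 * 0-3, two groups are built: {cpu0, cpu1} (APIC IDs 0, 1) and
 * {cpu2, cpu3} (APIC IDs 2, 3), since a new group is started whenever
 * apic_id is a multiple of hyperthreading_cpus and the current group
 * is non-empty.
 */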

/*
 * Calculate usable address in base memory for AP trampoline code.
 */
u_int
mp_bootaddress(u_int basemem)
{

	bootMP_size = mptramp_end - mptramp_start;
	boot_address = trunc_page(basemem * 1024); /* round down to 4k boundary */
	if (((basemem * 1024) - boot_address) < bootMP_size)
		boot_address -= PAGE_SIZE;	/* not enough, lower by 4k */
	/* 3 levels of page table pages */
	mptramp_pagetables = boot_address - (PAGE_SIZE * 3);

	return mptramp_pagetables;
}
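
/*
 * Example of the layout computed above (hypothetical basemem of 639
 * KB): trunc_page(639 * 1024) yields boot_address = 0x9f000 for the
 * trampoline; were the tail of base memory too small to hold it,
 * boot_address would drop by one more page.  The three bootstrap page
 * table pages then sit immediately below at 0x9c000-0x9efff, and
 * mptramp_pagetables = 0x9c000 is what the trampoline loads into %cr3.
 */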

void
cpu_add(u_int apic_id, char boot_cpu)
{

	if (apic_id > MAX_APIC_ID) {
		panic("SMP: APIC ID %d too high", apic_id);
		return;
	}
	KASSERT(cpu_info[apic_id].cpu_present == 0, ("CPU %d added twice",
	    apic_id));
	cpu_info[apic_id].cpu_present = 1;
	if (boot_cpu) {
		KASSERT(boot_cpu_id == -1,
		    ("CPU %d claims to be BSP, but CPU %d already is", apic_id,
		    boot_cpu_id));
		boot_cpu_id = apic_id;
		cpu_info[apic_id].cpu_bsp = 1;
	}
	if (mp_ncpus < MAXCPU) {
		mp_ncpus++;
		mp_maxid = mp_ncpus - 1;
	}
	if (bootverbose)
		printf("SMP: Added CPU %d (%s)\n", apic_id, boot_cpu ? "BSP" :
		    "AP");
}

void
cpu_mp_setmaxid(void)
{

	/*
	 * mp_maxid should be already set by calls to cpu_add().
	 * Just sanity check its value here.
	 */
	if (mp_ncpus == 0)
		KASSERT(mp_maxid == 0,
		    ("%s: mp_ncpus is zero, but mp_maxid is not", __func__));
	else if (mp_ncpus == 1)
		mp_maxid = 0;
	else
		KASSERT(mp_maxid >= mp_ncpus - 1,
		    ("%s: counters out of sync: max %d, count %d", __func__,
		    mp_maxid, mp_ncpus));
}

int
cpu_mp_probe(void)
{

	/*
	 * Always record BSP in CPU map so that the mbuf init code works
	 * correctly.
	 */
	all_cpus = 1;
	if (mp_ncpus == 0) {
		/*
		 * No CPUs were found, so this must be a UP system.  Set up
		 * the variables to represent a system with a single CPU
		 * with an id of 0.
		 */
		mp_ncpus = 1;
		return (0);
	}

	/* At least one CPU was found. */
	if (mp_ncpus == 1) {
		/*
		 * One CPU was found, so this must be a UP system with
		 * an I/O APIC.
		 */
		mp_maxid = 0;
		return (0);
	}

	/* At least two CPUs were found. */
	return (1);
}

/*
 * Initialize the IPI handlers and start up the APs.
 */
void
cpu_mp_start(void)
{
	int i;
	u_int threads_per_cache, p[4];

	/* Initialize the logical ID to APIC ID table. */
	for (i = 0; i < MAXCPU; i++) {
		cpu_apic_ids[i] = -1;
		cpu_ipi_pending[i] = 0;
	}

	/* Install an inter-CPU IPI for TLB invalidation */
	setidt(IPI_INVLTLB, IDTVEC(invltlb), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IPI_INVLPG, IDTVEC(invlpg), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IPI_INVLRNG, IDTVEC(invlrng), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for cache invalidation. */
	setidt(IPI_INVLCACHE, IDTVEC(invlcache), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for all-CPU rendezvous */
	setidt(IPI_RENDEZVOUS, IDTVEC(rendezvous), SDT_SYSIGT, SEL_KPL, 0);

	/* Install generic inter-CPU IPI handler */
	setidt(IPI_BITMAP_VECTOR, IDTVEC(ipi_intr_bitmap_handler),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for CPU stop/restart */
	setidt(IPI_STOP, IDTVEC(cpustop), SDT_SYSIGT, SEL_KPL, 0);

	/* Set boot_cpu_id if needed. */
	if (boot_cpu_id == -1) {
		boot_cpu_id = PCPU_GET(apic_id);
		cpu_info[boot_cpu_id].cpu_bsp = 1;
	} else
		KASSERT(boot_cpu_id == PCPU_GET(apic_id),
		    ("BSP's APIC ID doesn't match boot_cpu_id"));

	/* Set up the initial logical CPUs info. */
	logical_cpus = logical_cpus_mask = 0;
	if (cpu_feature & CPUID_HTT)
		logical_cpus = (cpu_procinfo & CPUID_HTT_CORES) >> 16;
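
	/*
	 * Note on the extraction above: cpu_procinfo is %ebx from CPUID
	 * leaf 1, and bits 23:16 (CPUID_HTT_CORES) report the number of
	 * logical processors per physical package, which is what the
	 * shift recovers.
	 */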

	/*
	 * Work out if hyperthreading is *really* enabled.  This
	 * is made really ugly by the fact that processors lie: Dual
	 * core processors claim to be hyperthreaded even when they're
	 * not, presumably because they want to be treated the same
	 * way as HTT with respect to per-cpu software licensing.
	 * At the time of writing (May 12, 2005) the only hyperthreaded
	 * cpus are from Intel, and Intel's dual-core processors can be
	 * identified via the "deterministic cache parameters" cpuid
	 * calls.
	 */
	/*
	 * First determine if this is an Intel processor which claims
	 * to have hyperthreading support.
	 */
	if ((cpu_feature & CPUID_HTT) && cpu_vendor_id == CPU_VENDOR_INTEL) {
		/*
		 * If the "deterministic cache parameters" cpuid calls
		 * are available, use them.
		 */
		if (cpu_high >= 4) {
			/* Ask the processor about the L1 cache. */
			for (i = 0; i < 1; i++) {
				cpuid_count(4, i, p);
				threads_per_cache = ((p[0] & 0x3ffc000) >> 14) + 1;
				if (hyperthreading_cpus < threads_per_cache)
					hyperthreading_cpus = threads_per_cache;
				if ((p[0] & 0x1f) == 0)
					break;
			}
		}

		/*
		 * If the deterministic cache parameters are not
		 * available, or if no caches were reported to exist,
		 * just accept what the HTT flag indicated.
		 */
		if (hyperthreading_cpus == 0)
			hyperthreading_cpus = logical_cpus;
	}
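
	/*
	 * Decoding sketch for the leaf-4 call above: %eax (p[0]) bits
	 * 25:14 hold the maximum number of addressable logical
	 * processor IDs sharing this cache, minus one, and bits 4:0
	 * give the cache type (0 means no more caches).  With a
	 * hypothetical %eax of 0x04004121 the mask and shift yield 1,
	 * so threads_per_cache == 2, i.e. two hyperthreads share the
	 * L1 cache.
	 */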

	assign_cpu_ids();

	/* Start each Application Processor */
	start_all_aps();

	set_interrupt_apic_ids();

	/* Last, set up the cpu topology now that we have probed CPUs */
	mp_topology();
}

/*
 * Print various information about the SMP system hardware and setup.
 */
void
cpu_mp_announce(void)
{
	const char *hyperthread;
	int i;

	/* List active CPUs first. */
	printf(" cpu0 (BSP): APIC ID: %2d\n", boot_cpu_id);
	for (i = 1; i < mp_ncpus; i++) {
		if (cpu_info[cpu_apic_ids[i]].cpu_hyperthread)
			hyperthread = "/HT";
		else
			hyperthread = "";
		printf(" cpu%d (AP%s): APIC ID: %2d\n", i, hyperthread,
		    cpu_apic_ids[i]);
	}

	/* List disabled CPUs last. */
	for (i = 0; i <= MAX_APIC_ID; i++) {
		if (!cpu_info[i].cpu_present || !cpu_info[i].cpu_disabled)
			continue;
		if (cpu_info[i].cpu_hyperthread)
			hyperthread = "/HT";
		else
			hyperthread = "";
		printf(" cpu (AP%s): APIC ID: %2d (disabled)\n", hyperthread,
		    i);
	}
}

/*
 * AP CPUs call this to initialize themselves.
 */
void
init_secondary(void)
{
	struct pcpu *pc;
	struct nmi_pcpu *np;
	u_int64_t msr, cr0;
	int cpu, gsel_tss, x;
	struct region_descriptor ap_gdt;

	/* Set by the startup code for us to use */
	cpu = bootAP;

	/* Init tss */
	common_tss[cpu] = common_tss[0];
	common_tss[cpu].tss_rsp0 = 0;	/* not used until after switch */
	common_tss[cpu].tss_iobase = sizeof(struct amd64tss);
	common_tss[cpu].tss_ist1 = (long)&doublefault_stack[PAGE_SIZE];

	/* The NMI stack runs on IST2. */
	np = ((struct nmi_pcpu *) &nmi_stack[PAGE_SIZE]) - 1;
	common_tss[cpu].tss_ist2 = (long) np;

	/* Prepare private GDT */
	gdt_segs[GPROC0_SEL].ssd_base = (long) &common_tss[cpu];
	ssdtosyssd(&gdt_segs[GPROC0_SEL],
	    (struct system_segment_descriptor *)&gdt[NGDT * cpu + GPROC0_SEL]);
	for (x = 0; x < NGDT; x++) {
		if (x != GPROC0_SEL && x != (GPROC0_SEL + 1))
			ssdtosd(&gdt_segs[x], &gdt[NGDT * cpu + x]);
	}
	ap_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	ap_gdt.rd_base = (long) &gdt[NGDT * cpu];
	lgdt(&ap_gdt);			/* does magic intra-segment return */

	/* Get per-cpu data */
	pc = &__pcpu[cpu];

	/* prime data page for it to use */
	pcpu_init(pc, cpu, sizeof(struct pcpu));
	pc->pc_apic_id = cpu_apic_ids[cpu];
	pc->pc_prvspace = pc;
	pc->pc_curthread = 0;
	pc->pc_tssp = &common_tss[cpu];
	pc->pc_rsp0 = 0;
	pc->pc_gs32p = &gdt[NGDT * cpu + GUGS32_SEL];

	/* Save the per-cpu pointer for use by the NMI handler. */
	np->np_pcpu = (register_t) pc;

	wrmsr(MSR_FSBASE, 0);		/* User value */
	wrmsr(MSR_GSBASE, (u_int64_t)pc);
	wrmsr(MSR_KGSBASE, (u_int64_t)pc); /* XXX User value while we're in the kernel */

	lidt(&r_idt);

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	ltr(gsel_tss);

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);

	/* Set up the fast syscall stuff */
	msr = rdmsr(MSR_EFER) | EFER_SCE;
	wrmsr(MSR_EFER, msr);
	wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall));
	wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
	msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
	    ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
	wrmsr(MSR_STAR, msr);
	wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D);
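
	/*
	 * Rough sketch of the MSRs programmed above: LSTAR and CSTAR
	 * hold the 64-bit and 32-bit-compat SYSCALL entry points; STAR
	 * bits 47:32 supply the kernel CS/SS selectors loaded on
	 * SYSCALL and bits 63:48 the user selectors restored on SYSRET;
	 * SF_MASK lists the RFLAGS bits the CPU clears on entry, so the
	 * kernel starts with interrupts, traps, and the direction flag
	 * all off.
	 */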

	/* Disable local APIC just to be sure. */
	lapic_disable();

	/* signal our startup to the BSP. */
	mp_naps++;

	/* Spin until the BSP releases the APs. */
	while (!aps_ready)
		ia32_pause();

	/* Initialize the PAT MSR. */
	pmap_init_pat();

	/* set up CPU registers and state */
	cpu_setregs();

	/* set up SSE/NX registers */
	initializecpu();

	/* set up FPU state on the AP */
	fpuinit();

	/* A quick check from sanity claus */
	if (PCPU_GET(apic_id) != lapic_id()) {
		printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
		printf("SMP: actual apic_id = %d\n", lapic_id());
		printf("SMP: correct apic_id = %d\n", PCPU_GET(apic_id));
		panic("cpuid mismatch! boom!!");
	}

	/* Initialize curthread. */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	PCPU_SET(curthread, PCPU_GET(idlethread));

	mca_init();

	mtx_lock_spin(&ap_boot_mtx);

	/* Init local apic for IRQs */
	lapic_setup(1);

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();

	smp_cpus++;

	CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
	printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));

	/* Determine if we are a logical CPU. */
	if (logical_cpus > 1 && PCPU_GET(apic_id) % logical_cpus != 0)
		logical_cpus_mask |= PCPU_GET(cpumask);

	/* Determine if we are a hyperthread. */
	if (hyperthreading_cpus > 1 &&
	    PCPU_GET(apic_id) % hyperthreading_cpus != 0)
		hyperthreading_cpus_mask |= PCPU_GET(cpumask);

	/* Build our map of 'other' CPUs. */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

	if (bootverbose)
		lapic_dump("AP");

	if (smp_cpus == mp_ncpus) {
		/* enable IPIs, tlb shootdown, freezes etc */
		atomic_store_rel_int(&smp_started, 1);
		smp_active = 1;	/* historic */
	}

	/*
	 * Enable the global-pages TLB extension.
	 * This also implicitly flushes the TLB.
	 */
	load_cr4(rcr4() | CR4_PGE);

	mtx_unlock_spin(&ap_boot_mtx);

	/* wait until all the APs are up */
	while (smp_started == 0)
		ia32_pause();

	sched_throw(NULL);

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}

/*******************************************************************
 * local functions and data
 */

/*
 * We tell the I/O APIC code about all the CPUs we want to receive
 * interrupts.  If we don't want certain CPUs to receive IRQs we
 * can simply not tell the I/O APIC code about them in this function.
 * We also do not tell it about the BSP since it tells itself about
 * the BSP internally to work with UP kernels and on UP machines.
 */
static void
set_interrupt_apic_ids(void)
{
	u_int i, apic_id;

	for (i = 0; i < MAXCPU; i++) {
		apic_id = cpu_apic_ids[i];
		if (apic_id == -1)
			continue;
		if (cpu_info[apic_id].cpu_bsp)
			continue;
		if (cpu_info[apic_id].cpu_disabled)
			continue;

		/* Don't let hyperthreads service interrupts. */
		if (hyperthreading_cpus > 1 &&
		    apic_id % hyperthreading_cpus != 0)
			continue;

		intr_add_cpu(i);
	}
}

/*
 * Assign logical CPU IDs to local APICs.
 */
static void
assign_cpu_ids(void)
{
	u_int i;

	TUNABLE_INT_FETCH("machdep.hyperthreading_allowed",
	    &hyperthreading_allowed);

	/* Check for explicitly disabled CPUs. */
	for (i = 0; i <= MAX_APIC_ID; i++) {
		if (!cpu_info[i].cpu_present || cpu_info[i].cpu_bsp)
			continue;

		if (hyperthreading_cpus > 1 && i % hyperthreading_cpus != 0) {
			cpu_info[i].cpu_hyperthread = 1;
#if defined(SCHED_ULE)
			/*
			 * Don't use HT CPU if it has been disabled by a
			 * tunable.
			 */
			if (hyperthreading_allowed == 0) {
				cpu_info[i].cpu_disabled = 1;
				continue;
			}
#endif
		}

		/* Don't use this CPU if it has been disabled by a tunable. */
		if (resource_disabled("lapic", i)) {
			cpu_info[i].cpu_disabled = 1;
			continue;
		}
	}

	/*
	 * Assign CPU IDs to local APIC IDs and disable any CPUs
	 * beyond MAXCPU.  CPU 0 is always assigned to the BSP.
	 *
	 * To minimize confusion for userland, we attempt to number
	 * CPUs such that all threads and cores in a package are
	 * grouped together.  For now we assume that the BSP is always
	 * the first thread in a package and just start adding APs
	 * starting with the BSP's APIC ID.
	 */
	mp_ncpus = 1;
	cpu_apic_ids[0] = boot_cpu_id;
	for (i = boot_cpu_id + 1; i != boot_cpu_id;
	     i == MAX_APIC_ID ? i = 0 : i++) {
		if (!cpu_info[i].cpu_present || cpu_info[i].cpu_bsp ||
		    cpu_info[i].cpu_disabled)
			continue;

		if (mp_ncpus < MAXCPU) {
			cpu_apic_ids[mp_ncpus] = i;
			mp_ncpus++;
		} else
			cpu_info[i].cpu_disabled = 1;
	}
	KASSERT(mp_maxid >= mp_ncpus - 1,
	    ("%s: counters out of sync: max %d, count %d", __func__, mp_maxid,
	    mp_ncpus));
}

/*
 * start each AP in our list
 */
static int
start_all_aps(void)
{
	vm_offset_t va = boot_address + KERNBASE;
	u_int64_t *pt4, *pt3, *pt2;
	u_int32_t mpbioswarmvec;
	int apic_id, cpu, i;
	u_char mpbiosreason;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	/* install the AP 1st level boot code */
	pmap_kenter(va, boot_address);
	pmap_invalidate_page(kernel_pmap, va);
	bcopy(mptramp_start, (void *)va, bootMP_size);

	/* Locate the page tables, they'll be below the trampoline */
	pt4 = (u_int64_t *)(uintptr_t)(mptramp_pagetables + KERNBASE);
	pt3 = pt4 + (PAGE_SIZE) / sizeof(u_int64_t);
	pt2 = pt3 + (PAGE_SIZE) / sizeof(u_int64_t);

	/* Create the initial 1GB replicated page tables */
	for (i = 0; i < 512; i++) {
		/* Each slot of the level 4 pages points to the same level 3 page */
		pt4[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + PAGE_SIZE);
		pt4[i] |= PG_V | PG_RW | PG_U;

		/* Each slot of the level 3 pages points to the same level 2 page */
		pt3[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + (2 * PAGE_SIZE));
		pt3[i] |= PG_V | PG_RW | PG_U;

		/* The level 2 page slots are mapped with 2MB pages for 1GB. */
		pt2[i] = i * (2 * 1024 * 1024);
		pt2[i] |= PG_V | PG_RW | PG_PS | PG_U;
	}
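
	/*
	 * Net effect of the loop above: every PML4 slot and every PDP
	 * slot funnel into the one page directory whose 512 2MB entries
	 * identity-map physical 0-1GB, so the first 1GB of physical
	 * memory appears at every 1GB-aligned virtual address.  The
	 * trampoline can therefore enable paging while still executing
	 * at its low physical address and then jump to the kernel's
	 * high virtual addresses without changing page tables.
	 */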

	/* save the current value of the warm-start vector */
	mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);

	/* set up a vector to our boot code */
	*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
	*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

	/* start each AP */
	for (cpu = 1; cpu < mp_ncpus; cpu++) {
		apic_id = cpu_apic_ids[cpu];

		/* allocate and set up an idle stack data page */
		bootstacks[cpu] = (void *)kmem_alloc(kernel_map,
		    KSTACK_PAGES * PAGE_SIZE);
		doublefault_stack = (char *)kmem_alloc(kernel_map, PAGE_SIZE);
		nmi_stack = (char *)kmem_alloc(kernel_map, PAGE_SIZE);

		bootSTK = (char *)bootstacks[cpu] + KSTACK_PAGES * PAGE_SIZE - 8;
		bootAP = cpu;

		/* attempt to start the Application Processor */
		if (!start_ap(apic_id)) {
			/* restore the warmstart vector */
			*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;
			panic("AP #%d (PHY# %d) failed!", cpu, apic_id);
		}

		all_cpus |= (1 << cpu);	/* record AP in CPU map */
	}

	/* build our map of 'other' CPUs */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

	/* restore the warmstart vector */
	*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;

	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);

	/* number of APs actually started */
	return mp_naps;
}

/*
 * This function starts the AP (application processor) identified
 * by the APIC ID 'apic_id'.  It does quite a "song and dance"
 * to accomplish this.  This is necessary because of the nuances
 * of the different hardware we might encounter.  It isn't pretty,
 * but it seems to work.
 */
static int
start_ap(int apic_id)
{
	int vector, ms;
	int cpus;

	/* calculate the vector */
	vector = (boot_address >> 12) & 0xff;
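
	/*
	 * The STARTUP IPI vector is the 4KB page number of the
	 * trampoline: the AP begins executing in real mode at
	 * CS:IP = (vector << 8):0000.  For a hypothetical boot_address
	 * of 0x9f000 the vector is 0x9f and the AP starts at 9f00:0000.
	 */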

	/* used as a watchpoint to signal AP startup */
	cpus = mp_naps;

	/*
	 * First we do an INIT IPI: this INIT IPI might be run, resetting
	 * and running the target CPU.  Or this INIT IPI might be latched
	 * (P5 bug), with the CPU waiting for a STARTUP IPI.  Or this INIT
	 * IPI might be ignored.
	 */

	/* do an INIT IPI: assert RESET */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, apic_id);

	/* wait for pending status end */
	lapic_ipi_wait(-1);

	/* do an INIT IPI: deassert RESET */
	lapic_ipi_raw(APIC_DEST_ALLESELF | APIC_TRIGMOD_LEVEL |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, 0);

	/* wait for pending status end */
	DELAY(10000);		/* wait ~10mS */
	lapic_ipi_wait(-1);

	/*
	 * Next we do a STARTUP IPI: the previous INIT IPI might still be
	 * latched (P5 bug), in which case this first STARTUP IPI terminates
	 * immediately and the previously started INIT IPI continues.  Or
	 * the previous INIT IPI has already run, and this STARTUP IPI will
	 * run.  Or the previous INIT IPI was ignored, and this STARTUP IPI
	 * will run.
	 */

	/* do a STARTUP IPI */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
	    vector, apic_id);
	lapic_ipi_wait(-1);
	DELAY(200);		/* wait ~200uS */

	/*
	 * Finally we do a second STARTUP IPI: it should run if the
	 * previous STARTUP IPI was cancelled by a latched INIT IPI.
	 * Otherwise it is ignored, as only ONE STARTUP IPI is
	 * recognized after hardware RESET or an INIT IPI.
	 */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
	    vector, apic_id);
	lapic_ipi_wait(-1);
	DELAY(200);		/* wait ~200uS */

	/* Wait up to 5 seconds for it to start. */
	for (ms = 0; ms < 5000; ms++) {
		if (mp_naps > cpus)
			return 1;	/* return SUCCESS */
		DELAY(1000);
	}
	return 0;		/* return FAILURE */
}

/*
 * Flush the TLB on all other CPUs.
 */
static void
smp_tlb_shootdown(u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
	u_int ncpu;

	ncpu = mp_ncpus - 1;	/* does not shootdown self */
	if (ncpu < 1)
		return;		/* no other cpus */
	if (!(read_rflags() & PSL_I))
		panic("%s: interrupts disabled", __func__);
	mtx_lock_spin(&smp_ipi_mtx);
	smp_tlb_addr1 = addr1;
	smp_tlb_addr2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	ipi_all_but_self(vector);
	while (smp_tlb_wait < ncpu)
		ia32_pause();
	mtx_unlock_spin(&smp_ipi_mtx);
}
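
/*
 * Protocol sketch for the rendezvous above (the acknowledgement side
 * lives in the low-level IPI stubs, not in this file): the initiator
 * publishes the address arguments, zeroes smp_tlb_wait, and raises the
 * IPI; each remote CPU performs the invalidation and atomically
 * increments smp_tlb_wait, so the initiator spins until all ncpu
 * acknowledgements arrive.  smp_ipi_mtx serializes initiators so the
 * shared smp_tlb_addr1/2 slots cannot be overwritten mid-shootdown.
 */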

static void
smp_targeted_tlb_shootdown(u_int mask, u_int vector, vm_offset_t addr1,
    vm_offset_t addr2)
{
	int ncpu, othercpus;

	othercpus = mp_ncpus - 1;
	if (mask == (u_int)-1) {
		ncpu = othercpus;
		if (ncpu < 1)
			return;
	} else {
		mask &= ~PCPU_GET(cpumask);
		if (mask == 0)
			return;
		ncpu = bitcount32(mask);
		if (ncpu > othercpus) {
			/* XXX this should be a panic offence */
			printf("SMP: tlb shootdown to %d other cpus (only have %d)\n",
			    ncpu, othercpus);
			ncpu = othercpus;
		}
		/* XXX should be a panic, implied by mask == 0 above */
		if (ncpu < 1)
			return;
	}
	if (!(read_rflags() & PSL_I))
		panic("%s: interrupts disabled", __func__);
	mtx_lock_spin(&smp_ipi_mtx);
	smp_tlb_addr1 = addr1;
	smp_tlb_addr2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	if (mask == (u_int)-1)
		ipi_all_but_self(vector);
	else
		ipi_selected(mask, vector);
	while (smp_tlb_wait < ncpu)
		ia32_pause();
	mtx_unlock_spin(&smp_ipi_mtx);
}

void
smp_cache_flush(void)
{

	if (smp_started)
		smp_tlb_shootdown(IPI_INVLCACHE, 0, 0);
}

void
smp_invltlb(void)
{

	if (smp_started) {
		smp_tlb_shootdown(IPI_INVLTLB, 0, 0);
	}
}

void
smp_invlpg(vm_offset_t addr)
{

	if (smp_started)
		smp_tlb_shootdown(IPI_INVLPG, addr, 0);
}

void
smp_invlpg_range(vm_offset_t addr1, vm_offset_t addr2)
{

	if (smp_started) {
		smp_tlb_shootdown(IPI_INVLRNG, addr1, addr2);
	}
}

void
smp_masked_invltlb(u_int mask)
{

	if (smp_started) {
		smp_targeted_tlb_shootdown(mask, IPI_INVLTLB, 0, 0);
	}
}

void
smp_masked_invlpg(u_int mask, vm_offset_t addr)
{

	if (smp_started) {
		smp_targeted_tlb_shootdown(mask, IPI_INVLPG, addr, 0);
	}
}

void
smp_masked_invlpg_range(u_int mask, vm_offset_t addr1, vm_offset_t addr2)
{

	if (smp_started) {
		smp_targeted_tlb_shootdown(mask, IPI_INVLRNG, addr1, addr2);
	}
}

void
ipi_bitmap_handler(struct trapframe frame)
{
	int cpu = PCPU_GET(cpuid);
	u_int ipi_bitmap;

	ipi_bitmap = atomic_readandclear_int(&cpu_ipi_pending[cpu]);

	if (ipi_bitmap & (1 << IPI_PREEMPT)) {
		struct thread *running_thread = curthread;
		thread_lock(running_thread);
		if (running_thread->td_critnest > 1)
			running_thread->td_owepreempt = 1;
		else
			mi_switch(SW_INVOL | SW_PREEMPT, NULL);
		thread_unlock(running_thread);
	}

	/* Nothing to do for AST */
}

/*
 * send an IPI to a set of cpus.
 */
void
ipi_selected(u_int32_t cpus, u_int ipi)
{
	int cpu;
	u_int bitmap = 0;
	u_int old_pending;
	u_int new_pending;

	if (IPI_IS_BITMAPED(ipi)) {
		bitmap = 1 << ipi;
		ipi = IPI_BITMAP_VECTOR;
	}

#ifdef STOP_NMI
	if (ipi == IPI_STOP && stop_cpus_with_nmi) {
		ipi_nmi_selected(cpus);
		return;
	}
#endif
	CTR3(KTR_SMP, "%s: cpus: %x ipi: %x", __func__, cpus, ipi);
	while ((cpu = ffs(cpus)) != 0) {
		cpu--;
		cpus &= ~(1 << cpu);

		KASSERT(cpu_apic_ids[cpu] != -1,
		    ("IPI to non-existent CPU %d", cpu));

		if (bitmap) {
			do {
				old_pending = cpu_ipi_pending[cpu];
				new_pending = old_pending | bitmap;
			} while (!atomic_cmpset_int(&cpu_ipi_pending[cpu],
			    old_pending, new_pending));

			if (old_pending)
				continue;
		}

		lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]);
	}
}
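
/*
 * Coalescing note for the bitmap path above: several small IPI types
 * share the single IPI_BITMAP_VECTOR.  A sender CASes its request bit
 * into cpu_ipi_pending[cpu]; only the sender that transitions the word
 * from zero actually raises the hardware interrupt, and
 * ipi_bitmap_handler() later reads-and-clears the whole word, so
 * back-to-back requests cost a single interrupt.
 */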

/*
 * send an IPI 'ipi' to all CPUs, including myself
 */
void
ipi_all(u_int ipi)
{

	if (IPI_IS_BITMAPED(ipi) || (ipi == IPI_STOP && stop_cpus_with_nmi)) {
		ipi_selected(all_cpus, ipi);
		return;
	}
	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_ALL);
}

/*
 * send an IPI to all CPUs EXCEPT myself
 */
void
ipi_all_but_self(u_int ipi)
{

	if (IPI_IS_BITMAPED(ipi) || (ipi == IPI_STOP && stop_cpus_with_nmi)) {
		ipi_selected(PCPU_GET(other_cpus), ipi);
		return;
	}
	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_OTHERS);
}

/*
 * send an IPI to myself
 */
void
ipi_self(u_int ipi)
{

	if (IPI_IS_BITMAPED(ipi) || (ipi == IPI_STOP && stop_cpus_with_nmi)) {
		ipi_selected(PCPU_GET(cpumask), ipi);
		return;
	}
	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_SELF);
}

#ifdef STOP_NMI
/*
 * send NMI IPI to selected CPUs
 */

#define	BEFORE_SPIN	1000000

void
ipi_nmi_selected(u_int32_t cpus)
{
	int cpu;
	register_t icrlo;

	icrlo = APIC_DELMODE_NMI | APIC_DESTMODE_PHY | APIC_LEVEL_ASSERT
	    | APIC_TRIGMOD_EDGE;

	CTR2(KTR_SMP, "%s: cpus: %x nmi", __func__, cpus);

	atomic_set_int(&ipi_nmi_pending, cpus);

	while ((cpu = ffs(cpus)) != 0) {
		cpu--;
		cpus &= ~(1 << cpu);

		KASSERT(cpu_apic_ids[cpu] != -1,
		    ("IPI NMI to non-existent CPU %d", cpu));

		/* Wait for an earlier IPI to finish. */
		if (!lapic_ipi_wait(BEFORE_SPIN))
			panic("ipi_nmi_selected: previous IPI has not cleared");

		lapic_ipi_raw(icrlo, cpu_apic_ids[cpu]);
	}
}

int
ipi_nmi_handler(void)
{
	int cpumask = PCPU_GET(cpumask);

	if (!(ipi_nmi_pending & cpumask))
		return 1;

	atomic_clear_int(&ipi_nmi_pending, cpumask);
	cpustop_handler();
	return 0;
}

#endif /* STOP_NMI */

/*
 * Handle an IPI_STOP by saving our current context and spinning until we
 * are resumed.
 */
void
cpustop_handler(void)
{
	int cpu = PCPU_GET(cpuid);
	int cpumask = PCPU_GET(cpumask);

	savectx(&stoppcbs[cpu]);

	/* Indicate that we are stopped */
	atomic_set_int(&stopped_cpus, cpumask);

	/* Wait for restart */
	while (!(started_cpus & cpumask))
		ia32_pause();

	atomic_clear_int(&started_cpus, cpumask);
	atomic_clear_int(&stopped_cpus, cpumask);

	if (cpu == 0 && cpustop_restartfunc != NULL) {
		cpustop_restartfunc();
		cpustop_restartfunc = NULL;
	}
}

/*
 * This is called once the rest of the system is up and running and we're
 * ready to let the APs out of the pen.
 */
static void
release_aps(void *dummy __unused)
{

	if (mp_ncpus == 1)
		return;
	atomic_store_rel_int(&aps_ready, 1);
	while (smp_started == 0)
		ia32_pause();
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);

static int
sysctl_hlt_cpus(SYSCTL_HANDLER_ARGS)
{
	u_int mask;
	int error;

	mask = hlt_cpus_mask;
	error = sysctl_handle_int(oidp, &mask, 0, req);
	if (error || !req->newptr)
		return (error);

	if (logical_cpus_mask != 0 &&
	    (mask & logical_cpus_mask) == logical_cpus_mask)
		hlt_logical_cpus = 1;
	else
		hlt_logical_cpus = 0;

	if (! hyperthreading_allowed)
		mask |= hyperthreading_cpus_mask;

	if ((mask & all_cpus) == all_cpus)
		mask &= ~(1<<0);
	hlt_cpus_mask = mask;
	return (error);
}
SYSCTL_PROC(_machdep, OID_AUTO, hlt_cpus, CTLTYPE_INT|CTLFLAG_RW,
    0, 0, sysctl_hlt_cpus, "IU",
    "Bitmap of CPUs to halt.  101 (binary) will halt CPUs 0 and 2.");

static int
sysctl_hlt_logical_cpus(SYSCTL_HANDLER_ARGS)
{
	int disable, error;

	disable = hlt_logical_cpus;
	error = sysctl_handle_int(oidp, &disable, 0, req);
	if (error || !req->newptr)
		return (error);

	if (disable)
		hlt_cpus_mask |= logical_cpus_mask;
	else
		hlt_cpus_mask &= ~logical_cpus_mask;

	if (! hyperthreading_allowed)
		hlt_cpus_mask |= hyperthreading_cpus_mask;

	if ((hlt_cpus_mask & all_cpus) == all_cpus)
		hlt_cpus_mask &= ~(1<<0);

	hlt_logical_cpus = disable;
	return (error);
}

static int
sysctl_hyperthreading_allowed(SYSCTL_HANDLER_ARGS)
{
	int allowed, error;

	allowed = hyperthreading_allowed;
	error = sysctl_handle_int(oidp, &allowed, 0, req);
	if (error || !req->newptr)
		return (error);

#ifdef SCHED_ULE
	/*
	 * SCHED_ULE doesn't allow enabling/disabling HT cores at
	 * run-time.
	 */
	if (allowed != hyperthreading_allowed)
		return (ENOTSUP);
	return (error);
#endif

	if (allowed)
		hlt_cpus_mask &= ~hyperthreading_cpus_mask;
	else
		hlt_cpus_mask |= hyperthreading_cpus_mask;

	if (logical_cpus_mask != 0 &&
	    (hlt_cpus_mask & logical_cpus_mask) == logical_cpus_mask)
		hlt_logical_cpus = 1;
	else
		hlt_logical_cpus = 0;

	if ((hlt_cpus_mask & all_cpus) == all_cpus)
		hlt_cpus_mask &= ~(1<<0);

	hyperthreading_allowed = allowed;
	return (error);
}

static void
cpu_hlt_setup(void *dummy __unused)
{

	if (logical_cpus_mask != 0) {
		TUNABLE_INT_FETCH("machdep.hlt_logical_cpus",
		    &hlt_logical_cpus);
		sysctl_ctx_init(&logical_cpu_clist);
		SYSCTL_ADD_PROC(&logical_cpu_clist,
		    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
		    "hlt_logical_cpus", CTLTYPE_INT|CTLFLAG_RW, 0, 0,
		    sysctl_hlt_logical_cpus, "IU", "");
		SYSCTL_ADD_UINT(&logical_cpu_clist,
		    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
		    "logical_cpus_mask", CTLTYPE_INT|CTLFLAG_RD,
		    &logical_cpus_mask, 0, "");

		if (hlt_logical_cpus)
			hlt_cpus_mask |= logical_cpus_mask;

		/*
		 * If necessary for security purposes, force
		 * hyperthreading off, regardless of the value
		 * of hlt_logical_cpus.
		 */
		if (hyperthreading_cpus_mask) {
			SYSCTL_ADD_PROC(&logical_cpu_clist,
			    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
			    "hyperthreading_allowed", CTLTYPE_INT|CTLFLAG_RW,
			    0, 0, sysctl_hyperthreading_allowed, "IU", "");
			if (! hyperthreading_allowed)
				hlt_cpus_mask |= hyperthreading_cpus_mask;
		}
	}
}
SYSINIT(cpu_hlt, SI_SUB_SMP, SI_ORDER_ANY, cpu_hlt_setup, NULL);

int
mp_grab_cpu_hlt(void)
{
	u_int mask = PCPU_GET(cpumask);
#ifdef MP_WATCHDOG
	u_int cpuid = PCPU_GET(cpuid);
#endif
	int retval;

#ifdef MP_WATCHDOG
	ap_watchdog(cpuid);
#endif

	retval = mask & hlt_cpus_mask;
	while (mask & hlt_cpus_mask)
		__asm __volatile("sti; hlt" : : : "memory");
	return (retval);
}