/*-
 * Copyright (c) 1996, by Steve Passe
 * Copyright (c) 2003, by Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/8.2/sys/amd64/amd64/mp_machdep.c 215904 2010-11-26 21:39:11Z jkim $");

#include "opt_cpu.h"
#include "opt_kstack_pages.h"
#include "opt_mp_watchdog.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#ifdef GPROF
#include <sys/gmon.h>
#endif
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <machine/apicreg.h>
#include <machine/clock.h>
#include <machine/cputypes.h>
#include <machine/cpufunc.h>
#include <machine/mca.h>
#include <machine/md_var.h>
#include <machine/mp_watchdog.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/tss.h>

#define WARMBOOT_TARGET		0
#define WARMBOOT_OFF		(KERNBASE + 0x0467)
#define WARMBOOT_SEG		(KERNBASE + 0x0469)

#define CMOS_REG		(0x70)
#define CMOS_DATA		(0x71)
#define BIOS_RESET		(0x0f)
#define BIOS_WARM		(0x0a)

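/*
 * Warm-start protocol used by start_all_aps(): setting CMOS shutdown
 * status byte 0x0f (BIOS_RESET) to 0x0a (BIOS_WARM) asks the BIOS for
 * a "JMP via 40:67" warm start, and the real-mode restart vector lives
 * at physical 0x467 (offset) and 0x469 (segment).  start_all_aps()
 * points this vector at the AP trampoline before starting the APs.
 */
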
/* lock region used by kernel profiling */
int	mcount_lock;

int	mp_naps;		/* # of application processors (APs) */
int	boot_cpu_id = -1;	/* designated BSP */

extern struct pcpu __pcpu[];

/* AP uses this during bootstrap.  Do not staticize. */
char *bootSTK;
static int bootAP;

/* Free these after use */
void *bootstacks[MAXCPU];

/* Temporary variables for init_secondary() */
char *doublefault_stack;
char *nmi_stack;
void *dpcpu;

/* Hotwire a 0->4MB V==P mapping */
extern pt_entry_t *KPTphys;

/* SMP page table page */
extern pt_entry_t *SMPpt;

struct pcb stoppcbs[MAXCPU];
struct pcb **susppcbs = NULL;

/* Variables needed for SMP TLB shootdown. */
vm_offset_t smp_tlb_addr1;
vm_offset_t smp_tlb_addr2;
volatile int smp_tlb_wait;

extern inthand_t IDTVEC(fast_syscall), IDTVEC(fast_syscall32);

/*
 * Local data and functions.
 */

static volatile cpumask_t ipi_nmi_pending;

/* used to hold the APs until we are ready to release them */
static struct mtx ap_boot_mtx;

/* Set to 1 once we're ready to let the APs out of the pen. */
static volatile int aps_ready = 0;

/*
 * Store data from cpu_add() until later in the boot when we actually set
 * up the APs.
 */
struct cpu_info {
	int	cpu_present:1;
	int	cpu_bsp:1;
	int	cpu_disabled:1;
	int	cpu_hyperthread:1;
} static cpu_info[MAX_APIC_ID + 1];
int cpu_apic_ids[MAXCPU];
int apic_cpuids[MAX_APIC_ID + 1];

/* Holds pending bitmap-based IPIs per CPU */
static volatile u_int cpu_ipi_pending[MAXCPU];

static u_int boot_address;
static int cpu_logical;			/* logical cpus per core */
static int cpu_cores;			/* cores per package */

static void	assign_cpu_ids(void);
static void	set_interrupt_apic_ids(void);
static int	start_all_aps(void);
static int	start_ap(int apic_id);
static void	release_aps(void *dummy);

static int hlt_logical_cpus;
static u_int hyperthreading_cpus;	/* logical cpus sharing L1 cache */
static cpumask_t hyperthreading_cpus_mask;
static int hyperthreading_allowed = 1;
static struct sysctl_ctx_list logical_cpu_clist;
static u_int bootMP_size;

static void
mem_range_AP_init(void)
{
	if (mem_range_softc.mr_op && mem_range_softc.mr_op->initAP)
		mem_range_softc.mr_op->initAP(&mem_range_softc);
}

static void
topo_probe_amd(void)
{

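	/*
	 * Note: amd_feature2 below is CPUID 0x80000001 %ecx and
	 * cpu_procinfo2 is CPUID 0x80000008 %ecx; AMDID_CMP_CORES
	 * extracts the number of cores per package minus one.
	 */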
	/* AMD processors do not support HTT. */
	cpu_cores = (amd_feature2 & AMDID2_CMP) != 0 ?
	    (cpu_procinfo2 & AMDID_CMP_CORES) + 1 : 1;
	cpu_logical = 1;
}

/*
 * Round up to the next power of two, if necessary, and then
 * take log2.
 * Returns -1 if argument is zero.
 */
static __inline int
mask_width(u_int x)
{

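	/*
	 * For example: mask_width(1) == 0, mask_width(2) == 1,
	 * mask_width(3) == 2 and mask_width(4) == 2, i.e. the
	 * number of ID bits needed to cover x items.
	 */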
	return (fls(x << (1 - powerof2(x))) - 1);
}

static void
topo_probe_0x4(void)
{
	u_int p[4];
	int pkg_id_bits;
	int core_id_bits;
	int max_cores;
	int max_logical;
	int id;

	/* Both zero and one here mean one logical processor per package. */
	max_logical = (cpu_feature & CPUID_HTT) != 0 ?
	    (cpu_procinfo & CPUID_HTT_CORES) >> 16 : 1;
	if (max_logical <= 1)
		return;

	/*
	 * Because of the uniformity assumption we examine only those
	 * logical processors that belong to the same package as the
	 * BSP.  Further, we count the number of logical processors
	 * that belong to the same core as the BSP, thus deducing the
	 * number of threads per core.
	 */
	cpuid_count(0x04, 0, p);
	max_cores = ((p[0] >> 26) & 0x3f) + 1;
	core_id_bits = mask_width(max_logical/max_cores);
	if (core_id_bits < 0)
		return;
	pkg_id_bits = core_id_bits + mask_width(max_cores);

	for (id = 0; id <= MAX_APIC_ID; id++) {
		/* Check logical CPU availability. */
		if (!cpu_info[id].cpu_present || cpu_info[id].cpu_disabled)
			continue;
		/* Check if logical CPU has the same package ID. */
		if ((id >> pkg_id_bits) != (boot_cpu_id >> pkg_id_bits))
			continue;
		cpu_cores++;
		/* Check if logical CPU has the same package and core IDs. */
		if ((id >> core_id_bits) == (boot_cpu_id >> core_id_bits))
			cpu_logical++;
	}

	KASSERT(cpu_cores >= 1 && cpu_logical >= 1,
	    ("topo_probe_0x4 couldn't find BSP"));

	cpu_cores /= cpu_logical;
	hyperthreading_cpus = cpu_logical;
}

static void
topo_probe_0xb(void)
{
	u_int p[4];
	int bits;
	int cnt;
	int i;
	int logical;
	int type;
	int x;

	/* We only support three levels for now. */
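	/*
	 * Each sub-leaf of CPUID leaf 0x0b describes one topology
	 * level: %eax[4:0] is the APIC ID shift for the level,
	 * %ebx[15:0] the number of logical processors at it, and
	 * %ecx[15:8] the level type (SMT or core).
	 */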
	for (i = 0; i < 3; i++) {
		cpuid_count(0x0b, i, p);

		/* Fall back if CPU leaf 11 doesn't really exist. */
		if (i == 0 && p[1] == 0) {
			topo_probe_0x4();
			return;
		}

		bits = p[0] & 0x1f;
		logical = p[1] &= 0xffff;
		type = (p[2] >> 8) & 0xff;
		if (type == 0 || logical == 0)
			break;
		/*
		 * Because of the uniformity assumption we examine only
		 * those logical processors that belong to the same
		 * package as the BSP.
		 */
		for (cnt = 0, x = 0; x <= MAX_APIC_ID; x++) {
			if (!cpu_info[x].cpu_present ||
			    cpu_info[x].cpu_disabled)
				continue;
			if (x >> bits == boot_cpu_id >> bits)
				cnt++;
		}
		if (type == CPUID_TYPE_SMT)
			cpu_logical = cnt;
		else if (type == CPUID_TYPE_CORE)
			cpu_cores = cnt;
	}
	if (cpu_logical == 0)
		cpu_logical = 1;
	cpu_cores /= cpu_logical;
}

/*
 * Both the topology discovery code and the code that consumes topology
 * information assume top-down uniformity of the topology.
 * That is, all physical packages must be identical and each
 * core in a package must have the same number of threads.
 * Topology information is queried only on the BSP, on which this
 * code runs and for which it can query CPUID information.
 * The topology is then extrapolated to all packages using the
 * uniformity assumption.
 */
static void
topo_probe(void)
{
	static int cpu_topo_probed = 0;

	if (cpu_topo_probed)
		return;

	logical_cpus_mask = 0;
	if (mp_ncpus <= 1)
		cpu_cores = cpu_logical = 1;
	else if (cpu_vendor_id == CPU_VENDOR_AMD)
		topo_probe_amd();
	else if (cpu_vendor_id == CPU_VENDOR_INTEL) {
		/*
		 * See the Intel(R) 64 Architecture Processor
		 * Topology Enumeration article for details.
		 *
		 * Note that the 0x1 <= cpu_high < 4 case should be
		 * compatible with topo_probe_0x4() logic when
		 * CPUID.1:EBX[23:16] > 0 (cpu_cores will then be 1),
		 * or it should trigger the fallback otherwise.
		 */
		if (cpu_high >= 0xb)
			topo_probe_0xb();
		else if (cpu_high >= 0x1)
			topo_probe_0x4();
	}

	/*
	 * Fallback: assume each logical CPU is in a separate
	 * physical package.  That is, no multi-core, no SMT.
	 */
	if (cpu_cores == 0 || cpu_logical == 0)
		cpu_cores = cpu_logical = 1;
	cpu_topo_probed = 1;
}

struct cpu_group *
cpu_topo(void)
{
	int cg_flags;

	/*
	 * Determine whether any threading flags are
	 * necessary.
	 */
	topo_probe();
	if (cpu_logical > 1 && hyperthreading_cpus)
		cg_flags = CG_FLAG_HTT;
	else if (cpu_logical > 1)
		cg_flags = CG_FLAG_SMT;
	else
		cg_flags = 0;
	if (mp_ncpus % (cpu_cores * cpu_logical) != 0) {
		printf("WARNING: Non-uniform processors.\n");
		printf("WARNING: Using suboptimal topology.\n");
		return (smp_topo_none());
	}
	/*
	 * Neither multi-core nor hyper-threaded.
	 */
	if (cpu_logical * cpu_cores == 1)
		return (smp_topo_none());
	/*
	 * Only HTT, no multi-core.
	 */
	if (cpu_logical > 1 && cpu_cores == 1)
		return (smp_topo_1level(CG_SHARE_L1, cpu_logical, cg_flags));
	/*
	 * Only multi-core, no HTT.
	 */
	if (cpu_cores > 1 && cpu_logical == 1)
		return (smp_topo_1level(CG_SHARE_L2, cpu_cores, cg_flags));
	/*
	 * Both HTT and multi-core.
	 */
	return (smp_topo_2level(CG_SHARE_L2, cpu_cores,
	    CG_SHARE_L1, cpu_logical, cg_flags));
}

/*
 * Calculate a usable address in base memory for the AP trampoline code.
 */
u_int
mp_bootaddress(u_int basemem)
{

	bootMP_size = mptramp_end - mptramp_start;
	boot_address = trunc_page(basemem * 1024); /* round down to 4k boundary */
	if (((basemem * 1024) - boot_address) < bootMP_size)
		boot_address -= PAGE_SIZE;	/* not enough, lower by 4k */
	/* 3 levels of page table pages */
	mptramp_pagetables = boot_address - (PAGE_SIZE * 3);
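	/*
	 * Resulting low-memory layout, from bottom to top: the three
	 * trampoline page table pages at mptramp_pagetables, then the
	 * trampoline code itself at boot_address, all below basemem.
	 */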

	return mptramp_pagetables;
}

void
cpu_add(u_int apic_id, char boot_cpu)
{

	if (apic_id > MAX_APIC_ID) {
		panic("SMP: APIC ID %d too high", apic_id);
		return;
	}
	KASSERT(cpu_info[apic_id].cpu_present == 0, ("CPU %d added twice",
	    apic_id));
	cpu_info[apic_id].cpu_present = 1;
	if (boot_cpu) {
		KASSERT(boot_cpu_id == -1,
		    ("CPU %d claims to be BSP, but CPU %d already is", apic_id,
		    boot_cpu_id));
		boot_cpu_id = apic_id;
		cpu_info[apic_id].cpu_bsp = 1;
	}
	if (mp_ncpus < MAXCPU) {
		mp_ncpus++;
		mp_maxid = mp_ncpus - 1;
	}
	if (bootverbose)
		printf("SMP: Added CPU %d (%s)\n", apic_id, boot_cpu ? "BSP" :
		    "AP");
}

void
cpu_mp_setmaxid(void)
{

	/*
	 * mp_maxid should already be set by calls to cpu_add().
	 * Just sanity check its value here.
	 */
	if (mp_ncpus == 0)
		KASSERT(mp_maxid == 0,
		    ("%s: mp_ncpus is zero, but mp_maxid is not", __func__));
	else if (mp_ncpus == 1)
		mp_maxid = 0;
	else
		KASSERT(mp_maxid >= mp_ncpus - 1,
		    ("%s: counters out of sync: max %d, count %d", __func__,
		    mp_maxid, mp_ncpus));
}

int
cpu_mp_probe(void)
{

	/*
	 * Always record the BSP in the CPU map so that the mbuf init code
	 * works correctly.
	 */
	all_cpus = 1;
	if (mp_ncpus == 0) {
		/*
		 * No CPUs were found, so this must be a UP system.  Set
		 * up the variables to represent a system with a single
		 * CPU with an id of 0.
		 */
		mp_ncpus = 1;
		return (0);
	}

	/* At least one CPU was found. */
	if (mp_ncpus == 1) {
		/*
		 * One CPU was found, so this must be a UP system with
		 * an I/O APIC.
		 */
		mp_maxid = 0;
		return (0);
	}

	/* At least two CPUs were found. */
	return (1);
}

/*
 * Initialize the IPI handlers and start up the APs.
 */
void
cpu_mp_start(void)
{
	int i;

	/* Initialize the logical ID to APIC ID table. */
	for (i = 0; i < MAXCPU; i++) {
		cpu_apic_ids[i] = -1;
		cpu_ipi_pending[i] = 0;
	}

	/* Install an inter-CPU IPI for TLB invalidation */
	setidt(IPI_INVLTLB, IDTVEC(invltlb), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IPI_INVLPG, IDTVEC(invlpg), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IPI_INVLRNG, IDTVEC(invlrng), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for cache invalidation. */
	setidt(IPI_INVLCACHE, IDTVEC(invlcache), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for all-CPU rendezvous */
	setidt(IPI_RENDEZVOUS, IDTVEC(rendezvous), SDT_SYSIGT, SEL_KPL, 0);

	/* Install generic inter-CPU IPI handler */
	setidt(IPI_BITMAP_VECTOR, IDTVEC(ipi_intr_bitmap_handler),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for CPU stop/restart */
	setidt(IPI_STOP, IDTVEC(cpustop), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for CPU suspend/resume */
	setidt(IPI_SUSPEND, IDTVEC(cpususpend), SDT_SYSIGT, SEL_KPL, 0);

	/* Set boot_cpu_id if needed. */
	if (boot_cpu_id == -1) {
		boot_cpu_id = PCPU_GET(apic_id);
		cpu_info[boot_cpu_id].cpu_bsp = 1;
	} else
		KASSERT(boot_cpu_id == PCPU_GET(apic_id),
		    ("BSP's APIC ID doesn't match boot_cpu_id"));

	/* Probe logical/physical core configuration. */
	topo_probe();

	assign_cpu_ids();

	/* Start each Application Processor */
	start_all_aps();

	set_interrupt_apic_ids();
}


/*
 * Print various information about the SMP system hardware and setup.
 */
void
cpu_mp_announce(void)
{
	const char *hyperthread;
	int i;

	printf("FreeBSD/SMP: %d package(s) x %d core(s)",
	    mp_ncpus / (cpu_cores * cpu_logical), cpu_cores);
	if (hyperthreading_cpus > 1)
		printf(" x %d HTT threads", cpu_logical);
	else if (cpu_logical > 1)
		printf(" x %d SMT threads", cpu_logical);
	printf("\n");

	/* List active CPUs first. */
	printf(" cpu0 (BSP): APIC ID: %2d\n", boot_cpu_id);
	for (i = 1; i < mp_ncpus; i++) {
		if (cpu_info[cpu_apic_ids[i]].cpu_hyperthread)
			hyperthread = "/HT";
		else
			hyperthread = "";
		printf(" cpu%d (AP%s): APIC ID: %2d\n", i, hyperthread,
		    cpu_apic_ids[i]);
	}

	/* List disabled CPUs last. */
	for (i = 0; i <= MAX_APIC_ID; i++) {
		if (!cpu_info[i].cpu_present || !cpu_info[i].cpu_disabled)
			continue;
		if (cpu_info[i].cpu_hyperthread)
			hyperthread = "/HT";
		else
			hyperthread = "";
		printf(" cpu (AP%s): APIC ID: %2d (disabled)\n", hyperthread,
		    i);
	}
}

/*
 * AP CPUs call this to initialize themselves.
 */
void
init_secondary(void)
{
	struct pcpu *pc;
	struct nmi_pcpu *np;
	u_int64_t msr, cr0;
	int cpu, gsel_tss, x;
	struct region_descriptor ap_gdt;

	/* Set by the startup code for us to use */
	cpu = bootAP;

	/* Init tss */
	common_tss[cpu] = common_tss[0];
	common_tss[cpu].tss_rsp0 = 0;	/* not used until after switch */
	common_tss[cpu].tss_iobase = sizeof(struct amd64tss) +
	    IOPAGES * PAGE_SIZE;
	common_tss[cpu].tss_ist1 = (long)&doublefault_stack[PAGE_SIZE];

	/* The NMI stack runs on IST2. */
	np = ((struct nmi_pcpu *) &nmi_stack[PAGE_SIZE]) - 1;
	common_tss[cpu].tss_ist2 = (long) np;

	/* Prepare private GDT */
	gdt_segs[GPROC0_SEL].ssd_base = (long) &common_tss[cpu];
	for (x = 0; x < NGDT; x++) {
		if (x != GPROC0_SEL && x != (GPROC0_SEL + 1) &&
		    x != GUSERLDT_SEL && x != (GUSERLDT_SEL + 1))
			ssdtosd(&gdt_segs[x], &gdt[NGDT * cpu + x]);
	}
	ssdtosyssd(&gdt_segs[GPROC0_SEL],
	    (struct system_segment_descriptor *)&gdt[NGDT * cpu + GPROC0_SEL]);
	ap_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	ap_gdt.rd_base = (long) &gdt[NGDT * cpu];
	lgdt(&ap_gdt);			/* does magic intra-segment return */

	/* Get per-cpu data */
	pc = &__pcpu[cpu];

	/* prime data page for it to use */
	pcpu_init(pc, cpu, sizeof(struct pcpu));
	dpcpu_init(dpcpu, cpu);
	pc->pc_apic_id = cpu_apic_ids[cpu];
	pc->pc_prvspace = pc;
	pc->pc_curthread = 0;
	pc->pc_tssp = &common_tss[cpu];
	pc->pc_commontssp = &common_tss[cpu];
	pc->pc_rsp0 = 0;
	pc->pc_tss = (struct system_segment_descriptor *)&gdt[NGDT * cpu +
	    GPROC0_SEL];
	pc->pc_fs32p = &gdt[NGDT * cpu + GUFS32_SEL];
	pc->pc_gs32p = &gdt[NGDT * cpu + GUGS32_SEL];
	pc->pc_ldt = (struct system_segment_descriptor *)&gdt[NGDT * cpu +
	    GUSERLDT_SEL];

	/* Save the per-cpu pointer for use by the NMI handler. */
	np->np_pcpu = (register_t) pc;

	wrmsr(MSR_FSBASE, 0);		/* User value */
	wrmsr(MSR_GSBASE, (u_int64_t)pc);
	wrmsr(MSR_KGSBASE, (u_int64_t)pc); /* XXX User value while we're in the kernel */

	lidt(&r_idt);

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	ltr(gsel_tss);

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);

	/* Set up the fast syscall stuff */
	msr = rdmsr(MSR_EFER) | EFER_SCE;
	wrmsr(MSR_EFER, msr);
	wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall));
	wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
	msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
	    ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
	wrmsr(MSR_STAR, msr);
	wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D);

	/* Disable local APIC just to be sure. */
	lapic_disable();

	/* Signal our startup to the BSP. */
	mp_naps++;

	/* Spin until the BSP releases the APs. */
	while (!aps_ready)
		ia32_pause();

	/* Initialize the PAT MSR. */
	pmap_init_pat();

	/* set up CPU registers and state */
	cpu_setregs();

	/* set up SSE/NX registers */
	initializecpu();

	/* set up FPU state on the AP */
	fpuinit();

	/* A quick check from sanity claus */
	if (PCPU_GET(apic_id) != lapic_id()) {
		printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
		printf("SMP: actual apic_id = %d\n", lapic_id());
		printf("SMP: correct apic_id = %d\n", PCPU_GET(apic_id));
		panic("cpuid mismatch! boom!!");
	}

	/* Initialize curthread. */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	PCPU_SET(curthread, PCPU_GET(idlethread));

	mca_init();

	mtx_lock_spin(&ap_boot_mtx);

	/* Init local apic for irq's */
	lapic_setup(1);

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();

	smp_cpus++;

	CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
	printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));

	/* Determine if we are a logical CPU. */
	/* XXX Calculation depends on cpu_logical being a power of 2, e.g. 2 */
	if (cpu_logical > 1 && PCPU_GET(apic_id) % cpu_logical != 0)
		logical_cpus_mask |= PCPU_GET(cpumask);

	/* Determine if we are a hyperthread. */
	if (hyperthreading_cpus > 1 &&
	    PCPU_GET(apic_id) % hyperthreading_cpus != 0)
		hyperthreading_cpus_mask |= PCPU_GET(cpumask);

	/* Build our map of 'other' CPUs. */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

	if (bootverbose)
		lapic_dump("AP");

	if (smp_cpus == mp_ncpus) {
		/* enable IPIs, TLB shootdown, freezes, etc. */
		atomic_store_rel_int(&smp_started, 1);
		smp_active = 1;	 /* historic */
	}

	/*
	 * Enable the global pages TLB extension.
	 * This also implicitly flushes the TLB.
	 */
	load_cr4(rcr4() | CR4_PGE);
	load_ds(_udatasel);
	load_es(_udatasel);
	load_fs(_ufssel);
	mtx_unlock_spin(&ap_boot_mtx);

	/* wait until all the APs are up */
	while (smp_started == 0)
		ia32_pause();

	sched_throw(NULL);

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}

/*******************************************************************
 * local functions and data
 */

/*
 * We tell the I/O APIC code about all the CPUs that we want to receive
 * interrupts.  If we don't want certain CPUs to receive IRQs we
 * can simply not tell the I/O APIC code about them in this function.
 * We also do not tell it about the BSP since it tells itself about
 * the BSP internally to work with UP kernels and on UP machines.
 */
static void
set_interrupt_apic_ids(void)
{
	u_int i, apic_id;

	for (i = 0; i < MAXCPU; i++) {
		apic_id = cpu_apic_ids[i];
		if (apic_id == -1)
			continue;
		if (cpu_info[apic_id].cpu_bsp)
			continue;
		if (cpu_info[apic_id].cpu_disabled)
			continue;

		/* Don't let hyperthreads service interrupts. */
		if (hyperthreading_cpus > 1 &&
		    apic_id % hyperthreading_cpus != 0)
			continue;

		intr_add_cpu(i);
	}
}

/*
 * Assign logical CPU IDs to local APICs.
 */
static void
assign_cpu_ids(void)
{
	u_int i;

	TUNABLE_INT_FETCH("machdep.hyperthreading_allowed",
	    &hyperthreading_allowed);

	/* Check for explicitly disabled CPUs. */
	for (i = 0; i <= MAX_APIC_ID; i++) {
		if (!cpu_info[i].cpu_present || cpu_info[i].cpu_bsp)
			continue;

		if (hyperthreading_cpus > 1 && i % hyperthreading_cpus != 0) {
			cpu_info[i].cpu_hyperthread = 1;
#if defined(SCHED_ULE)
			/*
			 * Don't use this HT CPU if it has been disabled
			 * by a tunable.
			 */
			if (hyperthreading_allowed == 0) {
				cpu_info[i].cpu_disabled = 1;
				continue;
			}
#endif
		}

		/* Don't use this CPU if it has been disabled by a tunable. */
		if (resource_disabled("lapic", i)) {
			cpu_info[i].cpu_disabled = 1;
			continue;
		}
	}

	/*
	 * Assign CPU IDs to local APIC IDs and disable any CPUs
	 * beyond MAXCPU.  CPU 0 is always assigned to the BSP.
	 *
	 * To minimize confusion for userland, we attempt to number
	 * CPUs such that all threads and cores in a package are
	 * grouped together.  For now we assume that the BSP is always
	 * the first thread in a package and just start adding APs
	 * starting with the BSP's APIC ID.
	 */
	mp_ncpus = 1;
	cpu_apic_ids[0] = boot_cpu_id;
	apic_cpuids[boot_cpu_id] = 0;
	for (i = boot_cpu_id + 1; i != boot_cpu_id;
	     i == MAX_APIC_ID ? i = 0 : i++) {
		if (!cpu_info[i].cpu_present || cpu_info[i].cpu_bsp ||
		    cpu_info[i].cpu_disabled)
			continue;

		if (mp_ncpus < MAXCPU) {
			cpu_apic_ids[mp_ncpus] = i;
			apic_cpuids[i] = mp_ncpus;
			mp_ncpus++;
		} else
			cpu_info[i].cpu_disabled = 1;
	}
	KASSERT(mp_maxid >= mp_ncpus - 1,
	    ("%s: counters out of sync: max %d, count %d", __func__, mp_maxid,
	    mp_ncpus));
}

/*
 * start each AP in our list
 */
static int
start_all_aps(void)
{
	vm_offset_t va = boot_address + KERNBASE;
	u_int64_t *pt4, *pt3, *pt2;
	u_int32_t mpbioswarmvec;
	int apic_id, cpu, i;
	u_char mpbiosreason;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	/* install the AP 1st level boot code */
	pmap_kenter(va, boot_address);
	pmap_invalidate_page(kernel_pmap, va);
	bcopy(mptramp_start, (void *)va, bootMP_size);

	/* Locate the page tables; they'll be below the trampoline */
	pt4 = (u_int64_t *)(uintptr_t)(mptramp_pagetables + KERNBASE);
	pt3 = pt4 + (PAGE_SIZE) / sizeof(u_int64_t);
	pt2 = pt3 + (PAGE_SIZE) / sizeof(u_int64_t);

	/* Create the initial 1GB replicated page tables */
	for (i = 0; i < 512; i++) {
		/* Each slot of the level 4 pages points to the same level 3 page */
		pt4[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + PAGE_SIZE);
		pt4[i] |= PG_V | PG_RW | PG_U;

		/* Each slot of the level 3 pages points to the same level 2 page */
		pt3[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + (2 * PAGE_SIZE));
		pt3[i] |= PG_V | PG_RW | PG_U;

		/* The level 2 page slots are mapped with 2MB pages for 1GB. */
		pt2[i] = i * (2 * 1024 * 1024);
		pt2[i] |= PG_V | PG_RW | PG_PS | PG_U;
	}
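	/*
	 * With every level 4 and level 3 slot pointing at the same
	 * next-level page, this identity map of the low 1GB is
	 * replicated throughout the entire address space, so the
	 * trampoline keeps valid mappings whether it runs at its
	 * physical address or at the kernel's virtual address.
	 */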

	/* save the current value of the warm-start vector */
	mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);

	/* setup a vector to our boot code */
	*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
	*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

	/* start each AP */
	for (cpu = 1; cpu < mp_ncpus; cpu++) {
		apic_id = cpu_apic_ids[cpu];

		/* allocate and set up an idle stack data page */
		bootstacks[cpu] = (void *)kmem_alloc(kernel_map, KSTACK_PAGES * PAGE_SIZE);
		doublefault_stack = (char *)kmem_alloc(kernel_map, PAGE_SIZE);
		nmi_stack = (char *)kmem_alloc(kernel_map, PAGE_SIZE);
		dpcpu = (void *)kmem_alloc(kernel_map, DPCPU_SIZE);

		bootSTK = (char *)bootstacks[cpu] + KSTACK_PAGES * PAGE_SIZE - 8;
		bootAP = cpu;

		/* attempt to start the Application Processor */
		if (!start_ap(apic_id)) {
			/* restore the warmstart vector */
			*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;
			panic("AP #%d (PHY# %d) failed!", cpu, apic_id);
		}

		all_cpus |= (1 << cpu);		/* record AP in CPU map */
	}

	/* build our map of 'other' CPUs */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

	/* restore the warmstart vector */
	*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;

	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);

	/* number of APs actually started */
	return mp_naps;
}


/*
 * This function starts the AP (application processor) identified by
 * the APIC ID 'apic_id'.  It does quite a "song and dance" to
 * accomplish this.  This is necessary because of the nuances of the
 * different hardware we might encounter.  It isn't pretty, but it
 * seems to work.
 */
static int
start_ap(int apic_id)
{
	int vector, ms;
	int cpus;

	/* calculate the vector */
	vector = (boot_address >> 12) & 0xff;

	/* used as a watchpoint to signal AP startup */
	cpus = mp_naps;

	/*
	 * First we do an INIT IPI: this INIT IPI might be run, resetting
	 * and running the target CPU.  OR this INIT IPI might be latched
	 * (P5 bug), in which case the CPU waits for the STARTUP IPI.
	 * OR this INIT IPI might be ignored.
	 */

	/* do an INIT IPI: assert RESET */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, apic_id);

	/* wait for pending status end */
	lapic_ipi_wait(-1);

	/* do an INIT IPI: deassert RESET */
	lapic_ipi_raw(APIC_DEST_ALLESELF | APIC_TRIGMOD_LEVEL |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, 0);

	/* wait for pending status end */
	DELAY(10000);		/* wait ~10mS */
	lapic_ipi_wait(-1);

	/*
	 * Next we do a STARTUP IPI: the previous INIT IPI might still be
	 * latched (P5 bug), in which case this first STARTUP IPI would
	 * terminate immediately and the previously started INIT IPI would
	 * continue.  OR the previous INIT IPI has already run, and this
	 * STARTUP IPI will run.  OR the previous INIT IPI was ignored,
	 * and this STARTUP IPI will run.
	 */

	/* do a STARTUP IPI */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
	    vector, apic_id);
	lapic_ipi_wait(-1);
	DELAY(200);		/* wait ~200uS */

	/*
	 * Finally we do a second STARTUP IPI: this second STARTUP IPI
	 * should run IF the previous STARTUP IPI was cancelled by a
	 * latched INIT IPI.  OR this STARTUP IPI will be ignored, as
	 * only ONE STARTUP IPI is recognized after hardware RESET or
	 * an INIT IPI.
	 */

	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
	    vector, apic_id);
	lapic_ipi_wait(-1);
	DELAY(200);		/* wait ~200uS */

	/* Wait up to 5 seconds for it to start. */
	for (ms = 0; ms < 5000; ms++) {
		if (mp_naps > cpus)
			return 1;	/* return SUCCESS */
		DELAY(1000);
	}
	return 0;		/* return FAILURE */
}

/*
 * Flush the TLB on all other CPUs
 */
static void
smp_tlb_shootdown(u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
	u_int ncpu;

	ncpu = mp_ncpus - 1;	/* does not shoot down self */
	if (ncpu < 1)
		return;		/* no other cpus */
	if (!(read_rflags() & PSL_I))
		panic("%s: interrupts disabled", __func__);
	mtx_lock_spin(&smp_ipi_mtx);
	smp_tlb_addr1 = addr1;
	smp_tlb_addr2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	ipi_all_but_self(vector);
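	/*
	 * Each target CPU's interrupt handler performs the requested
	 * invalidation and then atomically increments smp_tlb_wait;
	 * spin here until every other CPU has checked in.
	 */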
	while (smp_tlb_wait < ncpu)
		ia32_pause();
	mtx_unlock_spin(&smp_ipi_mtx);
}

static void
smp_targeted_tlb_shootdown(cpumask_t mask, u_int vector, vm_offset_t addr1,
    vm_offset_t addr2)
{
	int ncpu, othercpus;

	othercpus = mp_ncpus - 1;
	if (mask == (cpumask_t)-1) {
		ncpu = othercpus;
		if (ncpu < 1)
			return;
	} else {
		mask &= ~PCPU_GET(cpumask);
		if (mask == 0)
			return;
		ncpu = bitcount32(mask);
		if (ncpu > othercpus) {
			/* XXX this should be a panic offence */
			printf("SMP: tlb shootdown to %d other cpus (only have %d)\n",
			    ncpu, othercpus);
			ncpu = othercpus;
		}
		/* XXX should be a panic, implied by mask == 0 above */
		if (ncpu < 1)
			return;
	}
	if (!(read_rflags() & PSL_I))
		panic("%s: interrupts disabled", __func__);
	mtx_lock_spin(&smp_ipi_mtx);
	smp_tlb_addr1 = addr1;
	smp_tlb_addr2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	if (mask == (cpumask_t)-1)
		ipi_all_but_self(vector);
	else
		ipi_selected(mask, vector);
	while (smp_tlb_wait < ncpu)
		ia32_pause();
	mtx_unlock_spin(&smp_ipi_mtx);
}

/*
 * Send an IPI to the specified CPU, handling the bitmap logic.
 */
static void
ipi_send_cpu(int cpu, u_int ipi)
{
	u_int bitmap, old_pending, new_pending;

	KASSERT(cpu_apic_ids[cpu] != -1, ("IPI to non-existent CPU %d", cpu));

	if (IPI_IS_BITMAPED(ipi)) {
		bitmap = 1 << ipi;
		ipi = IPI_BITMAP_VECTOR;
		do {
			old_pending = cpu_ipi_pending[cpu];
			new_pending = old_pending | bitmap;
		} while (!atomic_cmpset_int(&cpu_ipi_pending[cpu],
		    old_pending, new_pending));
		if (old_pending)
			return;
	}
	lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]);
}

void
smp_cache_flush(void)
{

	if (smp_started)
		smp_tlb_shootdown(IPI_INVLCACHE, 0, 0);
}

void
smp_invltlb(void)
{

	if (smp_started) {
		smp_tlb_shootdown(IPI_INVLTLB, 0, 0);
	}
}

void
smp_invlpg(vm_offset_t addr)
{

	if (smp_started)
		smp_tlb_shootdown(IPI_INVLPG, addr, 0);
}

void
smp_invlpg_range(vm_offset_t addr1, vm_offset_t addr2)
{

	if (smp_started) {
		smp_tlb_shootdown(IPI_INVLRNG, addr1, addr2);
	}
}

void
smp_masked_invltlb(cpumask_t mask)
{

	if (smp_started) {
		smp_targeted_tlb_shootdown(mask, IPI_INVLTLB, 0, 0);
	}
}

void
smp_masked_invlpg(cpumask_t mask, vm_offset_t addr)
{

	if (smp_started) {
		smp_targeted_tlb_shootdown(mask, IPI_INVLPG, addr, 0);
	}
}

void
smp_masked_invlpg_range(cpumask_t mask, vm_offset_t addr1, vm_offset_t addr2)
{

	if (smp_started) {
		smp_targeted_tlb_shootdown(mask, IPI_INVLRNG, addr1, addr2);
	}
}

void
ipi_bitmap_handler(struct trapframe frame)
{
	int cpu = PCPU_GET(cpuid);
	u_int ipi_bitmap;

	ipi_bitmap = atomic_readandclear_int(&cpu_ipi_pending[cpu]);

	if (ipi_bitmap & (1 << IPI_PREEMPT))
		sched_preempt(curthread);

	/* Nothing to do for AST */

	if (ipi_bitmap & (1 << IPI_HARDCLOCK))
		hardclockintr(&frame);

	if (ipi_bitmap & (1 << IPI_STATCLOCK))
		statclockintr(&frame);

	if (ipi_bitmap & (1 << IPI_PROFCLOCK))
		profclockintr(&frame);
}

/*
 * send an IPI to a set of cpus.
 */
void
ipi_selected(cpumask_t cpus, u_int ipi)
{
	int cpu;

	/*
	 * IPI_STOP_HARD maps to an NMI and the trap handler needs a bit
	 * of help in order to identify the source.  Set the mask of
	 * receiving CPUs for this purpose.
	 */
	if (ipi == IPI_STOP_HARD)
		atomic_set_int(&ipi_nmi_pending, cpus);

	CTR3(KTR_SMP, "%s: cpus: %x ipi: %x", __func__, cpus, ipi);
	while ((cpu = ffs(cpus)) != 0) {
		cpu--;
		cpus &= ~(1 << cpu);
		ipi_send_cpu(cpu, ipi);
	}
}

/*
 * send an IPI to a specific CPU.
 */
void
ipi_cpu(int cpu, u_int ipi)
{

	/*
	 * IPI_STOP_HARD maps to an NMI and the trap handler needs a bit
	 * of help in order to identify the source.  Set the mask of
	 * receiving CPUs for this purpose.
	 */
	if (ipi == IPI_STOP_HARD)
		atomic_set_int(&ipi_nmi_pending, 1 << cpu);

	CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
	ipi_send_cpu(cpu, ipi);
}

/*
 * send an IPI to all CPUs EXCEPT myself
 */
void
ipi_all_but_self(u_int ipi)
{

	if (IPI_IS_BITMAPED(ipi)) {
		ipi_selected(PCPU_GET(other_cpus), ipi);
		return;
	}

	/*
	 * IPI_STOP_HARD maps to an NMI and the trap handler needs a bit
	 * of help in order to identify the source.  Set the mask of
	 * receiving CPUs for this purpose.
	 */
	if (ipi == IPI_STOP_HARD)
		atomic_set_int(&ipi_nmi_pending, PCPU_GET(other_cpus));

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_OTHERS);
}

int
ipi_nmi_handler(void)
{
	cpumask_t cpumask;

	/*
	 * As long as there is no simple way to know an NMI's source,
	 * if the bit for the current CPU is set in the global pending
	 * bitword, an IPI_STOP_HARD has been issued and should be
	 * handled.
	 */
	cpumask = PCPU_GET(cpumask);
	if ((ipi_nmi_pending & cpumask) == 0)
		return (1);

	atomic_clear_int(&ipi_nmi_pending, cpumask);
	cpustop_handler();
	return (0);
}

/*
 * Handle an IPI_STOP by saving our current context and spinning until we
 * are resumed.
 */
void
cpustop_handler(void)
{
	cpumask_t cpumask;
	u_int cpu;

	cpu = PCPU_GET(cpuid);
	cpumask = PCPU_GET(cpumask);

	savectx(&stoppcbs[cpu]);

	/* Indicate that we are stopped */
	atomic_set_int(&stopped_cpus, cpumask);

	/* Wait for restart */
	while (!(started_cpus & cpumask))
		ia32_pause();

	atomic_clear_int(&started_cpus, cpumask);
	atomic_clear_int(&stopped_cpus, cpumask);

	if (cpu == 0 && cpustop_restartfunc != NULL) {
		cpustop_restartfunc();
		cpustop_restartfunc = NULL;
	}
}

/*
 * Handle an IPI_SUSPEND by saving our current context and spinning until we
 * are resumed.
 */
void
cpususpend_handler(void)
{
	cpumask_t cpumask;
	register_t cr3, rf;
	u_int cpu;

	cpu = PCPU_GET(cpuid);
	cpumask = PCPU_GET(cpumask);

	rf = intr_disable();
	cr3 = rcr3();

	if (savectx(susppcbs[cpu])) {
		wbinvd();
		atomic_set_int(&stopped_cpus, cpumask);
	} else {
		pmap_init_pat();
		PCPU_SET(switchtime, 0);
		PCPU_SET(switchticks, ticks);
	}

	/* Wait for resume */
	while (!(started_cpus & cpumask))
		ia32_pause();

	atomic_clear_int(&started_cpus, cpumask);
	atomic_clear_int(&stopped_cpus, cpumask);

	/* Restore CR3 and enable interrupts */
	load_cr3(cr3);
	mca_resume();
	lapic_setup(0);
	intr_restore(rf);
}

/*
 * This is called once the rest of the system is up and running and we're
 * ready to let the APs out of the pen.
 */
static void
release_aps(void *dummy __unused)
{

	if (mp_ncpus == 1)
		return;
	atomic_store_rel_int(&aps_ready, 1);
	while (smp_started == 0)
		ia32_pause();
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);

static int
sysctl_hlt_cpus(SYSCTL_HANDLER_ARGS)
{
	cpumask_t mask;
	int error;

	mask = hlt_cpus_mask;
	error = sysctl_handle_int(oidp, &mask, 0, req);
	if (error || !req->newptr)
		return (error);

	if (logical_cpus_mask != 0 &&
	    (mask & logical_cpus_mask) == logical_cpus_mask)
		hlt_logical_cpus = 1;
	else
		hlt_logical_cpus = 0;

	if (! hyperthreading_allowed)
		mask |= hyperthreading_cpus_mask;

	if ((mask & all_cpus) == all_cpus)
		mask &= ~(1<<0);
	hlt_cpus_mask = mask;
	return (error);
}
SYSCTL_PROC(_machdep, OID_AUTO, hlt_cpus, CTLTYPE_INT|CTLFLAG_RW,
    0, 0, sysctl_hlt_cpus, "IU",
    "Bitmap of CPUs to halt.  101 (binary) will halt CPUs 0 and 2.");

static int
sysctl_hlt_logical_cpus(SYSCTL_HANDLER_ARGS)
{
	int disable, error;

	disable = hlt_logical_cpus;
	error = sysctl_handle_int(oidp, &disable, 0, req);
	if (error || !req->newptr)
		return (error);

	if (disable)
		hlt_cpus_mask |= logical_cpus_mask;
	else
		hlt_cpus_mask &= ~logical_cpus_mask;

	if (! hyperthreading_allowed)
		hlt_cpus_mask |= hyperthreading_cpus_mask;

	if ((hlt_cpus_mask & all_cpus) == all_cpus)
		hlt_cpus_mask &= ~(1<<0);

	hlt_logical_cpus = disable;
	return (error);
}

static int
sysctl_hyperthreading_allowed(SYSCTL_HANDLER_ARGS)
{
	int allowed, error;

	allowed = hyperthreading_allowed;
	error = sysctl_handle_int(oidp, &allowed, 0, req);
	if (error || !req->newptr)
		return (error);

#ifdef SCHED_ULE
	/*
	 * SCHED_ULE doesn't allow enabling/disabling HT cores at
	 * run-time.
	 */
	if (allowed != hyperthreading_allowed)
		return (ENOTSUP);
	return (error);
#endif

	if (allowed)
		hlt_cpus_mask &= ~hyperthreading_cpus_mask;
	else
		hlt_cpus_mask |= hyperthreading_cpus_mask;

	if (logical_cpus_mask != 0 &&
	    (hlt_cpus_mask & logical_cpus_mask) == logical_cpus_mask)
		hlt_logical_cpus = 1;
	else
		hlt_logical_cpus = 0;

	if ((hlt_cpus_mask & all_cpus) == all_cpus)
		hlt_cpus_mask &= ~(1<<0);

	hyperthreading_allowed = allowed;
	return (error);
}

static void
cpu_hlt_setup(void *dummy __unused)
{

	if (logical_cpus_mask != 0) {
		TUNABLE_INT_FETCH("machdep.hlt_logical_cpus",
		    &hlt_logical_cpus);
		sysctl_ctx_init(&logical_cpu_clist);
		SYSCTL_ADD_PROC(&logical_cpu_clist,
		    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
		    "hlt_logical_cpus", CTLTYPE_INT|CTLFLAG_RW, 0, 0,
		    sysctl_hlt_logical_cpus, "IU", "");
		SYSCTL_ADD_UINT(&logical_cpu_clist,
		    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
		    "logical_cpus_mask", CTLTYPE_INT|CTLFLAG_RD,
		    &logical_cpus_mask, 0, "");

		if (hlt_logical_cpus)
			hlt_cpus_mask |= logical_cpus_mask;

		/*
		 * If necessary for security purposes, force
		 * hyperthreading off, regardless of the value
		 * of hlt_logical_cpus.
		 */
		if (hyperthreading_cpus_mask) {
			SYSCTL_ADD_PROC(&logical_cpu_clist,
			    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
			    "hyperthreading_allowed", CTLTYPE_INT|CTLFLAG_RW,
			    0, 0, sysctl_hyperthreading_allowed, "IU", "");
			if (! hyperthreading_allowed)
				hlt_cpus_mask |= hyperthreading_cpus_mask;
		}
	}
}
SYSINIT(cpu_hlt, SI_SUB_SMP, SI_ORDER_ANY, cpu_hlt_setup, NULL);
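
/*
 * Both knobs can also be set as loader tunables, e.g. in loader.conf:
 *
 *	machdep.hlt_logical_cpus="1"
 *	machdep.hyperthreading_allowed="0"
 *
 * since they are fetched with TUNABLE_INT_FETCH during boot.
 */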

int
mp_grab_cpu_hlt(void)
{
	cpumask_t mask;
#ifdef MP_WATCHDOG
	u_int cpuid;
#endif
	int retval;

	mask = PCPU_GET(cpumask);
#ifdef MP_WATCHDOG
	cpuid = PCPU_GET(cpuid);
	ap_watchdog(cpuid);
#endif

	retval = 0;
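	/*
	 * "sti; hlt" is the usual safe idle idiom: sti enables
	 * interrupts only after the following instruction, so an
	 * interrupt cannot arrive between enabling interrupts and
	 * halting; it instead wakes the hlt.
	 */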
	while (mask & hlt_cpus_mask) {
		retval = 1;
		__asm __volatile("sti; hlt" : : : "memory");
	}
	return (retval);
}