/*-
 * Copyright (c) 1996, by Steve Passe
 * Copyright (c) 2003, by Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/6.4/sys/amd64/amd64/mp_machdep.c 172480 2007-10-08 21:51:01Z jhb $");

#include "opt_cpu.h"
#include "opt_kdb.h"
#include "opt_kstack_pages.h"
#include "opt_mp_watchdog.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#ifdef GPROF
#include <sys/gmon.h>
#endif
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <machine/apicreg.h>
#include <machine/clock.h>
#include <machine/md_var.h>
#include <machine/mp_watchdog.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/tss.h>

#define WARMBOOT_TARGET		0
#define WARMBOOT_OFF		(KERNBASE + 0x0467)
#define WARMBOOT_SEG		(KERNBASE + 0x0469)

#define CMOS_REG		(0x70)
#define CMOS_DATA		(0x71)
#define BIOS_RESET		(0x0f)
#define BIOS_WARM		(0x0a)
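
/*
 * How the warm-boot hooks above are used: writing BIOS_WARM into the CMOS
 * shutdown status register (index BIOS_RESET) asks the BIOS to skip POST on
 * the next reset and jump through the real-mode warm-boot vector at
 * 0x40:0x67, i.e. physical 0x467/0x469 (WARMBOOT_OFF/WARMBOOT_SEG), which
 * start_all_aps() points at the AP trampoline.
 */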

/* lock region used by kernel profiling */
int	mcount_lock;

int	mp_naps;		/* # of Application Processors (APs) */
int	boot_cpu_id = -1;	/* designated BSP */
extern	int nkpt;

/*
 * CPU topology map data structures for HTT.
 */
static struct cpu_group mp_groups[MAXCPU];
static struct cpu_top mp_top;

/* AP uses this during bootstrap.  Do not staticize.  */
char *bootSTK;
static int bootAP;

/* Free these after use */
void *bootstacks[MAXCPU];

/* Temporary holder for double fault stack */
char *doublefault_stack;

/* Hotwire a 0->4MB V==P mapping */
extern pt_entry_t *KPTphys;

/* SMP page table page */
extern pt_entry_t *SMPpt;

struct pcb stoppcbs[MAXCPU];

/* Variables needed for SMP tlb shootdown. */
vm_offset_t smp_tlb_addr1;
vm_offset_t smp_tlb_addr2;
volatile int smp_tlb_wait;

extern inthand_t IDTVEC(fast_syscall), IDTVEC(fast_syscall32);

/*
 * Local data and functions.
 */

static u_int logical_cpus;

/* used to hold the APs until we are ready to release them */
static struct mtx ap_boot_mtx;

/* Set to 1 once we're ready to let the APs out of the pen. */
static volatile int aps_ready = 0;

/*
 * Store data from cpu_add() until later in the boot when we actually set up
 * the APs.
 */
struct cpu_info {
	int	cpu_present:1;
	int	cpu_bsp:1;
	int	cpu_disabled:1;
} static cpu_info[MAX_APIC_ID + 1];
static int cpu_apic_ids[MAXCPU];

/* Holds pending bitmap based IPIs per CPU */
static volatile u_int cpu_ipi_pending[MAXCPU];

static u_int boot_address;

static void	assign_cpu_ids(void);
static void	set_interrupt_apic_ids(void);
static int	start_all_aps(void);
static int	start_ap(int apic_id);
static void	release_aps(void *dummy);

static int hlt_logical_cpus;
static u_int hyperthreading_cpus;
static cpumask_t hyperthreading_cpus_mask;
static int hyperthreading_allowed;
static struct sysctl_ctx_list logical_cpu_clist;
static u_int bootMP_size;

static void
mem_range_AP_init(void)
{

	if (mem_range_softc.mr_op && mem_range_softc.mr_op->initAP)
		mem_range_softc.mr_op->initAP(&mem_range_softc);
}

void
mp_topology(void)
{
	struct cpu_group *group;
	int logical_cpus;
	int apic_id;
	int groups;
	int cpu;

	/* Build the smp_topology map. */
	/* Nothing to do if there is no HTT support. */
	if ((cpu_feature & CPUID_HTT) == 0)
		return;
	logical_cpus = (cpu_procinfo & CPUID_HTT_CORES) >> 16;
	if (logical_cpus <= 1)
		return;
	group = &mp_groups[0];
	groups = 1;
	for (cpu = 0, apic_id = 0; apic_id <= MAX_APIC_ID; apic_id++) {
		if (!cpu_info[apic_id].cpu_present)
			continue;
		/*
		 * If the current group has members and we're not a logical
		 * cpu, create a new group.
		 */
		if (group->cg_count != 0 && (apic_id % logical_cpus) == 0) {
			group++;
			groups++;
		}
		group->cg_count++;
		group->cg_mask |= 1 << cpu;
		cpu++;
	}

	mp_top.ct_count = groups;
	mp_top.ct_group = mp_groups;
	smp_topology = &mp_top;
}
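
/*
 * Illustrative example: with CPUID reporting 2 logical CPUs per package and
 * APIC IDs 0-3 present, the loop above produces two groups, with cg_mask
 * 0x3 (cpus 0-1) and 0xc (cpus 2-3).
 */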

#ifdef KDB_STOP_NMI
volatile cpumask_t ipi_nmi_pending;
#endif

/*
 * Calculate usable address in base memory for AP trampoline code.
 */
u_int
mp_bootaddress(u_int basemem)
{

	bootMP_size = mptramp_end - mptramp_start;
	boot_address = trunc_page(basemem * 1024); /* round down to 4k boundary */
	if (((basemem * 1024) - boot_address) < bootMP_size)
		boot_address -= PAGE_SIZE;	/* not enough, lower by 4k */
	/* 3 levels of page table pages */
	mptramp_pagetables = boot_address - (PAGE_SIZE * 3);

	return mptramp_pagetables;
}
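
/*
 * The resulting low-memory layout: mptramp_pagetables points at three
 * consecutive page-table pages (PML4, PDP, PD -- filled in later by
 * start_all_aps()), with the trampoline page itself at boot_address
 * directly above them.
 */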

void
cpu_add(u_int apic_id, char boot_cpu)
{

	if (apic_id > MAX_APIC_ID) {
		panic("SMP: APIC ID %d too high", apic_id);
		return;
	}
	KASSERT(cpu_info[apic_id].cpu_present == 0, ("CPU %d added twice",
	    apic_id));
	cpu_info[apic_id].cpu_present = 1;
	if (boot_cpu) {
		KASSERT(boot_cpu_id == -1,
		    ("CPU %d claims to be BSP, but CPU %d already is", apic_id,
		    boot_cpu_id));
		boot_cpu_id = apic_id;
		cpu_info[apic_id].cpu_bsp = 1;
	}
	if (mp_ncpus < MAXCPU) {
		mp_ncpus++;
		mp_maxid = mp_ncpus - 1;
	}
	if (bootverbose)
		printf("SMP: Added CPU %d (%s)\n", apic_id, boot_cpu ? "BSP" :
		    "AP");
}

void
cpu_mp_setmaxid(void)
{

	/*
	 * mp_maxid should already be set by calls to cpu_add().
	 * Just sanity check its value here.
	 */
	if (mp_ncpus == 0)
		KASSERT(mp_maxid == 0,
		    ("%s: mp_ncpus is zero, but mp_maxid is not", __func__));
	else if (mp_ncpus == 1)
		mp_maxid = 0;
	else
		KASSERT(mp_maxid >= mp_ncpus - 1,
		    ("%s: counters out of sync: max %d, count %d", __func__,
		    mp_maxid, mp_ncpus));
}

int
cpu_mp_probe(void)
{

	/*
	 * Always record BSP in CPU map so that the mbuf init code works
	 * correctly.
	 */
	all_cpus = 1;
	if (mp_ncpus == 0) {
		/*
		 * No CPUs were found, so this must be a UP system.  Set up
		 * the variables to represent a system with a single CPU
		 * with an id of 0.
		 */
		mp_ncpus = 1;
		return (0);
	}

	/* At least one CPU was found. */
	if (mp_ncpus == 1) {
		/*
		 * One CPU was found, so this must be a UP system with
		 * an I/O APIC.
		 */
		mp_maxid = 0;
		return (0);
	}

	/* At least two CPUs were found. */
	return (1);
}

/*
 * Initialize the IPI handlers and start up the APs.
 */
void
cpu_mp_start(void)
{
	int i;
	u_int threads_per_cache, p[4];

	/* Initialize the logical ID to APIC ID table. */
	for (i = 0; i < MAXCPU; i++) {
		cpu_apic_ids[i] = -1;
		cpu_ipi_pending[i] = 0;
	}

	/* Install an inter-CPU IPI for TLB invalidation */
	setidt(IPI_INVLTLB, IDTVEC(invltlb), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IPI_INVLPG, IDTVEC(invlpg), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IPI_INVLRNG, IDTVEC(invlrng), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for cache invalidation. */
	setidt(IPI_INVLCACHE, IDTVEC(invlcache), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for all-CPU rendezvous */
	setidt(IPI_RENDEZVOUS, IDTVEC(rendezvous), SDT_SYSIGT, SEL_KPL, 0);

	/* Install generic inter-CPU IPI handler */
	setidt(IPI_BITMAP_VECTOR, IDTVEC(ipi_intr_bitmap_handler),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for CPU stop/restart */
	setidt(IPI_STOP, IDTVEC(cpustop), SDT_SYSIGT, SEL_KPL, 0);

	/* Set boot_cpu_id if needed. */
	if (boot_cpu_id == -1) {
		boot_cpu_id = PCPU_GET(apic_id);
		cpu_info[boot_cpu_id].cpu_bsp = 1;
	} else
		KASSERT(boot_cpu_id == PCPU_GET(apic_id),
		    ("BSP's APIC ID doesn't match boot_cpu_id"));
	cpu_apic_ids[0] = boot_cpu_id;

	assign_cpu_ids();

	/* Start each Application Processor */
	start_all_aps();

	/* Set up the initial logical CPU info. */
	logical_cpus = logical_cpus_mask = 0;
	if (cpu_feature & CPUID_HTT)
		logical_cpus = (cpu_procinfo & CPUID_HTT_CORES) >> 16;

	/*
	 * Work out if hyperthreading is *really* enabled.  This
	 * is made really ugly by the fact that processors lie: dual
	 * core processors claim to be hyperthreaded even when they're
	 * not, presumably because they want to be treated the same
	 * way as HTT with respect to per-cpu software licensing.
	 * At the time of writing (May 12, 2005) the only hyperthreaded
	 * cpus are from Intel, and Intel's dual-core processors can be
	 * identified via the "deterministic cache parameters" cpuid
	 * calls.
	 */
	/*
	 * First determine if this is an Intel processor which claims
	 * to have hyperthreading support.
	 */
	if ((cpu_feature & CPUID_HTT) &&
	    (strcmp(cpu_vendor, "GenuineIntel") == 0)) {
		/*
		 * If the "deterministic cache parameters" cpuid calls
		 * are available, use them.
		 */
		if (cpu_high >= 4) {
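			/*
			 * CPUID leaf 4 reports deterministic cache
			 * parameters: EAX bits 25:14 hold the maximum
			 * number of logical CPUs sharing the cache, minus
			 * one (hence the "+ 1" below), and EAX bits 4:0
			 * give the cache type, where 0 means no more
			 * caches.
			 */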
			/* Ask the processor about the L1 cache. */
			for (i = 0; i < 1; i++) {
				cpuid_count(4, i, p);
				threads_per_cache = ((p[0] & 0x3ffc000) >> 14)
				    + 1;
				if (hyperthreading_cpus < threads_per_cache)
					hyperthreading_cpus = threads_per_cache;
				if ((p[0] & 0x1f) == 0)
					break;
			}
		}

		/*
		 * If the deterministic cache parameters are not
		 * available, or if no caches were reported to exist,
		 * just accept what the HTT flag indicated.
		 */
		if (hyperthreading_cpus == 0)
			hyperthreading_cpus = logical_cpus;
	}

	set_interrupt_apic_ids();
}

/*
 * Print various information about the SMP system hardware and setup.
 */
void
cpu_mp_announce(void)
{
	int i, x;

	/* List CPUs */
	printf(" cpu0 (BSP): APIC ID: %2d\n", boot_cpu_id);
	for (i = 1, x = 0; x <= MAX_APIC_ID; x++) {
		if (!cpu_info[x].cpu_present || cpu_info[x].cpu_bsp)
			continue;
		if (cpu_info[x].cpu_disabled)
			printf(" cpu (AP): APIC ID: %2d (disabled)\n", x);
		else {
			KASSERT(i < mp_ncpus,
			    ("mp_ncpus and actual cpus are out of whack"));
			printf(" cpu%d (AP): APIC ID: %2d\n", i++, x);
		}
	}
}

/*
 * AP CPUs call this to initialize themselves.
 */
void
init_secondary(void)
{
	struct pcpu *pc;
	u_int64_t msr, cr0;
	int cpu, gsel_tss;

	/* Set by the startup code for us to use */
	cpu = bootAP;

	/* Init tss */
	common_tss[cpu] = common_tss[0];
	common_tss[cpu].tss_rsp0 = 0;	/* not used until after switch */
	common_tss[cpu].tss_iobase = sizeof(struct amd64tss);
	common_tss[cpu].tss_ist1 = (long)&doublefault_stack[PAGE_SIZE];

	gdt_segs[GPROC0_SEL].ssd_base = (long) &common_tss[cpu];
	ssdtosyssd(&gdt_segs[GPROC0_SEL],
	    (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);

	lgdt(&r_gdt);			/* does magic intra-segment return */

	/* Get per-cpu data */
	pc = &__pcpu[cpu];

	/* prime data page for it to use */
	pcpu_init(pc, cpu, sizeof(struct pcpu));
	pc->pc_apic_id = cpu_apic_ids[cpu];
	pc->pc_prvspace = pc;
	pc->pc_curthread = 0;
	pc->pc_tssp = &common_tss[cpu];
	pc->pc_rsp0 = 0;

	wrmsr(MSR_FSBASE, 0);		/* User value */
	wrmsr(MSR_GSBASE, (u_int64_t)pc);
	wrmsr(MSR_KGSBASE, (u_int64_t)pc); /* XXX User value while we're in the kernel */

	lidt(&r_idt);

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	ltr(gsel_tss);

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);

	/* Set up the fast syscall stuff */
	msr = rdmsr(MSR_EFER) | EFER_SCE;
	wrmsr(MSR_EFER, msr);
	wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall));
	wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
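	/*
	 * MSR_STAR layout: bits 47:32 hold the kernel %cs/%ss selector
	 * loaded by SYSCALL, and bits 63:48 the selector base from which
	 * SYSRET reloads the 32-bit user %cs/%ss.
	 */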
	msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
	    ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
	wrmsr(MSR_STAR, msr);
	wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D);

	/* Disable local APIC just to be sure. */
	lapic_disable();

	/* signal our startup to the BSP. */
	mp_naps++;

	/* Spin until the BSP releases the APs. */
	while (!aps_ready)
		ia32_pause();

	/* Initialize the PAT MSR. */
	pmap_init_pat();

	/* set up CPU registers and state */
	cpu_setregs();

	/* set up SSE/NX registers */
	initializecpu();

	/* set up FPU state on the AP */
	fpuinit();

	/* A quick check from sanity claus */
	if (PCPU_GET(apic_id) != lapic_id()) {
		printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
		printf("SMP: actual apic_id = %d\n", lapic_id());
		printf("SMP: correct apic_id = %d\n", PCPU_GET(apic_id));
		panic("cpuid mismatch! boom!!");
	}

	/* Initialize curthread. */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	PCPU_SET(curthread, PCPU_GET(idlethread));

	mtx_lock_spin(&ap_boot_mtx);

	/* Init local APIC for interrupts */
	lapic_setup(1);

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();

	smp_cpus++;

	CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
	printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));

	/* Determine if we are a logical CPU. */
	if (logical_cpus > 1 && PCPU_GET(apic_id) % logical_cpus != 0)
		logical_cpus_mask |= PCPU_GET(cpumask);

	/* Determine if we are a hyperthread. */
	if (hyperthreading_cpus > 1 &&
	    PCPU_GET(apic_id) % hyperthreading_cpus != 0)
		hyperthreading_cpus_mask |= PCPU_GET(cpumask);

	/* Build our map of 'other' CPUs. */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

	if (bootverbose)
		lapic_dump("AP");

	if (smp_cpus == mp_ncpus) {
		/* enable IPIs, tlb shootdown, freezes etc */
		atomic_store_rel_int(&smp_started, 1);
		smp_active = 1;	 /* historic */
	}

	/*
	 * Enable the global pages TLB extension.
	 * This also implicitly flushes the TLB.
	 */
	load_cr4(rcr4() | CR4_PGE);

	mtx_unlock_spin(&ap_boot_mtx);

	/* wait until all the APs are up */
	while (smp_started == 0)
		ia32_pause();

	/* ok, now grab sched_lock and enter the scheduler */
	mtx_lock_spin(&sched_lock);

	/*
	 * Correct spinlock nesting.  The idle thread context that we are
	 * borrowing was created so that it would start out with a single
	 * spin lock (sched_lock) held in fork_trampoline().  Since we've
	 * explicitly acquired locks in this function, the nesting count
	 * is now 2 rather than 1.  Since we are nested, calling
	 * spinlock_exit() will simply adjust the counts without allowing
	 * spin lock using code to interrupt us.
	 */
	spinlock_exit();
	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));

	binuptime(PCPU_PTR(switchtime));
	PCPU_SET(switchticks, ticks);

	cpu_throw(NULL, choosethread());	/* doesn't return */

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}

/*******************************************************************
 * local functions and data
 */

/*
 * We tell the I/O APIC code about all the CPUs we want to receive
 * interrupts.  If we don't want certain CPUs to receive IRQs we
 * can simply not tell the I/O APIC code about them in this function.
 * We also do not tell it about the BSP since it tells itself about
 * the BSP internally to work with UP kernels and on UP machines.
 */
static void
set_interrupt_apic_ids(void)
{
	u_int apic_id;

	for (apic_id = 0; apic_id < MAXCPU; apic_id++) {
		if (!cpu_info[apic_id].cpu_present)
			continue;
		if (cpu_info[apic_id].cpu_bsp)
			continue;
		if (cpu_info[apic_id].cpu_disabled)
			continue;

		/* Don't let hyperthreads service interrupts. */
		if (hyperthreading_cpus > 1 &&
		    apic_id % hyperthreading_cpus != 0)
			continue;

		intr_add_cpu(apic_id);
	}
}

/*
 * Assign logical CPU IDs to local APICs.
 */
static void
assign_cpu_ids(void)
{
	u_int i;

	/* Check for explicitly disabled CPUs. */
	for (i = 0; i <= MAX_APIC_ID; i++) {
		if (!cpu_info[i].cpu_present || cpu_info[i].cpu_bsp)
			continue;

		/* Don't use this CPU if it has been disabled by a tunable. */
		if (resource_disabled("lapic", i)) {
			cpu_info[i].cpu_disabled = 1;
			continue;
		}
	}

	/*
	 * Assign CPU IDs to local APIC IDs and disable any CPUs
	 * beyond MAXCPU.  CPU 0 has already been assigned to the BSP,
	 * so we only have to assign IDs for APs.
	 */
	mp_ncpus = 1;
	for (i = 0; i <= MAX_APIC_ID; i++) {
		if (!cpu_info[i].cpu_present || cpu_info[i].cpu_bsp ||
		    cpu_info[i].cpu_disabled)
			continue;

		if (mp_ncpus < MAXCPU) {
			cpu_apic_ids[mp_ncpus] = i;
			mp_ncpus++;
		} else
			cpu_info[i].cpu_disabled = 1;
	}
	KASSERT(mp_maxid >= mp_ncpus - 1,
	    ("%s: counters out of sync: max %d, count %d", __func__, mp_maxid,
	    mp_ncpus));
}

/*
 * start each AP in our list
 */
static int
start_all_aps(void)
{
	vm_offset_t va = boot_address + KERNBASE;
	u_int64_t *pt4, *pt3, *pt2;
	u_int32_t mpbioswarmvec;
	int apic_id, cpu, i;
	u_char mpbiosreason;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	/* install the AP 1st level boot code */
	pmap_kenter(va, boot_address);
	pmap_invalidate_page(kernel_pmap, va);
	bcopy(mptramp_start, (void *)va, bootMP_size);

	/* Locate the page tables, they'll be below the trampoline */
	pt4 = (u_int64_t *)(uintptr_t)(mptramp_pagetables + KERNBASE);
	pt3 = pt4 + (PAGE_SIZE) / sizeof(u_int64_t);
	pt2 = pt3 + (PAGE_SIZE) / sizeof(u_int64_t);

	/* Create the initial 1GB replicated page tables */
	for (i = 0; i < 512; i++) {
		/* Each slot of the level 4 pages points to the same level 3 page */
		pt4[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + PAGE_SIZE);
		pt4[i] |= PG_V | PG_RW | PG_U;

		/* Each slot of the level 3 pages points to the same level 2 page */
		pt3[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + (2 * PAGE_SIZE));
		pt3[i] |= PG_V | PG_RW | PG_U;

		/* The level 2 page slots are mapped with 2MB pages for 1GB. */
		pt2[i] = i * (2 * 1024 * 1024);
		pt2[i] |= PG_V | PG_RW | PG_PS | PG_U;
	}
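
	/*
	 * Because every level 4 and level 3 slot points at the same page
	 * below it, the low 1GB (512 x 2MB slots) is replicated at every
	 * 1GB-aligned virtual address; the trampoline can thus keep
	 * executing across the switch to long mode whether it fetches at
	 * its low physical address or at the KERNBASE alias.
	 */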

	/* save the current value of the warm-start vector */
	mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);

	/* setup a vector to our boot code */
	*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
	*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

	/* start each AP */
	for (cpu = 1; cpu < mp_ncpus; cpu++) {
		apic_id = cpu_apic_ids[cpu];

		/* allocate and set up an idle stack data page */
		bootstacks[cpu] = (void *)kmem_alloc(kernel_map,
		    KSTACK_PAGES * PAGE_SIZE);
		doublefault_stack = (char *)kmem_alloc(kernel_map, PAGE_SIZE);

		bootSTK = (char *)bootstacks[cpu] + KSTACK_PAGES * PAGE_SIZE - 8;
		bootAP = cpu;

		/* attempt to start the Application Processor */
		if (!start_ap(apic_id)) {
			/* restore the warmstart vector */
			*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;
			panic("AP #%d (PHY# %d) failed!", cpu, apic_id);
		}

		all_cpus |= (1 << cpu);		/* record AP in CPU map */
	}

	/* build our map of 'other' CPUs */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

	/* restore the warmstart vector */
	*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;

	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);

	/* number of APs actually started */
	return mp_naps;
}

/*
 * This function starts the AP (application processor) identified by the
 * APIC ID 'apic_id'.  It does quite a "song and dance" to accomplish this.
 * This is necessary because of the nuances of the different hardware we
 * might encounter.  It isn't pretty, but it seems to work.
 */
static int
start_ap(int apic_id)
{
	int vector, ms;
	int cpus;

	/* calculate the vector */
	vector = (boot_address >> 12) & 0xff;
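
	/*
	 * The 8-bit STARTUP IPI vector selects a 4KB page in low memory:
	 * the AP starts executing in real mode at (vector << 12), which is
	 * boot_address, where the trampoline was installed.
	 */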

	/* used as a watchpoint to signal AP startup */
	cpus = mp_naps;

	/*
	 * First we do an INIT IPI: this INIT IPI might be run, resetting
	 * and running the target CPU.  OR this INIT IPI might be latched
	 * (P5 bug), with the CPU waiting for a STARTUP IPI.  OR this INIT
	 * IPI might be ignored.
	 */

	/* do an INIT IPI: assert RESET */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, apic_id);

	/* wait for pending status end */
	lapic_ipi_wait(-1);

	/* do an INIT IPI: deassert RESET */
	lapic_ipi_raw(APIC_DEST_ALLESELF | APIC_TRIGMOD_LEVEL |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, 0);

	/* wait for pending status end */
	DELAY(10000);		/* wait ~10mS */
	lapic_ipi_wait(-1);

	/*
	 * Next we do a STARTUP IPI: the previous INIT IPI might still be
	 * latched (P5 bug); this 1st STARTUP would then terminate
	 * immediately, and the previously started INIT IPI would continue.
	 * OR the previous INIT IPI has already run, and this STARTUP IPI
	 * will run.  OR the previous INIT IPI was ignored, and this
	 * STARTUP IPI will run.
	 */

	/* do a STARTUP IPI */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
	    vector, apic_id);
	lapic_ipi_wait(-1);
	DELAY(200);		/* wait ~200uS */

	/*
	 * Finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run
	 * IF the previous STARTUP IPI was cancelled by a latched INIT IPI.
	 * OR this STARTUP IPI will be ignored, as only ONE STARTUP IPI is
	 * recognized after hardware RESET or an INIT IPI.
	 */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
	    vector, apic_id);
	lapic_ipi_wait(-1);
	DELAY(200);		/* wait ~200uS */

	/* Wait up to 5 seconds for it to start. */
	for (ms = 0; ms < 5000; ms++) {
		if (mp_naps > cpus)
			return 1;	/* return SUCCESS */
		DELAY(1000);
	}
	return 0;		/* return FAILURE */
}

/*
 * Flush the TLB on all other CPUs
 */
static void
smp_tlb_shootdown(u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
	u_int ncpu;

	ncpu = mp_ncpus - 1;	/* does not shootdown self */
	if (ncpu < 1)
		return;		/* no other cpus */
	if (!(read_rflags() & PSL_I))
		panic("%s: interrupts disabled", __func__);
	mtx_lock_spin(&smp_ipi_mtx);
	smp_tlb_addr1 = addr1;
	smp_tlb_addr2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	ipi_all_but_self(vector);
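	/*
	 * Each target CPU's invalidation handler increments smp_tlb_wait
	 * once it has processed the request; spin until all of them have
	 * checked in.
	 */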
	while (smp_tlb_wait < ncpu)
		ia32_pause();
	mtx_unlock_spin(&smp_ipi_mtx);
}

static void
smp_targeted_tlb_shootdown(u_int mask, u_int vector, vm_offset_t addr1,
    vm_offset_t addr2)
{
	int ncpu, othercpus;

	othercpus = mp_ncpus - 1;
	if (mask == (u_int)-1) {
		ncpu = othercpus;
		if (ncpu < 1)
			return;
	} else {
		mask &= ~PCPU_GET(cpumask);
		if (mask == 0)
			return;
		ncpu = bitcount32(mask);
		if (ncpu > othercpus) {
			/* XXX this should be a panic offence */
			printf("SMP: tlb shootdown to %d other cpus (only have %d)\n",
			    ncpu, othercpus);
			ncpu = othercpus;
		}
		/* XXX should be a panic, implied by mask == 0 above */
		if (ncpu < 1)
			return;
	}
	if (!(read_rflags() & PSL_I))
		panic("%s: interrupts disabled", __func__);
	mtx_lock_spin(&smp_ipi_mtx);
	smp_tlb_addr1 = addr1;
	smp_tlb_addr2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	if (mask == (u_int)-1)
		ipi_all_but_self(vector);
	else
		ipi_selected(mask, vector);
	while (smp_tlb_wait < ncpu)
		ia32_pause();
	mtx_unlock_spin(&smp_ipi_mtx);
}

void
smp_cache_flush(void)
{

	if (smp_started)
		smp_tlb_shootdown(IPI_INVLCACHE, 0, 0);
}

void
smp_invltlb(void)
{

	if (smp_started) {
		smp_tlb_shootdown(IPI_INVLTLB, 0, 0);
	}
}

void
smp_invlpg(vm_offset_t addr)
{

	if (smp_started)
		smp_tlb_shootdown(IPI_INVLPG, addr, 0);
}

void
smp_invlpg_range(vm_offset_t addr1, vm_offset_t addr2)
{

	if (smp_started) {
		smp_tlb_shootdown(IPI_INVLRNG, addr1, addr2);
	}
}

void
smp_masked_invltlb(u_int mask)
{

	if (smp_started) {
		smp_targeted_tlb_shootdown(mask, IPI_INVLTLB, 0, 0);
	}
}

void
smp_masked_invlpg(u_int mask, vm_offset_t addr)
{

	if (smp_started) {
		smp_targeted_tlb_shootdown(mask, IPI_INVLPG, addr, 0);
	}
}

void
smp_masked_invlpg_range(u_int mask, vm_offset_t addr1, vm_offset_t addr2)
{

	if (smp_started) {
		smp_targeted_tlb_shootdown(mask, IPI_INVLRNG, addr1, addr2);
	}
}

void
ipi_bitmap_handler(struct clockframe frame)
{
	int cpu = PCPU_GET(cpuid);
	u_int ipi_bitmap;

	ipi_bitmap = atomic_readandclear_int(&cpu_ipi_pending[cpu]);

#ifdef IPI_PREEMPTION
	if (ipi_bitmap & (1 << IPI_PREEMPT)) {
		mtx_lock_spin(&sched_lock);
		/* Don't preempt the idle thread */
		if (curthread != PCPU_GET(idlethread)) {
			struct thread *running_thread = curthread;

			if (running_thread->td_critnest > 1)
				running_thread->td_owepreempt = 1;
			else
				mi_switch(SW_INVOL | SW_PREEMPT, NULL);
		}
		mtx_unlock_spin(&sched_lock);
	}
#endif

	/* Nothing to do for AST */
}

/*
 * Send an IPI to a set of CPUs.
 */
void
ipi_selected(u_int32_t cpus, u_int ipi)
{
	int cpu;
	u_int bitmap = 0;
	u_int old_pending;
	u_int new_pending;

	if (IPI_IS_BITMAPED(ipi)) {
		bitmap = 1 << ipi;
		ipi = IPI_BITMAP_VECTOR;
	}

	CTR3(KTR_SMP, "%s: cpus: %x ipi: %x", __func__, cpus, ipi);
	while ((cpu = ffs(cpus)) != 0) {
		cpu--;
		cpus &= ~(1 << cpu);

		KASSERT(cpu_apic_ids[cpu] != -1,
		    ("IPI to non-existent CPU %d", cpu));

		if (bitmap) {
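			/*
			 * Atomically set the pending bits; if any bit was
			 * already set, the bitmap vector is still on its
			 * way to this CPU and a redundant IPI can be
			 * skipped.
			 */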
			do {
				old_pending = cpu_ipi_pending[cpu];
				new_pending = old_pending | bitmap;
			} while (!atomic_cmpset_int(&cpu_ipi_pending[cpu],
			    old_pending, new_pending));

			if (old_pending)
				continue;
		}

		lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]);
	}
}

/*
 * Send an IPI to all CPUs, including myself.
 */
void
ipi_all(u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_ALL);
}

/*
 * Send an IPI to all CPUs EXCEPT myself.
 */
void
ipi_all_but_self(u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_OTHERS);
}

/*
 * Send an IPI to myself.
 */
void
ipi_self(u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_SELF);
}

#ifdef KDB_STOP_NMI
/*
 * Send an NMI IPI to selected CPUs.
 */

#define BEFORE_SPIN	1000000

void
ipi_nmi_selected(u_int32_t cpus)
{
	int cpu;
	register_t icrlo;

	icrlo = APIC_DELMODE_NMI | APIC_DESTMODE_PHY | APIC_LEVEL_ASSERT |
	    APIC_TRIGMOD_EDGE;

	CTR2(KTR_SMP, "%s: cpus: %x nmi", __func__, cpus);

	atomic_set_int(&ipi_nmi_pending, cpus);

	while ((cpu = ffs(cpus)) != 0) {
		cpu--;
		cpus &= ~(1 << cpu);

		KASSERT(cpu_apic_ids[cpu] != -1,
		    ("IPI NMI to non-existent CPU %d", cpu));

		/* Wait for an earlier IPI to finish. */
		if (!lapic_ipi_wait(BEFORE_SPIN))
			panic("ipi_nmi_selected: previous IPI has not cleared");

		lapic_ipi_raw(icrlo, cpu_apic_ids[cpu]);
	}
}

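/*
 * Handle an incoming NMI.  Returns 1 if the NMI was not a stop request from
 * ipi_nmi_selected(), so that the caller falls through to the normal NMI
 * handling, or 0 after this CPU has parked and been restarted.
 */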
int
ipi_nmi_handler(void)
{
	int cpu = PCPU_GET(cpuid);

	if (!(atomic_load_acq_int(&ipi_nmi_pending) & (1 << cpu)))
		return 1;

	atomic_clear_int(&ipi_nmi_pending, 1 << cpu);

	savectx(&stoppcbs[cpu]);

	/* Indicate that we are stopped */
	atomic_set_int(&stopped_cpus, 1 << cpu);

	/* Wait for restart */
	while (!(atomic_load_acq_int(&started_cpus) & (1 << cpu)))
		ia32_pause();

	atomic_clear_int(&started_cpus, 1 << cpu);
	atomic_clear_int(&stopped_cpus, 1 << cpu);

	if (cpu == 0 && cpustop_restartfunc != NULL)
		cpustop_restartfunc();

	return 0;
}

#endif /* KDB_STOP_NMI */

/*
 * This is called once the rest of the system is up and running and we're
 * ready to let the APs out of the pen.
 */
static void
release_aps(void *dummy __unused)
{

	if (mp_ncpus == 1)
		return;
	mtx_lock_spin(&sched_lock);
	atomic_store_rel_int(&aps_ready, 1);
	while (smp_started == 0)
		ia32_pause();
	mtx_unlock_spin(&sched_lock);
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);

static int
sysctl_hlt_cpus(SYSCTL_HANDLER_ARGS)
{
	u_int mask;
	int error;

	mask = hlt_cpus_mask;
	error = sysctl_handle_int(oidp, &mask, 0, req);
	if (error || !req->newptr)
		return (error);

	if (logical_cpus_mask != 0 &&
	    (mask & logical_cpus_mask) == logical_cpus_mask)
		hlt_logical_cpus = 1;
	else
		hlt_logical_cpus = 0;

	if (!hyperthreading_allowed)
		mask |= hyperthreading_cpus_mask;

	if ((mask & all_cpus) == all_cpus)
		mask &= ~(1<<0);
	hlt_cpus_mask = mask;
	return (error);
}
SYSCTL_PROC(_machdep, OID_AUTO, hlt_cpus, CTLTYPE_INT|CTLFLAG_RW,
    0, 0, sysctl_hlt_cpus, "IU",
    "Bitmap of CPUs to halt.  101 (binary) will halt CPUs 0 and 2.");

static int
sysctl_hlt_logical_cpus(SYSCTL_HANDLER_ARGS)
{
	int disable, error;

	disable = hlt_logical_cpus;
	error = sysctl_handle_int(oidp, &disable, 0, req);
	if (error || !req->newptr)
		return (error);

	if (disable)
		hlt_cpus_mask |= logical_cpus_mask;
	else
		hlt_cpus_mask &= ~logical_cpus_mask;

	if (!hyperthreading_allowed)
		hlt_cpus_mask |= hyperthreading_cpus_mask;

	if ((hlt_cpus_mask & all_cpus) == all_cpus)
		hlt_cpus_mask &= ~(1<<0);

	hlt_logical_cpus = disable;
	return (error);
}

static int
sysctl_hyperthreading_allowed(SYSCTL_HANDLER_ARGS)
{
	int allowed, error;

	allowed = hyperthreading_allowed;
	error = sysctl_handle_int(oidp, &allowed, 0, req);
	if (error || !req->newptr)
		return (error);

	if (allowed)
		hlt_cpus_mask &= ~hyperthreading_cpus_mask;
	else
		hlt_cpus_mask |= hyperthreading_cpus_mask;

	if (logical_cpus_mask != 0 &&
	    (hlt_cpus_mask & logical_cpus_mask) == logical_cpus_mask)
		hlt_logical_cpus = 1;
	else
		hlt_logical_cpus = 0;

	if ((hlt_cpus_mask & all_cpus) == all_cpus)
		hlt_cpus_mask &= ~(1<<0);

	hyperthreading_allowed = allowed;
	return (error);
}

static void
cpu_hlt_setup(void *dummy __unused)
{

	if (logical_cpus_mask != 0) {
		TUNABLE_INT_FETCH("machdep.hlt_logical_cpus",
		    &hlt_logical_cpus);
		sysctl_ctx_init(&logical_cpu_clist);
		SYSCTL_ADD_PROC(&logical_cpu_clist,
		    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
		    "hlt_logical_cpus", CTLTYPE_INT|CTLFLAG_RW, 0, 0,
		    sysctl_hlt_logical_cpus, "IU", "");
		SYSCTL_ADD_UINT(&logical_cpu_clist,
		    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
		    "logical_cpus_mask", CTLTYPE_INT|CTLFLAG_RD,
		    &logical_cpus_mask, 0, "");

		if (hlt_logical_cpus)
			hlt_cpus_mask |= logical_cpus_mask;

		/*
		 * If necessary for security purposes, force
		 * hyperthreading off, regardless of the value
		 * of hlt_logical_cpus.
		 */
		if (hyperthreading_cpus_mask) {
			TUNABLE_INT_FETCH("machdep.hyperthreading_allowed",
			    &hyperthreading_allowed);
			SYSCTL_ADD_PROC(&logical_cpu_clist,
			    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
			    "hyperthreading_allowed", CTLTYPE_INT|CTLFLAG_RW,
			    0, 0, sysctl_hyperthreading_allowed, "IU", "");
			if (!hyperthreading_allowed)
				hlt_cpus_mask |= hyperthreading_cpus_mask;
		}
	}
}
SYSINIT(cpu_hlt, SI_SUB_SMP, SI_ORDER_ANY, cpu_hlt_setup, NULL);

int
mp_grab_cpu_hlt(void)
{
	u_int mask = PCPU_GET(cpumask);
#ifdef MP_WATCHDOG
	u_int cpuid = PCPU_GET(cpuid);
#endif
	int retval;

#ifdef MP_WATCHDOG
	ap_watchdog(cpuid);
#endif

	retval = mask & hlt_cpus_mask;
	while (mask & hlt_cpus_mask)
		__asm __volatile("sti; hlt" : : : "memory");
	return (retval);
}