/*-
 * Copyright (c) 1996, by Steve Passe
 * Copyright (c) 2003, by Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_cpu.h"
#include "opt_kdb.h"
#include "opt_kstack_pages.h"
#include "opt_mp_watchdog.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#ifdef GPROF
#include <sys/gmon.h>
#endif
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <machine/apicreg.h>
#include <machine/clock.h>
#include <machine/md_var.h>
#include <machine/mp_watchdog.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/tss.h>

#define WARMBOOT_TARGET		0
#define WARMBOOT_OFF		(KERNBASE + 0x0467)
#define WARMBOOT_SEG		(KERNBASE + 0x0469)

#define CMOS_REG		(0x70)
#define CMOS_DATA		(0x71)
#define BIOS_RESET		(0x0f)
#define BIOS_WARM		(0x0a)

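/*
 * Writing BIOS_WARM to the CMOS shutdown status byte (offset BIOS_RESET)
 * traditionally asks the BIOS for a "warm start": on the next processor
 * reset it jumps through the vector at 40:67 (WARMBOOT_OFF/WARMBOOT_SEG)
 * instead of running POST.  start_all_aps() points that vector at the AP
 * trampoline as a fallback for processors that restart via the BIOS after
 * an INIT rather than waiting for a STARTUP IPI.
 */
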
/* lock region used by kernel profiling */
int	mcount_lock;

int	mp_naps;		/* # of Application Processors */
int	boot_cpu_id = -1;	/* designated BSP */
extern int nkpt;

/*
 * CPU topology map data structures for HTT.
 */
static struct cpu_group mp_groups[MAXCPU];
static struct cpu_top mp_top;

/* AP uses this during bootstrap.  Do not staticize.  */
char *bootSTK;
static int bootAP;

/* Free these after use */
void *bootstacks[MAXCPU];

/* Temporary holder for double fault stack */
char *doublefault_stack;

/* Hotwire a 0->4MB V==P mapping */
extern pt_entry_t *KPTphys;

/* SMP page table page */
extern pt_entry_t *SMPpt;

struct pcb stoppcbs[MAXCPU];

/* Variables needed for SMP tlb shootdown. */
vm_offset_t smp_tlb_addr1;
vm_offset_t smp_tlb_addr2;
volatile int smp_tlb_wait;

extern inthand_t IDTVEC(fast_syscall), IDTVEC(fast_syscall32);

/*
 * Local data and functions.
 */

static u_int logical_cpus;

/* used to hold the APs until we are ready to release them */
static struct mtx ap_boot_mtx;

/* Set to 1 once we're ready to let the APs out of the pen. */
static volatile int aps_ready = 0;

/*
 * Store data from cpu_add() until later in the boot when we actually set up
 * the APs.
 */
struct cpu_info {
	int	cpu_present:1;
	int	cpu_bsp:1;
	int	cpu_disabled:1;
	int	cpu_hyperthread:1;
} static cpu_info[MAX_APIC_ID + 1];
static int cpu_apic_ids[MAXCPU];

/* Holds pending bitmap-based IPIs per CPU */
static volatile u_int cpu_ipi_pending[MAXCPU];

static u_int boot_address;

static void	assign_cpu_ids(void);
static void	set_interrupt_apic_ids(void);
static int	start_all_aps(void);
static int	start_ap(int apic_id);
static void	release_aps(void *dummy);

static int hlt_logical_cpus;
static u_int hyperthreading_cpus;
static cpumask_t hyperthreading_cpus_mask;
static int hyperthreading_allowed;
static struct sysctl_ctx_list logical_cpu_clist;
static u_int bootMP_size;

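/*
 * Let the memory-range (MTRR) driver program this AP's ranges to match
 * the BSP's; mem_range_softc.mr_op is NULL when no driver is attached.
 */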
static void
mem_range_AP_init(void)
{
	if (mem_range_softc.mr_op && mem_range_softc.mr_op->initAP)
		mem_range_softc.mr_op->initAP(&mem_range_softc);
}

void
mp_topology(void)
{
	struct cpu_group *group;
	int logical_cpus;
	int apic_id;
	int groups;
	int cpu;

	/* Build the smp_topology map. */
	/* Nothing to do if there is no HTT support. */
	if ((cpu_feature & CPUID_HTT) == 0)
		return;
	logical_cpus = (cpu_procinfo & CPUID_HTT_CORES) >> 16;
	if (logical_cpus <= 1)
		return;
	group = &mp_groups[0];
	groups = 1;
	for (cpu = 0, apic_id = 0; apic_id <= MAX_APIC_ID; apic_id++) {
		if (!cpu_info[apic_id].cpu_present)
			continue;
		/*
		 * If the current group has members and we're not a logical
		 * cpu, create a new group.
		 */
		if (group->cg_count != 0 && (apic_id % logical_cpus) == 0) {
			group++;
			groups++;
		}
		group->cg_count++;
		group->cg_mask |= 1 << cpu;
		cpu++;
	}

	mp_top.ct_count = groups;
	mp_top.ct_group = mp_groups;
	smp_topology = &mp_top;
}
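
/*
 * Worked example: with logical_cpus == 2 and present APIC IDs 0-3, the
 * loop above yields two groups, cg_mask 0x3 (cpus 0-1) and 0xc (cpus 2-3),
 * pairing each physical package's logical CPUs for the scheduler.
 */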

#ifdef KDB_STOP_NMI
volatile cpumask_t ipi_nmi_pending;
#endif

/*
 * Calculate usable address in base memory for AP trampoline code.
 */
u_int
mp_bootaddress(u_int basemem)
{

	bootMP_size = mptramp_end - mptramp_start;
	boot_address = trunc_page(basemem * 1024); /* round down to 4k boundary */
	if (((basemem * 1024) - boot_address) < bootMP_size)
		boot_address -= PAGE_SIZE;	/* not enough, lower by 4k */
	/* 3 levels of page table pages */
	mptramp_pagetables = boot_address - (PAGE_SIZE * 3);

	return mptramp_pagetables;
}
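
/*
 * Resulting low-memory layout (growing upward):
 *
 *	mptramp_pagetables		  level 4 page table page (PML4)
 *	mptramp_pagetables + PAGE_SIZE	  level 3 page table page
 *	mptramp_pagetables + 2*PAGE_SIZE  level 2 page table page
 *	boot_address			  16-bit AP trampoline code
 *
 * start_all_aps() copies the trampoline and fills in these pages.
 */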

void
cpu_add(u_int apic_id, char boot_cpu)
{

	if (apic_id > MAX_APIC_ID) {
		panic("SMP: APIC ID %d too high", apic_id);
		return;
	}
	KASSERT(cpu_info[apic_id].cpu_present == 0, ("CPU %d added twice",
	    apic_id));
	cpu_info[apic_id].cpu_present = 1;
	if (boot_cpu) {
		KASSERT(boot_cpu_id == -1,
		    ("CPU %d claims to be BSP, but CPU %d already is", apic_id,
		    boot_cpu_id));
		boot_cpu_id = apic_id;
		cpu_info[apic_id].cpu_bsp = 1;
	}
	if (mp_ncpus < MAXCPU) {
		mp_ncpus++;
		mp_maxid = mp_ncpus - 1;
	}
	if (bootverbose)
		printf("SMP: Added CPU %d (%s)\n", apic_id, boot_cpu ? "BSP" :
		    "AP");
}

void
cpu_mp_setmaxid(void)
{

	/*
	 * mp_maxid should already be set by calls to cpu_add().
	 * Just sanity check its value here.
	 */
	if (mp_ncpus == 0)
		KASSERT(mp_maxid == 0,
		    ("%s: mp_ncpus is zero, but mp_maxid is not", __func__));
	else if (mp_ncpus == 1)
		mp_maxid = 0;
	else
		KASSERT(mp_maxid >= mp_ncpus - 1,
		    ("%s: counters out of sync: max %d, count %d", __func__,
		    mp_maxid, mp_ncpus));
}

int
cpu_mp_probe(void)
{

	/*
	 * Always record BSP in CPU map so that the mbuf init code works
	 * correctly.
	 */
	all_cpus = 1;
	if (mp_ncpus == 0) {
		/*
		 * No CPUs were found, so this must be a UP system.  Set up
		 * the variables to represent a system with a single CPU
		 * with an id of 0.
		 */
		mp_ncpus = 1;
		return (0);
	}

	/* At least one CPU was found. */
	if (mp_ncpus == 1) {
		/*
		 * One CPU was found, so this must be a UP system with
		 * an I/O APIC.
		 */
		mp_maxid = 0;
		return (0);
	}

	/* At least two CPUs were found. */
	return (1);
}

/*
 * Initialize the IPI handlers and start up the APs.
 */
void
cpu_mp_start(void)
{
	int i;
	u_int threads_per_cache, p[4];

	/* Initialize the logical ID to APIC ID table. */
	for (i = 0; i < MAXCPU; i++) {
		cpu_apic_ids[i] = -1;
		cpu_ipi_pending[i] = 0;
	}

	/* Install an inter-CPU IPI for TLB invalidation */
	setidt(IPI_INVLTLB, IDTVEC(invltlb), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IPI_INVLPG, IDTVEC(invlpg), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IPI_INVLRNG, IDTVEC(invlrng), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for cache invalidation. */
	setidt(IPI_INVLCACHE, IDTVEC(invlcache), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for all-CPU rendezvous */
	setidt(IPI_RENDEZVOUS, IDTVEC(rendezvous), SDT_SYSIGT, SEL_KPL, 0);

	/* Install generic inter-CPU IPI handler */
	setidt(IPI_BITMAP_VECTOR, IDTVEC(ipi_intr_bitmap_handler),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for CPU stop/restart */
	setidt(IPI_STOP, IDTVEC(cpustop), SDT_SYSIGT, SEL_KPL, 0);

	/* Set boot_cpu_id if needed. */
	if (boot_cpu_id == -1) {
		boot_cpu_id = PCPU_GET(apic_id);
		cpu_info[boot_cpu_id].cpu_bsp = 1;
	} else
		KASSERT(boot_cpu_id == PCPU_GET(apic_id),
		    ("BSP's APIC ID doesn't match boot_cpu_id"));

	/* Set up the initial logical CPUs info. */
	logical_cpus = logical_cpus_mask = 0;
	if (cpu_feature & CPUID_HTT)
		logical_cpus = (cpu_procinfo & CPUID_HTT_CORES) >> 16;

	/*
	 * Work out if hyperthreading is *really* enabled.  This
	 * is made really ugly by the fact that processors lie: dual-
	 * core processors claim to be hyperthreaded even when they're
	 * not, presumably because they want to be treated the same
	 * way as HTT with respect to per-cpu software licensing.
	 * At the time of writing (May 12, 2005) the only hyperthreaded
	 * cpus are from Intel, and Intel's dual-core processors can be
	 * identified via the "deterministic cache parameters" cpuid
	 * calls.
	 */
	/*
	 * First determine if this is an Intel processor which claims
	 * to have hyperthreading support.
	 */
	if ((cpu_feature & CPUID_HTT) &&
	    (strcmp(cpu_vendor, "GenuineIntel") == 0)) {
		/*
		 * If the "deterministic cache parameters" cpuid calls
		 * are available, use them.
		 */
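		/*
		 * For CPUID leaf 4 (used below), EAX[25:14] holds the
		 * maximum number of logical processors sharing the cache,
		 * minus one, and EAX[4:0] is the cache type; a type of 0
		 * means there are no more cache levels to enumerate.
		 */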
		if (cpu_high >= 4) {
			/* Ask the processor about the L1 cache. */
			for (i = 0; i < 1; i++) {
				cpuid_count(4, i, p);
				threads_per_cache = ((p[0] & 0x3ffc000) >> 14) + 1;
				if (hyperthreading_cpus < threads_per_cache)
					hyperthreading_cpus = threads_per_cache;
				if ((p[0] & 0x1f) == 0)
					break;
			}
		}

		/*
		 * If the deterministic cache parameters are not
		 * available, or if no caches were reported to exist,
		 * just accept what the HTT flag indicated.
		 */
		if (hyperthreading_cpus == 0)
			hyperthreading_cpus = logical_cpus;
	}

	assign_cpu_ids();

	/* Start each Application Processor */
	start_all_aps();

	set_interrupt_apic_ids();
}

/*
 * Print various information about the SMP system hardware and setup.
 */
void
cpu_mp_announce(void)
{
	const char *hyperthread;
	int i;

	/* List Active CPUs first. */
	printf(" cpu0 (BSP): APIC ID: %2d\n", boot_cpu_id);
	for (i = 1; i < mp_ncpus; i++) {
		if (cpu_info[cpu_apic_ids[i]].cpu_hyperthread)
			hyperthread = "/HT";
		else
			hyperthread = "";
		printf(" cpu%d (AP%s): APIC ID: %2d\n", i, hyperthread,
		    cpu_apic_ids[i]);
	}

	/* List disabled CPUs last. */
	for (i = 0; i <= MAX_APIC_ID; i++) {
		if (!cpu_info[i].cpu_present || !cpu_info[i].cpu_disabled)
			continue;
		if (cpu_info[i].cpu_hyperthread)
			hyperthread = "/HT";
		else
			hyperthread = "";
		printf(" cpu (AP%s): APIC ID: %2d (disabled)\n", hyperthread,
		    i);
	}
}

/*
 * AP CPUs call this to initialize themselves.
 */
void
init_secondary(void)
{
	struct pcpu *pc;
	u_int64_t msr, cr0;
	int cpu, gsel_tss;

	/* Set by the startup code for us to use */
	cpu = bootAP;

	/* Init tss */
	common_tss[cpu] = common_tss[0];
	common_tss[cpu].tss_rsp0 = 0;	/* not used until after switch */
	common_tss[cpu].tss_iobase = sizeof(struct amd64tss);
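	/*
	 * Point Interrupt Stack Table entry 1 at this AP's double fault
	 * stack so the double fault handler always runs on a good stack.
	 */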
	common_tss[cpu].tss_ist1 = (long)&doublefault_stack[PAGE_SIZE];

	gdt_segs[GPROC0_SEL].ssd_base = (long) &common_tss[cpu];
	ssdtosyssd(&gdt_segs[GPROC0_SEL],
	    (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);

	lgdt(&r_gdt);			/* does magic intra-segment return */

	/* Get per-cpu data */
	pc = &__pcpu[cpu];

	/* prime data page for it to use */
	pcpu_init(pc, cpu, sizeof(struct pcpu));
	pc->pc_apic_id = cpu_apic_ids[cpu];
	pc->pc_prvspace = pc;
	pc->pc_curthread = 0;
	pc->pc_tssp = &common_tss[cpu];
	pc->pc_rsp0 = 0;

	wrmsr(MSR_FSBASE, 0);		/* User value */
	wrmsr(MSR_GSBASE, (u_int64_t)pc);
	wrmsr(MSR_KGSBASE, (u_int64_t)pc); /* XXX User value while we're in the kernel */

	lidt(&r_idt);

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	ltr(gsel_tss);

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);

	/* Set up the fast syscall stuff */
	msr = rdmsr(MSR_EFER) | EFER_SCE;
	wrmsr(MSR_EFER, msr);
	wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall));
	wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
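	/*
	 * MSR_STAR layout: bits 47:32 hold the selector base loaded into
	 * %cs/%ss on SYSCALL; bits 63:48 hold the base used to derive the
	 * user selectors on SYSRET.
	 */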
	msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
	    ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
	wrmsr(MSR_STAR, msr);
	wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D);

	/* Disable local APIC just to be sure. */
	lapic_disable();

	/* signal our startup to the BSP. */
	mp_naps++;

	/* Spin until the BSP releases the APs. */
	while (!aps_ready)
		ia32_pause();

	/* Initialize the PAT MSR. */
	pmap_init_pat();

	/* set up CPU registers and state */
	cpu_setregs();

	/* set up SSE/NX registers */
	initializecpu();

	/* set up FPU state on the AP */
	fpuinit();

	/* A quick check from sanity claus */
	if (PCPU_GET(apic_id) != lapic_id()) {
		printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
		printf("SMP: actual apic_id = %d\n", lapic_id());
		printf("SMP: correct apic_id = %d\n", PCPU_GET(apic_id));
		panic("cpuid mismatch! boom!!");
	}

	/* Initialize curthread. */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	PCPU_SET(curthread, PCPU_GET(idlethread));

	mtx_lock_spin(&ap_boot_mtx);

	/* Init local apic for IRQs */
	lapic_setup(1);

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();

	smp_cpus++;

	CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
	printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));

	/* Determine if we are a logical CPU. */
	if (logical_cpus > 1 && PCPU_GET(apic_id) % logical_cpus != 0)
		logical_cpus_mask |= PCPU_GET(cpumask);

	/* Determine if we are a hyperthread. */
	if (hyperthreading_cpus > 1 &&
	    PCPU_GET(apic_id) % hyperthreading_cpus != 0)
		hyperthreading_cpus_mask |= PCPU_GET(cpumask);

	/* Build our map of 'other' CPUs. */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

	if (bootverbose)
		lapic_dump("AP");

	if (smp_cpus == mp_ncpus) {
		/* enable IPIs, TLB shootdown, freezes, etc. */
		atomic_store_rel_int(&smp_started, 1);
		smp_active = 1;		/* historic */
	}

	/*
	 * Enable the global-pages TLB extension.
	 * This also implicitly flushes the TLB.
	 */
	load_cr4(rcr4() | CR4_PGE);

	mtx_unlock_spin(&ap_boot_mtx);

	/* wait until all the APs are up */
	while (smp_started == 0)
		ia32_pause();

	/* ok, now grab sched_lock and enter the scheduler */
	mtx_lock_spin(&sched_lock);

	/*
	 * Correct spinlock nesting.  The idle thread context that we are
	 * borrowing was created so that it would start out with a single
	 * spin lock (sched_lock) held in fork_trampoline().  Since we've
	 * explicitly acquired locks in this function, the nesting count
	 * is now 2 rather than 1.  Since we are nested, calling
	 * spinlock_exit() will simply adjust the counts without allowing
	 * spin lock using code to interrupt us.
	 */
	spinlock_exit();
	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));

	binuptime(PCPU_PTR(switchtime));
	PCPU_SET(switchticks, ticks);

	cpu_throw(NULL, choosethread());	/* doesn't return */

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}

/*******************************************************************
 * local functions and data
 */

/*
 * We tell the I/O APIC code about all the CPUs that we want to receive
 * interrupts.  If we don't want certain CPUs to receive IRQs we can
 * simply not tell the I/O APIC code about them in this function.  We
 * also do not tell it about the BSP since it tells itself about the
 * BSP internally to work with UP kernels and on UP machines.
 */
static void
set_interrupt_apic_ids(void)
{
	u_int apic_id;

	for (apic_id = 0; apic_id < MAXCPU; apic_id++) {
		if (!cpu_info[apic_id].cpu_present)
			continue;
		if (cpu_info[apic_id].cpu_bsp)
			continue;
		if (cpu_info[apic_id].cpu_disabled)
			continue;

		/* Don't let hyperthreads service interrupts. */
		if (hyperthreading_cpus > 1 &&
		    apic_id % hyperthreading_cpus != 0)
			continue;

		intr_add_cpu(apic_id);
	}
}

/*
 * Assign logical CPU IDs to local APICs.
 */
static void
assign_cpu_ids(void)
{
	u_int i;

	/* Check for explicitly disabled CPUs. */
	for (i = 0; i <= MAX_APIC_ID; i++) {
		if (!cpu_info[i].cpu_present || cpu_info[i].cpu_bsp)
			continue;

		if (hyperthreading_cpus > 1 && i % hyperthreading_cpus != 0)
			cpu_info[i].cpu_hyperthread = 1;

		/* Don't use this CPU if it has been disabled by a tunable. */
		if (resource_disabled("lapic", i)) {
			cpu_info[i].cpu_disabled = 1;
			continue;
		}
	}

	/*
	 * Assign CPU IDs to local APIC IDs and disable any CPUs
	 * beyond MAXCPU.  CPU 0 is always assigned to the BSP.
	 *
	 * To minimize confusion for userland, we attempt to number
	 * CPUs such that all the threads and cores in a package are
	 * grouped together.  For now we assume that the BSP is always
	 * the first thread in a package and just start adding APs
	 * starting with the BSP's APIC ID.
	 */
	mp_ncpus = 1;
	cpu_apic_ids[0] = boot_cpu_id;
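	/* Walk the whole APIC ID space once, wrapping after MAX_APIC_ID. */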
	for (i = boot_cpu_id + 1; i != boot_cpu_id;
	    i == MAX_APIC_ID ? i = 0 : i++) {
		if (!cpu_info[i].cpu_present || cpu_info[i].cpu_bsp ||
		    cpu_info[i].cpu_disabled)
			continue;

		if (mp_ncpus < MAXCPU) {
			cpu_apic_ids[mp_ncpus] = i;
			mp_ncpus++;
		} else
			cpu_info[i].cpu_disabled = 1;
	}
	KASSERT(mp_maxid >= mp_ncpus - 1,
	    ("%s: counters out of sync: max %d, count %d", __func__, mp_maxid,
	    mp_ncpus));
}

/*
 * start each AP in our list
 */
static int
start_all_aps(void)
{
	vm_offset_t va = boot_address + KERNBASE;
	u_int64_t *pt4, *pt3, *pt2;
	u_int32_t mpbioswarmvec;
	int apic_id, cpu, i;
	u_char mpbiosreason;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	/* install the AP 1st level boot code */
	pmap_kenter(va, boot_address);
	pmap_invalidate_page(kernel_pmap, va);
	bcopy(mptramp_start, (void *)va, bootMP_size);

	/* Locate the page tables; they'll be below the trampoline */
	pt4 = (u_int64_t *)(uintptr_t)(mptramp_pagetables + KERNBASE);
	pt3 = pt4 + (PAGE_SIZE) / sizeof(u_int64_t);
	pt2 = pt3 + (PAGE_SIZE) / sizeof(u_int64_t);

	/* Create the initial 1GB replicated page tables */
	for (i = 0; i < 512; i++) {
		/* Each slot of the level 4 pages points to the same level 3 page */
		pt4[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + PAGE_SIZE);
		pt4[i] |= PG_V | PG_RW | PG_U;

		/* Each slot of the level 3 pages points to the same level 2 page */
		pt3[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + (2 * PAGE_SIZE));
		pt3[i] |= PG_V | PG_RW | PG_U;

		/* The level 2 page slots are mapped with 2MB pages for 1GB. */
		pt2[i] = i * (2 * 1024 * 1024);
		pt2[i] |= PG_V | PG_RW | PG_PS | PG_U;
	}
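
	/*
	 * Since every level 4 and level 3 slot points at the same
	 * lower-level page, the first 1GB of physical memory appears at
	 * every 1GB boundary of the virtual address space.  The AP can
	 * therefore run at the trampoline's low (V==P) address while it
	 * enables long mode and still reach the kernel at KERNBASE with
	 * the same page tables.
	 */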

	/* save the current value of the warm-start vector */
	mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);

	/* setup a vector to our boot code */
	*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
	*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

	/* start each AP */
	for (cpu = 1; cpu < mp_ncpus; cpu++) {
		apic_id = cpu_apic_ids[cpu];

		/* allocate and set up a boot stack and a double fault stack */
		bootstacks[cpu] = (void *)kmem_alloc(kernel_map, KSTACK_PAGES * PAGE_SIZE);
		doublefault_stack = (char *)kmem_alloc(kernel_map, PAGE_SIZE);

		bootSTK = (char *)bootstacks[cpu] + KSTACK_PAGES * PAGE_SIZE - 8;
		bootAP = cpu;

		/* attempt to start the Application Processor */
		if (!start_ap(apic_id)) {
			/* restore the warmstart vector */
			*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;
			panic("AP #%d (PHY# %d) failed!", cpu, apic_id);
		}

		all_cpus |= (1 << cpu);		/* record AP in CPU map */
	}

	/* build our map of 'other' CPUs */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

	/* restore the warmstart vector */
	*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;

	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);

	/* number of APs actually started */
	return mp_naps;
}

/*
 * This function starts the AP (application processor) identified
 * by the APIC ID 'apic_id'.  It does quite a "song and dance"
 * to accomplish this.  This is necessary because of the nuances
 * of the different hardware we might encounter.  It isn't pretty,
 * but it seems to work.
 */
static int
start_ap(int apic_id)
{
	int vector, ms;
	int cpus;

	/* calculate the vector */
	vector = (boot_address >> 12) & 0xff;
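
	/*
	 * The STARTUP IPI carries an 8-bit vector that the target CPU
	 * treats as a real-mode page number: it starts executing at
	 * physical address vector << 12, which is why the trampoline
	 * must sit page-aligned below 1MB.
	 */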

	/* used as a watchpoint to signal AP startup */
	cpus = mp_naps;

	/*
	 * First we do an INIT/RESET IPI: this INIT IPI might be run,
	 * resetting and running the target CPU.  Or this INIT IPI might
	 * be latched (P5 bug), with the CPU waiting for a STARTUP IPI.
	 * Or this INIT IPI might be ignored.
	 */

	/* do an INIT IPI: assert RESET */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, apic_id);

	/* wait for pending status end */
	lapic_ipi_wait(-1);

	/* do an INIT IPI: deassert RESET */
	lapic_ipi_raw(APIC_DEST_ALLESELF | APIC_TRIGMOD_LEVEL |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, 0);

	/* wait for pending status end */
	DELAY(10000);		/* wait ~10ms */
	lapic_ipi_wait(-1);

	/*
	 * Next we do a STARTUP IPI: the previous INIT IPI might still be
	 * latched (P5 bug), in which case this first STARTUP IPI would
	 * terminate immediately and the previously started INIT IPI would
	 * continue.  Or the previous INIT IPI has already run, and this
	 * STARTUP IPI will run.  Or the previous INIT IPI was ignored,
	 * and this STARTUP IPI will run.
	 */

	/* do a STARTUP IPI */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
	    vector, apic_id);
	lapic_ipi_wait(-1);
	DELAY(200);		/* wait ~200us */

	/*
	 * Finally we do a second STARTUP IPI: this second STARTUP IPI
	 * should run if the previous STARTUP IPI was cancelled by a
	 * latched INIT IPI.  Otherwise it will be ignored, as only one
	 * STARTUP IPI is recognized after a hardware RESET or INIT IPI.
	 */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
	    vector, apic_id);
	lapic_ipi_wait(-1);
	DELAY(200);		/* wait ~200us */

	/* Wait up to 5 seconds for it to start. */
	for (ms = 0; ms < 5000; ms++) {
		if (mp_naps > cpus)
			return 1;	/* return SUCCESS */
		DELAY(1000);
	}
	return 0;		/* return FAILURE */
}

/*
 * Flush the TLB on all other CPUs
 */
static void
smp_tlb_shootdown(u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
	u_int ncpu;

	ncpu = mp_ncpus - 1;	/* does not shoot down self */
	if (ncpu < 1)
		return;		/* no other cpus */
	if (!(read_rflags() & PSL_I))
		panic("%s: interrupts disabled", __func__);
	mtx_lock_spin(&smp_ipi_mtx);
	smp_tlb_addr1 = addr1;
	smp_tlb_addr2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	ipi_all_but_self(vector);
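	/*
	 * Each invalidation IPI handler (see apic_vector.S) atomically
	 * increments smp_tlb_wait after processing the request; spin here
	 * until every other CPU has checked in.
	 */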
	while (smp_tlb_wait < ncpu)
		ia32_pause();
	mtx_unlock_spin(&smp_ipi_mtx);
}

static void
smp_targeted_tlb_shootdown(u_int mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
	int ncpu, othercpus;

	othercpus = mp_ncpus - 1;
	if (mask == (u_int)-1) {
		ncpu = othercpus;
		if (ncpu < 1)
			return;
	} else {
		mask &= ~PCPU_GET(cpumask);
		if (mask == 0)
			return;
		ncpu = bitcount32(mask);
		if (ncpu > othercpus) {
			/* XXX this should be a panic offence */
			printf("SMP: tlb shootdown to %d other cpus (only have %d)\n",
			    ncpu, othercpus);
			ncpu = othercpus;
		}
		/* XXX should be a panic, implied by mask == 0 above */
		if (ncpu < 1)
			return;
	}
	if (!(read_rflags() & PSL_I))
		panic("%s: interrupts disabled", __func__);
	mtx_lock_spin(&smp_ipi_mtx);
	smp_tlb_addr1 = addr1;
	smp_tlb_addr2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	if (mask == (u_int)-1)
		ipi_all_but_self(vector);
	else
		ipi_selected(mask, vector);
	while (smp_tlb_wait < ncpu)
		ia32_pause();
	mtx_unlock_spin(&smp_ipi_mtx);
}

void
smp_cache_flush(void)
{

	if (smp_started)
		smp_tlb_shootdown(IPI_INVLCACHE, 0, 0);
}

void
smp_invltlb(void)
{

	if (smp_started) {
		smp_tlb_shootdown(IPI_INVLTLB, 0, 0);
	}
}

void
smp_invlpg(vm_offset_t addr)
{

	if (smp_started)
		smp_tlb_shootdown(IPI_INVLPG, addr, 0);
}

void
smp_invlpg_range(vm_offset_t addr1, vm_offset_t addr2)
{

	if (smp_started) {
		smp_tlb_shootdown(IPI_INVLRNG, addr1, addr2);
	}
}

void
smp_masked_invltlb(u_int mask)
{

	if (smp_started) {
		smp_targeted_tlb_shootdown(mask, IPI_INVLTLB, 0, 0);
	}
}

void
smp_masked_invlpg(u_int mask, vm_offset_t addr)
{

	if (smp_started) {
		smp_targeted_tlb_shootdown(mask, IPI_INVLPG, addr, 0);
	}
}

void
smp_masked_invlpg_range(u_int mask, vm_offset_t addr1, vm_offset_t addr2)
{

	if (smp_started) {
		smp_targeted_tlb_shootdown(mask, IPI_INVLRNG, addr1, addr2);
	}
}

void
ipi_bitmap_handler(struct clockframe frame)
{
	int cpu = PCPU_GET(cpuid);
	u_int ipi_bitmap;

	ipi_bitmap = atomic_readandclear_int(&cpu_ipi_pending[cpu]);

#ifdef IPI_PREEMPTION
	if (ipi_bitmap & (1 << IPI_PREEMPT)) {
		mtx_lock_spin(&sched_lock);
		/* Don't preempt the idle thread */
		if (curthread != PCPU_GET(idlethread)) {
			struct thread *running_thread = curthread;

			if (running_thread->td_critnest > 1)
				running_thread->td_owepreempt = 1;
			else
				mi_switch(SW_INVOL | SW_PREEMPT, NULL);
		}
		mtx_unlock_spin(&sched_lock);
	}
#endif

	/* Nothing to do for AST */
}

/*
 * send an IPI to a set of CPUs.
 */
void
ipi_selected(u_int32_t cpus, u_int ipi)
{
	int cpu;
	u_int bitmap = 0;
	u_int old_pending;
	u_int new_pending;

	if (IPI_IS_BITMAPED(ipi)) {
		bitmap = 1 << ipi;
		ipi = IPI_BITMAP_VECTOR;
	}
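
	/*
	 * Bitmapped IPIs share the single IPI_BITMAP_VECTOR: the specific
	 * request is recorded in cpu_ipi_pending and the vector is only
	 * sent when the target's pending mask was previously empty, so
	 * back-to-back requests coalesce into one interrupt.
	 */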

	CTR3(KTR_SMP, "%s: cpus: %x ipi: %x", __func__, cpus, ipi);
	while ((cpu = ffs(cpus)) != 0) {
		cpu--;
		cpus &= ~(1 << cpu);

		KASSERT(cpu_apic_ids[cpu] != -1,
		    ("IPI to non-existent CPU %d", cpu));

		if (bitmap) {
			do {
				old_pending = cpu_ipi_pending[cpu];
				new_pending = old_pending | bitmap;
			} while (!atomic_cmpset_int(&cpu_ipi_pending[cpu],
			    old_pending, new_pending));

			if (old_pending)
				continue;
		}

		lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]);
	}
}

/*
 * send an IPI to all CPUs, including myself
 */
void
ipi_all(u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_ALL);
}

/*
 * send an IPI to all CPUs EXCEPT myself
 */
void
ipi_all_but_self(u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_OTHERS);
}

/*
 * send an IPI to myself
 */
void
ipi_self(u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_SELF);
}

#ifdef KDB_STOP_NMI
/*
 * send NMI IPI to selected CPUs
 */

#define BEFORE_SPIN 1000000

void
ipi_nmi_selected(u_int32_t cpus)
{
	int cpu;
	register_t icrlo;

	icrlo = APIC_DELMODE_NMI | APIC_DESTMODE_PHY | APIC_LEVEL_ASSERT
	    | APIC_TRIGMOD_EDGE;

	CTR2(KTR_SMP, "%s: cpus: %x nmi", __func__, cpus);

	atomic_set_int(&ipi_nmi_pending, cpus);

	while ((cpu = ffs(cpus)) != 0) {
		cpu--;
		cpus &= ~(1 << cpu);

		KASSERT(cpu_apic_ids[cpu] != -1,
		    ("IPI NMI to non-existent CPU %d", cpu));

		/* Wait for an earlier IPI to finish. */
		if (!lapic_ipi_wait(BEFORE_SPIN))
			panic("ipi_nmi_selected: previous IPI has not cleared");

		lapic_ipi_raw(icrlo, cpu_apic_ids[cpu]);
	}
}

int
ipi_nmi_handler(void)
{
	int cpu = PCPU_GET(cpuid);

	if (!(atomic_load_acq_int(&ipi_nmi_pending) & (1 << cpu)))
		return 1;

	atomic_clear_int(&ipi_nmi_pending, 1 << cpu);

	savectx(&stoppcbs[cpu]);

	/* Indicate that we are stopped */
	atomic_set_int(&stopped_cpus, 1 << cpu);

	/* Wait for restart */
	while (!(atomic_load_acq_int(&started_cpus) & (1 << cpu)))
		ia32_pause();

	atomic_clear_int(&started_cpus, 1 << cpu);
	atomic_clear_int(&stopped_cpus, 1 << cpu);

	if (cpu == 0 && cpustop_restartfunc != NULL)
		cpustop_restartfunc();

	return 0;
}

#endif /* KDB_STOP_NMI */

/*
 * This is called once the rest of the system is up and running and we're
 * ready to let the APs out of the pen.
 */
static void
release_aps(void *dummy __unused)
{

	if (mp_ncpus == 1)
		return;
	mtx_lock_spin(&sched_lock);
	atomic_store_rel_int(&aps_ready, 1);
	while (smp_started == 0)
		ia32_pause();
	mtx_unlock_spin(&sched_lock);
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);

static int
sysctl_hlt_cpus(SYSCTL_HANDLER_ARGS)
{
	u_int mask;
	int error;

	mask = hlt_cpus_mask;
	error = sysctl_handle_int(oidp, &mask, 0, req);
	if (error || !req->newptr)
		return (error);

	if (logical_cpus_mask != 0 &&
	    (mask & logical_cpus_mask) == logical_cpus_mask)
		hlt_logical_cpus = 1;
	else
		hlt_logical_cpus = 0;

	if (! hyperthreading_allowed)
		mask |= hyperthreading_cpus_mask;

	if ((mask & all_cpus) == all_cpus)
		mask &= ~(1<<0);
	hlt_cpus_mask = mask;
	return (error);
}
SYSCTL_PROC(_machdep, OID_AUTO, hlt_cpus, CTLTYPE_INT|CTLFLAG_RW,
    0, 0, sysctl_hlt_cpus, "IU",
    "Bitmap of CPUs to halt.  101 (binary) will halt CPUs 0 and 2.");

static int
sysctl_hlt_logical_cpus(SYSCTL_HANDLER_ARGS)
{
	int disable, error;

	disable = hlt_logical_cpus;
	error = sysctl_handle_int(oidp, &disable, 0, req);
	if (error || !req->newptr)
		return (error);

	if (disable)
		hlt_cpus_mask |= logical_cpus_mask;
	else
		hlt_cpus_mask &= ~logical_cpus_mask;

	if (! hyperthreading_allowed)
		hlt_cpus_mask |= hyperthreading_cpus_mask;

	if ((hlt_cpus_mask & all_cpus) == all_cpus)
		hlt_cpus_mask &= ~(1<<0);

	hlt_logical_cpus = disable;
	return (error);
}

static int
sysctl_hyperthreading_allowed(SYSCTL_HANDLER_ARGS)
{
	int allowed, error;

	allowed = hyperthreading_allowed;
	error = sysctl_handle_int(oidp, &allowed, 0, req);
	if (error || !req->newptr)
		return (error);

	if (allowed)
		hlt_cpus_mask &= ~hyperthreading_cpus_mask;
	else
		hlt_cpus_mask |= hyperthreading_cpus_mask;

	if (logical_cpus_mask != 0 &&
	    (hlt_cpus_mask & logical_cpus_mask) == logical_cpus_mask)
		hlt_logical_cpus = 1;
	else
		hlt_logical_cpus = 0;

	if ((hlt_cpus_mask & all_cpus) == all_cpus)
		hlt_cpus_mask &= ~(1<<0);

	hyperthreading_allowed = allowed;
	return (error);
}

static void
cpu_hlt_setup(void *dummy __unused)
{

	if (logical_cpus_mask != 0) {
		TUNABLE_INT_FETCH("machdep.hlt_logical_cpus",
		    &hlt_logical_cpus);
		sysctl_ctx_init(&logical_cpu_clist);
		SYSCTL_ADD_PROC(&logical_cpu_clist,
		    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
		    "hlt_logical_cpus", CTLTYPE_INT|CTLFLAG_RW, 0, 0,
		    sysctl_hlt_logical_cpus, "IU", "");
		SYSCTL_ADD_UINT(&logical_cpu_clist,
		    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
		    "logical_cpus_mask", CTLTYPE_INT|CTLFLAG_RD,
		    &logical_cpus_mask, 0, "");

		if (hlt_logical_cpus)
			hlt_cpus_mask |= logical_cpus_mask;

		/*
		 * If necessary for security purposes, force
		 * hyperthreading off, regardless of the value
		 * of hlt_logical_cpus.
		 */
		if (hyperthreading_cpus_mask) {
			TUNABLE_INT_FETCH("machdep.hyperthreading_allowed",
			    &hyperthreading_allowed);
			SYSCTL_ADD_PROC(&logical_cpu_clist,
			    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
			    "hyperthreading_allowed", CTLTYPE_INT|CTLFLAG_RW,
			    0, 0, sysctl_hyperthreading_allowed, "IU", "");
			if (! hyperthreading_allowed)
				hlt_cpus_mask |= hyperthreading_cpus_mask;
		}
	}
}
SYSINIT(cpu_hlt, SI_SUB_SMP, SI_ORDER_ANY, cpu_hlt_setup, NULL);

int
mp_grab_cpu_hlt(void)
{
	u_int mask = PCPU_GET(cpumask);
#ifdef MP_WATCHDOG
	u_int cpuid = PCPU_GET(cpuid);
#endif
	int retval;

#ifdef MP_WATCHDOG
	ap_watchdog(cpuid);
#endif

	retval = mask & hlt_cpus_mask;
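
	/*
	 * "sti" enables interrupts only after the following instruction,
	 * so "sti; hlt" is race-free: an interrupt cannot be taken between
	 * the two and leave the CPU halted after its wakeup was delivered.
	 */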
	while (mask & hlt_cpus_mask)
		__asm __volatile("sti; hlt" : : : "memory");
	return (retval);
}