/*-
 * Copyright (c) 1996, by Steve Passe
 * Copyright (c) 2003, by Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: src/sys/amd64/amd64/mp_machdep.c,v 1.242.2.12 2006/04/28 06:53:23 cperciva Exp $");

#include "opt_cpu.h"
#include "opt_kstack_pages.h"
#include "opt_mp_watchdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#ifdef GPROF
#include <sys/gmon.h>
#endif
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <machine/apicreg.h>
#include <machine/clock.h>
#include <machine/md_var.h>
#include <machine/mp_watchdog.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/tss.h>

#define	WARMBOOT_TARGET		0
#define	WARMBOOT_OFF		(KERNBASE + 0x0467)
#define	WARMBOOT_SEG		(KERNBASE + 0x0469)

#define	CMOS_REG		(0x70)
#define	CMOS_DATA		(0x71)
#define	BIOS_RESET		(0x0f)
#define	BIOS_WARM		(0x0a)
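
/*
 * A note on the warm-boot machinery above: CMOS register 0x0f is the
 * BIOS shutdown-status byte, and writing 0x0a there requests a warm
 * start, i.e. on the next reset the BIOS jumps through the vector
 * stored at physical address 0x467 (offset) and 0x469 (segment)
 * instead of doing a full POST.  start_all_aps() temporarily points
 * this vector at the AP trampoline, covering hardware where an AP
 * comes out of INIT via the BIOS reset path rather than directly via
 * the STARTUP IPI.
 */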

/* lock region used by kernel profiling */
int	mcount_lock;

int	mp_naps;		/* # of Application Processors */
int	boot_cpu_id = -1;	/* designated BSP */
extern	int nkpt;

/*
 * CPU topology map data structures for HTT.
 */
static struct cpu_group mp_groups[MAXCPU];
static struct cpu_top mp_top;

/* AP uses this during bootstrap.  Do not staticize.  */
char *bootSTK;
static int bootAP;

/* Free these after use */
void *bootstacks[MAXCPU];

/* Hotwire a 0->4MB V==P mapping */
extern pt_entry_t *KPTphys;

/* SMP page table page */
extern pt_entry_t *SMPpt;

struct pcb stoppcbs[MAXCPU];

/* Variables needed for SMP tlb shootdown. */
vm_offset_t smp_tlb_addr1;
vm_offset_t smp_tlb_addr2;
volatile int smp_tlb_wait;
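
/*
 * Protocol sketch for these variables, as the callers below expect it
 * to work: the initiating CPU zeroes smp_tlb_wait, posts the chosen
 * invalidation IPI, and spins; each target CPU performs the requested
 * invalidation in its interrupt handler and atomically increments
 * smp_tlb_wait, so the initiator can proceed once the count reaches
 * the number of targets.  See smp_tlb_shootdown() below.
 */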

extern inthand_t IDTVEC(fast_syscall), IDTVEC(fast_syscall32);

/*
 * Local data and functions.
 */

static u_int logical_cpus;

/* used to hold the APs until we are ready to release them */
static struct mtx ap_boot_mtx;

/* Set to 1 once we're ready to let the APs out of the pen. */
static volatile int aps_ready = 0;

/*
 * Store data from cpu_add() until later in the boot when we actually set up
 * the APs.
 */
struct cpu_info {
	int	cpu_present:1;
	int	cpu_bsp:1;
	int	cpu_disabled:1;
} static cpu_info[MAXCPU];
static int cpu_apic_ids[MAXCPU];

/* Holds pending bitmap based IPIs per CPU */
static volatile u_int cpu_ipi_pending[MAXCPU];

static u_int boot_address;

static void	set_logical_apic_ids(void);
static int	start_all_aps(void);
static int	start_ap(int apic_id);
static void	release_aps(void *dummy);

static int hlt_logical_cpus;
static u_int hyperthreading_cpus;
static cpumask_t hyperthreading_cpus_mask;
static int hyperthreading_allowed;
static struct sysctl_ctx_list logical_cpu_clist;
static u_int bootMP_size;

static void
mem_range_AP_init(void)
{
	if (mem_range_softc.mr_op && mem_range_softc.mr_op->initAP)
		mem_range_softc.mr_op->initAP(&mem_range_softc);
}

void
mp_topology(void)
{
	struct cpu_group *group;
	int logical_cpus;
	int apic_id;
	int groups;
	int cpu;

	/* Build the smp_topology map. */
	/* Nothing to do if there is no HTT support. */
	if ((cpu_feature & CPUID_HTT) == 0)
		return;
	logical_cpus = (cpu_procinfo & CPUID_HTT_CORES) >> 16;
	if (logical_cpus <= 1)
		return;
	group = &mp_groups[0];
	groups = 1;
	for (cpu = 0, apic_id = 0; apic_id < MAXCPU; apic_id++) {
		if (!cpu_info[apic_id].cpu_present)
			continue;
		/*
		 * If the current group has members and we're not a logical
		 * cpu, create a new group.
		 */
		if (group->cg_count != 0 && (apic_id % logical_cpus) == 0) {
			group++;
			groups++;
		}
		group->cg_count++;
		group->cg_mask |= 1 << cpu;
		cpu++;
	}

	mp_top.ct_count = groups;
	mp_top.ct_group = mp_groups;
	smp_topology = &mp_top;
}
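
/*
 * Worked example for the grouping above, on a hypothetical machine:
 * two 2-thread HTT packages with APIC IDs 0,1 and 2,3 give
 * logical_cpus = 2.  CPUs 0 and 1 land in the first group; at APIC
 * ID 2 the group is non-empty and 2 % 2 == 0, so a second group is
 * started for CPUs 2 and 3.  The result is two groups with cg_mask
 * values 0x3 and 0xc.
 */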

#ifdef KDB_STOP_NMI
volatile cpumask_t	ipi_nmi_pending;
#endif

/*
 * Calculate usable address in base memory for AP trampoline code.
 */
u_int
mp_bootaddress(u_int basemem)
{

	bootMP_size = mptramp_end - mptramp_start;
	/* Round down to a 4k boundary. */
	boot_address = trunc_page(basemem * 1024);
	if (((basemem * 1024) - boot_address) < bootMP_size)
		boot_address -= PAGE_SIZE;	/* not enough, lower by 4k */
	/* 3 levels of page table pages */
	mptramp_pagetables = boot_address - (PAGE_SIZE * 3);

	return mptramp_pagetables;
}
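
/*
 * For reference, the layout this produces at the top of conventional
 * memory, from low to high addresses:
 *
 *	mptramp_pagetables:	3 pages of bootstrap page tables
 *	boot_address:		the trampoline code (bootMP_size bytes),
 *				ending at or below basemem * 1024
 */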

void
cpu_add(u_int apic_id, char boot_cpu)
{

	if (apic_id >= MAXCPU) {
		printf("SMP: CPU %d exceeds maximum CPU %d, ignoring\n",
		    apic_id, MAXCPU - 1);
		return;
	}
	KASSERT(cpu_info[apic_id].cpu_present == 0, ("CPU %d added twice",
	    apic_id));
	cpu_info[apic_id].cpu_present = 1;
	if (boot_cpu) {
		KASSERT(boot_cpu_id == -1,
		    ("CPU %d claims to be BSP, but CPU %d already is", apic_id,
		    boot_cpu_id));
		boot_cpu_id = apic_id;
		cpu_info[apic_id].cpu_bsp = 1;
	}
	mp_ncpus++;
	if (apic_id > mp_maxid)
		mp_maxid = apic_id;
	if (bootverbose)
		printf("SMP: Added CPU %d (%s)\n", apic_id, boot_cpu ? "BSP" :
		    "AP");
}

void
cpu_mp_setmaxid(void)
{

	/*
	 * mp_maxid should already be set by calls to cpu_add().
	 * Just sanity check its value here.
	 */
	if (mp_ncpus == 0)
		KASSERT(mp_maxid == 0,
		    ("%s: mp_ncpus is zero, but mp_maxid is not", __func__));
	else if (mp_ncpus == 1)
		mp_maxid = 0;
	else
		KASSERT(mp_maxid >= mp_ncpus - 1,
		    ("%s: counters out of sync: max %d, count %d", __func__,
		    mp_maxid, mp_ncpus));
}

int
cpu_mp_probe(void)
{

	/*
	 * Always record BSP in CPU map so that the mbuf init code works
	 * correctly.
	 */
	all_cpus = 1;
	if (mp_ncpus == 0) {
		/*
		 * No CPUs were found, so this must be a UP system.  Set up
		 * the variables to represent a system with a single CPU
		 * with an id of 0.
		 */
		mp_ncpus = 1;
		return (0);
	}

	/* At least one CPU was found. */
	if (mp_ncpus == 1) {
		/*
		 * One CPU was found, so this must be a UP system with
		 * an I/O APIC.
		 */
		mp_maxid = 0;
		return (0);
	}

	/* At least two CPUs were found. */
	return (1);
}

/*
 * Initialize the IPI handlers and start up the APs.
 */
void
cpu_mp_start(void)
{
	int i;
	u_int threads_per_cache, p[4];

	/* Initialize the logical ID to APIC ID table. */
	for (i = 0; i < MAXCPU; i++) {
		cpu_apic_ids[i] = -1;
		cpu_ipi_pending[i] = 0;
	}

	/* Install an inter-CPU IPI for TLB invalidation */
	setidt(IPI_INVLTLB, IDTVEC(invltlb), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IPI_INVLPG, IDTVEC(invlpg), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IPI_INVLRNG, IDTVEC(invlrng), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for all-CPU rendezvous */
	setidt(IPI_RENDEZVOUS, IDTVEC(rendezvous), SDT_SYSIGT, SEL_KPL, 0);

	/* Install generic inter-CPU IPI handler */
	setidt(IPI_BITMAP_VECTOR, IDTVEC(ipi_intr_bitmap_handler),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for CPU stop/restart */
	setidt(IPI_STOP, IDTVEC(cpustop), SDT_SYSIGT, SEL_KPL, 0);

	/* Set boot_cpu_id if needed. */
	if (boot_cpu_id == -1) {
		boot_cpu_id = PCPU_GET(apic_id);
		cpu_info[boot_cpu_id].cpu_bsp = 1;
	} else
		KASSERT(boot_cpu_id == PCPU_GET(apic_id),
		    ("BSP's APIC ID doesn't match boot_cpu_id"));
	cpu_apic_ids[0] = boot_cpu_id;

	/* Start each Application Processor */
	start_all_aps();

	/* Set up the initial logical CPUs info. */
	logical_cpus = logical_cpus_mask = 0;
	if (cpu_feature & CPUID_HTT)
		logical_cpus = (cpu_procinfo & CPUID_HTT_CORES) >> 16;

	/*
	 * Work out if hyperthreading is *really* enabled.  This
	 * is made really ugly by the fact that processors lie: dual
	 * core processors claim to be hyperthreaded even when they're
	 * not, presumably because they want to be treated the same
	 * way as HTT with respect to per-cpu software licensing.
	 * At the time of writing (May 12, 2005) the only hyperthreaded
	 * cpus are from Intel, and Intel's dual-core processors can be
	 * identified via the "deterministic cache parameters" cpuid
	 * calls.
	 */
	/*
	 * First determine if this is an Intel processor that claims
	 * to have hyperthreading support.
	 */
	if ((cpu_feature & CPUID_HTT) &&
	    (strcmp(cpu_vendor, "GenuineIntel") == 0)) {
		/*
		 * If the "deterministic cache parameters" cpuid calls
		 * are available, use them.
		 */
		if (cpu_high >= 4) {
			/* Ask the processor about the L1 cache. */
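			/*
			 * CPUID leaf 4 ("deterministic cache parameters"):
			 * EAX bits 25:14 encode the maximum number of
			 * logical processors sharing this cache, minus one,
			 * which is what the mask-shift-add below extracts.
			 * EAX bits 4:0 are the cache type and read as zero
			 * once the subleaf index runs past the last cache.
			 */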
			for (i = 0; i < 1; i++) {
				cpuid_count(4, i, p);
				threads_per_cache = ((p[0] & 0x3ffc000) >> 14) + 1;
				if (hyperthreading_cpus < threads_per_cache)
					hyperthreading_cpus = threads_per_cache;
				if ((p[0] & 0x1f) == 0)
					break;
			}
		}

		/*
		 * If the deterministic cache parameters are not
		 * available, or if no caches were reported to exist,
		 * just accept what the HTT flag indicated.
		 */
		if (hyperthreading_cpus == 0)
			hyperthreading_cpus = logical_cpus;
	}

	set_logical_apic_ids();
}

/*
 * Print various information about the SMP system hardware and setup.
 */
void
cpu_mp_announce(void)
{
	int i, x;

	/* List CPUs */
	printf(" cpu0 (BSP): APIC ID: %2d\n", boot_cpu_id);
	for (i = 1, x = 0; x < MAXCPU; x++) {
		if (!cpu_info[x].cpu_present || cpu_info[x].cpu_bsp)
			continue;
		if (cpu_info[x].cpu_disabled)
			printf("  cpu (AP): APIC ID: %2d (disabled)\n", x);
		else {
			KASSERT(i < mp_ncpus,
			    ("mp_ncpus and actual cpus are out of whack"));
			printf(" cpu%d (AP): APIC ID: %2d\n", i++, x);
		}
	}
}

/*
 * AP CPUs call this to initialize themselves.
 */
void
init_secondary(void)
{
	struct pcpu *pc;
	u_int64_t msr, cr0;
	int cpu, gsel_tss;

	/* Set by the startup code for us to use */
	cpu = bootAP;

	/* Init tss */
	common_tss[cpu] = common_tss[0];
	common_tss[cpu].tss_rsp0 = 0;	/* not used until after switch */
	common_tss[cpu].tss_iobase = sizeof(struct amd64tss);

	gdt_segs[GPROC0_SEL].ssd_base = (long) &common_tss[cpu];
	ssdtosyssd(&gdt_segs[GPROC0_SEL],
	    (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);

	lgdt(&r_gdt);			/* does magic intra-segment return */

	/* Get per-cpu data */
	pc = &__pcpu[cpu];

	/* prime data page for it to use */
	pcpu_init(pc, cpu, sizeof(struct pcpu));
	pc->pc_apic_id = cpu_apic_ids[cpu];
	pc->pc_prvspace = pc;
	pc->pc_curthread = 0;
	pc->pc_tssp = &common_tss[cpu];
	pc->pc_rsp0 = 0;

	wrmsr(MSR_FSBASE, 0);		/* User value */
	wrmsr(MSR_GSBASE, (u_int64_t)pc);
	wrmsr(MSR_KGSBASE, (u_int64_t)pc); /* XXX User value while we're in the kernel */

	lidt(&r_idt);

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	ltr(gsel_tss);

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);

	/* Set up the fast syscall stuff */
	msr = rdmsr(MSR_EFER) | EFER_SCE;
	wrmsr(MSR_EFER, msr);
	wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall));
	wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
	msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
	    ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
	wrmsr(MSR_STAR, msr);
	wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D);
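
	/*
	 * On the MSR programming above: STAR bits 47:32 hold the kernel
	 * CS selector loaded on SYSCALL (SS is derived as CS + 8), and
	 * bits 63:48 hold the base selector used by SYSRET for 32-bit
	 * user code.  SF_MASK lists the RFLAGS bits cleared on SYSCALL
	 * entry; clearing PSL_I in particular means we enter the kernel
	 * with interrupts disabled.
	 */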

	/* Disable local apic just to be sure. */
	lapic_disable();

	/* signal our startup to the BSP. */
	mp_naps++;

	/* Spin until the BSP releases the APs. */
	while (!aps_ready)
		ia32_pause();

	/* set up CPU registers and state */
	cpu_setregs();

	/* set up SSE/NX registers */
	initializecpu();

	/* set up FPU state on the AP */
	fpuinit();

	/* A quick check from sanity claus */
	if (PCPU_GET(apic_id) != lapic_id()) {
		printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
		printf("SMP: actual apic_id = %d\n", lapic_id());
		printf("SMP: correct apic_id = %d\n", PCPU_GET(apic_id));
		panic("cpuid mismatch! boom!!");
	}

	mtx_lock_spin(&ap_boot_mtx);

	/* Init local apic for IRQs */
	lapic_setup();

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();

	smp_cpus++;

	CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
	printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));

	/* Determine if we are a logical CPU. */
	if (logical_cpus > 1 && PCPU_GET(apic_id) % logical_cpus != 0)
		logical_cpus_mask |= PCPU_GET(cpumask);

	/* Determine if we are a hyperthread. */
	if (hyperthreading_cpus > 1 &&
	    PCPU_GET(apic_id) % hyperthreading_cpus != 0)
		hyperthreading_cpus_mask |= PCPU_GET(cpumask);

	/* Build our map of 'other' CPUs. */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

	if (bootverbose)
		lapic_dump("AP");

	if (smp_cpus == mp_ncpus) {
		/* enable IPIs, tlb shootdown, freezes etc */
		atomic_store_rel_int(&smp_started, 1);
		smp_active = 1;		/* historic */
	}

	mtx_unlock_spin(&ap_boot_mtx);

	/* wait until all the APs are up */
	while (smp_started == 0)
		ia32_pause();

	/* ok, now grab sched_lock and enter the scheduler */
	mtx_lock_spin(&sched_lock);

	binuptime(PCPU_PTR(switchtime));
	PCPU_SET(switchticks, ticks);

	cpu_throw(NULL, choosethread());	/* doesn't return */

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}

/*******************************************************************
 * local functions and data
 */

/*
 * Set the APIC logical IDs.
 *
 * We want to cluster logical CPUs within the same APIC ID cluster.
 * Since logical CPUs are aligned, simply filling in the clusters in
 * APIC ID order works fine.  Note that this does not try to balance
 * the number of CPUs in each cluster. (XXX?)
 */
static void
set_logical_apic_ids(void)
{
	u_int apic_id, cluster, cluster_id;

	/* Force us to allocate cluster 0 at the start. */
	cluster = -1;
	cluster_id = APIC_MAX_INTRACLUSTER_ID;
	for (apic_id = 0; apic_id < MAXCPU; apic_id++) {
		if (!cpu_info[apic_id].cpu_present)
			continue;
		if (cluster_id == APIC_MAX_INTRACLUSTER_ID) {
			cluster = ioapic_next_logical_cluster();
			cluster_id = 0;
		} else
			cluster_id++;
		if (bootverbose)
			printf("APIC ID: physical %u, logical %u:%u\n",
			    apic_id, cluster, cluster_id);
		lapic_set_logical_id(apic_id, cluster, cluster_id);
	}
}

/*
 * start each AP in our list
 */
static int
start_all_aps(void)
{
	u_char mpbiosreason;
	u_int32_t mpbioswarmvec;
	int apic_id, cpu, i;
	u_int64_t *pt4, *pt3, *pt2;
	vm_offset_t va = boot_address + KERNBASE;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	/* install the AP 1st level boot code */
	pmap_kenter(va, boot_address);
	pmap_invalidate_page(kernel_pmap, va);
	bcopy(mptramp_start, (void *)va, bootMP_size);

	/* Locate the page tables; they'll be below the trampoline */
	pt4 = (u_int64_t *)(uintptr_t)(mptramp_pagetables + KERNBASE);
	pt3 = pt4 + (PAGE_SIZE) / sizeof(u_int64_t);
	pt2 = pt3 + (PAGE_SIZE) / sizeof(u_int64_t);

	/* Create the initial 1GB replicated page tables */
	for (i = 0; i < 512; i++) {
		/* Each slot of the level 4 pages points to the same level 3 page */
		pt4[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + PAGE_SIZE);
		pt4[i] |= PG_V | PG_RW | PG_U;

		/* Each slot of the level 3 pages points to the same level 2 page */
		pt3[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + (2 * PAGE_SIZE));
		pt3[i] |= PG_V | PG_RW | PG_U;

		/* The level 2 page slots are mapped with 2MB pages for 1GB. */
		pt2[i] = i * (2 * 1024 * 1024);
		pt2[i] |= PG_V | PG_RW | PG_PS | PG_U;
	}
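
	/*
	 * The replication means that the low 1GB of physical memory is
	 * identity-mapped (V == P) no matter which virtual address is
	 * used: every PML4 slot reaches the same PDP page, every PDP
	 * slot reaches the same PD page, and the PD maps physical 0-1GB
	 * with 2MB pages.  The trampoline can therefore enable long
	 * mode and paging while still executing from its low physical
	 * address.
	 */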

	/* save the current value of the warm-start vector */
	mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);

	/* set up a vector to our boot code */
	*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
	*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

	/* start each AP */
	cpu = 0;
	for (apic_id = 0; apic_id < MAXCPU; apic_id++) {
		/* Ignore non-existent CPUs and the BSP. */
		if (!cpu_info[apic_id].cpu_present ||
		    cpu_info[apic_id].cpu_bsp)
			continue;

		/* Don't use this CPU if it has been disabled by a tunable. */
		if (resource_disabled("lapic", apic_id)) {
			cpu_info[apic_id].cpu_disabled = 1;
			mp_ncpus--;
			continue;
		}

		cpu++;

		/* save APIC ID for this logical ID */
		cpu_apic_ids[cpu] = apic_id;

		/* allocate and set up an idle stack data page */
		bootstacks[cpu] = (char *)kmem_alloc(kernel_map,
		    KSTACK_PAGES * PAGE_SIZE);

		bootSTK = (char *)bootstacks[cpu] + KSTACK_PAGES * PAGE_SIZE - 8;
		bootAP = cpu;

		/* attempt to start the Application Processor */
		if (!start_ap(apic_id)) {
			/* restore the warmstart vector */
			*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;
			panic("AP #%d (PHY# %d) failed!", cpu, apic_id);
		}

		all_cpus |= (1 << cpu);		/* record AP in CPU map */
	}

	/* build our map of 'other' CPUs */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

	/* restore the warmstart vector */
	*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;

	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);

	/* number of APs actually started */
	return mp_naps;
}

/*
 * This function starts the AP (application processor) identified
 * by the given APIC ID.  It does quite a "song and dance"
 * to accomplish this.  This is necessary because of the nuances
 * of the different hardware we might encounter.  It isn't pretty,
 * but it seems to work.
 */
static int
start_ap(int apic_id)
{
	int vector, ms;
	int cpus;

	/* calculate the vector */
	vector = (boot_address >> 12) & 0xff;
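
	/*
	 * The 8-bit STARTUP IPI vector is the physical page number of
	 * the real-mode entry point: the AP begins execution at
	 * vector << 12, so boot_address must be page-aligned and below
	 * 1MB for the vector to address the trampoline.
	 */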

	/* used as a watchpoint to signal AP startup */
	cpus = mp_naps;

	/*
	 * first we do an INIT IPI: this INIT IPI might be run, resetting
	 * and running the target CPU, OR this INIT IPI might be latched (P5
	 * bug), with the CPU waiting for a STARTUP IPI, OR this INIT IPI
	 * might be ignored.
	 */

	/* do an INIT IPI: assert RESET */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, apic_id);

	/* wait for pending status end */
	lapic_ipi_wait(-1);

	/* do an INIT IPI: deassert RESET */
	lapic_ipi_raw(APIC_DEST_ALLESELF | APIC_TRIGMOD_LEVEL |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, 0);

	/* wait for pending status end */
	DELAY(10000);		/* wait ~10ms */
	lapic_ipi_wait(-1);

	/*
	 * next we do a STARTUP IPI: the previous INIT IPI might still be
	 * latched (P5 bug), in which case this first STARTUP IPI terminates
	 * immediately and the previously started INIT IPI continues, OR
	 * the previous INIT IPI has already run and this STARTUP IPI will
	 * run, OR the previous INIT IPI was ignored and this STARTUP IPI
	 * will run.
	 */

	/* do a STARTUP IPI */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
	    vector, apic_id);
	lapic_ipi_wait(-1);
	DELAY(200);		/* wait ~200us */

	/*
	 * finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run
	 * IF the previous STARTUP IPI was cancelled by a latched INIT IPI,
	 * OR this STARTUP IPI will be ignored, as only ONE STARTUP IPI is
	 * recognized after hardware RESET or INIT IPI.
	 */

	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
	    vector, apic_id);
	lapic_ipi_wait(-1);
	DELAY(200);		/* wait ~200us */

	/* Wait up to 5 seconds for it to start. */
	for (ms = 0; ms < 50; ms++) {
		if (mp_naps > cpus)
			return 1;	/* return SUCCESS */
		DELAY(100000);
	}
	return 0;		/* return FAILURE */
}

/*
 * Flush the TLB on all other CPUs
 */
static void
smp_tlb_shootdown(u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
	u_int ncpu;

	ncpu = mp_ncpus - 1;	/* does not shoot down self */
	if (ncpu < 1)
		return;		/* no other cpus */
	mtx_assert(&smp_ipi_mtx, MA_OWNED);
	smp_tlb_addr1 = addr1;
	smp_tlb_addr2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	ipi_all_but_self(vector);
	/*
	 * Enable interrupts here to work around Opteron Erratum 106.
	 * The while loop runs entirely out of instruction cache,
	 * which blocks updates to the cache from other CPUs.
	 * Interrupts break the lock, allowing the write to post.
	 */
	enable_intr();
	while (smp_tlb_wait < ncpu)
		ia32_pause();
	disable_intr();
}

/*
 * This is about as magic as it gets.  fortune(1) has got similar code
 * for reversing bits in a word.  Who thinks up this stuff??
 *
 * Yes, it does appear to be consistently faster than:
 * while (i = ffs(m)) {
 *	m >>= i;
 *	bits++;
 * }
 * and
 * while (lsb = (m & -m)) {	// This is magic too
 *	m &= ~lsb;		// or: m ^= lsb
 *	bits++;
 * }
 * Both of these latter forms do some very strange things on gcc-3.1 with
 * -mcpu=pentiumpro and/or -march=pentiumpro and/or -O or -O2.
 * There is probably an SSE or MMX popcnt instruction.
 *
 * I wonder if this should be in libkern?
 *
 * XXX Stop the presses!  Another one:
 * static __inline u_int32_t
 * popcnt1(u_int32_t v)
 * {
 *	v -= ((v >> 1) & 0x55555555);
 *	v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
 *	v = (v + (v >> 4)) & 0x0F0F0F0F;
 *	return (v * 0x01010101) >> 24;
 * }
 * The downside is that it has a multiply.  With a pentium3 with
 * -mcpu=pentiumpro and -march=pentiumpro then gcc-3.1 will use
 * an imull, and in that case it is faster.  In most other cases
 * it appears slightly slower.
 *
 * Another variant (also from fortune):
 * #define BITCOUNT(x)	(((BX_(x) + (BX_(x) >> 4)) & 0x0F0F0F0F) % 255)
 * #define BX_(x)	((x) - (((x) >> 1) & 0x77777777) \
 *			     - (((x) >> 2) & 0x33333333) \
 *			     - (((x) >> 3) & 0x11111111))
 */
static __inline u_int32_t
popcnt(u_int32_t m)
{

	m = (m & 0x55555555) + ((m & 0xaaaaaaaa) >> 1);
	m = (m & 0x33333333) + ((m & 0xcccccccc) >> 2);
	m = (m & 0x0f0f0f0f) + ((m & 0xf0f0f0f0) >> 4);
	m = (m & 0x00ff00ff) + ((m & 0xff00ff00) >> 8);
	m = (m & 0x0000ffff) + ((m & 0xffff0000) >> 16);
	return m;
}
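
/*
 * A note for modern readers: gcc and clang now provide
 * __builtin_popcount(), which compiles to a POPCNT instruction where
 * the target CPU supports one, so a sketch of this function today
 * would simply be:
 *
 *	static __inline u_int32_t
 *	popcnt(u_int32_t m)
 *	{
 *		return (__builtin_popcount(m));
 *	}
 */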

static void
smp_targeted_tlb_shootdown(u_int mask, u_int vector, vm_offset_t addr1,
    vm_offset_t addr2)
{
	int ncpu, othercpus;

	othercpus = mp_ncpus - 1;
	if (mask == (u_int)-1) {
		ncpu = othercpus;
		if (ncpu < 1)
			return;
	} else {
		mask &= ~PCPU_GET(cpumask);
		if (mask == 0)
			return;
		ncpu = popcnt(mask);
		if (ncpu > othercpus) {
			/* XXX this should be a panic offence */
			printf("SMP: tlb shootdown to %d other cpus (only have %d)\n",
			    ncpu, othercpus);
			ncpu = othercpus;
		}
		/* XXX should be a panic, implied by mask == 0 above */
		if (ncpu < 1)
			return;
	}
	mtx_assert(&smp_ipi_mtx, MA_OWNED);
	smp_tlb_addr1 = addr1;
	smp_tlb_addr2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	if (mask == (u_int)-1)
		ipi_all_but_self(vector);
	else
		ipi_selected(mask, vector);
	/*
	 * Enable interrupts here to work around Opteron Erratum 106.
	 * The while loop runs entirely out of instruction cache,
	 * which blocks updates to the cache from other CPUs.
	 * Interrupts break the lock, allowing the write to post.
	 */
	enable_intr();
	while (smp_tlb_wait < ncpu)
		ia32_pause();
	disable_intr();
}

void
smp_invltlb(void)
{

	if (smp_started)
		smp_tlb_shootdown(IPI_INVLTLB, 0, 0);
}

void
smp_invlpg(vm_offset_t addr)
{

	if (smp_started)
		smp_tlb_shootdown(IPI_INVLPG, addr, 0);
}

void
smp_invlpg_range(vm_offset_t addr1, vm_offset_t addr2)
{

	if (smp_started)
		smp_tlb_shootdown(IPI_INVLRNG, addr1, addr2);
}

void
smp_masked_invltlb(u_int mask)
{

	if (smp_started)
		smp_targeted_tlb_shootdown(mask, IPI_INVLTLB, 0, 0);
}

void
smp_masked_invlpg(u_int mask, vm_offset_t addr)
{

	if (smp_started)
		smp_targeted_tlb_shootdown(mask, IPI_INVLPG, addr, 0);
}

void
smp_masked_invlpg_range(u_int mask, vm_offset_t addr1, vm_offset_t addr2)
{

	if (smp_started)
		smp_targeted_tlb_shootdown(mask, IPI_INVLRNG, addr1, addr2);
}

/*
 * For statclock, we send an IPI to all CPUs to have them call this
 * function.
 */
void
forward_statclock(void)
{
	int map;

	CTR0(KTR_SMP, "forward_statclock");

	if (!smp_started || cold || panicstr)
		return;

	map = PCPU_GET(other_cpus) & ~(stopped_cpus|hlt_cpus_mask);
	if (map != 0)
		ipi_selected(map, IPI_STATCLOCK);
}

/*
 * For each hardclock(), we send an IPI to all other CPUs to have them
 * execute this function.  It would be nice to reduce contention on
 * sched_lock if we could simply peek at the CPU to determine the user/kernel
 * state and call hardclock_process() on the CPU receiving the clock interrupt
 * and then just use a simple IPI to handle any ASTs if needed.
 */
void
forward_hardclock(void)
{
	u_int map;

	CTR0(KTR_SMP, "forward_hardclock");

	if (!smp_started || cold || panicstr)
		return;

	map = PCPU_GET(other_cpus) & ~(stopped_cpus|hlt_cpus_mask);
	if (map != 0)
		ipi_selected(map, IPI_HARDCLOCK);
}

void
ipi_bitmap_handler(struct clockframe frame)
{
	int cpu = PCPU_GET(cpuid);
	u_int ipi_bitmap;
	struct thread *td;

	ipi_bitmap = atomic_readandclear_int(&cpu_ipi_pending[cpu]);

	critical_enter();

	/* Nothing to do for AST */

	if (ipi_bitmap & (1 << IPI_HARDCLOCK)) {
		td = curthread;
		td->td_intr_nesting_level++;
		hardclock_process(&frame);
		td->td_intr_nesting_level--;
	}

	if (ipi_bitmap & (1 << IPI_STATCLOCK)) {
		CTR0(KTR_SMP, "forwarded_statclock");

		td = curthread;
		td->td_intr_nesting_level++;
		if (profprocs != 0)
			profclock(&frame);
		if (pscnt == psdiv)
			statclock(&frame);
		td->td_intr_nesting_level--;
	}

	critical_exit();
}

/*
 * send an IPI to a set of cpus.
 */
void
ipi_selected(u_int32_t cpus, u_int ipi)
{
	int cpu;
	u_int bitmap = 0;
	u_int old_pending;
	u_int new_pending;

	if (IPI_IS_BITMAPED(ipi)) {
		bitmap = 1 << ipi;
		ipi = IPI_BITMAP_VECTOR;
	}

	CTR3(KTR_SMP, "%s: cpus: %x ipi: %x", __func__, cpus, ipi);
	while ((cpu = ffs(cpus)) != 0) {
		cpu--;
		cpus &= ~(1 << cpu);

		KASSERT(cpu_apic_ids[cpu] != -1,
		    ("IPI to non-existent CPU %d", cpu));

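		/*
		 * Bitmapped IPIs share the single IPI_BITMAP_VECTOR:
		 * the specific request is recorded by atomically OR-ing
		 * it into the target's cpu_ipi_pending word.  If bits
		 * were already pending, a hardware IPI is already on
		 * its way and we can skip sending another one.
		 */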
		if (bitmap) {
			do {
				old_pending = cpu_ipi_pending[cpu];
				new_pending = old_pending | bitmap;
			} while (!atomic_cmpset_int(&cpu_ipi_pending[cpu],
			    old_pending, new_pending));

			if (old_pending)
				continue;
		}

		lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]);
	}
}

/*
 * send an IPI INTerrupt containing 'vector' to all CPUs, including myself
 */
void
ipi_all(u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_ALL);
}

/*
 * send an IPI to all CPUs EXCEPT myself
 */
void
ipi_all_but_self(u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_OTHERS);
}

/*
 * send an IPI to myself
 */
void
ipi_self(u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_SELF);
}

#ifdef KDB_STOP_NMI
/*
 * send NMI IPI to selected CPUs
 */

#define	BEFORE_SPIN	1000000

void
ipi_nmi_selected(u_int32_t cpus)
{
	int cpu;
	register_t icrlo;

	icrlo = APIC_DELMODE_NMI | APIC_DESTMODE_PHY | APIC_LEVEL_ASSERT |
	    APIC_TRIGMOD_EDGE;

	CTR2(KTR_SMP, "%s: cpus: %x nmi", __func__, cpus);

	atomic_set_int(&ipi_nmi_pending, cpus);

	while ((cpu = ffs(cpus)) != 0) {
		cpu--;
		cpus &= ~(1 << cpu);

		KASSERT(cpu_apic_ids[cpu] != -1,
		    ("IPI NMI to non-existent CPU %d", cpu));

		/* Wait for an earlier IPI to finish. */
		if (!lapic_ipi_wait(BEFORE_SPIN))
			panic("ipi_nmi_selected: previous IPI has not cleared");

		lapic_ipi_raw(icrlo, cpu_apic_ids[cpu]);
	}
}

int
ipi_nmi_handler(void)
{
	int cpu = PCPU_GET(cpuid);

	if (!(atomic_load_acq_int(&ipi_nmi_pending) & (1 << cpu)))
		return 1;

	atomic_clear_int(&ipi_nmi_pending, 1 << cpu);

	savectx(&stoppcbs[cpu]);

	/* Indicate that we are stopped */
	atomic_set_int(&stopped_cpus, 1 << cpu);

	/* Wait for restart */
	while (!(atomic_load_acq_int(&started_cpus) & (1 << cpu)))
		ia32_pause();

	atomic_clear_int(&started_cpus, 1 << cpu);
	atomic_clear_int(&stopped_cpus, 1 << cpu);

	if (cpu == 0 && cpustop_restartfunc != NULL)
		cpustop_restartfunc();

	return 0;
}

#endif /* KDB_STOP_NMI */

/*
 * This is called once the rest of the system is up and running and we're
 * ready to let the APs out of the pen.
 */
static void
release_aps(void *dummy __unused)
{

	if (mp_ncpus == 1)
		return;
	mtx_lock_spin(&sched_lock);
	atomic_store_rel_int(&aps_ready, 1);
	while (smp_started == 0)
		ia32_pause();
	mtx_unlock_spin(&sched_lock);
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);

static int
sysctl_hlt_cpus(SYSCTL_HANDLER_ARGS)
{
	u_int mask;
	int error;

	mask = hlt_cpus_mask;
	error = sysctl_handle_int(oidp, &mask, 0, req);
	if (error || !req->newptr)
		return (error);

	if (logical_cpus_mask != 0 &&
	    (mask & logical_cpus_mask) == logical_cpus_mask)
		hlt_logical_cpus = 1;
	else
		hlt_logical_cpus = 0;

	if (!hyperthreading_allowed)
		mask |= hyperthreading_cpus_mask;

	if ((mask & all_cpus) == all_cpus)
		mask &= ~(1<<0);
	hlt_cpus_mask = mask;
	return (error);
}
SYSCTL_PROC(_machdep, OID_AUTO, hlt_cpus, CTLTYPE_INT|CTLFLAG_RW,
    0, 0, sysctl_hlt_cpus, "IU",
    "Bitmap of CPUs to halt.  101 (binary) will halt CPUs 0 and 2.");

static int
sysctl_hlt_logical_cpus(SYSCTL_HANDLER_ARGS)
{
	int disable, error;

	disable = hlt_logical_cpus;
	error = sysctl_handle_int(oidp, &disable, 0, req);
	if (error || !req->newptr)
		return (error);

	if (disable)
		hlt_cpus_mask |= logical_cpus_mask;
	else
		hlt_cpus_mask &= ~logical_cpus_mask;

	if (!hyperthreading_allowed)
		hlt_cpus_mask |= hyperthreading_cpus_mask;

	if ((hlt_cpus_mask & all_cpus) == all_cpus)
		hlt_cpus_mask &= ~(1<<0);

	hlt_logical_cpus = disable;
	return (error);
}

static int
sysctl_hyperthreading_allowed(SYSCTL_HANDLER_ARGS)
{
	int allowed, error;

	allowed = hyperthreading_allowed;
	error = sysctl_handle_int(oidp, &allowed, 0, req);
	if (error || !req->newptr)
		return (error);

	if (allowed)
		hlt_cpus_mask &= ~hyperthreading_cpus_mask;
	else
		hlt_cpus_mask |= hyperthreading_cpus_mask;

	if (logical_cpus_mask != 0 &&
	    (hlt_cpus_mask & logical_cpus_mask) == logical_cpus_mask)
		hlt_logical_cpus = 1;
	else
		hlt_logical_cpus = 0;

	if ((hlt_cpus_mask & all_cpus) == all_cpus)
		hlt_cpus_mask &= ~(1<<0);

	hyperthreading_allowed = allowed;
	return (error);
}

static void
cpu_hlt_setup(void *dummy __unused)
{

	if (logical_cpus_mask != 0) {
		TUNABLE_INT_FETCH("machdep.hlt_logical_cpus",
		    &hlt_logical_cpus);
		sysctl_ctx_init(&logical_cpu_clist);
		SYSCTL_ADD_PROC(&logical_cpu_clist,
		    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
		    "hlt_logical_cpus", CTLTYPE_INT|CTLFLAG_RW, 0, 0,
		    sysctl_hlt_logical_cpus, "IU", "");
		SYSCTL_ADD_UINT(&logical_cpu_clist,
		    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
		    "logical_cpus_mask", CTLTYPE_INT|CTLFLAG_RD,
		    &logical_cpus_mask, 0, "");

		if (hlt_logical_cpus)
			hlt_cpus_mask |= logical_cpus_mask;

		/*
		 * If necessary for security purposes, force
		 * hyperthreading off, regardless of the value
		 * of hlt_logical_cpus.
		 */
		if (hyperthreading_cpus_mask) {
			TUNABLE_INT_FETCH("machdep.hyperthreading_allowed",
			    &hyperthreading_allowed);
			SYSCTL_ADD_PROC(&logical_cpu_clist,
			    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
			    "hyperthreading_allowed", CTLTYPE_INT|CTLFLAG_RW,
			    0, 0, sysctl_hyperthreading_allowed, "IU", "");
			if (!hyperthreading_allowed)
				hlt_cpus_mask |= hyperthreading_cpus_mask;
		}
	}
}
SYSINIT(cpu_hlt, SI_SUB_SMP, SI_ORDER_ANY, cpu_hlt_setup, NULL);

int
mp_grab_cpu_hlt(void)
{
	u_int mask = PCPU_GET(cpumask);
#ifdef MP_WATCHDOG
	u_int cpuid = PCPU_GET(cpuid);
#endif
	int retval;

#ifdef MP_WATCHDOG
	ap_watchdog(cpuid);
#endif

	retval = mask & hlt_cpus_mask;
	while (mask & hlt_cpus_mask)
		__asm __volatile("sti; hlt" : : : "memory");
	return (retval);
}