/*-
 * Copyright (c) 1996, by Steve Passe
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_apic.h"
#include "opt_cpu.h"
#include "opt_kstack_pages.h"
#include "opt_mp_watchdog.h"
#include "opt_sched.h"
#include "opt_smp.h"

#if !defined(lint)
#if !defined(SMP)
#error How did you get here?
#endif

#ifndef DEV_APIC
#error The apic device is required for SMP, add "device apic" to your config file.
#endif
#if defined(CPU_DISABLE_CMPXCHG) && !defined(COMPILING_LINT)
#error SMP not supported with CPU_DISABLE_CMPXCHG
#endif
#endif /* not lint */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cons.h>	/* cngetc() */
#ifdef GPROF
#include <sys/gmon.h>
#endif
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <machine/apicreg.h>
#include <machine/md_var.h>
#include <machine/mp_watchdog.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/smp.h>
#include <machine/specialreg.h>

#define WARMBOOT_TARGET		0
#define WARMBOOT_OFF		(KERNBASE + 0x0467)
#define WARMBOOT_SEG		(KERNBASE + 0x0469)

#define CMOS_REG		(0x70)
#define CMOS_DATA		(0x71)
#define BIOS_RESET		(0x0f)
#define BIOS_WARM		(0x0a)
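
/*
 * Editorial note (hedged background, not from the original source): on
 * PC-compatible BIOSes, writing BIOS_WARM (0x0a) into the CMOS shutdown
 * status byte (offset BIOS_RESET, 0x0f) requests a warm start that jumps
 * through the 40:67 warm-boot vector, i.e. the physical word pair at
 * 0x467/0x469 mapped above as WARMBOOT_OFF/WARMBOOT_SEG.  start_all_aps()
 * below points that vector at the AP trampoline before sending startup IPIs.
 */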

/*
 * This code MUST be enabled here and in mpboot.s.
 * It traces the very early stages of AP boot by placing values in CMOS RAM.
 * It will NORMALLY never be needed, hence the primitive method of enabling it.
 *
#define CHECK_POINTS
 */

#if defined(CHECK_POINTS) && !defined(PC98)
#define CHECK_READ(A)	 (outb(CMOS_REG, (A)), inb(CMOS_DATA))
#define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D)))

#define CHECK_INIT(D);				\
	CHECK_WRITE(0x34, (D));			\
	CHECK_WRITE(0x35, (D));			\
	CHECK_WRITE(0x36, (D));			\
	CHECK_WRITE(0x37, (D));			\
	CHECK_WRITE(0x38, (D));			\
	CHECK_WRITE(0x39, (D));

#define CHECK_PRINT(S);				\
	printf("%s: %d, %d, %d, %d, %d, %d\n",	\
	    (S),				\
	    CHECK_READ(0x34),			\
	    CHECK_READ(0x35),			\
	    CHECK_READ(0x36),			\
	    CHECK_READ(0x37),			\
	    CHECK_READ(0x38),			\
	    CHECK_READ(0x39));

#else				/* CHECK_POINTS */

#define CHECK_INIT(D)
#define CHECK_PRINT(S)
#define CHECK_WRITE(A, D)

#endif				/* CHECK_POINTS */

/* lock region used by kernel profiling */
int	mcount_lock;

int	mp_naps;		/* # of Application Processors */
int	boot_cpu_id = -1;	/* designated BSP */

extern struct pcpu __pcpu[];

/*
 * CPU topology map datastructures for HTT.
 */
static struct cpu_group mp_groups[MAXCPU];
static struct cpu_top mp_top;

/* AP uses this during bootstrap.  Do not staticize.  */
char *bootSTK;
static int bootAP;

/* Free these after use */
void *bootstacks[MAXCPU];

/* Hotwire a 0->4MB V==P mapping */
extern pt_entry_t *KPTphys;

struct pcb stoppcbs[MAXCPU];

/* Variables needed for SMP tlb shootdown. */
vm_offset_t smp_tlb_addr1;
vm_offset_t smp_tlb_addr2;
volatile int smp_tlb_wait;

#ifdef STOP_NMI
volatile cpumask_t ipi_nmi_pending;

static void	ipi_nmi_selected(u_int32_t cpus);
#endif

#ifdef COUNT_IPIS
/* Interrupt counts. */
static u_long *ipi_preempt_counts[MAXCPU];
static u_long *ipi_ast_counts[MAXCPU];
u_long *ipi_invltlb_counts[MAXCPU];
u_long *ipi_invlrng_counts[MAXCPU];
u_long *ipi_invlpg_counts[MAXCPU];
u_long *ipi_invlcache_counts[MAXCPU];
u_long *ipi_rendezvous_counts[MAXCPU];
u_long *ipi_lazypmap_counts[MAXCPU];
#endif

/*
 * Local data and functions.
 */

#ifdef STOP_NMI
/*
 * Provide an alternate method of stopping other CPUs.  If another CPU has
 * disabled interrupts the conventional STOP IPI will be blocked.  This
 * NMI-based stop should get through in that case.
 */
static int stop_cpus_with_nmi = 1;
SYSCTL_INT(_debug, OID_AUTO, stop_cpus_with_nmi, CTLTYPE_INT | CTLFLAG_RW,
    &stop_cpus_with_nmi, 0, "");
TUNABLE_INT("debug.stop_cpus_with_nmi", &stop_cpus_with_nmi);
#else
#define	stop_cpus_with_nmi	0
#endif

static u_int logical_cpus;

/* used to hold the APs until we are ready to release them */
static struct mtx ap_boot_mtx;

/* Set to 1 once we're ready to let the APs out of the pen. */
static volatile int aps_ready = 0;

/*
 * Store data from cpu_add() until later in the boot when we actually setup
 * the APs.
 */
struct cpu_info {
	int	cpu_present:1;
	int	cpu_bsp:1;
	int	cpu_disabled:1;
	int	cpu_hyperthread:1;
} static cpu_info[MAX_APIC_ID + 1];
int cpu_apic_ids[MAXCPU];

/* Holds pending bitmap based IPIs per CPU */
static volatile u_int cpu_ipi_pending[MAXCPU];

static u_int boot_address;

static void	assign_cpu_ids(void);
static void	install_ap_tramp(void);
static void	set_interrupt_apic_ids(void);
static int	start_all_aps(void);
static int	start_ap(int apic_id);
static void	release_aps(void *dummy);

static int hlt_logical_cpus;
static u_int hyperthreading_cpus;
static cpumask_t hyperthreading_cpus_mask;
static int hyperthreading_allowed = 1;
static struct sysctl_ctx_list logical_cpu_clist;

static void
mem_range_AP_init(void)
{
	if (mem_range_softc.mr_op && mem_range_softc.mr_op->initAP)
		mem_range_softc.mr_op->initAP(&mem_range_softc);
}

void
mp_topology(void)
{
	struct cpu_group *group;
	int apic_id;
	int groups;
	int cpu;

	/* Build the smp_topology map. */
	/* Nothing to do if there is no HTT support. */
	if (hyperthreading_cpus <= 1)
		return;
	group = &mp_groups[0];
	groups = 1;
	for (cpu = 0, apic_id = 0; apic_id <= MAX_APIC_ID; apic_id++) {
		if (!cpu_info[apic_id].cpu_present)
			continue;
		/*
		 * If the current group has members and we're not a logical
		 * cpu, create a new group.
		 */
		if (group->cg_count != 0 &&
		    (apic_id % hyperthreading_cpus) == 0) {
			group++;
			groups++;
		}
		group->cg_count++;
		group->cg_mask |= 1 << cpu;
		cpu++;
	}

	mp_top.ct_count = groups;
	mp_top.ct_group = mp_groups;
	smp_topology = &mp_top;
}

/*
 * Calculate usable address in base memory for AP trampoline code.
 */
u_int
mp_bootaddress(u_int basemem)
{

	boot_address = trunc_page(basemem);	/* round down to 4k boundary */
	if ((basemem - boot_address) < bootMP_size)
		boot_address -= PAGE_SIZE;	/* not enough, lower by 4k */

	return boot_address;
}
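
/*
 * Worked example (editorial; values assumed for illustration): with
 * basemem = 639 KB = 0x9fc00, trunc_page() gives boot_address = 0x9f000,
 * leaving 0xc00 bytes for the trampoline.  If bootMP_size exceeded 0xc00,
 * boot_address would drop one page to 0x9e000.  Page alignment matters
 * because start_ap() turns boot_address into the STARTUP IPI vector.
 */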

void
cpu_add(u_int apic_id, char boot_cpu)
{

	if (apic_id > MAX_APIC_ID) {
		panic("SMP: APIC ID %d too high", apic_id);
		return;
	}
	KASSERT(cpu_info[apic_id].cpu_present == 0, ("CPU %d added twice",
	    apic_id));
	cpu_info[apic_id].cpu_present = 1;
	if (boot_cpu) {
		KASSERT(boot_cpu_id == -1,
		    ("CPU %d claims to be BSP, but CPU %d already is", apic_id,
		    boot_cpu_id));
		boot_cpu_id = apic_id;
		cpu_info[apic_id].cpu_bsp = 1;
	}
	if (mp_ncpus < MAXCPU)
		mp_ncpus++;
	if (bootverbose)
		printf("SMP: Added CPU %d (%s)\n", apic_id, boot_cpu ? "BSP" :
		    "AP");
}

void
cpu_mp_setmaxid(void)
{

	mp_maxid = MAXCPU - 1;
}

int
cpu_mp_probe(void)
{

	/*
	 * Always record BSP in CPU map so that the mbuf init code works
	 * correctly.
	 */
	all_cpus = 1;
	if (mp_ncpus == 0) {
		/*
		 * No CPUs were found, so this must be a UP system.  Setup
		 * the variables to represent a system with a single CPU
		 * with an id of 0.
		 */
		mp_ncpus = 1;
		return (0);
	}

	/* At least one CPU was found. */
	if (mp_ncpus == 1) {
		/*
		 * One CPU was found, so this must be a UP system with
		 * an I/O APIC.
		 */
		return (0);
	}

	/* At least two CPUs were found. */
	return (1);
}

/*
 * Initialize the IPI handlers and start up the APs.
 */
void
cpu_mp_start(void)
{
	int i;
	u_int threads_per_cache, p[4];

	/* Initialize the logical ID to APIC ID table. */
	for (i = 0; i < MAXCPU; i++) {
		cpu_apic_ids[i] = -1;
		cpu_ipi_pending[i] = 0;
	}

	/* Install an inter-CPU IPI for TLB invalidation */
	setidt(IPI_INVLTLB, IDTVEC(invltlb),
	    SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(IPI_INVLPG, IDTVEC(invlpg),
	    SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(IPI_INVLRNG, IDTVEC(invlrng),
	    SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* Install an inter-CPU IPI for cache invalidation. */
	setidt(IPI_INVLCACHE, IDTVEC(invlcache),
	    SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* Install an inter-CPU IPI for lazy pmap release */
	setidt(IPI_LAZYPMAP, IDTVEC(lazypmap),
	    SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* Install an inter-CPU IPI for all-CPU rendezvous */
	setidt(IPI_RENDEZVOUS, IDTVEC(rendezvous),
	    SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* Install generic inter-CPU IPI handler */
	setidt(IPI_BITMAP_VECTOR, IDTVEC(ipi_intr_bitmap_handler),
	    SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* Install an inter-CPU IPI for CPU stop/restart */
	setidt(IPI_STOP, IDTVEC(cpustop),
	    SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* Set boot_cpu_id if needed. */
	if (boot_cpu_id == -1) {
		boot_cpu_id = PCPU_GET(apic_id);
		cpu_info[boot_cpu_id].cpu_bsp = 1;
	} else
		KASSERT(boot_cpu_id == PCPU_GET(apic_id),
		    ("BSP's APIC ID doesn't match boot_cpu_id"));
	cpu_apic_ids[0] = boot_cpu_id;

	/* Setup the initial logical CPUs info. */
	logical_cpus = logical_cpus_mask = 0;
	if (cpu_feature & CPUID_HTT)
		logical_cpus = (cpu_procinfo & CPUID_HTT_CORES) >> 16;

	/*
	 * Work out if hyperthreading is *really* enabled.  This
	 * is made really ugly by the fact that processors lie: Dual
	 * core processors claim to be hyperthreaded even when they're
	 * not, presumably because they want to be treated the same
	 * way as HTT with respect to per-cpu software licensing.
	 * At the time of writing (May 12, 2005) the only hyperthreaded
	 * cpus are from Intel, and Intel's dual-core processors can be
	 * identified via the "deterministic cache parameters" cpuid
	 * calls.
	 */
	/*
	 * First determine if this is an Intel processor which claims
	 * to have hyperthreading support.
	 */
	if ((cpu_feature & CPUID_HTT) &&
	    (strcmp(cpu_vendor, "GenuineIntel") == 0)) {
		/*
		 * If the "deterministic cache parameters" cpuid calls
		 * are available, use them.
		 */
		if (cpu_high >= 4) {
			/* Ask the processor about the L1 cache. */
			for (i = 0; i < 1; i++) {
				cpuid_count(4, i, p);
				threads_per_cache = ((p[0] & 0x3ffc000) >> 14) + 1;
				if (hyperthreading_cpus < threads_per_cache)
					hyperthreading_cpus = threads_per_cache;
				if ((p[0] & 0x1f) == 0)
					break;
			}
		}
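
		/*
		 * Editorial note on the CPUID leaf 4 fields used above
		 * (Intel's documented layout, stated here for the reader):
		 * EAX bits 25:14 encode the maximum number of logical
		 * processors sharing the cache, minus one, hence the
		 * "+ 1" after the shift; EAX bits 4:0 give the cache
		 * type, where 0 means no more caches.
		 */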

		/*
		 * If the deterministic cache parameters are not
		 * available, or if no caches were reported to exist,
		 * just accept what the HTT flag indicated.
		 */
		if (hyperthreading_cpus == 0)
			hyperthreading_cpus = logical_cpus;
	}

	assign_cpu_ids();

	/* Start each Application Processor */
	start_all_aps();

	set_interrupt_apic_ids();

	/* Last, setup the cpu topology now that we have probed CPUs */
	mp_topology();
}

/*
 * Print various information about the SMP system hardware and setup.
 */
void
cpu_mp_announce(void)
{
	int i, x;
	const char *hyperthread;

	/* List CPUs */
	printf(" cpu0 (BSP): APIC ID: %2d\n", boot_cpu_id);
	for (i = 1, x = 0; x <= MAX_APIC_ID; x++) {
		if (!cpu_info[x].cpu_present || cpu_info[x].cpu_bsp)
			continue;
		if (cpu_info[x].cpu_hyperthread) {
			hyperthread = "/HT";
		} else {
			hyperthread = "";
		}
		if (cpu_info[x].cpu_disabled)
			printf(" cpu (AP%s): APIC ID: %2d (disabled)\n",
			    hyperthread, x);
		else {
			KASSERT(i < mp_ncpus,
			    ("mp_ncpus and actual cpus are out of whack"));
			printf(" cpu%d (AP%s): APIC ID: %2d\n", i++,
			    hyperthread, x);
		}
	}
}

/*
 * AP CPUs call this to initialize themselves.
 */
void
init_secondary(void)
{
	struct pcpu *pc;
	vm_offset_t addr;
	int	gsel_tss;
	int	x, myid;
	u_int	cr0;

	/* bootAP is set in start_ap() to our ID. */
	myid = bootAP;

	/* Get per-cpu data */
	pc = &__pcpu[myid];

	/* prime data page for it to use */
	pcpu_init(pc, myid, sizeof(struct pcpu));
	pc->pc_apic_id = cpu_apic_ids[myid];
	pc->pc_prvspace = pc;
	pc->pc_curthread = 0;

	gdt_segs[GPRIV_SEL].ssd_base = (int) pc;
	gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss;

	for (x = 0; x < NGDT; x++) {
		ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x].sd);
	}

	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	r_gdt.rd_base = (int) &gdt[myid * NGDT];
	lgdt(&r_gdt);			/* does magic intra-segment return */

	lidt(&r_idt);

	lldt(_default_ldt);
	PCPU_SET(currentldt, _default_ldt);

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	gdt[myid * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;
	PCPU_SET(common_tss.tss_esp0, 0); /* not used until after switch */
	PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
	PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
	PCPU_SET(tss_gdt, &gdt[myid * NGDT + GPROC0_SEL].sd);
	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
	ltr(gsel_tss);

	PCPU_SET(fsgs_gdt, &gdt[myid * NGDT + GUFS_SEL].sd);

	/*
	 * Set to a known state:
	 * Set by mpboot.s:	CR0_PG, CR0_PE
	 * Set by cpu_setregs:	CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);
	CHECK_WRITE(0x38, 5);

	/* Disable local APIC just to be sure. */
	lapic_disable();

	/* signal our startup to the BSP. */
	mp_naps++;
	CHECK_WRITE(0x39, 6);

	/* Spin until the BSP releases the APs. */
	while (!aps_ready)
		ia32_pause();

	/* BSP may have changed PTD while we were waiting */
	invltlb();
	for (addr = 0; addr < NKPT * NBPDR - 1; addr += PAGE_SIZE)
		invlpg(addr);

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
	lidt(&r_idt);
#endif

	/* Initialize the PAT MSR if present. */
	pmap_init_pat();

	/* set up CPU registers and state */
	cpu_setregs();

	/* set up FPU state on the AP */
	npxinit();

	/* set up SSE registers */
	enable_sse();

#ifdef PAE
	/* Enable the PTE no-execute bit. */
	if ((amd_feature & AMDID_NX) != 0) {
		uint64_t msr;

		msr = rdmsr(MSR_EFER) | EFER_NXE;
		wrmsr(MSR_EFER, msr);
	}
#endif

	/* A quick check from sanity claus */
	if (PCPU_GET(apic_id) != lapic_id()) {
		printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
		printf("SMP: actual apic_id = %d\n", lapic_id());
		printf("SMP: correct apic_id = %d\n", PCPU_GET(apic_id));
		panic("cpuid mismatch! boom!!");
	}

	/* Initialize curthread. */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	PCPU_SET(curthread, PCPU_GET(idlethread));

	mtx_lock_spin(&ap_boot_mtx);

	/* Init local apic for irq's */
	lapic_setup(1);

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();

	smp_cpus++;

	CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
	printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));

	/* Determine if we are a logical CPU. */
	if (logical_cpus > 1 && PCPU_GET(apic_id) % logical_cpus != 0)
		logical_cpus_mask |= PCPU_GET(cpumask);

	/* Determine if we are a hyperthread. */
	if (hyperthreading_cpus > 1 &&
	    PCPU_GET(apic_id) % hyperthreading_cpus != 0)
		hyperthreading_cpus_mask |= PCPU_GET(cpumask);

	/* Build our map of 'other' CPUs. */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

	if (bootverbose)
		lapic_dump("AP");

	if (smp_cpus == mp_ncpus) {
		/* enable IPI's, tlb shootdown, freezes etc */
		atomic_store_rel_int(&smp_started, 1);
		smp_active = 1;	 /* historic */
	}

	mtx_unlock_spin(&ap_boot_mtx);

	/* wait until all the APs are up */
	while (smp_started == 0)
		ia32_pause();

	/* enter the scheduler */
	sched_throw(NULL);

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}

/*******************************************************************
 * local functions and data
 */

/*
 * We tell the I/O APIC code about all the CPUs we want to receive
 * interrupts.  If we don't want certain CPUs to receive IRQs we
 * can simply not tell the I/O APIC code about them in this function.
 * We also do not tell it about the BSP since it tells itself about
 * the BSP internally to work with UP kernels and on UP machines.
 */
static void
set_interrupt_apic_ids(void)
{
	u_int i, apic_id;

	for (i = 0; i < MAXCPU; i++) {
		apic_id = cpu_apic_ids[i];
		if (apic_id == -1)
			continue;
		if (cpu_info[apic_id].cpu_bsp)
			continue;
		if (cpu_info[apic_id].cpu_disabled)
			continue;

		/* Don't let hyperthreads service interrupts. */
		if (hyperthreading_cpus > 1 &&
		    apic_id % hyperthreading_cpus != 0)
			continue;

		intr_add_cpu(i);
	}
}

/*
 * Assign logical CPU IDs to local APICs.
 */
static void
assign_cpu_ids(void)
{
	u_int i;

	TUNABLE_INT_FETCH("machdep.hyperthreading_allowed",
	    &hyperthreading_allowed);

	/* Check for explicitly disabled CPUs. */
	for (i = 0; i <= MAX_APIC_ID; i++) {
		if (!cpu_info[i].cpu_present || cpu_info[i].cpu_bsp)
			continue;

		if (hyperthreading_cpus > 1 && i % hyperthreading_cpus != 0) {
			cpu_info[i].cpu_hyperthread = 1;
#if defined(SCHED_ULE)
			/*
			 * Don't use HT CPU if it has been disabled by a
			 * tunable.
			 */
			if (hyperthreading_allowed == 0) {
				cpu_info[i].cpu_disabled = 1;
				continue;
			}
#endif
		}

		/* Don't use this CPU if it has been disabled by a tunable. */
		if (resource_disabled("lapic", i)) {
			cpu_info[i].cpu_disabled = 1;
			continue;
		}
	}

	/*
	 * Assign CPU IDs to local APIC IDs and disable any CPUs
	 * beyond MAXCPU.  CPU 0 has already been assigned to the BSP,
	 * so we only have to assign IDs for APs.
	 */
	mp_ncpus = 1;
	for (i = 0; i <= MAX_APIC_ID; i++) {
		if (!cpu_info[i].cpu_present || cpu_info[i].cpu_bsp ||
		    cpu_info[i].cpu_disabled)
			continue;

		if (mp_ncpus < MAXCPU) {
			cpu_apic_ids[mp_ncpus] = i;
			mp_ncpus++;
		} else
			cpu_info[i].cpu_disabled = 1;
	}
	KASSERT(mp_maxid >= mp_ncpus - 1,
	    ("%s: counters out of sync: max %d, count %d", __func__, mp_maxid,
	    mp_ncpus));
}

/*
 * start each AP in our list
 */
static int
start_all_aps(void)
{
#ifndef PC98
	u_char mpbiosreason;
#endif
	uintptr_t kptbase;
	u_int32_t mpbioswarmvec;
	int apic_id, cpu, i;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	/* install the AP 1st level boot code */
	install_ap_tramp();

	/* save the current value of the warm-start vector */
	mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
#ifndef PC98
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);
#endif

	/* set up temporary P==V mapping for AP boot */
	/* XXX this is a hack, we should boot the AP on its own stack/PTD */
	kptbase = (uintptr_t)(void *)KPTphys;
	for (i = 0; i < NKPT; i++)
		PTD[i] = (pd_entry_t)(PG_V | PG_RW |
		    ((kptbase + i * PAGE_SIZE) & PG_FRAME));
	invltlb();

	/* start each AP */
	for (cpu = 1; cpu < mp_ncpus; cpu++) {
		apic_id = cpu_apic_ids[cpu];

		/* allocate and set up a boot stack data page */
		bootstacks[cpu] = (char *)kmem_alloc(kernel_map, KSTACK_PAGES * PAGE_SIZE);

		/* setup a vector to our boot code */
		*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
		*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
#ifndef PC98
		outb(CMOS_REG, BIOS_RESET);
		outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */
#endif

		bootSTK = (char *)bootstacks[cpu] + KSTACK_PAGES * PAGE_SIZE - 4;
		bootAP = cpu;

		/* attempt to start the Application Processor */
		CHECK_INIT(99);	/* setup checkpoints */
		if (!start_ap(apic_id)) {
			printf("AP #%d (PHY# %d) failed!\n", cpu, apic_id);
			CHECK_PRINT("trace");	/* show checkpoints */
			/* better panic as the AP may be running loose */
			printf("panic y/n? [y] ");
			if (cngetc() != 'n')
				panic("bye-bye");
		}
		CHECK_PRINT("trace");	/* show checkpoints */

		all_cpus |= (1 << cpu);		/* record AP in CPU map */
	}

	/* build our map of 'other' CPUs */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

	/* restore the warmstart vector */
	*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;

#ifndef PC98
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);
#endif

	/* Undo V==P hack from above */
	for (i = 0; i < NKPT; i++)
		PTD[i] = 0;
	pmap_invalidate_range(kernel_pmap, 0, NKPT * NBPDR - 1);

	/* number of APs actually started */
	return mp_naps;
}

/*
 * load the 1st level AP boot code into base memory.
 */

/* targets for relocation */
extern void bigJump(void);
extern void bootCodeSeg(void);
extern void bootDataSeg(void);
extern void MPentry(void);
extern u_int MP_GDT;
extern u_int mp_gdtbase;

static void
install_ap_tramp(void)
{
	int	x;
	int	size = *(int *) ((u_long) & bootMP_size);
	vm_offset_t va = boot_address + KERNBASE;
	u_char *src = (u_char *) ((u_long) bootMP);
	u_char *dst = (u_char *) va;
	u_int	boot_base = (u_int) bootMP;
	u_int8_t *dst8;
	u_int16_t *dst16;
	u_int32_t *dst32;

	KASSERT(size <= PAGE_SIZE,
	    ("'size' does not fit into PAGE_SIZE, as expected."));
	pmap_kenter(va, boot_address);
	pmap_invalidate_page(kernel_pmap, va);
	for (x = 0; x < size; ++x)
		*dst++ = *src++;

	/*
	 * modify addresses in code we just moved to basemem. unfortunately we
	 * need fairly detailed info about mpboot.s for this to work.  changes
	 * to mpboot.s might require changes here.
	 */

	/* boot code is located in KERNEL space */
	dst = (u_char *) va;

	/* modify the lgdt arg */
	dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base));
	*dst32 = boot_address + ((u_int) & MP_GDT - boot_base);

	/* modify the ljmp target for MPentry() */
	dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1);
	*dst32 = ((u_int) MPentry - KERNBASE);

	/* modify the target for boot code segment */
	dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base));
	dst8 = (u_int8_t *) (dst16 + 1);
	*dst16 = (u_int) boot_address & 0xffff;
	*dst8 = ((u_int) boot_address >> 16) & 0xff;

	/* modify the target for boot data segment */
	dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base));
	dst8 = (u_int8_t *) (dst16 + 1);
	*dst16 = (u_int) boot_address & 0xffff;
	*dst8 = ((u_int) boot_address >> 16) & 0xff;
}

/*
 * This function starts the AP (application processor) identified
 * by the APIC ID 'apic_id'.  It does quite a "song and dance"
 * to accomplish this.  This is necessary because of the nuances
 * of the different hardware we might encounter.  It isn't pretty,
 * but it seems to work.
 */
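/*
 * Editorial summary (hedged): the sequence below follows the Intel MP
 * Specification's "universal start-up algorithm": assert and deassert an
 * INIT IPI, wait ~10 ms, then send up to two STARTUP IPIs roughly 200 us
 * apart.  The redundancy covers processors that latch or drop one of the
 * IPIs, such as the P5 erratum noted in the comments below.
 */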
static int
start_ap(int apic_id)
{
	int vector, ms;
	int cpus;

	/* calculate the vector */
	vector = (boot_address >> 12) & 0xff;
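
	/*
	 * Editorial note (boot_address value assumed for illustration):
	 * the STARTUP IPI's 8-bit vector VV starts the AP in real mode
	 * at physical address 0x000VV000, so a page-aligned boot_address
	 * of, say, 0x9f000 yields vector 0x9f.
	 */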

	/* used as a watchpoint to signal AP startup */
	cpus = mp_naps;

	/*
	 * First we do an INIT IPI: this INIT IPI might be run, resetting
	 * and running the target CPU; OR this INIT IPI might be latched
	 * (P5 bug), the CPU waiting for a STARTUP IPI; OR this INIT IPI
	 * might be ignored.
	 */

	/* do an INIT IPI: assert RESET */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, apic_id);

	/* wait for pending status end */
	lapic_ipi_wait(-1);

	/* do an INIT IPI: deassert RESET */
	lapic_ipi_raw(APIC_DEST_ALLESELF | APIC_TRIGMOD_LEVEL |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, 0);

	/* wait for pending status end */
	DELAY(10000);		/* wait ~10mS */
	lapic_ipi_wait(-1);

	/*
	 * Next we do a STARTUP IPI: the previous INIT IPI might still be
	 * latched (P5 bug); this 1st STARTUP would then terminate
	 * immediately, and the previously started INIT IPI would continue; OR
	 * the previous INIT IPI has already run, and this STARTUP IPI will
	 * run; OR the previous INIT IPI was ignored, and this STARTUP IPI
	 * will run.
	 */

	/* do a STARTUP IPI */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
	    vector, apic_id);
	lapic_ipi_wait(-1);
	DELAY(200);		/* wait ~200uS */

	/*
	 * finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run IF
	 * the previous STARTUP IPI was cancelled by a latched INIT IPI.  OR
	 * this STARTUP IPI will be ignored, as only ONE STARTUP IPI is
	 * recognized after hardware RESET or INIT IPI.
	 */

	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
	    vector, apic_id);
	lapic_ipi_wait(-1);
	DELAY(200);		/* wait ~200uS */

	/* Wait up to 5 seconds for it to start. */
	for (ms = 0; ms < 5000; ms++) {
		if (mp_naps > cpus)
			return 1;	/* return SUCCESS */
		DELAY(1000);
	}
	return 0;		/* return FAILURE */
}

#ifdef COUNT_XINVLTLB_HITS
u_int xhits_gbl[MAXCPU];
u_int xhits_pg[MAXCPU];
u_int xhits_rng[MAXCPU];
SYSCTL_NODE(_debug, OID_AUTO, xhits, CTLFLAG_RW, 0, "");
SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, global, CTLFLAG_RW, &xhits_gbl,
    sizeof(xhits_gbl), "IU", "");
SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, page, CTLFLAG_RW, &xhits_pg,
    sizeof(xhits_pg), "IU", "");
SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, range, CTLFLAG_RW, &xhits_rng,
    sizeof(xhits_rng), "IU", "");

u_int ipi_global;
u_int ipi_page;
u_int ipi_range;
u_int ipi_range_size;
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_global, CTLFLAG_RW, &ipi_global, 0, "");
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_page, CTLFLAG_RW, &ipi_page, 0, "");
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_range, CTLFLAG_RW, &ipi_range, 0, "");
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_range_size, CTLFLAG_RW, &ipi_range_size,
    0, "");

u_int ipi_masked_global;
u_int ipi_masked_page;
u_int ipi_masked_range;
u_int ipi_masked_range_size;
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_global, CTLFLAG_RW,
    &ipi_masked_global, 0, "");
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_page, CTLFLAG_RW,
    &ipi_masked_page, 0, "");
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_range, CTLFLAG_RW,
    &ipi_masked_range, 0, "");
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_range_size, CTLFLAG_RW,
    &ipi_masked_range_size, 0, "");
#endif /* COUNT_XINVLTLB_HITS */

/*
 * Flush the TLB on all other CPUs.
 */
static void
smp_tlb_shootdown(u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
	u_int ncpu;

	ncpu = mp_ncpus - 1;	/* does not shoot down self */
	if (ncpu < 1)
		return;		/* no other cpus */
	if (!(read_eflags() & PSL_I))
		panic("%s: interrupts disabled", __func__);
	mtx_lock_spin(&smp_ipi_mtx);
	smp_tlb_addr1 = addr1;
	smp_tlb_addr2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	ipi_all_but_self(vector);
	while (smp_tlb_wait < ncpu)
		ia32_pause();
	mtx_unlock_spin(&smp_ipi_mtx);
}

static void
smp_targeted_tlb_shootdown(u_int mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
	int ncpu, othercpus;

	othercpus = mp_ncpus - 1;
	if (mask == (u_int)-1) {
		ncpu = othercpus;
		if (ncpu < 1)
			return;
	} else {
		mask &= ~PCPU_GET(cpumask);
		if (mask == 0)
			return;
		ncpu = bitcount32(mask);
		if (ncpu > othercpus) {
			/* XXX this should be a panic offence */
			printf("SMP: tlb shootdown to %d other cpus (only have %d)\n",
			    ncpu, othercpus);
			ncpu = othercpus;
		}
		/* XXX should be a panic, implied by mask == 0 above */
		if (ncpu < 1)
			return;
	}
	if (!(read_eflags() & PSL_I))
		panic("%s: interrupts disabled", __func__);
	mtx_lock_spin(&smp_ipi_mtx);
	smp_tlb_addr1 = addr1;
	smp_tlb_addr2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	if (mask == (u_int)-1)
		ipi_all_but_self(vector);
	else
		ipi_selected(mask, vector);
	while (smp_tlb_wait < ncpu)
		ia32_pause();
	mtx_unlock_spin(&smp_ipi_mtx);
}

void
smp_cache_flush(void)
{

	if (smp_started)
		smp_tlb_shootdown(IPI_INVLCACHE, 0, 0);
}

void
smp_invltlb(void)
{

	if (smp_started) {
		smp_tlb_shootdown(IPI_INVLTLB, 0, 0);
#ifdef COUNT_XINVLTLB_HITS
		ipi_global++;
#endif
	}
}

void
smp_invlpg(vm_offset_t addr)
{

	if (smp_started) {
		smp_tlb_shootdown(IPI_INVLPG, addr, 0);
#ifdef COUNT_XINVLTLB_HITS
		ipi_page++;
#endif
	}
}

void
smp_invlpg_range(vm_offset_t addr1, vm_offset_t addr2)
{

	if (smp_started) {
		smp_tlb_shootdown(IPI_INVLRNG, addr1, addr2);
#ifdef COUNT_XINVLTLB_HITS
		ipi_range++;
		ipi_range_size += (addr2 - addr1) / PAGE_SIZE;
#endif
	}
}

void
smp_masked_invltlb(u_int mask)
{

	if (smp_started) {
		smp_targeted_tlb_shootdown(mask, IPI_INVLTLB, 0, 0);
#ifdef COUNT_XINVLTLB_HITS
		ipi_masked_global++;
#endif
	}
}

void
smp_masked_invlpg(u_int mask, vm_offset_t addr)
{

	if (smp_started) {
		smp_targeted_tlb_shootdown(mask, IPI_INVLPG, addr, 0);
#ifdef COUNT_XINVLTLB_HITS
		ipi_masked_page++;
#endif
	}
}

void
smp_masked_invlpg_range(u_int mask, vm_offset_t addr1, vm_offset_t addr2)
{

	if (smp_started) {
		smp_targeted_tlb_shootdown(mask, IPI_INVLRNG, addr1, addr2);
#ifdef COUNT_XINVLTLB_HITS
		ipi_masked_range++;
		ipi_masked_range_size += (addr2 - addr1) / PAGE_SIZE;
#endif
	}
}

void
ipi_bitmap_handler(struct trapframe frame)
{
	int cpu = PCPU_GET(cpuid);
	u_int ipi_bitmap;

	ipi_bitmap = atomic_readandclear_int(&cpu_ipi_pending[cpu]);

	if (ipi_bitmap & (1 << IPI_PREEMPT)) {
		struct thread *running_thread = curthread;
#ifdef COUNT_IPIS
		(*ipi_preempt_counts[cpu])++;
#endif
		thread_lock(running_thread);
		if (running_thread->td_critnest > 1)
			running_thread->td_owepreempt = 1;
		else
			mi_switch(SW_INVOL | SW_PREEMPT, NULL);
		thread_unlock(running_thread);
	}

	if (ipi_bitmap & (1 << IPI_AST)) {
#ifdef COUNT_IPIS
		(*ipi_ast_counts[cpu])++;
#endif
		/* Nothing to do for AST */
	}
}

/*
 * send an IPI to a set of cpus.
 */
void
ipi_selected(u_int32_t cpus, u_int ipi)
{
	int cpu;
	u_int bitmap = 0;
	u_int old_pending;
	u_int new_pending;

	if (IPI_IS_BITMAPED(ipi)) {
		bitmap = 1 << ipi;
		ipi = IPI_BITMAP_VECTOR;
	}
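
	/*
	 * Editorial note: bitmapped IPIs (e.g. IPI_PREEMPT, IPI_AST) share
	 * the single IPI_BITMAP_VECTOR.  The per-IPI bit is recorded in
	 * cpu_ipi_pending[] below and decoded by ipi_bitmap_handler()
	 * above, so multiple logical IPIs cost one vector and at most one
	 * interrupt per CPU while bits remain pending.
	 */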

#ifdef STOP_NMI
	if (ipi == IPI_STOP && stop_cpus_with_nmi) {
		ipi_nmi_selected(cpus);
		return;
	}
#endif
	CTR3(KTR_SMP, "%s: cpus: %x ipi: %x", __func__, cpus, ipi);
	while ((cpu = ffs(cpus)) != 0) {
		cpu--;
		cpus &= ~(1 << cpu);

		KASSERT(cpu_apic_ids[cpu] != -1,
		    ("IPI to non-existent CPU %d", cpu));

		if (bitmap) {
			do {
				old_pending = cpu_ipi_pending[cpu];
				new_pending = old_pending | bitmap;
			} while (!atomic_cmpset_int(&cpu_ipi_pending[cpu],
			    old_pending, new_pending));

			if (old_pending)
				continue;
		}

		lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]);
	}

}

/*
 * send an IPI INTerrupt containing 'vector' to all CPUs, including myself
 */
void
ipi_all(u_int ipi)
{

	if (IPI_IS_BITMAPED(ipi) || (ipi == IPI_STOP && stop_cpus_with_nmi)) {
		ipi_selected(all_cpus, ipi);
		return;
	}
	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_ALL);
}

/*
 * send an IPI to all CPUs EXCEPT myself
 */
void
ipi_all_but_self(u_int ipi)
{

	if (IPI_IS_BITMAPED(ipi) || (ipi == IPI_STOP && stop_cpus_with_nmi)) {
		ipi_selected(PCPU_GET(other_cpus), ipi);
		return;
	}
	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_OTHERS);
}

/*
 * send an IPI to myself
 */
void
ipi_self(u_int ipi)
{

	if (IPI_IS_BITMAPED(ipi) || (ipi == IPI_STOP && stop_cpus_with_nmi)) {
		ipi_selected(PCPU_GET(cpumask), ipi);
		return;
	}
	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_SELF);
}

#ifdef STOP_NMI
/*
 * send NMI IPI to selected CPUs
 */

#define	BEFORE_SPIN	1000000

void
ipi_nmi_selected(u_int32_t cpus)
{
	int cpu;
	register_t icrlo;

	icrlo = APIC_DELMODE_NMI | APIC_DESTMODE_PHY | APIC_LEVEL_ASSERT
	    | APIC_TRIGMOD_EDGE;

	CTR2(KTR_SMP, "%s: cpus: %x nmi", __func__, cpus);

	atomic_set_int(&ipi_nmi_pending, cpus);

	while ((cpu = ffs(cpus)) != 0) {
		cpu--;
		cpus &= ~(1 << cpu);

		KASSERT(cpu_apic_ids[cpu] != -1,
		    ("IPI NMI to non-existent CPU %d", cpu));

		/* Wait for an earlier IPI to finish. */
		if (!lapic_ipi_wait(BEFORE_SPIN))
			panic("ipi_nmi_selected: previous IPI has not cleared");

		lapic_ipi_raw(icrlo, cpu_apic_ids[cpu]);
	}
}

int
ipi_nmi_handler(void)
{
	int cpumask = PCPU_GET(cpumask);

	if (!(ipi_nmi_pending & cpumask))
		return 1;

	atomic_clear_int(&ipi_nmi_pending, cpumask);
	cpustop_handler();
	return 0;
}

#endif /* STOP_NMI */

/*
 * Handle an IPI_STOP by saving our current context and spinning until we
 * are resumed.
 */
void
cpustop_handler(void)
{
	int cpu = PCPU_GET(cpuid);
	int cpumask = PCPU_GET(cpumask);

	savectx(&stoppcbs[cpu]);

	/* Indicate that we are stopped */
	atomic_set_int(&stopped_cpus, cpumask);

	/* Wait for restart */
	while (!(started_cpus & cpumask))
		ia32_pause();

	atomic_clear_int(&started_cpus, cpumask);
	atomic_clear_int(&stopped_cpus, cpumask);

	if (cpu == 0 && cpustop_restartfunc != NULL) {
		cpustop_restartfunc();
		cpustop_restartfunc = NULL;
	}
}

/*
 * This is called once the rest of the system is up and running and we're
 * ready to let the APs out of the pen.
 */
static void
release_aps(void *dummy __unused)
{

	if (mp_ncpus == 1)
		return;
	atomic_store_rel_int(&aps_ready, 1);
	while (smp_started == 0)
		ia32_pause();
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);

static int
sysctl_hlt_cpus(SYSCTL_HANDLER_ARGS)
{
	u_int mask;
	int error;

	mask = hlt_cpus_mask;
	error = sysctl_handle_int(oidp, &mask, 0, req);
	if (error || !req->newptr)
		return (error);

	if (logical_cpus_mask != 0 &&
	    (mask & logical_cpus_mask) == logical_cpus_mask)
		hlt_logical_cpus = 1;
	else
		hlt_logical_cpus = 0;

	if (! hyperthreading_allowed)
		mask |= hyperthreading_cpus_mask;

	if ((mask & all_cpus) == all_cpus)
		mask &= ~(1<<0);
	hlt_cpus_mask = mask;
	return (error);
}
SYSCTL_PROC(_machdep, OID_AUTO, hlt_cpus, CTLTYPE_INT|CTLFLAG_RW,
    0, 0, sysctl_hlt_cpus, "IU",
    "Bitmap of CPUs to halt.  101 (binary) will halt CPUs 0 and 2.");

static int
sysctl_hlt_logical_cpus(SYSCTL_HANDLER_ARGS)
{
	int disable, error;

	disable = hlt_logical_cpus;
	error = sysctl_handle_int(oidp, &disable, 0, req);
	if (error || !req->newptr)
		return (error);

	if (disable)
		hlt_cpus_mask |= logical_cpus_mask;
	else
		hlt_cpus_mask &= ~logical_cpus_mask;

	if (! hyperthreading_allowed)
		hlt_cpus_mask |= hyperthreading_cpus_mask;

	if ((hlt_cpus_mask & all_cpus) == all_cpus)
		hlt_cpus_mask &= ~(1<<0);

	hlt_logical_cpus = disable;
	return (error);
}

static int
sysctl_hyperthreading_allowed(SYSCTL_HANDLER_ARGS)
{
	int allowed, error;

	allowed = hyperthreading_allowed;
	error = sysctl_handle_int(oidp, &allowed, 0, req);
	if (error || !req->newptr)
		return (error);

#ifdef SCHED_ULE
	/*
	 * SCHED_ULE doesn't allow enabling/disabling HT cores at
	 * run-time.
	 */
	if (allowed != hyperthreading_allowed)
		return (ENOTSUP);
	return (error);
#endif

	if (allowed)
		hlt_cpus_mask &= ~hyperthreading_cpus_mask;
	else
		hlt_cpus_mask |= hyperthreading_cpus_mask;

	if (logical_cpus_mask != 0 &&
	    (hlt_cpus_mask & logical_cpus_mask) == logical_cpus_mask)
		hlt_logical_cpus = 1;
	else
		hlt_logical_cpus = 0;

	if ((hlt_cpus_mask & all_cpus) == all_cpus)
		hlt_cpus_mask &= ~(1<<0);

	hyperthreading_allowed = allowed;
	return (error);
}

static void
cpu_hlt_setup(void *dummy __unused)
{

	if (logical_cpus_mask != 0) {
		TUNABLE_INT_FETCH("machdep.hlt_logical_cpus",
		    &hlt_logical_cpus);
		sysctl_ctx_init(&logical_cpu_clist);
		SYSCTL_ADD_PROC(&logical_cpu_clist,
		    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
		    "hlt_logical_cpus", CTLTYPE_INT|CTLFLAG_RW, 0, 0,
		    sysctl_hlt_logical_cpus, "IU", "");
		SYSCTL_ADD_UINT(&logical_cpu_clist,
		    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
		    "logical_cpus_mask", CTLTYPE_INT|CTLFLAG_RD,
		    &logical_cpus_mask, 0, "");

		if (hlt_logical_cpus)
			hlt_cpus_mask |= logical_cpus_mask;

		/*
		 * If necessary for security purposes, force
		 * hyperthreading off, regardless of the value
		 * of hlt_logical_cpus.
		 */
		if (hyperthreading_cpus_mask) {
			SYSCTL_ADD_PROC(&logical_cpu_clist,
			    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
			    "hyperthreading_allowed", CTLTYPE_INT|CTLFLAG_RW,
			    0, 0, sysctl_hyperthreading_allowed, "IU", "");
			if (! hyperthreading_allowed)
				hlt_cpus_mask |= hyperthreading_cpus_mask;
		}
	}
}
SYSINIT(cpu_hlt, SI_SUB_SMP, SI_ORDER_ANY, cpu_hlt_setup, NULL);

int
mp_grab_cpu_hlt(void)
{
	u_int mask = PCPU_GET(cpumask);
#ifdef MP_WATCHDOG
	u_int cpuid = PCPU_GET(cpuid);
#endif
	int retval;

#ifdef MP_WATCHDOG
	ap_watchdog(cpuid);
#endif

	retval = mask & hlt_cpus_mask;
	while (mask & hlt_cpus_mask)
		__asm __volatile("sti; hlt" : : : "memory");
	return (retval);
}

#ifdef COUNT_IPIS
/*
 * Setup interrupt counters for IPI handlers.
 */
static void
mp_ipi_intrcnt(void *dummy)
{
	char buf[64];
	int i;

	for (i = 0; i < mp_maxid; i++) {
		if (CPU_ABSENT(i))
			continue;
		snprintf(buf, sizeof(buf), "cpu%d: invltlb", i);
		intrcnt_add(buf, &ipi_invltlb_counts[i]);
		snprintf(buf, sizeof(buf), "cpu%d: invlrng", i);
		intrcnt_add(buf, &ipi_invlrng_counts[i]);
		snprintf(buf, sizeof(buf), "cpu%d: invlpg", i);
		intrcnt_add(buf, &ipi_invlpg_counts[i]);
		snprintf(buf, sizeof(buf), "cpu%d: preempt", i);
		intrcnt_add(buf, &ipi_preempt_counts[i]);
		snprintf(buf, sizeof(buf), "cpu%d: ast", i);
		intrcnt_add(buf, &ipi_ast_counts[i]);
		snprintf(buf, sizeof(buf), "cpu%d: rendezvous", i);
		intrcnt_add(buf, &ipi_rendezvous_counts[i]);
		snprintf(buf, sizeof(buf), "cpu%d: lazypmap", i);
		intrcnt_add(buf, &ipi_lazypmap_counts[i]);
	}
}
SYSINIT(mp_ipi_intrcnt, SI_SUB_INTR, SI_ORDER_MIDDLE, mp_ipi_intrcnt, NULL);
#endif