/*-
 * Copyright (c) 1996, by Steve Passe
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/6.4/sys/i386/i386/mp_machdep.c 181131 2008-08-01 20:31:07Z jhb $");

#include "opt_apic.h"
#include "opt_cpu.h"
#include "opt_kdb.h"
#include "opt_kstack_pages.h"
#include "opt_mp_watchdog.h"
#include "opt_sched.h"

#if !defined(lint)
#if !defined(SMP)
#error How did you get here?
#endif

#ifndef DEV_APIC
#error The apic device is required for SMP, add "device apic" to your config file.
#endif
#if defined(CPU_DISABLE_CMPXCHG) && !defined(COMPILING_LINT)
#error SMP not supported with CPU_DISABLE_CMPXCHG
#endif
#endif /* not lint */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cons.h>	/* cngetc() */
#ifdef GPROF
#include <sys/gmon.h>
#endif
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <machine/apicreg.h>
#include <machine/clock.h>
#include <machine/md_var.h>
#include <machine/mp_watchdog.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/smp.h>
#include <machine/smptests.h>	/** COUNT_XINVLTLB_HITS */
#include <machine/specialreg.h>

#define WARMBOOT_TARGET		0
#define WARMBOOT_OFF		(KERNBASE + 0x0467)
#define WARMBOOT_SEG		(KERNBASE + 0x0469)
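/*
 * Address 0x467 in the BIOS data area holds the real-mode warm-boot
 * vector: a 16-bit offset at 0x467 followed by a 16-bit segment at
 * 0x469.  We point it at the AP trampoline before starting each AP.
 */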

#define CMOS_REG		(0x70)
#define CMOS_DATA		(0x71)
#define BIOS_RESET		(0x0f)
#define BIOS_WARM		(0x0a)
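/*
 * CMOS register 0x0f is the shutdown status byte; writing 0x0a there
 * tells the BIOS to skip POST on the next reset and jump through the
 * warm-boot vector at 0x467 without sending an EOI.
 */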

/*
 * This code MUST be enabled here and in mpboot.s.
 * It traces the very early stages of AP boot by placing values in CMOS RAM.
 * It is NORMALLY never needed, hence the primitive method of enabling it.
 *
#define CHECK_POINTS
 */

#if defined(CHECK_POINTS) && !defined(PC98)
#define CHECK_READ(A)	 (outb(CMOS_REG, (A)), inb(CMOS_DATA))
#define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D)))

#define CHECK_INIT(D)				\
	CHECK_WRITE(0x34, (D));			\
	CHECK_WRITE(0x35, (D));			\
	CHECK_WRITE(0x36, (D));			\
	CHECK_WRITE(0x37, (D));			\
	CHECK_WRITE(0x38, (D));			\
	CHECK_WRITE(0x39, (D));

#define CHECK_PRINT(S)				\
	printf("%s: %d, %d, %d, %d, %d, %d\n",	\
	   (S),					\
	   CHECK_READ(0x34),			\
	   CHECK_READ(0x35),			\
	   CHECK_READ(0x36),			\
	   CHECK_READ(0x37),			\
	   CHECK_READ(0x38),			\
	   CHECK_READ(0x39));

#else				/* CHECK_POINTS */

#define CHECK_INIT(D)
#define CHECK_PRINT(S)
#define CHECK_WRITE(A, D)

#endif				/* CHECK_POINTS */

/*
 * Values to send to the POST hardware.
 */
#define MP_BOOTADDRESS_POST	0x10
#define MP_PROBE_POST		0x11
#define MPTABLE_PASS1_POST	0x12

#define MP_START_POST		0x13
#define MP_ENABLE_POST		0x14
#define MPTABLE_PASS2_POST	0x15

#define START_ALL_APS_POST	0x16
#define INSTALL_AP_TRAMP_POST	0x17
#define START_AP_POST		0x18

#define MP_ANNOUNCE_POST	0x19

/* lock region used by kernel profiling */
int	mcount_lock;

/** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
int	current_postcode;

int	mp_naps;		/* # of Application Processors */
int	boot_cpu_id = -1;	/* designated BSP */
extern	int nkpt;

extern	struct pcpu __pcpu[];

/*
 * CPU topology map data structures for HTT.
 */
static struct cpu_group mp_groups[MAXCPU];
static struct cpu_top mp_top;

/* AP uses this during bootstrap.  Do not staticize.  */
char *bootSTK;
static int bootAP;

/* Free these after use */
void *bootstacks[MAXCPU];

/* Hotwire a 0->4MB V==P mapping */
extern pt_entry_t *KPTphys;

struct pcb stoppcbs[MAXCPU];

/* Variables needed for SMP tlb shootdown. */
vm_offset_t smp_tlb_addr1;
vm_offset_t smp_tlb_addr2;
volatile int smp_tlb_wait;

#ifdef KDB_STOP_NMI
volatile cpumask_t ipi_nmi_pending;
#endif

#ifdef COUNT_IPIS
/* Interrupt counts. */
#ifdef IPI_PREEMPTION
static u_long *ipi_preempt_counts[MAXCPU];
#endif
static u_long *ipi_ast_counts[MAXCPU];
u_long *ipi_invltlb_counts[MAXCPU];
u_long *ipi_invlrng_counts[MAXCPU];
u_long *ipi_invlpg_counts[MAXCPU];
u_long *ipi_invlcache_counts[MAXCPU];
u_long *ipi_rendezvous_counts[MAXCPU];
u_long *ipi_lazypmap_counts[MAXCPU];
#endif

/*
 * Local data and functions.
 */

static u_int logical_cpus;

/* used to hold the APs until we are ready to release them */
static struct mtx ap_boot_mtx;

/* Set to 1 once we're ready to let the APs out of the pen. */
static volatile int aps_ready = 0;

/*
 * Store data from cpu_add() until later in the boot when we actually set up
 * the APs.
 */
struct cpu_info {
	int	cpu_present:1;
	int	cpu_bsp:1;
	int	cpu_disabled:1;
} static cpu_info[MAX_APIC_ID + 1];
static int cpu_apic_ids[MAXCPU];

/* Holds pending bitmap based IPIs per CPU */
static volatile u_int cpu_ipi_pending[MAXCPU];

static u_int boot_address;

static void	assign_cpu_ids(void);
static void	install_ap_tramp(void);
static void	set_interrupt_apic_ids(void);
static int	start_all_aps(void);
static int	start_ap(int apic_id);
static void	release_aps(void *dummy);

static int hlt_logical_cpus;
static u_int hyperthreading_cpus;
static cpumask_t hyperthreading_cpus_mask;
static int hyperthreading_allowed;
static struct sysctl_ctx_list logical_cpu_clist;

static void
mem_range_AP_init(void)
{
	if (mem_range_softc.mr_op && mem_range_softc.mr_op->initAP)
		mem_range_softc.mr_op->initAP(&mem_range_softc);
}

void
mp_topology(void)
{
	struct cpu_group *group;
	int logical_cpus;
	int apic_id;
	int groups;
	int cpu;

	/* Build the smp_topology map. */
	/* Nothing to do if there is no HTT support. */
	if ((cpu_feature & CPUID_HTT) == 0)
		return;
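	/*
	 * CPUID leaf 1 reports the number of logical CPUs per physical
	 * package in EBX bits 23:16; cpu_procinfo caches that EBX value.
	 */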
	logical_cpus = (cpu_procinfo & CPUID_HTT_CORES) >> 16;
	if (logical_cpus <= 1)
		return;
	group = &mp_groups[0];
	groups = 1;
	for (cpu = 0, apic_id = 0; apic_id <= MAX_APIC_ID; apic_id++) {
		if (!cpu_info[apic_id].cpu_present)
			continue;
		/*
		 * If the current group has members and we're not a logical
		 * cpu, create a new group.
		 */
		if (group->cg_count != 0 && (apic_id % logical_cpus) == 0) {
			group++;
			groups++;
		}
		group->cg_count++;
		group->cg_mask |= 1 << cpu;
		cpu++;
	}

	mp_top.ct_count = groups;
	mp_top.ct_group = mp_groups;
	smp_topology = &mp_top;
}


/*
 * Calculate usable address in base memory for AP trampoline code.
 */
u_int
mp_bootaddress(u_int basemem)
{
	POSTCODE(MP_BOOTADDRESS_POST);

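	/*
	 * The AP begins execution in real mode, so the trampoline must
	 * live in base memory below 1MB; carve it out of the top of the
	 * memory the BIOS reported.
	 */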
	boot_address = trunc_page(basemem);	/* round down to 4k boundary */
	if ((basemem - boot_address) < bootMP_size)
		boot_address -= PAGE_SIZE;	/* not enough, lower by 4k */

	return boot_address;
}

void
cpu_add(u_int apic_id, char boot_cpu)
{

	if (apic_id > MAX_APIC_ID) {
		panic("SMP: APIC ID %d too high", apic_id);
		return;
	}
	KASSERT(cpu_info[apic_id].cpu_present == 0, ("CPU %d added twice",
	    apic_id));
	cpu_info[apic_id].cpu_present = 1;
	if (boot_cpu) {
		KASSERT(boot_cpu_id == -1,
		    ("CPU %d claims to be BSP, but CPU %d already is", apic_id,
		    boot_cpu_id));
		boot_cpu_id = apic_id;
		cpu_info[apic_id].cpu_bsp = 1;
	}
	if (mp_ncpus < MAXCPU)
		mp_ncpus++;
	if (bootverbose)
		printf("SMP: Added CPU %d (%s)\n", apic_id, boot_cpu ? "BSP" :
		    "AP");
}

void
cpu_mp_setmaxid(void)
{

	mp_maxid = MAXCPU - 1;
}

int
cpu_mp_probe(void)
{

	/*
	 * Always record BSP in CPU map so that the mbuf init code works
	 * correctly.
	 */
	all_cpus = 1;
	if (mp_ncpus == 0) {
		/*
		 * No CPUs were found, so this must be a UP system.  Setup
		 * the variables to represent a system with a single CPU
		 * with an id of 0.
		 */
		mp_ncpus = 1;
		return (0);
	}

	/* At least one CPU was found. */
	if (mp_ncpus == 1) {
		/*
		 * One CPU was found, so this must be a UP system with
		 * an I/O APIC.
		 */
		return (0);
	}

	/* At least two CPUs were found. */
	return (1);
}

/*
 * Initialize the IPI handlers and start up the APs.
 */
void
cpu_mp_start(void)
{
	int i;
	u_int threads_per_cache, p[4];

	POSTCODE(MP_START_POST);

	/* Initialize the logical ID to APIC ID table. */
	for (i = 0; i < MAXCPU; i++) {
		cpu_apic_ids[i] = -1;
		cpu_ipi_pending[i] = 0;
	}

	/* Install an inter-CPU IPI for TLB invalidation */
	setidt(IPI_INVLTLB, IDTVEC(invltlb),
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(IPI_INVLPG, IDTVEC(invlpg),
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(IPI_INVLRNG, IDTVEC(invlrng),
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* Install an inter-CPU IPI for cache invalidation. */
	setidt(IPI_INVLCACHE, IDTVEC(invlcache),
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* Install an inter-CPU IPI for lazy pmap release */
	setidt(IPI_LAZYPMAP, IDTVEC(lazypmap),
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* Install an inter-CPU IPI for all-CPU rendezvous */
	setidt(IPI_RENDEZVOUS, IDTVEC(rendezvous),
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* Install generic inter-CPU IPI handler */
	setidt(IPI_BITMAP_VECTOR, IDTVEC(ipi_intr_bitmap_handler),
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* Install an inter-CPU IPI for CPU stop/restart */
	setidt(IPI_STOP, IDTVEC(cpustop),
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));


	/* Set boot_cpu_id if needed. */
	if (boot_cpu_id == -1) {
		boot_cpu_id = PCPU_GET(apic_id);
		cpu_info[boot_cpu_id].cpu_bsp = 1;
	} else
		KASSERT(boot_cpu_id == PCPU_GET(apic_id),
		    ("BSP's APIC ID doesn't match boot_cpu_id"));
	cpu_apic_ids[0] = boot_cpu_id;

	assign_cpu_ids();

	/* Start each Application Processor */
	start_all_aps();

	/* Setup the initial logical CPUs info. */
	logical_cpus = logical_cpus_mask = 0;
	if (cpu_feature & CPUID_HTT)
		logical_cpus = (cpu_procinfo & CPUID_HTT_CORES) >> 16;

	/*
	 * Work out if hyperthreading is *really* enabled.  This
	 * is made really ugly by the fact that processors lie: Dual
	 * core processors claim to be hyperthreaded even when they're
	 * not, presumably because they want to be treated the same
	 * way as HTT with respect to per-cpu software licensing.
	 * At the time of writing (May 12, 2005) the only hyperthreaded
	 * cpus are from Intel, and Intel's dual-core processors can be
	 * identified via the "deterministic cache parameters" cpuid
	 * calls.
	 */
	/*
	 * First determine if this is an Intel processor which claims
	 * to have hyperthreading support.
	 */
	if ((cpu_feature & CPUID_HTT) &&
	    (strcmp(cpu_vendor, "GenuineIntel") == 0)) {
		/*
		 * If the "deterministic cache parameters" cpuid calls
		 * are available, use them.
		 */
		if (cpu_high >= 4) {
			/* Ask the processor about the L1 cache. */
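			/*
			 * In CPUID leaf 4, EAX bits 25:14 hold the maximum
			 * number of logical CPUs sharing this cache (minus
			 * one) and bits 4:0 hold the cache type, where 0
			 * means no further caches.  Note that with this
			 * loop bound only index 0 is ever queried.
			 */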
			for (i = 0; i < 1; i++) {
				cpuid_count(4, i, p);
				threads_per_cache = ((p[0] & 0x3ffc000) >> 14) + 1;
				if (hyperthreading_cpus < threads_per_cache)
					hyperthreading_cpus = threads_per_cache;
				if ((p[0] & 0x1f) == 0)
					break;
			}
		}

		/*
		 * If the deterministic cache parameters are not
		 * available, or if no caches were reported to exist,
		 * just accept what the HTT flag indicated.
		 */
		if (hyperthreading_cpus == 0)
			hyperthreading_cpus = logical_cpus;
	}

	set_interrupt_apic_ids();
}


/*
 * Print various information about the SMP system hardware and setup.
 */
void
cpu_mp_announce(void)
{
	int i, x;

	POSTCODE(MP_ANNOUNCE_POST);

	/* List CPUs */
	printf(" cpu0 (BSP): APIC ID: %2d\n", boot_cpu_id);
	for (i = 1, x = 0; x <= MAX_APIC_ID; x++) {
		if (!cpu_info[x].cpu_present || cpu_info[x].cpu_bsp)
			continue;
		if (cpu_info[x].cpu_disabled)
			printf(" cpu (AP): APIC ID: %2d (disabled)\n", x);
		else {
			KASSERT(i < mp_ncpus,
			    ("mp_ncpus and actual cpus are out of whack"));
			printf(" cpu%d (AP): APIC ID: %2d\n", i++, x);
		}
	}
}

/*
 * AP CPUs call this to initialize themselves.
 */
void
init_secondary(void)
{
	struct pcpu *pc;
	vm_offset_t addr;
	int	gsel_tss;
	int	x, myid;
	u_int	cr0;

	/* bootAP is set in start_ap() to our ID. */
	myid = bootAP;

	/* Get per-cpu data */
	pc = &__pcpu[myid];

	/* prime data page for it to use */
	pcpu_init(pc, myid, sizeof(struct pcpu));
	pc->pc_apic_id = cpu_apic_ids[myid];
	pc->pc_prvspace = pc;
	pc->pc_curthread = 0;

	gdt_segs[GPRIV_SEL].ssd_base = (int) pc;
	gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss;

	for (x = 0; x < NGDT; x++) {
		ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x].sd);
	}

	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	r_gdt.rd_base = (int) &gdt[myid * NGDT];
	lgdt(&r_gdt);			/* does magic intra-segment return */

	lidt(&r_idt);

	lldt(_default_ldt);
	PCPU_SET(currentldt, _default_ldt);

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	gdt[myid * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;
	PCPU_SET(common_tss.tss_esp0, 0); /* not used until after switch */
	PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
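	/*
	 * Point the I/O permission bitmap past the end of the TSS
	 * segment; an out-of-bounds offset disables the bitmap.
	 */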
	PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
	PCPU_SET(tss_gdt, &gdt[myid * NGDT + GPROC0_SEL].sd);
	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
	ltr(gsel_tss);

	PCPU_SET(fsgs_gdt, &gdt[myid * NGDT + GUFS_SEL].sd);

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);
	CHECK_WRITE(0x38, 5);

	/* Disable local APIC just to be sure. */
	lapic_disable();

	/* signal our startup to the BSP. */
	mp_naps++;
	CHECK_WRITE(0x39, 6);

	/* Spin until the BSP releases the APs. */
	while (!aps_ready)
		ia32_pause();

	/* BSP may have changed PTD while we were waiting */
	invltlb();
	for (addr = 0; addr < NKPT * NBPDR - 1; addr += PAGE_SIZE)
		invlpg(addr);

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
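	/*
	 * With the F00F workaround the IDT may have been relocated to a
	 * safe page by the BSP, so reload IDTR to pick up that address.
	 */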
	lidt(&r_idt);
#endif

	/* Initialize the PAT MSR if present. */
	pmap_init_pat();

	/* set up CPU registers and state */
	cpu_setregs();

	/* set up FPU state on the AP */
	npxinit(__INITIAL_NPXCW__);

	/* set up SSE registers */
	enable_sse();

	/* A quick check from sanity claus */
	if (PCPU_GET(apic_id) != lapic_id()) {
		printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
		printf("SMP: actual apic_id = %d\n", lapic_id());
		printf("SMP: correct apic_id = %d\n", PCPU_GET(apic_id));
		panic("cpuid mismatch! boom!!");
	}

	/* Initialize curthread. */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	PCPU_SET(curthread, PCPU_GET(idlethread));

	mtx_lock_spin(&ap_boot_mtx);

	/* Init local apic for irq's */
	lapic_setup(1);

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();

	smp_cpus++;

	CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
	printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));

	/* Determine if we are a logical CPU. */
	if (logical_cpus > 1 && PCPU_GET(apic_id) % logical_cpus != 0)
		logical_cpus_mask |= PCPU_GET(cpumask);

	/* Determine if we are a hyperthread. */
	if (hyperthreading_cpus > 1 &&
	    PCPU_GET(apic_id) % hyperthreading_cpus != 0)
		hyperthreading_cpus_mask |= PCPU_GET(cpumask);

	/* Build our map of 'other' CPUs. */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

	if (bootverbose)
		lapic_dump("AP");

	if (smp_cpus == mp_ncpus) {
		/* enable IPIs, TLB shootdown, freezes, etc. */
		atomic_store_rel_int(&smp_started, 1);
		smp_active = 1;	 /* historic */
	}

	mtx_unlock_spin(&ap_boot_mtx);

	/* wait until all the APs are up */
	while (smp_started == 0)
		ia32_pause();

	/* ok, now grab sched_lock and enter the scheduler */
	mtx_lock_spin(&sched_lock);

	/*
	 * Correct spinlock nesting.  The idle thread context that we are
	 * borrowing was created so that it would start out with a single
	 * spin lock (sched_lock) held in fork_trampoline().  Since we've
	 * explicitly acquired locks in this function, the nesting count
	 * is now 2 rather than 1.  Since we are nested, calling
	 * spinlock_exit() will simply adjust the counts without allowing
	 * spin lock using code to interrupt us.
	 */
	spinlock_exit();
	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));

	binuptime(PCPU_PTR(switchtime));
	PCPU_SET(switchticks, ticks);

	cpu_throw(NULL, choosethread());	/* doesn't return */

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}

/*******************************************************************
 * local functions and data
 */

/*
 * We tell the I/O APIC code about all the CPUs we want to receive
 * interrupts.  If we don't want certain CPUs to receive IRQs we
 * can simply not tell the I/O APIC code about them in this function.
 * We also do not tell it about the BSP since it tells itself about
 * the BSP internally to work with UP kernels and on UP machines.
 */
static void
set_interrupt_apic_ids(void)
{
	u_int apic_id;

	for (apic_id = 0; apic_id < MAXCPU; apic_id++) {
		if (!cpu_info[apic_id].cpu_present)
			continue;
		if (cpu_info[apic_id].cpu_bsp)
			continue;
		if (cpu_info[apic_id].cpu_disabled)
			continue;

		/* Don't let hyperthreads service interrupts. */
		if (hyperthreading_cpus > 1 &&
		    apic_id % hyperthreading_cpus != 0)
			continue;

		intr_add_cpu(apic_id);
	}
}

/*
 * Assign logical CPU IDs to local APICs.
 */
static void
assign_cpu_ids(void)
{
	u_int i;

	/* Check for explicitly disabled CPUs. */
	for (i = 0; i <= MAX_APIC_ID; i++) {
		if (!cpu_info[i].cpu_present || cpu_info[i].cpu_bsp)
			continue;

		/* Don't use this CPU if it has been disabled by a tunable. */
		if (resource_disabled("lapic", i)) {
			cpu_info[i].cpu_disabled = 1;
			continue;
		}
	}

	/*
	 * Assign CPU IDs to local APIC IDs and disable any CPUs
	 * beyond MAXCPU.  CPU 0 has already been assigned to the BSP,
	 * so we only have to assign IDs for APs.
	 */
	mp_ncpus = 1;
	for (i = 0; i <= MAX_APIC_ID; i++) {
		if (!cpu_info[i].cpu_present || cpu_info[i].cpu_bsp ||
		    cpu_info[i].cpu_disabled)
			continue;

		if (mp_ncpus < MAXCPU) {
			cpu_apic_ids[mp_ncpus] = i;
			mp_ncpus++;
		} else
			cpu_info[i].cpu_disabled = 1;
	}
	KASSERT(mp_maxid >= mp_ncpus - 1,
	    ("%s: counters out of sync: max %d, count %d", __func__, mp_maxid,
	    mp_ncpus));
}

/*
 * start each AP in our list
 */
static int
start_all_aps(void)
{
#ifndef PC98
	u_char mpbiosreason;
#endif
	uintptr_t kptbase;
	u_int32_t mpbioswarmvec;
	int apic_id, cpu, i;

	POSTCODE(START_ALL_APS_POST);

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	/* install the AP 1st level boot code */
	install_ap_tramp();

	/* save the current value of the warm-start vector */
	mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
#ifndef PC98
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);
#endif

	/* set up temporary P==V mapping for AP boot */
	/* XXX this is a hack, we should boot the AP on its own stack/PTD */
	kptbase = (uintptr_t)(void *)KPTphys;
	for (i = 0; i < NKPT; i++)
		PTD[i] = (pd_entry_t)(PG_V | PG_RW |
		    ((kptbase + i * PAGE_SIZE) & PG_FRAME));
	invltlb();

	/* start each AP */
	for (cpu = 1; cpu < mp_ncpus; cpu++) {
		apic_id = cpu_apic_ids[cpu];

		/* allocate and set up a boot stack data page */
		bootstacks[cpu] = (char *)kmem_alloc(kernel_map, KSTACK_PAGES * PAGE_SIZE);

		/* setup a vector to our boot code */
		*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
		*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
#ifndef PC98
		outb(CMOS_REG, BIOS_RESET);
		outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */
#endif

		bootSTK = (char *)bootstacks[cpu] + KSTACK_PAGES * PAGE_SIZE - 4;
		bootAP = cpu;

		/* attempt to start the Application Processor */
		CHECK_INIT(99);	/* setup checkpoints */
		if (!start_ap(apic_id)) {
			printf("AP #%d (PHY# %d) failed!\n", cpu, apic_id);
			CHECK_PRINT("trace");	/* show checkpoints */
			/* better panic as the AP may be running loose */
			printf("panic y/n? [y] ");
			if (cngetc() != 'n')
				panic("bye-bye");
		}
		CHECK_PRINT("trace");	/* show checkpoints */

		all_cpus |= (1 << cpu);		/* record AP in CPU map */
	}

	/* build our map of 'other' CPUs */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

	/* restore the warmstart vector */
	*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;

#ifndef PC98
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);
#endif

	/* Undo V==P hack from above */
	for (i = 0; i < NKPT; i++)
		PTD[i] = 0;
	pmap_invalidate_range(kernel_pmap, 0, NKPT * NBPDR - 1);

	/* number of APs actually started */
	return mp_naps;
}

/*
 * load the 1st level AP boot code into base memory.
 */

/* targets for relocation */
extern void bigJump(void);
extern void bootCodeSeg(void);
extern void bootDataSeg(void);
extern void MPentry(void);
extern u_int MP_GDT;
extern u_int mp_gdtbase;

static void
install_ap_tramp(void)
{
	int     x;
	int     size = *(int *) ((u_long) &bootMP_size);
	vm_offset_t va = boot_address + KERNBASE;
	u_char *src = (u_char *) ((u_long) bootMP);
	u_char *dst = (u_char *) va;
	u_int   boot_base = (u_int) bootMP;
	u_int8_t *dst8;
	u_int16_t *dst16;
	u_int32_t *dst32;

	POSTCODE(INSTALL_AP_TRAMP_POST);

	KASSERT(size <= PAGE_SIZE,
	    ("'size' does not fit into PAGE_SIZE, as expected."));
	pmap_kenter(va, boot_address);
	pmap_invalidate_page(kernel_pmap, va);
	for (x = 0; x < size; ++x)
		*dst++ = *src++;

	/*
	 * modify addresses in code we just moved to basemem.  unfortunately we
	 * need fairly detailed info about mpboot.s for this to work.  changes
	 * to mpboot.s might require changes here.
	 */

	/* boot code is located in KERNEL space */
	dst = (u_char *) va;

	/* modify the lgdt arg */
	dst32 = (u_int32_t *) (dst + ((u_int) &mp_gdtbase - boot_base));
	*dst32 = boot_address + ((u_int) &MP_GDT - boot_base);

	/* modify the ljmp target for MPentry() */
	dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1);
	*dst32 = ((u_int) MPentry - KERNBASE);

	/* modify the target for boot code segment */
	dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base));
	dst8 = (u_int8_t *) (dst16 + 1);
	*dst16 = (u_int) boot_address & 0xffff;
	*dst8 = ((u_int) boot_address >> 16) & 0xff;

	/* modify the target for boot data segment */
	dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base));
	dst8 = (u_int8_t *) (dst16 + 1);
	*dst16 = (u_int) boot_address & 0xffff;
	*dst8 = ((u_int) boot_address >> 16) & 0xff;
}

/*
 * This function starts the AP (application processor) identified
 * by the APIC ID 'apic_id'.  It does quite a "song and dance"
 * to accomplish this.  This is necessary because of the nuances
 * of the different hardware we might encounter.  It isn't pretty,
 * but it seems to work.
 */
static int
start_ap(int apic_id)
{
	int vector, ms;
	int cpus;

	POSTCODE(START_AP_POST);

	/* calculate the vector */
	vector = (boot_address >> 12) & 0xff;
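	/*
	 * The 8-bit STARTUP IPI vector is the physical page number of
	 * the trampoline; the AP starts executing in real mode at
	 * address vector << 12.
	 */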

	/* used as a watchpoint to signal AP startup */
	cpus = mp_naps;

	/*
	 * First we do an INIT/RESET IPI.  This INIT IPI might be run,
	 * resetting and running the target CPU; OR this INIT IPI might be
	 * latched (P5 bug), with the CPU waiting for a STARTUP IPI; OR this
	 * INIT IPI might be ignored.
	 */

	/* do an INIT IPI: assert RESET */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, apic_id);

	/* wait for pending status end */
	lapic_ipi_wait(-1);

	/* do an INIT IPI: deassert RESET */
	lapic_ipi_raw(APIC_DEST_ALLESELF | APIC_TRIGMOD_LEVEL |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, 0);

	/* wait for pending status end */
	DELAY(10000);		/* wait ~10 ms */
	lapic_ipi_wait(-1);

	/*
	 * Next we do a STARTUP IPI: the previous INIT IPI might still be
	 * latched (P5 bug), in which case this first STARTUP would terminate
	 * immediately and the previously started INIT IPI would continue; OR
	 * the previous INIT IPI has already run, and this STARTUP IPI will
	 * run; OR the previous INIT IPI was ignored, and this STARTUP IPI
	 * will run.
	 */

	/* do a STARTUP IPI */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
	    vector, apic_id);
	lapic_ipi_wait(-1);
	DELAY(200);		/* wait ~200 us */

	/*
	 * finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run IF
	 * the previous STARTUP IPI was cancelled by a latched INIT IPI.  OR
	 * this STARTUP IPI will be ignored, as only ONE STARTUP IPI is
	 * recognized after hardware RESET or INIT IPI.
	 */

	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
	    vector, apic_id);
	lapic_ipi_wait(-1);
	DELAY(200);		/* wait ~200 us */

	/* Wait up to 5 seconds for it to start. */
	for (ms = 0; ms < 5000; ms++) {
		if (mp_naps > cpus)
			return 1;	/* return SUCCESS */
		DELAY(1000);
	}
	return 0;		/* return FAILURE */
}

#ifdef COUNT_XINVLTLB_HITS
u_int xhits_gbl[MAXCPU];
u_int xhits_pg[MAXCPU];
u_int xhits_rng[MAXCPU];
SYSCTL_NODE(_debug, OID_AUTO, xhits, CTLFLAG_RW, 0, "");
SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, global, CTLFLAG_RW, &xhits_gbl,
    sizeof(xhits_gbl), "IU", "");
SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, page, CTLFLAG_RW, &xhits_pg,
    sizeof(xhits_pg), "IU", "");
SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, range, CTLFLAG_RW, &xhits_rng,
    sizeof(xhits_rng), "IU", "");

u_int ipi_global;
u_int ipi_page;
u_int ipi_range;
u_int ipi_range_size;
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_global, CTLFLAG_RW, &ipi_global, 0, "");
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_page, CTLFLAG_RW, &ipi_page, 0, "");
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_range, CTLFLAG_RW, &ipi_range, 0, "");
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_range_size, CTLFLAG_RW, &ipi_range_size,
    0, "");

u_int ipi_masked_global;
u_int ipi_masked_page;
u_int ipi_masked_range;
u_int ipi_masked_range_size;
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_global, CTLFLAG_RW,
    &ipi_masked_global, 0, "");
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_page, CTLFLAG_RW,
    &ipi_masked_page, 0, "");
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_range, CTLFLAG_RW,
    &ipi_masked_range, 0, "");
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_range_size, CTLFLAG_RW,
    &ipi_masked_range_size, 0, "");
#endif /* COUNT_XINVLTLB_HITS */

/*
 * Flush the TLB on all other CPUs.
 */
static void
smp_tlb_shootdown(u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
	u_int ncpu;

	ncpu = mp_ncpus - 1;	/* does not shootdown self */
	if (ncpu < 1)
		return;		/* no other cpus */
	if (!(read_eflags() & PSL_I))
		panic("%s: interrupts disabled", __func__);
	mtx_lock_spin(&smp_ipi_mtx);
	smp_tlb_addr1 = addr1;
	smp_tlb_addr2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	ipi_all_but_self(vector);
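	/*
	 * Each target CPU's invalidation handler increments smp_tlb_wait
	 * once it has processed the request; spin until all of them have
	 * checked in.
	 */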
	while (smp_tlb_wait < ncpu)
		ia32_pause();
	mtx_unlock_spin(&smp_ipi_mtx);
}

static void
smp_targeted_tlb_shootdown(u_int mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
	int ncpu, othercpus;

	othercpus = mp_ncpus - 1;
	if (mask == (u_int)-1) {
		ncpu = othercpus;
		if (ncpu < 1)
			return;
	} else {
		mask &= ~PCPU_GET(cpumask);
		if (mask == 0)
			return;
		ncpu = bitcount32(mask);
		if (ncpu > othercpus) {
			/* XXX this should be a panic offence */
			printf("SMP: tlb shootdown to %d other cpus (only have %d)\n",
			    ncpu, othercpus);
			ncpu = othercpus;
		}
		/* XXX should be a panic, implied by mask == 0 above */
		if (ncpu < 1)
			return;
	}
	if (!(read_eflags() & PSL_I))
		panic("%s: interrupts disabled", __func__);
	mtx_lock_spin(&smp_ipi_mtx);
	smp_tlb_addr1 = addr1;
	smp_tlb_addr2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	if (mask == (u_int)-1)
		ipi_all_but_self(vector);
	else
		ipi_selected(mask, vector);
	while (smp_tlb_wait < ncpu)
		ia32_pause();
	mtx_unlock_spin(&smp_ipi_mtx);
}

void
smp_cache_flush(void)
{

	if (smp_started)
		smp_tlb_shootdown(IPI_INVLCACHE, 0, 0);
}

void
smp_invltlb(void)
{

	if (smp_started) {
		smp_tlb_shootdown(IPI_INVLTLB, 0, 0);
#ifdef COUNT_XINVLTLB_HITS
		ipi_global++;
#endif
	}
}

void
smp_invlpg(vm_offset_t addr)
{

	if (smp_started) {
		smp_tlb_shootdown(IPI_INVLPG, addr, 0);
#ifdef COUNT_XINVLTLB_HITS
		ipi_page++;
#endif
	}
}

void
smp_invlpg_range(vm_offset_t addr1, vm_offset_t addr2)
{

	if (smp_started) {
		smp_tlb_shootdown(IPI_INVLRNG, addr1, addr2);
#ifdef COUNT_XINVLTLB_HITS
		ipi_range++;
		ipi_range_size += (addr2 - addr1) / PAGE_SIZE;
#endif
	}
}

void
smp_masked_invltlb(u_int mask)
{

	if (smp_started) {
		smp_targeted_tlb_shootdown(mask, IPI_INVLTLB, 0, 0);
#ifdef COUNT_XINVLTLB_HITS
		ipi_masked_global++;
#endif
	}
}

void
smp_masked_invlpg(u_int mask, vm_offset_t addr)
{

	if (smp_started) {
		smp_targeted_tlb_shootdown(mask, IPI_INVLPG, addr, 0);
#ifdef COUNT_XINVLTLB_HITS
		ipi_masked_page++;
#endif
	}
}

void
smp_masked_invlpg_range(u_int mask, vm_offset_t addr1, vm_offset_t addr2)
{

	if (smp_started) {
		smp_targeted_tlb_shootdown(mask, IPI_INVLRNG, addr1, addr2);
#ifdef COUNT_XINVLTLB_HITS
		ipi_masked_range++;
		ipi_masked_range_size += (addr2 - addr1) / PAGE_SIZE;
#endif
	}
}

void
ipi_bitmap_handler(struct clockframe frame)
{
	int cpu = PCPU_GET(cpuid);
	u_int ipi_bitmap;

	ipi_bitmap = atomic_readandclear_int(&cpu_ipi_pending[cpu]);

#ifdef IPI_PREEMPTION
	if (ipi_bitmap & (1 << IPI_PREEMPT)) {
#ifdef COUNT_IPIS
		(*ipi_preempt_counts[cpu])++;
#endif
		mtx_lock_spin(&sched_lock);
		/* Don't preempt the idle thread */
		if (curthread != PCPU_GET(idlethread)) {
			struct thread *running_thread = curthread;
			if (running_thread->td_critnest > 1)
				running_thread->td_owepreempt = 1;
			else
				mi_switch(SW_INVOL | SW_PREEMPT, NULL);
		}
		mtx_unlock_spin(&sched_lock);
	}
#endif

	if (ipi_bitmap & (1 << IPI_AST)) {
#ifdef COUNT_IPIS
		(*ipi_ast_counts[cpu])++;
#endif
		/* Nothing to do for AST */
	}
}

/*
 * send an IPI to a set of CPUs.
 */
void
ipi_selected(u_int32_t cpus, u_int ipi)
{
	int cpu;
	u_int bitmap = 0;
	u_int old_pending;
	u_int new_pending;

	if (IPI_IS_BITMAPED(ipi)) {
		bitmap = 1 << ipi;
		ipi = IPI_BITMAP_VECTOR;
	}
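	/*
	 * Bitmapped IPIs are multiplexed onto IPI_BITMAP_VECTOR: the
	 * per-CPU pending word records which logical IPIs are queued,
	 * and the hardware vector is only sent when the word was
	 * previously empty (see the cmpset loop below).
	 */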

	CTR3(KTR_SMP, "%s: cpus: %x ipi: %x", __func__, cpus, ipi);
	while ((cpu = ffs(cpus)) != 0) {
		cpu--;
		cpus &= ~(1 << cpu);

		KASSERT(cpu_apic_ids[cpu] != -1,
		    ("IPI to non-existent CPU %d", cpu));

		if (bitmap) {
			do {
				old_pending = cpu_ipi_pending[cpu];
				new_pending = old_pending | bitmap;
			} while (!atomic_cmpset_int(&cpu_ipi_pending[cpu],
			    old_pending, new_pending));

			if (old_pending)
				continue;
		}

		lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]);
	}
}

/*
 * send an IPI containing 'ipi' to all CPUs, including myself
 */
void
ipi_all(u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_ALL);
}

/*
 * send an IPI to all CPUs EXCEPT myself
 */
void
ipi_all_but_self(u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_OTHERS);
}

/*
 * send an IPI to myself
 */
void
ipi_self(u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_SELF);
}

#ifdef KDB_STOP_NMI
/*
 * send NMI IPI to selected CPUs
 */

#define	BEFORE_SPIN	1000000

void
ipi_nmi_selected(u_int32_t cpus)
{
	int cpu;
	register_t icrlo;

	icrlo = APIC_DELMODE_NMI | APIC_DESTMODE_PHY | APIC_LEVEL_ASSERT
	    | APIC_TRIGMOD_EDGE;

	CTR2(KTR_SMP, "%s: cpus: %x nmi", __func__, cpus);

	atomic_set_int(&ipi_nmi_pending, cpus);

	while ((cpu = ffs(cpus)) != 0) {
		cpu--;
		cpus &= ~(1 << cpu);

		KASSERT(cpu_apic_ids[cpu] != -1,
		    ("IPI NMI to non-existent CPU %d", cpu));

		/* Wait for an earlier IPI to finish. */
		if (!lapic_ipi_wait(BEFORE_SPIN))
			panic("ipi_nmi_selected: previous IPI has not cleared");

		lapic_ipi_raw(icrlo, cpu_apic_ids[cpu]);
	}
}

int
ipi_nmi_handler(void)
{
	int cpu = PCPU_GET(cpuid);
1311
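	/* A return value of 1 tells the trap code this NMI wasn't ours. */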
	if (!(atomic_load_acq_int(&ipi_nmi_pending) & (1 << cpu)))
		return 1;

	atomic_clear_int(&ipi_nmi_pending, 1 << cpu);

	savectx(&stoppcbs[cpu]);

	/* Indicate that we are stopped */
	atomic_set_int(&stopped_cpus, 1 << cpu);

	/* Wait for restart */
	while (!(atomic_load_acq_int(&started_cpus) & (1 << cpu)))
		ia32_pause();

	atomic_clear_int(&started_cpus, 1 << cpu);
	atomic_clear_int(&stopped_cpus, 1 << cpu);

	if (cpu == 0 && cpustop_restartfunc != NULL)
		cpustop_restartfunc();

	return 0;
}

#endif /* KDB_STOP_NMI */

/*
 * This is called once the rest of the system is up and running and we're
 * ready to let the APs out of the pen.
 */
static void
release_aps(void *dummy __unused)
{

	if (mp_ncpus == 1)
		return;
	mtx_lock_spin(&sched_lock);
	atomic_store_rel_int(&aps_ready, 1);
	while (smp_started == 0)
		ia32_pause();
	mtx_unlock_spin(&sched_lock);
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);

static int
sysctl_hlt_cpus(SYSCTL_HANDLER_ARGS)
{
	u_int mask;
	int error;

	mask = hlt_cpus_mask;
	error = sysctl_handle_int(oidp, &mask, 0, req);
	if (error || !req->newptr)
		return (error);

	if (logical_cpus_mask != 0 &&
	    (mask & logical_cpus_mask) == logical_cpus_mask)
		hlt_logical_cpus = 1;
	else
		hlt_logical_cpus = 0;

	if (! hyperthreading_allowed)
		mask |= hyperthreading_cpus_mask;

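	/* Never allow every CPU to halt; keep the BSP (CPU 0) running. */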
	if ((mask & all_cpus) == all_cpus)
		mask &= ~(1<<0);
	hlt_cpus_mask = mask;
	return (error);
}
SYSCTL_PROC(_machdep, OID_AUTO, hlt_cpus, CTLTYPE_INT|CTLFLAG_RW,
    0, 0, sysctl_hlt_cpus, "IU",
    "Bitmap of CPUs to halt.  101 (binary) will halt CPUs 0 and 2.");

static int
sysctl_hlt_logical_cpus(SYSCTL_HANDLER_ARGS)
{
	int disable, error;

	disable = hlt_logical_cpus;
	error = sysctl_handle_int(oidp, &disable, 0, req);
	if (error || !req->newptr)
		return (error);

	if (disable)
		hlt_cpus_mask |= logical_cpus_mask;
	else
		hlt_cpus_mask &= ~logical_cpus_mask;

	if (! hyperthreading_allowed)
		hlt_cpus_mask |= hyperthreading_cpus_mask;

	if ((hlt_cpus_mask & all_cpus) == all_cpus)
		hlt_cpus_mask &= ~(1<<0);

	hlt_logical_cpus = disable;
	return (error);
}

static int
sysctl_hyperthreading_allowed(SYSCTL_HANDLER_ARGS)
{
	int allowed, error;

	allowed = hyperthreading_allowed;
	error = sysctl_handle_int(oidp, &allowed, 0, req);
	if (error || !req->newptr)
		return (error);

	if (allowed)
		hlt_cpus_mask &= ~hyperthreading_cpus_mask;
	else
		hlt_cpus_mask |= hyperthreading_cpus_mask;

	if (logical_cpus_mask != 0 &&
	    (hlt_cpus_mask & logical_cpus_mask) == logical_cpus_mask)
		hlt_logical_cpus = 1;
	else
		hlt_logical_cpus = 0;

	if ((hlt_cpus_mask & all_cpus) == all_cpus)
		hlt_cpus_mask &= ~(1<<0);

	hyperthreading_allowed = allowed;
	return (error);
}

static void
cpu_hlt_setup(void *dummy __unused)
{

	if (logical_cpus_mask != 0) {
		TUNABLE_INT_FETCH("machdep.hlt_logical_cpus",
		    &hlt_logical_cpus);
		sysctl_ctx_init(&logical_cpu_clist);
		SYSCTL_ADD_PROC(&logical_cpu_clist,
		    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
		    "hlt_logical_cpus", CTLTYPE_INT|CTLFLAG_RW, 0, 0,
		    sysctl_hlt_logical_cpus, "IU", "");
		SYSCTL_ADD_UINT(&logical_cpu_clist,
		    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
		    "logical_cpus_mask", CTLTYPE_INT|CTLFLAG_RD,
		    &logical_cpus_mask, 0, "");

		if (hlt_logical_cpus)
			hlt_cpus_mask |= logical_cpus_mask;

		/*
		 * If necessary for security purposes, force
		 * hyperthreading off, regardless of the value
		 * of hlt_logical_cpus.
		 */
		if (hyperthreading_cpus_mask) {
			TUNABLE_INT_FETCH("machdep.hyperthreading_allowed",
			    &hyperthreading_allowed);
			SYSCTL_ADD_PROC(&logical_cpu_clist,
			    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
			    "hyperthreading_allowed", CTLTYPE_INT|CTLFLAG_RW,
			    0, 0, sysctl_hyperthreading_allowed, "IU", "");
			if (! hyperthreading_allowed)
				hlt_cpus_mask |= hyperthreading_cpus_mask;
		}
	}
}
SYSINIT(cpu_hlt, SI_SUB_SMP, SI_ORDER_ANY, cpu_hlt_setup, NULL);

int
mp_grab_cpu_hlt(void)
{
	u_int mask = PCPU_GET(cpumask);
#ifdef MP_WATCHDOG
	u_int cpuid = PCPU_GET(cpuid);
#endif
	int retval;

#ifdef MP_WATCHDOG
	ap_watchdog(cpuid);
#endif

	retval = mask & hlt_cpus_mask;
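	/*
	 * "sti; hlt" enables interrupts and halts atomically: the sti
	 * does not take effect until after the following hlt executes,
	 * so no wakeup interrupt can slip in between the two.
	 */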
	while (mask & hlt_cpus_mask)
		__asm __volatile("sti; hlt" : : : "memory");
	return (retval);
}

#ifdef COUNT_IPIS
/*
 * Setup interrupt counters for IPI handlers.
 */
static void
mp_ipi_intrcnt(void *dummy)
{
	char buf[64];
	int i;

	for (i = 0; i <= mp_maxid; i++) {
		if (CPU_ABSENT(i))
			continue;
		snprintf(buf, sizeof(buf), "cpu%d: invltlb", i);
		intrcnt_add(buf, &ipi_invltlb_counts[i]);
		snprintf(buf, sizeof(buf), "cpu%d: invlrng", i);
		intrcnt_add(buf, &ipi_invlrng_counts[i]);
		snprintf(buf, sizeof(buf), "cpu%d: invlpg", i);
		intrcnt_add(buf, &ipi_invlpg_counts[i]);
#ifdef IPI_PREEMPTION
		snprintf(buf, sizeof(buf), "cpu%d: preempt", i);
		intrcnt_add(buf, &ipi_preempt_counts[i]);
#endif
		snprintf(buf, sizeof(buf), "cpu%d: ast", i);
		intrcnt_add(buf, &ipi_ast_counts[i]);
		snprintf(buf, sizeof(buf), "cpu%d: rendezvous", i);
		intrcnt_add(buf, &ipi_rendezvous_counts[i]);
		snprintf(buf, sizeof(buf), "cpu%d: lazypmap", i);
		intrcnt_add(buf, &ipi_lazypmap_counts[i]);
	}
}
SYSINIT(mp_ipi_intrcnt, SI_SUB_INTR, SI_ORDER_MIDDLE, mp_ipi_intrcnt, NULL);
#endif