/*-
 * Copyright (c) 1996, by Steve Passe
 * Copyright (c) 2003, by Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.4/sys/amd64/amd64/mp_machdep.c 333371 2018-05-08 17:12:10Z gordon $");

#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_sched.h"
#include "opt_smp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#ifdef GPROF
#include <sys/gmon.h>
#endif
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <x86/apicreg.h>
#include <machine/clock.h>
#include <machine/cputypes.h>
#include <machine/cpufunc.h>
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/tss.h>
#include <machine/cpu.h>

#define WARMBOOT_TARGET		0
#define WARMBOOT_OFF		(KERNBASE + 0x0467)
#define WARMBOOT_SEG		(KERNBASE + 0x0469)

#define CMOS_REG		(0x70)
#define CMOS_DATA		(0x71)
#define BIOS_RESET		(0x0f)
#define BIOS_WARM		(0x0a)

/* lock region used by kernel profiling */
int mcount_lock;
int mp_naps;			/* # of Application Processors */
int boot_cpu_id = -1;		/* designated BSP */

extern struct pcpu __pcpu[];

/* AP uses this during bootstrap.  Do not staticize.  */
char *bootSTK;
static int bootAP;

/* Free these after use */
void *bootstacks[MAXCPU];

/* Temporary variables for init_secondary()  */
char *doublefault_stack;
char *nmi_stack;
char *dbg_stack;
void *dpcpu;

struct pcb stoppcbs[MAXCPU];
struct susppcb **susppcbs;

/* Variables needed for SMP TLB shootdown. */
vm_offset_t smp_tlb_addr2;
struct invpcid_descr smp_tlb_invpcid;
volatile int smp_tlb_wait;
uint64_t pcid_cr3;
pmap_t smp_tlb_pmap;
extern int invpcid_works;

#ifdef COUNT_IPIS
/* Interrupt counts. */
static u_long *ipi_preempt_counts[MAXCPU];
static u_long *ipi_ast_counts[MAXCPU];
u_long *ipi_invltlb_counts[MAXCPU];
u_long *ipi_invlrng_counts[MAXCPU];
u_long *ipi_invlpg_counts[MAXCPU];
u_long *ipi_invlcache_counts[MAXCPU];
u_long *ipi_rendezvous_counts[MAXCPU];
static u_long *ipi_hardclock_counts[MAXCPU];
#endif

/* Default cpu_ops implementation. */
struct cpu_ops cpu_ops = {
	.ipi_vectored = lapic_ipi_vectored
};

extern inthand_t IDTVEC(fast_syscall), IDTVEC(fast_syscall32);

extern int pmap_pcid_enabled;

/*
 * Local data and functions.
 */

static volatile cpuset_t ipi_nmi_pending;
/* used to hold the APs until we are ready to release them */
static struct mtx ap_boot_mtx;

/* Set to 1 once we're ready to let the APs out of the pen. */
static volatile int aps_ready = 0;

/*
 * Store data from cpu_add() until later in the boot when we actually set up
 * the APs.
 */
struct cpu_info {
	int	cpu_present:1;
	int	cpu_bsp:1;
	int	cpu_disabled:1;
	int	cpu_hyperthread:1;
} static cpu_info[MAX_APIC_ID + 1];
int cpu_apic_ids[MAXCPU];
int apic_cpuids[MAX_APIC_ID + 1];

/* Holds pending bitmap-based IPIs per CPU */
volatile u_int cpu_ipi_pending[MAXCPU];

static u_int boot_address;
static int cpu_logical;			/* logical cpus per core */
static int cpu_cores;			/* cores per package */

static void	assign_cpu_ids(void);
static void	set_interrupt_apic_ids(void);
static int	start_all_aps(void);
static int	start_ap(int apic_id);
static void	release_aps(void *dummy);

static u_int	hyperthreading_cpus;	/* logical cpus sharing L1 cache */
static int	hyperthreading_allowed = 1;
static u_int	bootMP_size;

static void
mem_range_AP_init(void)
{
	if (mem_range_softc.mr_op && mem_range_softc.mr_op->initAP)
		mem_range_softc.mr_op->initAP(&mem_range_softc);
}

static void
topo_probe_amd(void)
{
	int core_id_bits;
	int id;

	/* AMD processors do not support HTT. */
	cpu_logical = 1;

	if ((amd_feature2 & AMDID2_CMP) == 0) {
		cpu_cores = 1;
		return;
	}

	core_id_bits = (cpu_procinfo2 & AMDID_COREID_SIZE) >>
	    AMDID_COREID_SIZE_SHIFT;
	if (core_id_bits == 0) {
		cpu_cores = (cpu_procinfo2 & AMDID_CMP_CORES) + 1;
		return;
	}

	/* Fam 10h and newer should get here. */
	for (id = 0; id <= MAX_APIC_ID; id++) {
		/* Check logical CPU availability. */
		if (!cpu_info[id].cpu_present || cpu_info[id].cpu_disabled)
			continue;
		/* Check if logical CPU has the same package ID. */
		if ((id >> core_id_bits) != (boot_cpu_id >> core_id_bits))
			continue;
		cpu_cores++;
	}
}

/*
 * Round up to the next power of two, if necessary, and then
 * take log2.
 * Returns -1 if argument is zero.
 */
static __inline int
mask_width(u_int x)
{

	return (fls(x << (1 - powerof2(x))) - 1);
}
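
/*
 * Editorial example (not part of the original source), for illustration:
 * mask_width(1) == 0, mask_width(4) == 2, and mask_width(6) ==
 * fls(12) - 1 == 3, i.e. the number of APIC ID bits needed to enumerate
 * six entities; mask_width(0) == fls(0) - 1 == -1, matching the comment
 * above.
 */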

static void
topo_probe_0x4(void)
{
	u_int p[4];
	int pkg_id_bits;
	int core_id_bits;
	int max_cores;
	int max_logical;
	int id;

	/* Both zero and one here mean one logical processor per package. */
	max_logical = (cpu_feature & CPUID_HTT) != 0 ?
	    (cpu_procinfo & CPUID_HTT_CORES) >> 16 : 1;
	if (max_logical <= 1)
		return;

	/*
	 * Because of the uniformity assumption we examine only
	 * those logical processors that belong to the same
	 * package as the BSP.  Further, we count the number of
	 * logical processors that belong to the same core
	 * as the BSP, thus deducing the number of threads per core.
	 */
	if (cpu_high >= 0x4) {
		cpuid_count(0x04, 0, p);
		max_cores = ((p[0] >> 26) & 0x3f) + 1;
	} else
		max_cores = 1;
	core_id_bits = mask_width(max_logical/max_cores);
	if (core_id_bits < 0)
		return;
	pkg_id_bits = core_id_bits + mask_width(max_cores);

	for (id = 0; id <= MAX_APIC_ID; id++) {
		/* Check logical CPU availability. */
		if (!cpu_info[id].cpu_present || cpu_info[id].cpu_disabled)
			continue;
		/* Check if logical CPU has the same package ID. */
		if ((id >> pkg_id_bits) != (boot_cpu_id >> pkg_id_bits))
			continue;
		cpu_cores++;
		/* Check if logical CPU has the same package and core IDs. */
		if ((id >> core_id_bits) == (boot_cpu_id >> core_id_bits))
			cpu_logical++;
	}

	KASSERT(cpu_cores >= 1 && cpu_logical >= 1,
	    ("topo_probe_0x4 couldn't find BSP"));

	cpu_cores /= cpu_logical;
	hyperthreading_cpus = cpu_logical;
}
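
/*
 * A worked example (hypothetical numbers, for illustration): a package
 * reporting max_logical = 8 and max_cores = 4 gives core_id_bits =
 * mask_width(8 / 4) = 1 and pkg_id_bits = 1 + mask_width(4) = 3.  The
 * scan above then counts 8 logical CPUs sharing the BSP's package bits
 * and 2 sharing its core bits, so cpu_logical = 2 and, after the final
 * division, cpu_cores = 4.
 */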

static void
topo_probe_0xb(void)
{
	u_int p[4];
	int bits;
	int cnt;
	int i;
	int logical;
	int type;
	int x;

	/* We only support three levels for now. */
	for (i = 0; i < 3; i++) {
		cpuid_count(0x0b, i, p);

		/* Fall back if CPU leaf 11 doesn't really exist. */
		if (i == 0 && p[1] == 0) {
			topo_probe_0x4();
			return;
		}

		bits = p[0] & 0x1f;
		logical = p[1] &= 0xffff;
		type = (p[2] >> 8) & 0xff;
		if (type == 0 || logical == 0)
			break;
		/*
		 * Because of the uniformity assumption we examine only
		 * those logical processors that belong to the same
		 * package as the BSP.
		 */
		for (cnt = 0, x = 0; x <= MAX_APIC_ID; x++) {
			if (!cpu_info[x].cpu_present ||
			    cpu_info[x].cpu_disabled)
				continue;
			if (x >> bits == boot_cpu_id >> bits)
				cnt++;
		}
		if (type == CPUID_TYPE_SMT)
			cpu_logical = cnt;
		else if (type == CPUID_TYPE_CORE)
			cpu_cores = cnt;
	}
	if (cpu_logical == 0)
		cpu_logical = 1;
	cpu_cores /= cpu_logical;
}

/*
 * Both topology discovery code and code that consumes topology
 * information assume top-down uniformity of the topology.
 * That is, all physical packages must be identical and each
 * core in a package must have the same number of threads.
 * Topology information is queried only on the BSP, on which this
 * code runs and for which it can query CPUID information.
 * Then topology is extrapolated on all packages using the
 * uniformity assumption.
 */
static void
topo_probe(void)
{
	static int cpu_topo_probed = 0;

	if (cpu_topo_probed)
		return;

	CPU_ZERO(&logical_cpus_mask);
	if (mp_ncpus <= 1)
		cpu_cores = cpu_logical = 1;
	else if (cpu_vendor_id == CPU_VENDOR_AMD)
		topo_probe_amd();
	else if (cpu_vendor_id == CPU_VENDOR_INTEL) {
		/*
		 * See the Intel(R) 64 Architecture Processor
		 * Topology Enumeration article for details.
		 *
		 * Note that the 0x1 <= cpu_high < 4 case should be
		 * compatible with topo_probe_0x4() logic when
		 * CPUID.1:EBX[23:16] > 0 (cpu_cores will be 1)
		 * or it should trigger the fallback otherwise.
		 */
		if (cpu_high >= 0xb)
			topo_probe_0xb();
		else if (cpu_high >= 0x1)
			topo_probe_0x4();
	}

	/*
	 * Fallback: assume each logical CPU is in a separate
	 * physical package.  That is, no multi-core, no SMT.
	 */
	if (cpu_cores == 0 || cpu_logical == 0)
		cpu_cores = cpu_logical = 1;
	cpu_topo_probed = 1;
}
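
/*
 * For illustration (hypothetical configuration, not from the original
 * source): on a two-package system with four cores per package and two
 * SMT threads per core, the probes above leave cpu_cores = 4 and
 * cpu_logical = 2; mp_ncpus would be 16, and cpu_topo() below
 * extrapolates the remaining packages from those two numbers under the
 * uniformity assumption.
 */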

struct cpu_group *
cpu_topo(void)
{
	int cg_flags;

	/*
	 * Determine whether any threading flags are
	 * necessary.
	 */
	topo_probe();
	if (cpu_logical > 1 && hyperthreading_cpus)
		cg_flags = CG_FLAG_HTT;
	else if (cpu_logical > 1)
		cg_flags = CG_FLAG_SMT;
	else
		cg_flags = 0;
	if (mp_ncpus % (cpu_cores * cpu_logical) != 0) {
		printf("WARNING: Non-uniform processors.\n");
		printf("WARNING: Using suboptimal topology.\n");
		return (smp_topo_none());
	}
	/*
	 * No multi-core or hyper-threaded.
	 */
	if (cpu_logical * cpu_cores == 1)
		return (smp_topo_none());
	/*
	 * Only HTT no multi-core.
	 */
	if (cpu_logical > 1 && cpu_cores == 1)
		return (smp_topo_1level(CG_SHARE_L1, cpu_logical, cg_flags));
	/*
	 * Only multi-core no HTT.
	 */
	if (cpu_cores > 1 && cpu_logical == 1)
		return (smp_topo_1level(CG_SHARE_L2, cpu_cores, cg_flags));
	/*
	 * Both HTT and multi-core.
	 */
	return (smp_topo_2level(CG_SHARE_L2, cpu_cores,
	    CG_SHARE_L1, cpu_logical, cg_flags));
}

/*
 * Calculate usable address in base memory for AP trampoline code.
 */
u_int
mp_bootaddress(u_int basemem)
{

	bootMP_size = mptramp_end - mptramp_start;
	boot_address = trunc_page(basemem * 1024); /* round down to 4k boundary */
	if (((basemem * 1024) - boot_address) < bootMP_size)
		boot_address -= PAGE_SIZE;	/* not enough, lower by 4k */
	/* 3 levels of page table pages */
	mptramp_pagetables = boot_address - (PAGE_SIZE * 3);

	return mptramp_pagetables;
}
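
/*
 * Illustrative layout (hypothetical basemem of 639KB, i.e. 0x9fc00, not
 * from the original source): boot_address = trunc_page(0x9fc00) =
 * 0x9f000 holds the trampoline, and the function returns
 * mptramp_pagetables = 0x9c000, so the PML4, PDP and PD pages land at
 * 0x9c000, 0x9d000 and 0x9e000 respectively (see the pt4/pt3/pt2 setup
 * in start_all_aps() below).
 */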

void
cpu_add(u_int apic_id, char boot_cpu)
{

	if (apic_id > MAX_APIC_ID) {
		panic("SMP: APIC ID %d too high", apic_id);
		return;
	}
	KASSERT(cpu_info[apic_id].cpu_present == 0, ("CPU %d added twice",
	    apic_id));
	cpu_info[apic_id].cpu_present = 1;
	if (boot_cpu) {
		KASSERT(boot_cpu_id == -1,
		    ("CPU %d claims to be BSP, but CPU %d already is", apic_id,
		    boot_cpu_id));
		boot_cpu_id = apic_id;
		cpu_info[apic_id].cpu_bsp = 1;
	}
	if (mp_ncpus < MAXCPU) {
		mp_ncpus++;
		mp_maxid = mp_ncpus - 1;
	}
	if (bootverbose)
		printf("SMP: Added CPU %d (%s)\n", apic_id, boot_cpu ? "BSP" :
		    "AP");
}

void
cpu_mp_setmaxid(void)
{

	/*
	 * mp_maxid should already be set by calls to cpu_add().
	 * Just sanity check its value here.
	 */
	if (mp_ncpus == 0)
		KASSERT(mp_maxid == 0,
		    ("%s: mp_ncpus is zero, but mp_maxid is not", __func__));
	else if (mp_ncpus == 1)
		mp_maxid = 0;
	else
		KASSERT(mp_maxid >= mp_ncpus - 1,
		    ("%s: counters out of sync: max %d, count %d", __func__,
		    mp_maxid, mp_ncpus));
}

int
cpu_mp_probe(void)
{

	/*
	 * Always record BSP in CPU map so that the mbuf init code works
	 * correctly.
	 */
	CPU_SETOF(0, &all_cpus);
	if (mp_ncpus == 0) {
		/*
		 * No CPUs were found, so this must be a UP system.  Set up
		 * the variables to represent a system with a single CPU
		 * with an id of 0.
		 */
		mp_ncpus = 1;
		return (0);
	}

	/* At least one CPU was found. */
	if (mp_ncpus == 1) {
		/*
		 * One CPU was found, so this must be a UP system with
		 * an I/O APIC.
		 */
		mp_maxid = 0;
		return (0);
	}

	/* At least two CPUs were found. */
	return (1);
}

/*
 * Initialize the IPI handlers and start up the APs.
 */
void
cpu_mp_start(void)
{
	int i;

	/* Initialize the logical ID to APIC ID table. */
	for (i = 0; i < MAXCPU; i++) {
		cpu_apic_ids[i] = -1;
		cpu_ipi_pending[i] = 0;
	}

	/* Install an inter-CPU IPI for TLB invalidation */
	if (pmap_pcid_enabled) {
		setidt(IPI_INVLTLB, IDTVEC(invltlb_pcid), SDT_SYSIGT,
		    SEL_KPL, 0);
		setidt(IPI_INVLPG, IDTVEC(invlpg_pcid), SDT_SYSIGT,
		    SEL_KPL, 0);
	} else {
		setidt(IPI_INVLTLB, IDTVEC(invltlb), SDT_SYSIGT, SEL_KPL, 0);
		setidt(IPI_INVLPG, IDTVEC(invlpg), SDT_SYSIGT, SEL_KPL, 0);
	}
	setidt(IPI_INVLRNG, IDTVEC(invlrng), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for cache invalidation. */
	setidt(IPI_INVLCACHE, IDTVEC(invlcache), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for all-CPU rendezvous */
	setidt(IPI_RENDEZVOUS, IDTVEC(rendezvous), SDT_SYSIGT, SEL_KPL, 0);

	/* Install generic inter-CPU IPI handler */
	setidt(IPI_BITMAP_VECTOR, IDTVEC(ipi_intr_bitmap_handler),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for CPU stop/restart */
	setidt(IPI_STOP, IDTVEC(cpustop), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for CPU suspend/resume */
	setidt(IPI_SUSPEND, IDTVEC(cpususpend), SDT_SYSIGT, SEL_KPL, 0);

	/* Set boot_cpu_id if needed. */
	if (boot_cpu_id == -1) {
		boot_cpu_id = PCPU_GET(apic_id);
		cpu_info[boot_cpu_id].cpu_bsp = 1;
	} else
		KASSERT(boot_cpu_id == PCPU_GET(apic_id),
		    ("BSP's APIC ID doesn't match boot_cpu_id"));

	/* Probe logical/physical core configuration. */
	topo_probe();

	assign_cpu_ids();

	/* Start each Application Processor */
	start_all_aps();

	set_interrupt_apic_ids();
}

/*
 * Print various information about the SMP system hardware and setup.
 */
void
cpu_mp_announce(void)
{
	const char *hyperthread;
	int i;

	printf("FreeBSD/SMP: %d package(s) x %d core(s)",
	    mp_ncpus / (cpu_cores * cpu_logical), cpu_cores);
	if (hyperthreading_cpus > 1)
		printf(" x %d HTT threads", cpu_logical);
	else if (cpu_logical > 1)
		printf(" x %d SMT threads", cpu_logical);
	printf("\n");

	/* List active CPUs first. */
	printf(" cpu0 (BSP): APIC ID: %2d\n", boot_cpu_id);
	for (i = 1; i < mp_ncpus; i++) {
		if (cpu_info[cpu_apic_ids[i]].cpu_hyperthread)
			hyperthread = "/HT";
		else
			hyperthread = "";
		printf(" cpu%d (AP%s): APIC ID: %2d\n", i, hyperthread,
		    cpu_apic_ids[i]);
	}

	/* List disabled CPUs last. */
	for (i = 0; i <= MAX_APIC_ID; i++) {
		if (!cpu_info[i].cpu_present || !cpu_info[i].cpu_disabled)
			continue;
		if (cpu_info[i].cpu_hyperthread)
			hyperthread = "/HT";
		else
			hyperthread = "";
		printf(" cpu (AP%s): APIC ID: %2d (disabled)\n", hyperthread,
		    i);
	}
}

/*
 * AP CPUs call this to initialize themselves.
 */
void
init_secondary(void)
{
	struct pcpu *pc;
	struct nmi_pcpu *np;
	u_int64_t msr, cr0;
	u_int cpuid;
	int cpu, gsel_tss, x;
	struct region_descriptor ap_gdt;

	/* Set by the startup code for us to use */
	cpu = bootAP;

	/* Init tss */
	common_tss[cpu] = common_tss[0];
	common_tss[cpu].tss_rsp0 = 0;	/* not used until after switch */
	common_tss[cpu].tss_iobase = sizeof(struct amd64tss) +
	    IOPAGES * PAGE_SIZE;
	common_tss[cpu].tss_ist1 = (long)&doublefault_stack[PAGE_SIZE];

	/* The NMI stack runs on IST2. */
	np = ((struct nmi_pcpu *) &nmi_stack[PAGE_SIZE]) - 1;
	common_tss[cpu].tss_ist2 = (long) np;

	/* The DB# stack runs on IST4. */
	np = ((struct nmi_pcpu *) &dbg_stack[PAGE_SIZE]) - 1;
	common_tss[cpu].tss_ist4 = (long) np;

	/* Prepare private GDT */
	gdt_segs[GPROC0_SEL].ssd_base = (long) &common_tss[cpu];
	for (x = 0; x < NGDT; x++) {
		if (x != GPROC0_SEL && x != (GPROC0_SEL + 1) &&
		    x != GUSERLDT_SEL && x != (GUSERLDT_SEL + 1))
			ssdtosd(&gdt_segs[x], &gdt[NGDT * cpu + x]);
	}
	ssdtosyssd(&gdt_segs[GPROC0_SEL],
	    (struct system_segment_descriptor *)&gdt[NGDT * cpu + GPROC0_SEL]);
	ap_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	ap_gdt.rd_base = (long) &gdt[NGDT * cpu];
	lgdt(&ap_gdt);			/* does magic intra-segment return */

	/* Get per-cpu data */
	pc = &__pcpu[cpu];

	/* prime data page for it to use */
	pcpu_init(pc, cpu, sizeof(struct pcpu));
	dpcpu_init(dpcpu, cpu);
	pc->pc_apic_id = cpu_apic_ids[cpu];
	pc->pc_prvspace = pc;
	pc->pc_curthread = 0;
	pc->pc_tssp = &common_tss[cpu];
	pc->pc_commontssp = &common_tss[cpu];
	pc->pc_rsp0 = 0;
	pc->pc_tss = (struct system_segment_descriptor *)&gdt[NGDT * cpu +
	    GPROC0_SEL];
	pc->pc_fs32p = &gdt[NGDT * cpu + GUFS32_SEL];
	pc->pc_gs32p = &gdt[NGDT * cpu + GUGS32_SEL];
	pc->pc_ldt = (struct system_segment_descriptor *)&gdt[NGDT * cpu +
	    GUSERLDT_SEL];

	/* Save the per-cpu pointer for use by the NMI handler. */
	np->np_pcpu = (register_t) pc;

	/* Save the per-cpu pointer for use by the DB# handler. */
	np = ((struct nmi_pcpu *) &dbg_stack[PAGE_SIZE]) - 1;
	np->np_pcpu = (register_t) pc;

	wrmsr(MSR_FSBASE, 0);		/* User value */
	wrmsr(MSR_GSBASE, (u_int64_t)pc);
	wrmsr(MSR_KGSBASE, (u_int64_t)pc); /* XXX User value while we're in the kernel */
	fix_cpuid();

	lidt(&r_idt);

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	ltr(gsel_tss);

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);

	/* Set up the fast syscall stuff */
	msr = rdmsr(MSR_EFER) | EFER_SCE;
	wrmsr(MSR_EFER, msr);
	wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall));
	wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
	msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
	    ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
	wrmsr(MSR_STAR, msr);
	wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D);

	/* Disable local APIC just to be sure. */
	lapic_disable();

	/* signal our startup to the BSP. */
	mp_naps++;

	/* Spin until the BSP releases the APs. */
	while (!aps_ready)
		ia32_pause();

	/* Initialize the PAT MSR. */
	pmap_init_pat();

	/* set up CPU registers and state */
	cpu_setregs();

	/* set up SSE/NX */
	initializecpu();

	/* set up FPU state on the AP */
	fpuinit();

	if (cpu_ops.cpu_init)
		cpu_ops.cpu_init();

	/* A quick check from sanity claus */
	cpuid = PCPU_GET(cpuid);
	if (PCPU_GET(apic_id) != lapic_id()) {
		printf("SMP: cpuid = %d\n", cpuid);
		printf("SMP: actual apic_id = %d\n", lapic_id());
		printf("SMP: correct apic_id = %d\n", PCPU_GET(apic_id));
		panic("cpuid mismatch! boom!!");
	}

	/* Initialize curthread. */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	PCPU_SET(curthread, PCPU_GET(idlethread));

	mca_init();

	mtx_lock_spin(&ap_boot_mtx);

	/* Init local APIC for IRQs */
	lapic_setup(1);

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();

	smp_cpus++;

	CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", cpuid);
	printf("SMP: AP CPU #%d Launched!\n", cpuid);

	/* Determine if we are a logical CPU. */
	/* XXX Calculation depends on cpu_logical being a power of 2, e.g. 2 */
	if (cpu_logical > 1 && PCPU_GET(apic_id) % cpu_logical != 0)
		CPU_SET(cpuid, &logical_cpus_mask);

	if (bootverbose)
		lapic_dump("AP");

	if (smp_cpus == mp_ncpus) {
		/* enable IPIs, TLB shootdown, freezes, etc. */
		atomic_store_rel_int(&smp_started, 1);
	}

	/*
	 * Enable the global pages TLB extension.
	 * This also implicitly flushes the TLB.
	 */
	load_cr4(rcr4() | CR4_PGE);
	if (pmap_pcid_enabled)
		load_cr4(rcr4() | CR4_PCIDE);
	load_ds(_udatasel);
	load_es(_udatasel);
	load_fs(_ufssel);
	mtx_unlock_spin(&ap_boot_mtx);

	/* Wait until all the APs are up. */
	while (smp_started == 0)
		ia32_pause();

	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

	sched_throw(NULL);

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}

/*******************************************************************
 * local functions and data
 */

/*
 * We tell the I/O APIC code about all the CPUs we want to receive
 * interrupts.  If we don't want certain CPUs to receive IRQs we
 * can simply not tell the I/O APIC code about them in this function.
 * We also do not tell it about the BSP since it tells itself about
 * the BSP internally to work with UP kernels and on UP machines.
 */
static void
set_interrupt_apic_ids(void)
{
	u_int i, apic_id;

	for (i = 0; i < MAXCPU; i++) {
		apic_id = cpu_apic_ids[i];
		if (apic_id == -1)
			continue;
		if (cpu_info[apic_id].cpu_bsp)
			continue;
		if (cpu_info[apic_id].cpu_disabled)
			continue;

		/* Don't let hyperthreads service interrupts. */
		if (hyperthreading_cpus > 1 &&
		    apic_id % hyperthreading_cpus != 0)
			continue;

		intr_add_cpu(i);
	}
}

/*
 * Assign logical CPU IDs to local APICs.
 */
static void
assign_cpu_ids(void)
{
	u_int i;

	TUNABLE_INT_FETCH("machdep.hyperthreading_allowed",
	    &hyperthreading_allowed);

	/* Check for explicitly disabled CPUs. */
	for (i = 0; i <= MAX_APIC_ID; i++) {
		if (!cpu_info[i].cpu_present || cpu_info[i].cpu_bsp)
			continue;

		if (hyperthreading_cpus > 1 && i % hyperthreading_cpus != 0) {
			cpu_info[i].cpu_hyperthread = 1;

			/*
			 * Don't use HT CPU if it has been disabled by a
			 * tunable.
			 */
			if (hyperthreading_allowed == 0) {
				cpu_info[i].cpu_disabled = 1;
				continue;
			}
		}

		/* Don't use this CPU if it has been disabled by a tunable. */
		if (resource_disabled("lapic", i)) {
			cpu_info[i].cpu_disabled = 1;
			continue;
		}
	}

	if (hyperthreading_allowed == 0 && hyperthreading_cpus > 1) {
		hyperthreading_cpus = 0;
		cpu_logical = 1;
	}

	/*
	 * Assign CPU IDs to local APIC IDs and disable any CPUs
	 * beyond MAXCPU.  CPU 0 is always assigned to the BSP.
	 *
	 * To minimize confusion for userland, we attempt to number
	 * CPUs such that all threads and cores in a package are
	 * grouped together.  For now we assume that the BSP is always
	 * the first thread in a package and just start adding APs
	 * starting with the BSP's APIC ID.
	 */
	mp_ncpus = 1;
	cpu_apic_ids[0] = boot_cpu_id;
	apic_cpuids[boot_cpu_id] = 0;
	for (i = boot_cpu_id + 1; i != boot_cpu_id;
	    i == MAX_APIC_ID ? i = 0 : i++) {
		if (!cpu_info[i].cpu_present || cpu_info[i].cpu_bsp ||
		    cpu_info[i].cpu_disabled)
			continue;

		if (mp_ncpus < MAXCPU) {
			cpu_apic_ids[mp_ncpus] = i;
			apic_cpuids[i] = mp_ncpus;
			mp_ncpus++;
		} else
			cpu_info[i].cpu_disabled = 1;
	}
	KASSERT(mp_maxid >= mp_ncpus - 1,
	    ("%s: counters out of sync: max %d, count %d", __func__, mp_maxid,
	    mp_ncpus));
}
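
/*
 * Example of the wrap-around walk above (hypothetical APIC IDs, for
 * illustration): with boot_cpu_id = 4 and present, enabled APIC IDs
 * {0, 1, 4, 5}, the loop visits 5, 6, ..., MAX_APIC_ID, 0, ..., 3 and
 * assigns cpu1 -> APIC 5, cpu2 -> APIC 0 and cpu3 -> APIC 1, so the IDs
 * that follow the BSP in its package are numbered first.
 */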

/*
 * start each AP in our list
 */
static int
start_all_aps(void)
{
	vm_offset_t va = boot_address + KERNBASE;
	u_int64_t *pt4, *pt3, *pt2;
	u_int32_t mpbioswarmvec;
	int apic_id, cpu, i;
	u_char mpbiosreason;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	/* install the AP 1st level boot code */
	pmap_kenter(va, boot_address);
	pmap_invalidate_page(kernel_pmap, va);
	bcopy(mptramp_start, (void *)va, bootMP_size);

	/* Locate the page tables, they'll be below the trampoline */
	pt4 = (u_int64_t *)(uintptr_t)(mptramp_pagetables + KERNBASE);
	pt3 = pt4 + (PAGE_SIZE) / sizeof(u_int64_t);
	pt2 = pt3 + (PAGE_SIZE) / sizeof(u_int64_t);

	/* Create the initial 1GB replicated page tables */
	for (i = 0; i < 512; i++) {
		/* Each slot of the level 4 pages points to the same level 3 page */
		pt4[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + PAGE_SIZE);
		pt4[i] |= PG_V | PG_RW | PG_U;

		/* Each slot of the level 3 pages points to the same level 2 page */
		pt3[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + (2 * PAGE_SIZE));
		pt3[i] |= PG_V | PG_RW | PG_U;

		/* The level 2 page slots are mapped with 2MB pages for 1GB. */
		pt2[i] = i * (2 * 1024 * 1024);
		pt2[i] |= PG_V | PG_RW | PG_PS | PG_U;
	}

	/* save the current value of the warm-start vector */
	mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);

	/* set up a vector to our boot code */
	*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
	*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

	/* start each AP */
	for (cpu = 1; cpu < mp_ncpus; cpu++) {
		apic_id = cpu_apic_ids[cpu];

		/* allocate and set up an idle stack data page */
		bootstacks[cpu] = (void *)kmem_malloc(kernel_arena,
		    KSTACK_PAGES * PAGE_SIZE, M_WAITOK | M_ZERO);
		doublefault_stack = (char *)kmem_malloc(kernel_arena,
		    PAGE_SIZE, M_WAITOK | M_ZERO);
		nmi_stack = (char *)kmem_malloc(kernel_arena, PAGE_SIZE,
		    M_WAITOK | M_ZERO);
		dbg_stack = (char *)kmem_malloc(kernel_arena, PAGE_SIZE,
		    M_WAITOK | M_ZERO);
		dpcpu = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
		    M_WAITOK | M_ZERO);

		bootSTK = (char *)bootstacks[cpu] + KSTACK_PAGES * PAGE_SIZE - 8;
		bootAP = cpu;

		/* attempt to start the Application Processor */
		if (!start_ap(apic_id)) {
			/* restore the warmstart vector */
			*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;
			panic("AP #%d (PHY# %d) failed!", cpu, apic_id);
		}

		CPU_SET(cpu, &all_cpus);	/* record AP in CPU map */
	}

	/* restore the warmstart vector */
	*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;

	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);

	/* number of APs actually started */
	return mp_naps;
}

/*
 * This function starts the AP (application processor) identified
 * by the APIC ID 'apic_id'.  It does quite a "song and dance"
 * to accomplish this.  This is necessary because of the nuances
 * of the different hardware we might encounter.  It isn't pretty,
 * but it seems to work.
 */
static int
start_ap(int apic_id)
{
	int vector, ms;
	int cpus;

	/* calculate the vector */
	vector = (boot_address >> 12) & 0xff;

	/* used as a watchpoint to signal AP startup */
	cpus = mp_naps;

	ipi_startup(apic_id, vector);

	/* Wait up to 5 seconds for it to start. */
	for (ms = 0; ms < 5000; ms++) {
		if (mp_naps > cpus)
			return 1;	/* return SUCCESS */
		DELAY(1000);
	}
	return 0;		/* return FAILURE */
}
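
/*
 * For illustration: the STARTUP IPI vector encodes the physical page the
 * AP begins executing at, i.e. the AP starts in real mode at
 * CS:IP = (vector << 8):0000, physical address vector << 12.  With the
 * hypothetical boot_address of 0x9f000 used in an example above, the
 * vector is 0x9f and the AP enters the trampoline at 0x9f000.
 */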

#ifdef COUNT_XINVLTLB_HITS
u_int xhits_gbl[MAXCPU];
u_int xhits_pg[MAXCPU];
u_int xhits_rng[MAXCPU];
static SYSCTL_NODE(_debug, OID_AUTO, xhits, CTLFLAG_RW, 0, "");
SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, global, CTLFLAG_RW, &xhits_gbl,
    sizeof(xhits_gbl), "IU", "");
SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, page, CTLFLAG_RW, &xhits_pg,
    sizeof(xhits_pg), "IU", "");
SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, range, CTLFLAG_RW, &xhits_rng,
    sizeof(xhits_rng), "IU", "");

u_int ipi_global;
u_int ipi_page;
u_int ipi_range;
u_int ipi_range_size;
SYSCTL_UINT(_debug_xhits, OID_AUTO, ipi_global, CTLFLAG_RW, &ipi_global, 0, "");
SYSCTL_UINT(_debug_xhits, OID_AUTO, ipi_page, CTLFLAG_RW, &ipi_page, 0, "");
SYSCTL_UINT(_debug_xhits, OID_AUTO, ipi_range, CTLFLAG_RW, &ipi_range, 0, "");
SYSCTL_UINT(_debug_xhits, OID_AUTO, ipi_range_size, CTLFLAG_RW,
    &ipi_range_size, 0, "");

u_int ipi_masked_global;
u_int ipi_masked_page;
u_int ipi_masked_range;
u_int ipi_masked_range_size;
SYSCTL_UINT(_debug_xhits, OID_AUTO, ipi_masked_global, CTLFLAG_RW,
    &ipi_masked_global, 0, "");
SYSCTL_UINT(_debug_xhits, OID_AUTO, ipi_masked_page, CTLFLAG_RW,
    &ipi_masked_page, 0, "");
SYSCTL_UINT(_debug_xhits, OID_AUTO, ipi_masked_range, CTLFLAG_RW,
    &ipi_masked_range, 0, "");
SYSCTL_UINT(_debug_xhits, OID_AUTO, ipi_masked_range_size, CTLFLAG_RW,
    &ipi_masked_range_size, 0, "");
#endif /* COUNT_XINVLTLB_HITS */

/*
 * Init and startup IPI.
 */
void
ipi_startup(int apic_id, int vector)
{

	/*
	 * This attempts to follow the algorithm described in the
	 * Intel Multiprocessor Specification v1.4 in section B.4.
	 * For each IPI, we allow the local APIC ~20us to deliver the
	 * IPI.  If that times out, we panic.
	 */

	/*
	 * first we do an INIT IPI: this INIT IPI might be run, resetting
	 * and running the target CPU.  OR this INIT IPI might be latched (P5
	 * bug), leaving the CPU waiting for the STARTUP IPI.  OR this INIT
	 * IPI might be ignored.
	 */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_LEVEL |
	    APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, apic_id);
	lapic_ipi_wait(100);

	/* Explicitly deassert the INIT IPI. */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_LEVEL |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT,
	    apic_id);

	DELAY(10000);		/* wait ~10ms */

	/*
	 * next we do a STARTUP IPI: the previous INIT IPI might still be
	 * latched, (P5 bug) this 1st STARTUP would then terminate
	 * immediately, and the previously started INIT IPI would continue.  OR
	 * the previous INIT IPI has already run, and this STARTUP IPI will
	 * run.  OR the previous INIT IPI was ignored, and this STARTUP IPI
	 * will run.
	 */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
	    vector, apic_id);
	if (!lapic_ipi_wait(100))
		panic("Failed to deliver first STARTUP IPI to APIC %d",
		    apic_id);
	DELAY(200);		/* wait ~200us */

	/*
	 * finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run IF
	 * the previous STARTUP IPI was cancelled by a latched INIT IPI.  OR
	 * this STARTUP IPI will be ignored, as only ONE STARTUP IPI is
	 * recognized after hardware RESET or INIT IPI.
	 */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
	    vector, apic_id);
	if (!lapic_ipi_wait(100))
		panic("Failed to deliver second STARTUP IPI to APIC %d",
		    apic_id);

	DELAY(200);		/* wait ~200us */
}

/*
 * Send an IPI to the specified CPU, handling the bitmap logic.
 */
static void
ipi_send_cpu(int cpu, u_int ipi)
{
	u_int bitmap, old_pending, new_pending;

	KASSERT(cpu_apic_ids[cpu] != -1, ("IPI to non-existent CPU %d", cpu));

	if (IPI_IS_BITMAPED(ipi)) {
		bitmap = 1 << ipi;
		ipi = IPI_BITMAP_VECTOR;
		do {
			old_pending = cpu_ipi_pending[cpu];
			new_pending = old_pending | bitmap;
		} while (!atomic_cmpset_int(&cpu_ipi_pending[cpu],
		    old_pending, new_pending));
		if (old_pending)
			return;
	}
	cpu_ops.ipi_vectored(ipi, cpu_apic_ids[cpu]);
}
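
/*
 * Illustrative scenario for the bitmap path above (not from the original
 * source): if IPI_PREEMPT and IPI_AST are posted back to back to one
 * CPU, the first caller sees old_pending == 0 and sends a single
 * IPI_BITMAP_VECTOR interrupt; the second only ORs its bit into
 * cpu_ipi_pending and returns.  ipi_bitmap_handler() then services both
 * bits from that one interrupt.
 */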

/*
 * Flush the TLB on all other CPUs.
 */
static void
smp_tlb_shootdown(u_int vector, pmap_t pmap, vm_offset_t addr1,
    vm_offset_t addr2)
{
	u_int ncpu;

	ncpu = mp_ncpus - 1;	/* does not shootdown self */
	if (ncpu < 1)
		return;		/* no other cpus */
	if (!(read_rflags() & PSL_I))
		panic("%s: interrupts disabled", __func__);
	mtx_lock_spin(&smp_ipi_mtx);
	smp_tlb_invpcid.addr = addr1;
	if (pmap == NULL) {
		smp_tlb_invpcid.pcid = 0;
	} else {
		smp_tlb_invpcid.pcid = pmap->pm_pcid;
		pcid_cr3 = pmap->pm_cr3;
	}
	smp_tlb_addr2 = addr2;
	smp_tlb_pmap = pmap;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	ipi_all_but_self(vector);
	while (smp_tlb_wait < ncpu)
		ia32_pause();
	mtx_unlock_spin(&smp_ipi_mtx);
}
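
/*
 * Illustrative timeline for the handshake above (not from the original
 * source): with 4 CPUs, the caller publishes the shootdown arguments,
 * zeroes smp_tlb_wait and sends the vector to the 3 other CPUs; each
 * handler (e.g. invltlb_handler() below) invalidates locally and does
 * atomic_add_int(&smp_tlb_wait, 1), so the caller's spin exits once
 * smp_tlb_wait reaches ncpu == 3, with smp_ipi_mtx held throughout to
 * serialize shootdowns.
 */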

static void
smp_targeted_tlb_shootdown(cpuset_t mask, u_int vector, pmap_t pmap,
    vm_offset_t addr1, vm_offset_t addr2)
{
	int cpu, ncpu, othercpus;

	othercpus = mp_ncpus - 1;
	if (CPU_ISFULLSET(&mask)) {
		if (othercpus < 1)
			return;
	} else {
		CPU_CLR(PCPU_GET(cpuid), &mask);
		if (CPU_EMPTY(&mask))
			return;
	}
	if (!(read_rflags() & PSL_I))
		panic("%s: interrupts disabled", __func__);
	mtx_lock_spin(&smp_ipi_mtx);
	smp_tlb_invpcid.addr = addr1;
	if (pmap == NULL) {
		smp_tlb_invpcid.pcid = 0;
	} else {
		smp_tlb_invpcid.pcid = pmap->pm_pcid;
		pcid_cr3 = pmap->pm_cr3;
	}
	smp_tlb_addr2 = addr2;
	smp_tlb_pmap = pmap;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	if (CPU_ISFULLSET(&mask)) {
		ncpu = othercpus;
		ipi_all_but_self(vector);
	} else {
		ncpu = 0;
		while ((cpu = CPU_FFS(&mask)) != 0) {
			cpu--;
			CPU_CLR(cpu, &mask);
			CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__,
			    cpu, vector);
			ipi_send_cpu(cpu, vector);
			ncpu++;
		}
	}
	while (smp_tlb_wait < ncpu)
		ia32_pause();
	mtx_unlock_spin(&smp_ipi_mtx);
}

void
smp_cache_flush(void)
{

	if (smp_started)
		smp_tlb_shootdown(IPI_INVLCACHE, NULL, 0, 0);
}

void
smp_invltlb(pmap_t pmap)
{

	if (smp_started) {
		smp_tlb_shootdown(IPI_INVLTLB, pmap, 0, 0);
#ifdef COUNT_XINVLTLB_HITS
		ipi_global++;
#endif
	}
}

void
smp_invlpg(pmap_t pmap, vm_offset_t addr)
{

	if (smp_started) {
		smp_tlb_shootdown(IPI_INVLPG, pmap, addr, 0);
#ifdef COUNT_XINVLTLB_HITS
		ipi_page++;
#endif
	}
}

void
smp_invlpg_range(pmap_t pmap, vm_offset_t addr1, vm_offset_t addr2)
{

	if (smp_started) {
		smp_tlb_shootdown(IPI_INVLRNG, pmap, addr1, addr2);
#ifdef COUNT_XINVLTLB_HITS
		ipi_range++;
		ipi_range_size += (addr2 - addr1) / PAGE_SIZE;
#endif
	}
}

void
smp_masked_invltlb(cpuset_t mask, pmap_t pmap)
{

	if (smp_started) {
		smp_targeted_tlb_shootdown(mask, IPI_INVLTLB, pmap, 0, 0);
#ifdef COUNT_XINVLTLB_HITS
		ipi_masked_global++;
#endif
	}
}

void
smp_masked_invlpg(cpuset_t mask, pmap_t pmap, vm_offset_t addr)
{

	if (smp_started) {
		smp_targeted_tlb_shootdown(mask, IPI_INVLPG, pmap, addr, 0);
#ifdef COUNT_XINVLTLB_HITS
		ipi_masked_page++;
#endif
	}
}

void
smp_masked_invlpg_range(cpuset_t mask, pmap_t pmap, vm_offset_t addr1,
    vm_offset_t addr2)
{

	if (smp_started) {
		smp_targeted_tlb_shootdown(mask, IPI_INVLRNG, pmap, addr1,
		    addr2);
#ifdef COUNT_XINVLTLB_HITS
		ipi_masked_range++;
		ipi_masked_range_size += (addr2 - addr1) / PAGE_SIZE;
#endif
	}
}

void
ipi_bitmap_handler(struct trapframe frame)
{
	struct trapframe *oldframe;
	struct thread *td;
	int cpu = PCPU_GET(cpuid);
	u_int ipi_bitmap;

	critical_enter();
	td = curthread;
	td->td_intr_nesting_level++;
	oldframe = td->td_intr_frame;
	td->td_intr_frame = &frame;
	ipi_bitmap = atomic_readandclear_int(&cpu_ipi_pending[cpu]);
	if (ipi_bitmap & (1 << IPI_PREEMPT)) {
#ifdef COUNT_IPIS
		(*ipi_preempt_counts[cpu])++;
#endif
		sched_preempt(td);
	}
	if (ipi_bitmap & (1 << IPI_AST)) {
#ifdef COUNT_IPIS
		(*ipi_ast_counts[cpu])++;
#endif
		/* Nothing to do for AST */
	}
	if (ipi_bitmap & (1 << IPI_HARDCLOCK)) {
#ifdef COUNT_IPIS
		(*ipi_hardclock_counts[cpu])++;
#endif
		hardclockintr();
	}
	td->td_intr_frame = oldframe;
	td->td_intr_nesting_level--;
	critical_exit();
}

/*
 * Send an IPI to a set of CPUs.
 */
void
ipi_selected(cpuset_t cpus, u_int ipi)
{
	int cpu;

	/*
	 * IPI_STOP_HARD maps to an NMI and the trap handler needs a bit
	 * of help in order to understand what the source is.
	 * Set the mask of receiving CPUs for this purpose.
	 */
	if (ipi == IPI_STOP_HARD)
		CPU_OR_ATOMIC(&ipi_nmi_pending, &cpus);

	while ((cpu = CPU_FFS(&cpus)) != 0) {
		cpu--;
		CPU_CLR(cpu, &cpus);
		CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
		ipi_send_cpu(cpu, ipi);
	}
}

/*
 * Send an IPI to a specific CPU.
 */
void
ipi_cpu(int cpu, u_int ipi)
{

	/*
	 * IPI_STOP_HARD maps to an NMI and the trap handler needs a bit
	 * of help in order to understand what the source is.
	 * Set the mask of receiving CPUs for this purpose.
	 */
	if (ipi == IPI_STOP_HARD)
		CPU_SET_ATOMIC(cpu, &ipi_nmi_pending);

	CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
	ipi_send_cpu(cpu, ipi);
}

/*
 * Send an IPI to all CPUs EXCEPT myself.
 */
void
ipi_all_but_self(u_int ipi)
{
	cpuset_t other_cpus;

	other_cpus = all_cpus;
	CPU_CLR(PCPU_GET(cpuid), &other_cpus);

	if (IPI_IS_BITMAPED(ipi)) {
		ipi_selected(other_cpus, ipi);
		return;
	}

	/*
	 * IPI_STOP_HARD maps to an NMI and the trap handler needs a bit
	 * of help in order to understand what the source is.
	 * Set the mask of receiving CPUs for this purpose.
	 */
	if (ipi == IPI_STOP_HARD)
		CPU_OR_ATOMIC(&ipi_nmi_pending, &other_cpus);

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	cpu_ops.ipi_vectored(ipi, APIC_IPI_DEST_OTHERS);
}

int
ipi_nmi_handler()
{
	u_int cpuid;

	/*
	 * As long as there is not a simple way to know about an NMI's
	 * source, if the bit for the current CPU is set in the global
	 * pending bitword, an IPI_STOP_HARD has been issued and should
	 * be handled.
	 */
	cpuid = PCPU_GET(cpuid);
	if (!CPU_ISSET(cpuid, &ipi_nmi_pending))
		return (1);

	CPU_CLR_ATOMIC(cpuid, &ipi_nmi_pending);
	cpustop_handler();
	return (0);
}

/*
 * Handle an IPI_STOP by saving our current context and spinning until we
 * are resumed.
 */
void
cpustop_handler(void)
{
	u_int cpu;

	cpu = PCPU_GET(cpuid);

	savectx(&stoppcbs[cpu]);

	/* Indicate that we are stopped */
	CPU_SET_ATOMIC(cpu, &stopped_cpus);

	/* Wait for restart */
	while (!CPU_ISSET(cpu, &started_cpus))
		ia32_pause();

	CPU_CLR_ATOMIC(cpu, &started_cpus);
	CPU_CLR_ATOMIC(cpu, &stopped_cpus);

#ifdef DDB
	amd64_db_resume_dbreg();
#endif

	if (cpu == 0 && cpustop_restartfunc != NULL) {
		cpustop_restartfunc();
		cpustop_restartfunc = NULL;
	}
}

/*
 * Handle an IPI_SUSPEND by saving our current context and spinning until we
 * are resumed.
 */
void
cpususpend_handler(void)
{
	u_int cpu;

	mtx_assert(&smp_ipi_mtx, MA_NOTOWNED);

	cpu = PCPU_GET(cpuid);
	if (savectx(&susppcbs[cpu]->sp_pcb)) {
		fpususpend(susppcbs[cpu]->sp_fpususpend);
		wbinvd();
		CPU_SET_ATOMIC(cpu, &suspended_cpus);
	} else {
		fpuresume(susppcbs[cpu]->sp_fpususpend);
		pmap_init_pat();
		initializecpu();
		PCPU_SET(switchtime, 0);
		PCPU_SET(switchticks, ticks);

		/* Indicate that we are resumed */
		CPU_CLR_ATOMIC(cpu, &suspended_cpus);
	}

	/* Wait for resume */
	while (!CPU_ISSET(cpu, &started_cpus))
		ia32_pause();

	if (cpu_ops.cpu_resume)
		cpu_ops.cpu_resume();
	if (vmm_resume_p)
		vmm_resume_p();

	/* Resume MCA and local APIC */
	mca_resume();
	lapic_setup(0);

	CPU_CLR_ATOMIC(cpu, &started_cpus);
	/* Indicate that we are resumed */
	CPU_CLR_ATOMIC(cpu, &suspended_cpus);
}

/*
 * Handlers for TLB related IPIs
 */
void
invltlb_handler(void)
{
#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	invltlb();
	atomic_add_int(&smp_tlb_wait, 1);
}

void
invltlb_pcid_handler(void)
{
	uint64_t cr3;
	u_int cpuid;
#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	if (smp_tlb_invpcid.pcid != (uint64_t)-1 &&
	    smp_tlb_invpcid.pcid != 0) {
		if (invpcid_works) {
			invpcid(&smp_tlb_invpcid, INVPCID_CTX);
		} else {
			/* Otherwise reload %cr3 twice. */
			cr3 = rcr3();
			if (cr3 != pcid_cr3) {
				load_cr3(pcid_cr3);
				cr3 |= CR3_PCID_SAVE;
			}
			load_cr3(cr3);
		}
	} else {
		invltlb_globpcid();
	}
	if (smp_tlb_pmap != NULL) {
		cpuid = PCPU_GET(cpuid);
		if (!CPU_ISSET(cpuid, &smp_tlb_pmap->pm_active))
			CPU_CLR_ATOMIC(cpuid, &smp_tlb_pmap->pm_save);
	}

	atomic_add_int(&smp_tlb_wait, 1);
}

void
invlpg_handler(void)
{
#ifdef COUNT_XINVLTLB_HITS
	xhits_pg[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	invlpg(smp_tlb_invpcid.addr);
	atomic_add_int(&smp_tlb_wait, 1);
}

void
invlpg_pcid_handler(void)
{
	uint64_t cr3;
#ifdef COUNT_XINVLTLB_HITS
	xhits_pg[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	if (smp_tlb_invpcid.pcid == (uint64_t)-1) {
		invltlb_globpcid();
	} else if (smp_tlb_invpcid.pcid == 0) {
		invlpg(smp_tlb_invpcid.addr);
	} else if (invpcid_works) {
		invpcid(&smp_tlb_invpcid, INVPCID_ADDR);
	} else {
		/*
		 * PCID supported, but INVPCID is not.
		 * Temporarily switch to the target address
		 * space and do INVLPG.
		 */
		cr3 = rcr3();
		if (cr3 != pcid_cr3)
			load_cr3(pcid_cr3 | CR3_PCID_SAVE);
		invlpg(smp_tlb_invpcid.addr);
		load_cr3(cr3 | CR3_PCID_SAVE);
	}

	atomic_add_int(&smp_tlb_wait, 1);
}

static inline void
invlpg_range(vm_offset_t start, vm_offset_t end)
{

	do {
		invlpg(start);
		start += PAGE_SIZE;
	} while (start < end);
}

void
invlrng_handler(void)
{
	struct invpcid_descr d;
	vm_offset_t addr;
	uint64_t cr3;
	u_int cpuid;
#ifdef COUNT_XINVLTLB_HITS
	xhits_rng[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	addr = smp_tlb_invpcid.addr;
	if (pmap_pcid_enabled) {
		if (smp_tlb_invpcid.pcid == 0) {
			/*
			 * kernel pmap - use invlpg to invalidate
			 * global mapping.
			 */
			invlpg_range(addr, smp_tlb_addr2);
		} else if (smp_tlb_invpcid.pcid == (uint64_t)-1) {
			invltlb_globpcid();
			if (smp_tlb_pmap != NULL) {
				cpuid = PCPU_GET(cpuid);
				if (!CPU_ISSET(cpuid, &smp_tlb_pmap->pm_active))
					CPU_CLR_ATOMIC(cpuid,
					    &smp_tlb_pmap->pm_save);
			}
		} else if (invpcid_works) {
			d = smp_tlb_invpcid;
			do {
				invpcid(&d, INVPCID_ADDR);
				d.addr += PAGE_SIZE;
			} while (d.addr <= smp_tlb_addr2);
		} else {
			cr3 = rcr3();
			if (cr3 != pcid_cr3)
				load_cr3(pcid_cr3 | CR3_PCID_SAVE);
			invlpg_range(addr, smp_tlb_addr2);
			load_cr3(cr3 | CR3_PCID_SAVE);
		}
	} else {
		invlpg_range(addr, smp_tlb_addr2);
	}

	atomic_add_int(&smp_tlb_wait, 1);
}

void
invlcache_handler(void)
{
#ifdef COUNT_IPIS
	(*ipi_invlcache_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	wbinvd();
	atomic_add_int(&smp_tlb_wait, 1);
}

/*
 * This is called once the rest of the system is up and running and we're
 * ready to let the APs out of the pen.
 */
static void
release_aps(void *dummy __unused)
{

	if (mp_ncpus == 1)
		return;
	atomic_store_rel_int(&aps_ready, 1);
	while (smp_started == 0)
		ia32_pause();
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);

#ifdef COUNT_IPIS
/*
 * Set up interrupt counters for IPI handlers.
 */
static void
mp_ipi_intrcnt(void *dummy)
{
	char buf[64];
	int i;

	CPU_FOREACH(i) {
		snprintf(buf, sizeof(buf), "cpu%d:invltlb", i);
		intrcnt_add(buf, &ipi_invltlb_counts[i]);
		snprintf(buf, sizeof(buf), "cpu%d:invlrng", i);
		intrcnt_add(buf, &ipi_invlrng_counts[i]);
		snprintf(buf, sizeof(buf), "cpu%d:invlpg", i);
		intrcnt_add(buf, &ipi_invlpg_counts[i]);
		snprintf(buf, sizeof(buf), "cpu%d:invlcache", i);
		intrcnt_add(buf, &ipi_invlcache_counts[i]);
		snprintf(buf, sizeof(buf), "cpu%d:preempt", i);
		intrcnt_add(buf, &ipi_preempt_counts[i]);
		snprintf(buf, sizeof(buf), "cpu%d:ast", i);
		intrcnt_add(buf, &ipi_ast_counts[i]);
		snprintf(buf, sizeof(buf), "cpu%d:rendezvous", i);
		intrcnt_add(buf, &ipi_rendezvous_counts[i]);
		snprintf(buf, sizeof(buf), "cpu%d:hardclock", i);
		intrcnt_add(buf, &ipi_hardclock_counts[i]);
	}
}
SYSINIT(mp_ipi_intrcnt, SI_SUB_INTR, SI_ORDER_MIDDLE, mp_ipi_intrcnt, NULL);
#endif