/*-
 * Copyright (c) 1996, by Steve Passe
 * Copyright (c) 2003, by Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_sched.h"
#include "opt_smp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#ifdef GPROF
#include <sys/gmon.h>
#endif
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <x86/apicreg.h>
#include <machine/clock.h>
#include <machine/cputypes.h>
#include <machine/cpufunc.h>
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/tss.h>
#include <machine/cpu.h>

#define WARMBOOT_TARGET		0
#define WARMBOOT_OFF		(KERNBASE + 0x0467)
#define WARMBOOT_SEG		(KERNBASE + 0x0469)

#define CMOS_REG		(0x70)
#define CMOS_DATA		(0x71)
#define BIOS_RESET		(0x0f)
#define BIOS_WARM		(0x0a)
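
/*
 * WARMBOOT_OFF/WARMBOOT_SEG are the kernel-mapped addresses of the BIOS
 * warm-boot vector at physical 40:67 (offset, then segment).  Writing
 * BIOS_WARM into the CMOS shutdown status byte (BIOS_RESET, offset 0x0f)
 * tells the BIOS to jump through that vector after an INIT rather than
 * performing a full POST, which is how the AP trampoline gains control
 * on hardware that routes APs through the BIOS reset path.
 */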

/* lock region used by kernel profiling */
int	mcount_lock;

int	mp_naps;		/* # of Application Processors */
int	boot_cpu_id = -1;	/* designated BSP */

extern	struct pcpu __pcpu[];

/* AP uses this during bootstrap.  Do not staticize. */
char *bootSTK;
static int bootAP;

/* Free these after use */
void *bootstacks[MAXCPU];

/* Temporary variables for init_secondary() */
char *doublefault_stack;
char *nmi_stack;
char *dbg_stack;
void *dpcpu;

struct pcb stoppcbs[MAXCPU];
struct susppcb **susppcbs;

/* Variables needed for SMP tlb shootdown. */
vm_offset_t smp_tlb_addr2;
struct invpcid_descr smp_tlb_invpcid;
volatile int smp_tlb_wait;
uint64_t pcid_cr3;
pmap_t smp_tlb_pmap;
extern int invpcid_works;

#ifdef COUNT_IPIS
/* Interrupt counts. */
static u_long *ipi_preempt_counts[MAXCPU];
static u_long *ipi_ast_counts[MAXCPU];
u_long *ipi_invltlb_counts[MAXCPU];
u_long *ipi_invlrng_counts[MAXCPU];
u_long *ipi_invlpg_counts[MAXCPU];
u_long *ipi_invlcache_counts[MAXCPU];
u_long *ipi_rendezvous_counts[MAXCPU];
static u_long *ipi_hardclock_counts[MAXCPU];
#endif

/* Default cpu_ops implementation. */
struct cpu_ops cpu_ops = {
	.ipi_vectored = lapic_ipi_vectored
};

extern inthand_t IDTVEC(fast_syscall), IDTVEC(fast_syscall32);

extern int pmap_pcid_enabled;

/*
 * Local data and functions.
 */

static volatile cpuset_t ipi_nmi_pending;

volatile cpuset_t resuming_cpus;
volatile cpuset_t toresume_cpus;
/* Used to hold the APs until we are ready to release them. */
static struct mtx ap_boot_mtx;

/* Set to 1 once we're ready to let the APs out of the pen. */
static volatile int aps_ready = 0;

/*
 * Store data from cpu_add() until later in the boot when we actually setup
 * the APs.
 */
struct cpu_info {
	int	cpu_present:1;
	int	cpu_bsp:1;
	int	cpu_disabled:1;
	int	cpu_hyperthread:1;
} static cpu_info[MAX_APIC_ID + 1];
int cpu_apic_ids[MAXCPU];
int apic_cpuids[MAX_APIC_ID + 1];

/* Holds pending bitmap based IPIs per CPU */
volatile u_int cpu_ipi_pending[MAXCPU];

static u_int boot_address;
static int cpu_logical;			/* logical cpus per core */
static int cpu_cores;			/* cores per package */

static void	assign_cpu_ids(void);
static void	set_interrupt_apic_ids(void);
static int	start_all_aps(void);
static int	start_ap(int apic_id);
static void	release_aps(void *dummy);

static u_int	hyperthreading_cpus;	/* logical cpus sharing L1 cache */
static int	hyperthreading_allowed = 1;
static u_int	bootMP_size;

static void
mem_range_AP_init(void)
{

	if (mem_range_softc.mr_op && mem_range_softc.mr_op->initAP)
		mem_range_softc.mr_op->initAP(&mem_range_softc);
}

static void
topo_probe_amd(void)
{
	int core_id_bits;
	int id;

	/* AMD processors do not support HTT. */
	cpu_logical = 1;

	if ((amd_feature2 & AMDID2_CMP) == 0) {
		cpu_cores = 1;
		return;
	}

	core_id_bits = (cpu_procinfo2 & AMDID_COREID_SIZE) >>
	    AMDID_COREID_SIZE_SHIFT;
	if (core_id_bits == 0) {
		cpu_cores = (cpu_procinfo2 & AMDID_CMP_CORES) + 1;
		return;
	}

	/* Fam 10h and newer should get here. */
	for (id = 0; id <= MAX_APIC_ID; id++) {
		/* Check logical CPU availability. */
		if (!cpu_info[id].cpu_present || cpu_info[id].cpu_disabled)
			continue;
		/* Check if logical CPU has the same package ID. */
		if ((id >> core_id_bits) != (boot_cpu_id >> core_id_bits))
			continue;
		cpu_cores++;
	}
}

/*
 * Round up to the next power of two, if necessary, and then
 * take log2.
 * Returns -1 if argument is zero.
 */
static __inline int
mask_width(u_int x)
{

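	/*
	 * powerof2(x) is 1 for exact powers of two (and for zero), so
	 * the shift below is 0 in that case and 1 otherwise, rounding x
	 * up to the next power of two before fls() takes the log2.
	 * E.g. 1 -> 0, 2 -> 1, 3 -> 2, 4 -> 2, 6 -> 3; 0 -> -1 because
	 * fls(0) == 0.
	 */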
	return (fls(x << (1 - powerof2(x))) - 1);
}

static void
topo_probe_0x4(void)
{
	u_int p[4];
	int pkg_id_bits;
	int core_id_bits;
	int max_cores;
	int max_logical;
	int id;

	/* Both zero and one here mean one logical processor per package. */
	max_logical = (cpu_feature & CPUID_HTT) != 0 ?
	    (cpu_procinfo & CPUID_HTT_CORES) >> 16 : 1;
	if (max_logical <= 1)
		return;

	/*
	 * Because of uniformity assumption we examine only
	 * those logical processors that belong to the same
	 * package as BSP.  Further, we count number of
	 * logical processors that belong to the same core
	 * as BSP thus deducing number of threads per core.
	 */
	if (cpu_high >= 0x4) {
		cpuid_count(0x04, 0, p);
		max_cores = ((p[0] >> 26) & 0x3f) + 1;
	} else
		max_cores = 1;
	core_id_bits = mask_width(max_logical/max_cores);
	if (core_id_bits < 0)
		return;
	pkg_id_bits = core_id_bits + mask_width(max_cores);

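	/*
	 * The initial APIC ID is laid out, low to high, as core_id_bits
	 * of SMT ID, then (pkg_id_bits - core_id_bits) of core ID, with
	 * the package ID in the remaining high bits.  Shifting an APIC
	 * ID right by pkg_id_bits (or core_id_bits) therefore yields
	 * the package (or package+core) the logical CPU belongs to.
	 */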
	for (id = 0; id <= MAX_APIC_ID; id++) {
		/* Check logical CPU availability. */
		if (!cpu_info[id].cpu_present || cpu_info[id].cpu_disabled)
			continue;
		/* Check if logical CPU has the same package ID. */
		if ((id >> pkg_id_bits) != (boot_cpu_id >> pkg_id_bits))
			continue;
		cpu_cores++;
		/* Check if logical CPU has the same package and core IDs. */
		if ((id >> core_id_bits) == (boot_cpu_id >> core_id_bits))
			cpu_logical++;
	}

	KASSERT(cpu_cores >= 1 && cpu_logical >= 1,
	    ("topo_probe_0x4 couldn't find BSP"));

	cpu_cores /= cpu_logical;
	hyperthreading_cpus = cpu_logical;
}

static void
topo_probe_0xb(void)
{
	u_int p[4];
	int bits;
	int cnt;
	int i;
	int logical;
	int type;
	int x;

	/* We only support three levels for now. */
	for (i = 0; i < 3; i++) {
		cpuid_count(0x0b, i, p);

		/* Fall back if CPU leaf 11 doesn't really exist. */
		if (i == 0 && p[1] == 0) {
			topo_probe_0x4();
			return;
		}

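		/*
		 * For CPUID leaf 0x0b, EAX[4:0] is the number of APIC ID
		 * bits to shift right to reach the next level's ID,
		 * EBX[15:0] is the number of logical processors at this
		 * level, and ECX[15:8] is the level type (1 = SMT,
		 * 2 = core).
		 */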
		bits = p[0] & 0x1f;
		logical = p[1] &= 0xffff;
		type = (p[2] >> 8) & 0xff;
		if (type == 0 || logical == 0)
			break;
		/*
		 * Because of uniformity assumption we examine only
		 * those logical processors that belong to the same
		 * package as BSP.
		 */
		for (cnt = 0, x = 0; x <= MAX_APIC_ID; x++) {
			if (!cpu_info[x].cpu_present ||
			    cpu_info[x].cpu_disabled)
				continue;
			if (x >> bits == boot_cpu_id >> bits)
				cnt++;
		}
		if (type == CPUID_TYPE_SMT)
			cpu_logical = cnt;
		else if (type == CPUID_TYPE_CORE)
			cpu_cores = cnt;
	}
	if (cpu_logical == 0)
		cpu_logical = 1;
	cpu_cores /= cpu_logical;
}

/*
 * Both topology discovery code and code that consumes topology
 * information assume top-down uniformity of the topology.
 * That is, all physical packages must be identical and each
 * core in a package must have the same number of threads.
 * Topology information is queried only on BSP, on which this
 * code runs and for which it can query CPUID information.
 * Then topology is extrapolated on all packages using the
 * uniformity assumption.
 */
static void
topo_probe(void)
{
	static int cpu_topo_probed = 0;

	if (cpu_topo_probed)
		return;

	CPU_ZERO(&logical_cpus_mask);
	if (mp_ncpus <= 1)
		cpu_cores = cpu_logical = 1;
	else if (cpu_vendor_id == CPU_VENDOR_AMD)
		topo_probe_amd();
	else if (cpu_vendor_id == CPU_VENDOR_INTEL) {
		/*
		 * See Intel(R) 64 Architecture Processor
		 * Topology Enumeration article for details.
		 *
		 * Note that 0x1 <= cpu_high < 4 case should be
		 * compatible with topo_probe_0x4() logic when
		 * CPUID.1:EBX[23:16] > 0 (cpu_cores will be 1)
		 * or it should trigger the fallback otherwise.
		 */
		if (cpu_high >= 0xb)
			topo_probe_0xb();
		else if (cpu_high >= 0x1)
			topo_probe_0x4();
	}

	/*
	 * Fallback: assume each logical CPU is in separate
	 * physical package.  That is, no multi-core, no SMT.
	 */
	if (cpu_cores == 0 || cpu_logical == 0)
		cpu_cores = cpu_logical = 1;
	cpu_topo_probed = 1;
}

struct cpu_group *
cpu_topo(void)
{
	int cg_flags;

	/*
	 * Determine whether any threading flags are
	 * necessary.
	 */
	topo_probe();
	if (cpu_logical > 1 && hyperthreading_cpus)
		cg_flags = CG_FLAG_HTT;
	else if (cpu_logical > 1)
		cg_flags = CG_FLAG_SMT;
	else
		cg_flags = 0;
	if (mp_ncpus % (cpu_cores * cpu_logical) != 0) {
		printf("WARNING: Non-uniform processors.\n");
		printf("WARNING: Using suboptimal topology.\n");
		return (smp_topo_none());
	}
	/*
	 * No multi-core or hyper-threaded.
	 */
	if (cpu_logical * cpu_cores == 1)
		return (smp_topo_none());
	/*
	 * Only HTT no multi-core.
	 */
	if (cpu_logical > 1 && cpu_cores == 1)
		return (smp_topo_1level(CG_SHARE_L1, cpu_logical, cg_flags));
	/*
	 * Only multi-core no HTT.
	 */
	if (cpu_cores > 1 && cpu_logical == 1)
		return (smp_topo_1level(CG_SHARE_L2, cpu_cores, cg_flags));
	/*
	 * Both HTT and multi-core.
	 */
	return (smp_topo_2level(CG_SHARE_L2, cpu_cores,
	    CG_SHARE_L1, cpu_logical, cg_flags));
}

/*
 * Calculate usable address in base memory for AP trampoline code.
 */
u_int
mp_bootaddress(u_int basemem)
{

	bootMP_size = mptramp_end - mptramp_start;
	boot_address = trunc_page(basemem * 1024); /* round down to 4k boundary */
	if (((basemem * 1024) - boot_address) < bootMP_size)
		boot_address -= PAGE_SIZE;	/* not enough, lower by 4k */
	/* 3 levels of page table pages */
	mptramp_pagetables = boot_address - (PAGE_SIZE * 3);
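
	/*
	 * The resulting low-memory layout is three consecutive page
	 * table pages (PML4, PDP, PD) starting at mptramp_pagetables,
	 * followed immediately by the trampoline code at boot_address.
	 */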

	return mptramp_pagetables;
}

void
cpu_add(u_int apic_id, char boot_cpu)
{

	if (apic_id > MAX_APIC_ID) {
		panic("SMP: APIC ID %d too high", apic_id);
		return;
	}
	KASSERT(cpu_info[apic_id].cpu_present == 0, ("CPU %d added twice",
	    apic_id));
	cpu_info[apic_id].cpu_present = 1;
	if (boot_cpu) {
		KASSERT(boot_cpu_id == -1,
		    ("CPU %d claims to be BSP, but CPU %d already is", apic_id,
		    boot_cpu_id));
		boot_cpu_id = apic_id;
		cpu_info[apic_id].cpu_bsp = 1;
	}
	if (mp_ncpus < MAXCPU) {
		mp_ncpus++;
		mp_maxid = mp_ncpus - 1;
	}
	if (bootverbose)
		printf("SMP: Added CPU %d (%s)\n", apic_id, boot_cpu ? "BSP" :
		    "AP");
}

void
cpu_mp_setmaxid(void)
{

	/*
	 * mp_maxid should be already set by calls to cpu_add().
	 * Just sanity check its value here.
	 */
	if (mp_ncpus == 0)
		KASSERT(mp_maxid == 0,
		    ("%s: mp_ncpus is zero, but mp_maxid is not", __func__));
	else if (mp_ncpus == 1)
		mp_maxid = 0;
	else
		KASSERT(mp_maxid >= mp_ncpus - 1,
		    ("%s: counters out of sync: max %d, count %d", __func__,
		    mp_maxid, mp_ncpus));
}

int
cpu_mp_probe(void)
{

	/*
	 * Always record BSP in CPU map so that the mbuf init code works
	 * correctly.
	 */
	CPU_SETOF(0, &all_cpus);
	if (mp_ncpus == 0) {
		/*
		 * No CPUs were found, so this must be a UP system.  Setup
		 * the variables to represent a system with a single CPU
		 * with an id of 0.
		 */
		mp_ncpus = 1;
		return (0);
	}

	/* At least one CPU was found. */
	if (mp_ncpus == 1) {
		/*
		 * One CPU was found, so this must be a UP system with
		 * an I/O APIC.
		 */
		mp_maxid = 0;
		return (0);
	}

	/* At least two CPUs were found. */
	return (1);
}

/*
 * Initialize the IPI handlers and start up the APs.
 */
void
cpu_mp_start(void)
{
	int i;

	/* Initialize the logical ID to APIC ID table. */
	for (i = 0; i < MAXCPU; i++) {
		cpu_apic_ids[i] = -1;
		cpu_ipi_pending[i] = 0;
	}

	/* Install an inter-CPU IPI for TLB invalidation */
	if (pmap_pcid_enabled) {
		setidt(IPI_INVLTLB, IDTVEC(invltlb_pcid), SDT_SYSIGT,
		    SEL_KPL, 0);
		setidt(IPI_INVLPG, IDTVEC(invlpg_pcid), SDT_SYSIGT,
		    SEL_KPL, 0);
	} else {
		setidt(IPI_INVLTLB, IDTVEC(invltlb), SDT_SYSIGT, SEL_KPL, 0);
		setidt(IPI_INVLPG, IDTVEC(invlpg), SDT_SYSIGT, SEL_KPL, 0);
	}
	setidt(IPI_INVLRNG, IDTVEC(invlrng), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for cache invalidation. */
	setidt(IPI_INVLCACHE, IDTVEC(invlcache), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for all-CPU rendezvous */
	setidt(IPI_RENDEZVOUS, IDTVEC(rendezvous), SDT_SYSIGT, SEL_KPL, 0);

	/* Install generic inter-CPU IPI handler */
	setidt(IPI_BITMAP_VECTOR, IDTVEC(ipi_intr_bitmap_handler),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for CPU stop/restart */
	setidt(IPI_STOP, IDTVEC(cpustop), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for CPU suspend/resume */
	setidt(IPI_SUSPEND, IDTVEC(cpususpend), SDT_SYSIGT, SEL_KPL, 0);

	/* Set boot_cpu_id if needed. */
	if (boot_cpu_id == -1) {
		boot_cpu_id = PCPU_GET(apic_id);
		cpu_info[boot_cpu_id].cpu_bsp = 1;
	} else
		KASSERT(boot_cpu_id == PCPU_GET(apic_id),
		    ("BSP's APIC ID doesn't match boot_cpu_id"));

	/* Probe logical/physical core configuration. */
	topo_probe();

	assign_cpu_ids();

	/* Start each Application Processor */
	start_all_aps();

	set_interrupt_apic_ids();
}

/*
 * Print various information about the SMP system hardware and setup.
 */
void
cpu_mp_announce(void)
{
	const char *hyperthread;
	int i;

	printf("FreeBSD/SMP: %d package(s) x %d core(s)",
	    mp_ncpus / (cpu_cores * cpu_logical), cpu_cores);
	if (hyperthreading_cpus > 1)
		printf(" x %d HTT threads", cpu_logical);
	else if (cpu_logical > 1)
		printf(" x %d SMT threads", cpu_logical);
	printf("\n");

	/* List active CPUs first. */
	printf(" cpu0 (BSP): APIC ID: %2d\n", boot_cpu_id);
	for (i = 1; i < mp_ncpus; i++) {
		if (cpu_info[cpu_apic_ids[i]].cpu_hyperthread)
			hyperthread = "/HT";
		else
			hyperthread = "";
		printf(" cpu%d (AP%s): APIC ID: %2d\n", i, hyperthread,
		    cpu_apic_ids[i]);
	}

	/* List disabled CPUs last. */
	for (i = 0; i <= MAX_APIC_ID; i++) {
		if (!cpu_info[i].cpu_present || !cpu_info[i].cpu_disabled)
			continue;
		if (cpu_info[i].cpu_hyperthread)
			hyperthread = "/HT";
		else
			hyperthread = "";
		printf(" cpu (AP%s): APIC ID: %2d (disabled)\n", hyperthread,
		    i);
	}
}

/*
 * AP CPUs call this to initialize themselves.
 */
void
init_secondary(void)
{
	struct pcpu *pc;
	struct nmi_pcpu *np;
	u_int64_t msr, cr0;
	u_int cpuid;
	int cpu, gsel_tss, x;
	struct region_descriptor ap_gdt;

	/* Set by the startup code for us to use */
	cpu = bootAP;

	/* Init tss */
	common_tss[cpu] = common_tss[0];
	common_tss[cpu].tss_rsp0 = 0;	/* not used until after switch */
	common_tss[cpu].tss_iobase = sizeof(struct amd64tss) +
	    IOPAGES * PAGE_SIZE;
	common_tss[cpu].tss_ist1 = (long)&doublefault_stack[PAGE_SIZE];

	/* The NMI stack runs on IST2. */
	np = ((struct nmi_pcpu *) &nmi_stack[PAGE_SIZE]) - 1;
	common_tss[cpu].tss_ist2 = (long) np;

	/* The DB# stack runs on IST4. */
	np = ((struct nmi_pcpu *) &dbg_stack[PAGE_SIZE]) - 1;
	common_tss[cpu].tss_ist4 = (long) np;

	/* Prepare private GDT */
	gdt_segs[GPROC0_SEL].ssd_base = (long) &common_tss[cpu];
	for (x = 0; x < NGDT; x++) {
		if (x != GPROC0_SEL && x != (GPROC0_SEL + 1) &&
		    x != GUSERLDT_SEL && x != (GUSERLDT_SEL + 1))
			ssdtosd(&gdt_segs[x], &gdt[NGDT * cpu + x]);
	}
	ssdtosyssd(&gdt_segs[GPROC0_SEL],
	    (struct system_segment_descriptor *)&gdt[NGDT * cpu + GPROC0_SEL]);
	ap_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	ap_gdt.rd_base = (long) &gdt[NGDT * cpu];
	lgdt(&ap_gdt);			/* does magic intra-segment return */

	/* Get per-cpu data */
	pc = &__pcpu[cpu];

	/* prime data page for it to use */
	pcpu_init(pc, cpu, sizeof(struct pcpu));
	dpcpu_init(dpcpu, cpu);
	pc->pc_apic_id = cpu_apic_ids[cpu];
	pc->pc_prvspace = pc;
	pc->pc_curthread = 0;
	pc->pc_tssp = &common_tss[cpu];
	pc->pc_commontssp = &common_tss[cpu];
	pc->pc_rsp0 = 0;
	pc->pc_tss = (struct system_segment_descriptor *)&gdt[NGDT * cpu +
	    GPROC0_SEL];
	pc->pc_fs32p = &gdt[NGDT * cpu + GUFS32_SEL];
	pc->pc_gs32p = &gdt[NGDT * cpu + GUGS32_SEL];
	pc->pc_ldt = (struct system_segment_descriptor *)&gdt[NGDT * cpu +
	    GUSERLDT_SEL];

	/* Save the per-cpu pointer for use by the NMI handler. */
	np->np_pcpu = (register_t) pc;

	/* Save the per-cpu pointer for use by the DB# handler. */
	np = ((struct nmi_pcpu *) &dbg_stack[PAGE_SIZE]) - 1;
	np->np_pcpu = (register_t) pc;

	wrmsr(MSR_FSBASE, 0);		/* User value */
	wrmsr(MSR_GSBASE, (u_int64_t)pc);
	wrmsr(MSR_KGSBASE, (u_int64_t)pc);	/* XXX User value while we're in the kernel */
	fix_cpuid();

	lidt(&r_idt);

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	ltr(gsel_tss);

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);

	/* Set up the fast syscall stuff */
	msr = rdmsr(MSR_EFER) | EFER_SCE;
	wrmsr(MSR_EFER, msr);
	wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall));
	wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
	msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
	    ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
	wrmsr(MSR_STAR, msr);
	wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D);

	/* Disable local APIC just to be sure. */
	lapic_disable();

	/* signal our startup to the BSP. */
	mp_naps++;

	/* Spin until the BSP releases the APs. */
	while (!aps_ready)
		ia32_pause();

	/* Initialize the PAT MSR. */
	pmap_init_pat();

	/* set up CPU registers and state */
	cpu_setregs();

	/* set up SSE/NX */
	initializecpu();

	/* set up FPU state on the AP */
	fpuinit();

	if (cpu_ops.cpu_init)
		cpu_ops.cpu_init();

	/* A quick check from sanity claus */
	cpuid = PCPU_GET(cpuid);
	if (PCPU_GET(apic_id) != lapic_id()) {
		printf("SMP: cpuid = %d\n", cpuid);
		printf("SMP: actual apic_id = %d\n", lapic_id());
		printf("SMP: correct apic_id = %d\n", PCPU_GET(apic_id));
		panic("cpuid mismatch! boom!!");
	}

	/* Initialize curthread. */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	PCPU_SET(curthread, PCPU_GET(idlethread));

	mca_init();

	mtx_lock_spin(&ap_boot_mtx);

	/* Init local apic for irq's */
	lapic_setup(1);

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();

	smp_cpus++;

	CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", cpuid);
	printf("SMP: AP CPU #%d Launched!\n", cpuid);

	/* Determine if we are a logical CPU. */
	/* XXX Calculation depends on cpu_logical being a power of 2, e.g. 2 */
	if (cpu_logical > 1 && PCPU_GET(apic_id) % cpu_logical != 0)
		CPU_SET(cpuid, &logical_cpus_mask);

	if (bootverbose)
		lapic_dump("AP");

	if (smp_cpus == mp_ncpus) {
		/* enable IPI's, tlb shootdown, freezes etc */
		atomic_store_rel_int(&smp_started, 1);
	}

	/*
	 * Enable global pages TLB extension
	 * This also implicitly flushes the TLB
	 */
	load_cr4(rcr4() | CR4_PGE);
	if (pmap_pcid_enabled)
		load_cr4(rcr4() | CR4_PCIDE);
	load_ds(_udatasel);
	load_es(_udatasel);
	load_fs(_ufssel);
	mtx_unlock_spin(&ap_boot_mtx);

	/* Wait until all the APs are up. */
	while (smp_started == 0)
		ia32_pause();

	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

	sched_throw(NULL);

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}

/*******************************************************************
 * local functions and data
 */

/*
 * We tell the I/O APIC code about all the CPUs we want to receive
 * interrupts.  If we don't want certain CPUs to receive IRQs we
 * can simply not tell the I/O APIC code about them in this function.
 * We also do not tell it about the BSP since it tells itself about
 * the BSP internally to work with UP kernels and on UP machines.
 */
static void
set_interrupt_apic_ids(void)
{
	u_int i, apic_id;

	for (i = 0; i < MAXCPU; i++) {
		apic_id = cpu_apic_ids[i];
		if (apic_id == -1)
			continue;
		if (cpu_info[apic_id].cpu_bsp)
			continue;
		if (cpu_info[apic_id].cpu_disabled)
			continue;

		/* Don't let hyperthreads service interrupts. */
		if (hyperthreading_cpus > 1 &&
		    apic_id % hyperthreading_cpus != 0)
			continue;

		intr_add_cpu(i);
	}
}

/*
 * Assign logical CPU IDs to local APICs.
 */
static void
assign_cpu_ids(void)
{
	u_int i;

	TUNABLE_INT_FETCH("machdep.hyperthreading_allowed",
	    &hyperthreading_allowed);

	/* Check for explicitly disabled CPUs. */
	for (i = 0; i <= MAX_APIC_ID; i++) {
		if (!cpu_info[i].cpu_present || cpu_info[i].cpu_bsp)
			continue;

		if (hyperthreading_cpus > 1 && i % hyperthreading_cpus != 0) {
			cpu_info[i].cpu_hyperthread = 1;

			/*
			 * Don't use HT CPU if it has been disabled by a
			 * tunable.
			 */
			if (hyperthreading_allowed == 0) {
				cpu_info[i].cpu_disabled = 1;
				continue;
			}
		}

		/* Don't use this CPU if it has been disabled by a tunable. */
		if (resource_disabled("lapic", i)) {
			cpu_info[i].cpu_disabled = 1;
			continue;
		}
	}

	if (hyperthreading_allowed == 0 && hyperthreading_cpus > 1) {
		hyperthreading_cpus = 0;
		cpu_logical = 1;
	}

	/*
	 * Assign CPU IDs to local APIC IDs and disable any CPUs
	 * beyond MAXCPU.  CPU 0 is always assigned to the BSP.
	 *
	 * To minimize confusion for userland, we attempt to number
	 * CPUs such that all threads and cores in a package are
	 * grouped together.  For now we assume that the BSP is always
	 * the first thread in a package and just start adding APs
	 * starting with the BSP's APIC ID.
	 */
	mp_ncpus = 1;
	cpu_apic_ids[0] = boot_cpu_id;
	apic_cpuids[boot_cpu_id] = 0;
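	/* Scan upward from the BSP's APIC ID, wrapping past MAX_APIC_ID. */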
	for (i = boot_cpu_id + 1; i != boot_cpu_id;
	    i == MAX_APIC_ID ? i = 0 : i++) {
		if (!cpu_info[i].cpu_present || cpu_info[i].cpu_bsp ||
		    cpu_info[i].cpu_disabled)
			continue;

		if (mp_ncpus < MAXCPU) {
			cpu_apic_ids[mp_ncpus] = i;
			apic_cpuids[i] = mp_ncpus;
			mp_ncpus++;
		} else
			cpu_info[i].cpu_disabled = 1;
	}
	KASSERT(mp_maxid >= mp_ncpus - 1,
	    ("%s: counters out of sync: max %d, count %d", __func__, mp_maxid,
	    mp_ncpus));
}

/*
 * start each AP in our list
 */
static int
start_all_aps(void)
{
	vm_offset_t va = boot_address + KERNBASE;
	u_int64_t *pt4, *pt3, *pt2;
	u_int32_t mpbioswarmvec;
	int apic_id, cpu, i;
	u_char mpbiosreason;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	/* install the AP 1st level boot code */
	pmap_kenter(va, boot_address);
	pmap_invalidate_page(kernel_pmap, va);
	bcopy(mptramp_start, (void *)va, bootMP_size);

	/* Locate the page tables, they'll be below the trampoline */
	pt4 = (u_int64_t *)(uintptr_t)(mptramp_pagetables + KERNBASE);
	pt3 = pt4 + (PAGE_SIZE) / sizeof(u_int64_t);
	pt2 = pt3 + (PAGE_SIZE) / sizeof(u_int64_t);

	/* Create the initial 1GB replicated page tables */
	for (i = 0; i < 512; i++) {
		/* Each slot of the level 4 pages points to the same level 3 page */
		pt4[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + PAGE_SIZE);
		pt4[i] |= PG_V | PG_RW | PG_U;

		/* Each slot of the level 3 pages points to the same level 2 page */
		pt3[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + (2 * PAGE_SIZE));
		pt3[i] |= PG_V | PG_RW | PG_U;

		/* The level 2 page slots are mapped with 2MB pages for 1GB. */
		pt2[i] = i * (2 * 1024 * 1024);
		pt2[i] |= PG_V | PG_RW | PG_PS | PG_U;
	}
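
	/*
	 * With every PML4 and PDP slot pointing at the same lower-level
	 * page, the first 1GB of physical memory appears at every
	 * 1GB-aligned virtual address, so the trampoline keeps running
	 * whether it fetches from the identity mapping or from the
	 * kernel's high address range.
	 */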

	/* save the current value of the warm-start vector */
	mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);

	/* setup a vector to our boot code */
	*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
	*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

	/* start each AP */
	for (cpu = 1; cpu < mp_ncpus; cpu++) {
		apic_id = cpu_apic_ids[cpu];

		/* allocate and set up an idle stack data page */
		bootstacks[cpu] = (void *)kmem_malloc(kernel_arena,
		    KSTACK_PAGES * PAGE_SIZE, M_WAITOK | M_ZERO);
		doublefault_stack = (char *)kmem_malloc(kernel_arena,
		    PAGE_SIZE, M_WAITOK | M_ZERO);
		nmi_stack = (char *)kmem_malloc(kernel_arena, PAGE_SIZE,
		    M_WAITOK | M_ZERO);
		dbg_stack = (char *)kmem_malloc(kernel_arena, PAGE_SIZE,
		    M_WAITOK | M_ZERO);
		dpcpu = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
		    M_WAITOK | M_ZERO);

		bootSTK = (char *)bootstacks[cpu] + KSTACK_PAGES * PAGE_SIZE - 8;
		bootAP = cpu;

		/* attempt to start the Application Processor */
		if (!start_ap(apic_id)) {
			/* restore the warmstart vector */
			*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;
			panic("AP #%d (PHY# %d) failed!", cpu, apic_id);
		}

		CPU_SET(cpu, &all_cpus);	/* record AP in CPU map */
	}

	/* restore the warmstart vector */
	*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;

	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);

	/* number of APs actually started */
	return mp_naps;
}

/*
 * This function starts the AP (application processor) identified
 * by the APIC ID 'apic_id'.  It does quite a "song and dance"
 * to accomplish this.  This is necessary because of the nuances
 * of the different hardware we might encounter.  It isn't pretty,
 * but it seems to work.
 */
static int
start_ap(int apic_id)
{
	int vector, ms;
	int cpus;

	/* calculate the vector */
	vector = (boot_address >> 12) & 0xff;
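	/*
	 * A STARTUP IPI's vector field is the 4KB physical page number
	 * at which the AP begins executing in real mode, which is why
	 * the trampoline must sit page-aligned in the first megabyte.
	 */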

	/* used as a watchpoint to signal AP startup */
	cpus = mp_naps;

	ipi_startup(apic_id, vector);

	/* Wait up to 5 seconds for it to start. */
	for (ms = 0; ms < 5000; ms++) {
		if (mp_naps > cpus)
			return 1;	/* return SUCCESS */
		DELAY(1000);
	}
	return 0;		/* return FAILURE */
}

#ifdef COUNT_XINVLTLB_HITS
u_int xhits_gbl[MAXCPU];
u_int xhits_pg[MAXCPU];
u_int xhits_rng[MAXCPU];
static SYSCTL_NODE(_debug, OID_AUTO, xhits, CTLFLAG_RW, 0, "");
SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, global, CTLFLAG_RW, &xhits_gbl,
    sizeof(xhits_gbl), "IU", "");
SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, page, CTLFLAG_RW, &xhits_pg,
    sizeof(xhits_pg), "IU", "");
SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, range, CTLFLAG_RW, &xhits_rng,
    sizeof(xhits_rng), "IU", "");

u_int ipi_global;
u_int ipi_page;
u_int ipi_range;
u_int ipi_range_size;
SYSCTL_UINT(_debug_xhits, OID_AUTO, ipi_global, CTLFLAG_RW, &ipi_global, 0, "");
SYSCTL_UINT(_debug_xhits, OID_AUTO, ipi_page, CTLFLAG_RW, &ipi_page, 0, "");
SYSCTL_UINT(_debug_xhits, OID_AUTO, ipi_range, CTLFLAG_RW, &ipi_range, 0, "");
SYSCTL_UINT(_debug_xhits, OID_AUTO, ipi_range_size, CTLFLAG_RW,
    &ipi_range_size, 0, "");

u_int ipi_masked_global;
u_int ipi_masked_page;
u_int ipi_masked_range;
u_int ipi_masked_range_size;
SYSCTL_UINT(_debug_xhits, OID_AUTO, ipi_masked_global, CTLFLAG_RW,
    &ipi_masked_global, 0, "");
SYSCTL_UINT(_debug_xhits, OID_AUTO, ipi_masked_page, CTLFLAG_RW,
    &ipi_masked_page, 0, "");
SYSCTL_UINT(_debug_xhits, OID_AUTO, ipi_masked_range, CTLFLAG_RW,
    &ipi_masked_range, 0, "");
SYSCTL_UINT(_debug_xhits, OID_AUTO, ipi_masked_range_size, CTLFLAG_RW,
    &ipi_masked_range_size, 0, "");
#endif /* COUNT_XINVLTLB_HITS */

/*
 * Init and startup IPI.
 */
void
ipi_startup(int apic_id, int vector)
{

	/*
	 * This attempts to follow the algorithm described in the
	 * Intel Multiprocessor Specification v1.4 in section B.4.
	 * For each IPI, we allow the local APIC ~20us to deliver the
	 * IPI.  If that times out, we panic.
	 */

	/*
	 * first we do an INIT IPI: this INIT IPI might be run, resetting
	 * and running the target CPU.  OR this INIT IPI might be latched (P5
	 * bug), CPU waiting for STARTUP IPI.  OR this INIT IPI might be
	 * ignored.
	 */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_LEVEL |
	    APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, apic_id);
	lapic_ipi_wait(100);

	/* Explicitly deassert the INIT IPI. */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_LEVEL |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT,
	    apic_id);

	DELAY(10000);		/* wait ~10ms */

	/*
	 * next we do a STARTUP IPI: the previous INIT IPI might still be
	 * latched (P5 bug), in which case this 1st STARTUP would terminate
	 * immediately and the previously started INIT IPI would continue.
	 * OR the previous INIT IPI has already run, and this STARTUP IPI
	 * will run.  OR the previous INIT IPI was ignored, and this STARTUP
	 * IPI will run.
	 */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
	    vector, apic_id);
	if (!lapic_ipi_wait(100))
		panic("Failed to deliver first STARTUP IPI to APIC %d",
		    apic_id);
	DELAY(200);		/* wait ~200us */

	/*
	 * finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run IF
	 * the previous STARTUP IPI was cancelled by a latched INIT IPI.  OR
	 * this STARTUP IPI will be ignored, as only ONE STARTUP IPI is
	 * recognized after hardware RESET or INIT IPI.
	 */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
	    vector, apic_id);
	if (!lapic_ipi_wait(100))
		panic("Failed to deliver second STARTUP IPI to APIC %d",
		    apic_id);

	DELAY(200);		/* wait ~200us */
}

/*
 * Send an IPI to specified CPU handling the bitmap logic.
 */
static void
ipi_send_cpu(int cpu, u_int ipi)
{
	u_int bitmap, old_pending, new_pending;

	KASSERT(cpu_apic_ids[cpu] != -1, ("IPI to non-existent CPU %d", cpu));

	if (IPI_IS_BITMAPED(ipi)) {
		bitmap = 1 << ipi;
		ipi = IPI_BITMAP_VECTOR;
		do {
			old_pending = cpu_ipi_pending[cpu];
			new_pending = old_pending | bitmap;
		} while (!atomic_cmpset_int(&cpu_ipi_pending[cpu],
		    old_pending, new_pending));
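		/*
		 * If bits were already pending, a bitmap IPI is already
		 * in flight and its handler will pick up the new bit,
		 * so don't send another vector.
		 */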
		if (old_pending)
			return;
	}
	cpu_ops.ipi_vectored(ipi, cpu_apic_ids[cpu]);
}

/*
 * Flush the TLB on all other CPUs.
 */
static void
smp_tlb_shootdown(u_int vector, pmap_t pmap, vm_offset_t addr1,
    vm_offset_t addr2)
{
	u_int ncpu;

	ncpu = mp_ncpus - 1;	/* does not shootdown self */
	if (ncpu < 1)
		return;		/* no other cpus */
	if (!(read_rflags() & PSL_I))
		panic("%s: interrupts disabled", __func__);
	mtx_lock_spin(&smp_ipi_mtx);
	smp_tlb_invpcid.addr = addr1;
	if (pmap == NULL) {
		smp_tlb_invpcid.pcid = 0;
	} else {
		smp_tlb_invpcid.pcid = pmap->pm_pcid;
		pcid_cr3 = pmap->pm_cr3;
	}
	smp_tlb_addr2 = addr2;
	smp_tlb_pmap = pmap;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	ipi_all_but_self(vector);
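	/*
	 * Each handler increments smp_tlb_wait after invalidating, so
	 * spin until every other CPU has acknowledged the shootdown.
	 */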
	while (smp_tlb_wait < ncpu)
		ia32_pause();
	mtx_unlock_spin(&smp_ipi_mtx);
}

static void
smp_targeted_tlb_shootdown(cpuset_t mask, u_int vector, pmap_t pmap,
    vm_offset_t addr1, vm_offset_t addr2)
{
	int cpu, ncpu, othercpus;

	othercpus = mp_ncpus - 1;
	if (CPU_ISFULLSET(&mask)) {
		if (othercpus < 1)
			return;
	} else {
		CPU_CLR(PCPU_GET(cpuid), &mask);
		if (CPU_EMPTY(&mask))
			return;
	}
	if (!(read_rflags() & PSL_I))
		panic("%s: interrupts disabled", __func__);
	mtx_lock_spin(&smp_ipi_mtx);
	smp_tlb_invpcid.addr = addr1;
	if (pmap == NULL) {
		smp_tlb_invpcid.pcid = 0;
	} else {
		smp_tlb_invpcid.pcid = pmap->pm_pcid;
		pcid_cr3 = pmap->pm_cr3;
	}
	smp_tlb_addr2 = addr2;
	smp_tlb_pmap = pmap;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	if (CPU_ISFULLSET(&mask)) {
		ncpu = othercpus;
		ipi_all_but_self(vector);
	} else {
		ncpu = 0;
		while ((cpu = CPU_FFS(&mask)) != 0) {
			cpu--;
			CPU_CLR(cpu, &mask);
			CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__,
			    cpu, vector);
			ipi_send_cpu(cpu, vector);
			ncpu++;
		}
	}
	while (smp_tlb_wait < ncpu)
		ia32_pause();
	mtx_unlock_spin(&smp_ipi_mtx);
}

void
smp_cache_flush(void)
{

	if (smp_started)
		smp_tlb_shootdown(IPI_INVLCACHE, NULL, 0, 0);
}

void
smp_invltlb(pmap_t pmap)
{

	if (smp_started) {
		smp_tlb_shootdown(IPI_INVLTLB, pmap, 0, 0);
#ifdef COUNT_XINVLTLB_HITS
		ipi_global++;
#endif
	}
}

void
smp_invlpg(pmap_t pmap, vm_offset_t addr)
{

	if (smp_started) {
		smp_tlb_shootdown(IPI_INVLPG, pmap, addr, 0);
#ifdef COUNT_XINVLTLB_HITS
		ipi_page++;
#endif
	}
}

void
smp_invlpg_range(pmap_t pmap, vm_offset_t addr1, vm_offset_t addr2)
{

	if (smp_started) {
		smp_tlb_shootdown(IPI_INVLRNG, pmap, addr1, addr2);
#ifdef COUNT_XINVLTLB_HITS
		ipi_range++;
		ipi_range_size += (addr2 - addr1) / PAGE_SIZE;
#endif
	}
}

void
smp_masked_invltlb(cpuset_t mask, pmap_t pmap)
{

	if (smp_started) {
		smp_targeted_tlb_shootdown(mask, IPI_INVLTLB, pmap, 0, 0);
#ifdef COUNT_XINVLTLB_HITS
		ipi_masked_global++;
#endif
	}
}

void
smp_masked_invlpg(cpuset_t mask, pmap_t pmap, vm_offset_t addr)
{

	if (smp_started) {
		smp_targeted_tlb_shootdown(mask, IPI_INVLPG, pmap, addr, 0);
#ifdef COUNT_XINVLTLB_HITS
		ipi_masked_page++;
#endif
	}
}

void
smp_masked_invlpg_range(cpuset_t mask, pmap_t pmap, vm_offset_t addr1,
    vm_offset_t addr2)
{

	if (smp_started) {
		smp_targeted_tlb_shootdown(mask, IPI_INVLRNG, pmap, addr1,
		    addr2);
#ifdef COUNT_XINVLTLB_HITS
		ipi_masked_range++;
		ipi_masked_range_size += (addr2 - addr1) / PAGE_SIZE;
#endif
	}
}

void
ipi_bitmap_handler(struct trapframe frame)
{
	struct trapframe *oldframe;
	struct thread *td;
	int cpu = PCPU_GET(cpuid);
	u_int ipi_bitmap;

	critical_enter();
	td = curthread;
	td->td_intr_nesting_level++;
	oldframe = td->td_intr_frame;
	td->td_intr_frame = &frame;
	ipi_bitmap = atomic_readandclear_int(&cpu_ipi_pending[cpu]);
	if (ipi_bitmap & (1 << IPI_PREEMPT)) {
#ifdef COUNT_IPIS
		(*ipi_preempt_counts[cpu])++;
#endif
		sched_preempt(td);
	}
	if (ipi_bitmap & (1 << IPI_AST)) {
#ifdef COUNT_IPIS
		(*ipi_ast_counts[cpu])++;
#endif
		/* Nothing to do for AST */
	}
	if (ipi_bitmap & (1 << IPI_HARDCLOCK)) {
#ifdef COUNT_IPIS
		(*ipi_hardclock_counts[cpu])++;
#endif
		hardclockintr();
	}
	td->td_intr_frame = oldframe;
	td->td_intr_nesting_level--;
	critical_exit();
}

/*
 * Send an IPI to a set of CPUs.
 */
void
ipi_selected(cpuset_t cpus, u_int ipi)
{
	int cpu;

	/*
	 * IPI_STOP_HARD maps to a NMI and the trap handler needs a bit
	 * of help in order to understand what is the source.
	 * Set the mask of receiving CPUs for this purpose.
	 */
	if (ipi == IPI_STOP_HARD)
		CPU_OR_ATOMIC(&ipi_nmi_pending, &cpus);

	while ((cpu = CPU_FFS(&cpus)) != 0) {
		cpu--;
		CPU_CLR(cpu, &cpus);
		CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
		ipi_send_cpu(cpu, ipi);
	}
}

/*
 * Send an IPI to a specific CPU.
 */
void
ipi_cpu(int cpu, u_int ipi)
{

	/*
	 * IPI_STOP_HARD maps to a NMI and the trap handler needs a bit
	 * of help in order to understand what is the source.
	 * Set the mask of receiving CPUs for this purpose.
	 */
	if (ipi == IPI_STOP_HARD)
		CPU_SET_ATOMIC(cpu, &ipi_nmi_pending);

	CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
	ipi_send_cpu(cpu, ipi);
}

/*
 * Send an IPI to all CPUs EXCEPT myself.
 */
void
ipi_all_but_self(u_int ipi)
{
	cpuset_t other_cpus;

	other_cpus = all_cpus;
	CPU_CLR(PCPU_GET(cpuid), &other_cpus);

	if (IPI_IS_BITMAPED(ipi)) {
		ipi_selected(other_cpus, ipi);
		return;
	}

	/*
	 * IPI_STOP_HARD maps to a NMI and the trap handler needs a bit
	 * of help in order to understand what is the source.
	 * Set the mask of receiving CPUs for this purpose.
	 */
	if (ipi == IPI_STOP_HARD)
		CPU_OR_ATOMIC(&ipi_nmi_pending, &other_cpus);

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	cpu_ops.ipi_vectored(ipi, APIC_IPI_DEST_OTHERS);
}

int
ipi_nmi_handler(void)
{
	u_int cpuid;

	/*
	 * As long as there is not a simple way to know about a NMI's
	 * source, if the bitmask for the current CPU is present in
	 * the global pending bitword an IPI_STOP_HARD has been issued
	 * and should be handled.
	 */
	cpuid = PCPU_GET(cpuid);
	if (!CPU_ISSET(cpuid, &ipi_nmi_pending))
		return (1);

	CPU_CLR_ATOMIC(cpuid, &ipi_nmi_pending);
	cpustop_handler();
	return (0);
}

/*
 * Handle an IPI_STOP by saving our current context and spinning until we
 * are resumed.
 */
void
cpustop_handler(void)
{
	u_int cpu;

	cpu = PCPU_GET(cpuid);

	savectx(&stoppcbs[cpu]);

	/* Indicate that we are stopped */
	CPU_SET_ATOMIC(cpu, &stopped_cpus);

	/* Wait for restart */
	while (!CPU_ISSET(cpu, &started_cpus))
		ia32_pause();

	CPU_CLR_ATOMIC(cpu, &started_cpus);
	CPU_CLR_ATOMIC(cpu, &stopped_cpus);

#ifdef DDB
	amd64_db_resume_dbreg();
#endif

	if (cpu == 0 && cpustop_restartfunc != NULL) {
		cpustop_restartfunc();
		cpustop_restartfunc = NULL;
	}
}

/*
 * Handle an IPI_SUSPEND by saving our current context and spinning until we
 * are resumed.
 */
void
cpususpend_handler(void)
{
	u_int cpu;

	mtx_assert(&smp_ipi_mtx, MA_NOTOWNED);

	cpu = PCPU_GET(cpuid);
	if (savectx(&susppcbs[cpu]->sp_pcb)) {
		fpususpend(susppcbs[cpu]->sp_fpususpend);
		wbinvd();
		CPU_SET_ATOMIC(cpu, &suspended_cpus);
		/*
		 * Hack for xen, which does not use resumectx() so never
		 * uses the next clause: set resuming_cpus early so that
		 * resume_cpus() can wait on the same bitmap for acpi and
		 * xen.  resuming_cpus now means eventually_resumable_cpus.
		 */
		CPU_SET_ATOMIC(cpu, &resuming_cpus);
	} else {
		fpuresume(susppcbs[cpu]->sp_fpususpend);
		pmap_init_pat();
		initializecpu();
		PCPU_SET(switchtime, 0);
		PCPU_SET(switchticks, ticks);

		/* Indicate that we are resuming */
		CPU_CLR_ATOMIC(cpu, &suspended_cpus);
	}

	/* Wait for resume directive */
	while (!CPU_ISSET(cpu, &toresume_cpus))
		ia32_pause();

	if (cpu_ops.cpu_resume)
		cpu_ops.cpu_resume();
	if (vmm_resume_p)
		vmm_resume_p();

	/* Resume MCA and local APIC */
	mca_resume();
	lapic_setup(0);

	/* Indicate that we are resumed */
	CPU_CLR_ATOMIC(cpu, &resuming_cpus);
	CPU_CLR_ATOMIC(cpu, &suspended_cpus);
	CPU_CLR_ATOMIC(cpu, &toresume_cpus);
}

/*
 * Handlers for TLB related IPIs
 */
void
invltlb_handler(void)
{
#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	invltlb();
	atomic_add_int(&smp_tlb_wait, 1);
}

void
invltlb_pcid_handler(void)
{
	uint64_t cr3;
	u_int cpuid;
#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	if (smp_tlb_invpcid.pcid != (uint64_t)-1 &&
	    smp_tlb_invpcid.pcid != 0) {
		if (invpcid_works) {
			invpcid(&smp_tlb_invpcid, INVPCID_CTX);
		} else {
			/* Otherwise reload %cr3 twice. */
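			/*
			 * The first load of the target pcid_cr3 (without
			 * CR3_PCID_SAVE set) flushes that PCID's entries;
			 * the reload of our own %cr3 then sets
			 * CR3_PCID_SAVE so the switch back does not flush
			 * the current PCID.
			 */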
			cr3 = rcr3();
			if (cr3 != pcid_cr3) {
				load_cr3(pcid_cr3);
				cr3 |= CR3_PCID_SAVE;
			}
			load_cr3(cr3);
		}
	} else {
		invltlb_globpcid();
	}
	if (smp_tlb_pmap != NULL) {
		cpuid = PCPU_GET(cpuid);
		if (!CPU_ISSET(cpuid, &smp_tlb_pmap->pm_active))
			CPU_CLR_ATOMIC(cpuid, &smp_tlb_pmap->pm_save);
	}

	atomic_add_int(&smp_tlb_wait, 1);
}

void
invlpg_handler(void)
{
#ifdef COUNT_XINVLTLB_HITS
	xhits_pg[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	invlpg(smp_tlb_invpcid.addr);
	atomic_add_int(&smp_tlb_wait, 1);
}

void
invlpg_pcid_handler(void)
{
	uint64_t cr3;
#ifdef COUNT_XINVLTLB_HITS
	xhits_pg[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	if (smp_tlb_invpcid.pcid == (uint64_t)-1) {
		invltlb_globpcid();
	} else if (smp_tlb_invpcid.pcid == 0) {
		invlpg(smp_tlb_invpcid.addr);
	} else if (invpcid_works) {
		invpcid(&smp_tlb_invpcid, INVPCID_ADDR);
	} else {
		/*
		 * PCID supported, but INVPCID is not.
		 * Temporarily switch to the target address
		 * space and do INVLPG.
		 */
		cr3 = rcr3();
		if (cr3 != pcid_cr3)
			load_cr3(pcid_cr3 | CR3_PCID_SAVE);
		invlpg(smp_tlb_invpcid.addr);
		load_cr3(cr3 | CR3_PCID_SAVE);
	}

	atomic_add_int(&smp_tlb_wait, 1);
}

static inline void
invlpg_range(vm_offset_t start, vm_offset_t end)
{

	do {
		invlpg(start);
		start += PAGE_SIZE;
	} while (start < end);
}

void
invlrng_handler(void)
{
	struct invpcid_descr d;
	vm_offset_t addr;
	uint64_t cr3;
	u_int cpuid;
#ifdef COUNT_XINVLTLB_HITS
	xhits_rng[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	addr = smp_tlb_invpcid.addr;
	if (pmap_pcid_enabled) {
		if (smp_tlb_invpcid.pcid == 0) {
			/*
			 * kernel pmap - use invlpg to invalidate
			 * global mapping.
			 */
			invlpg_range(addr, smp_tlb_addr2);
		} else if (smp_tlb_invpcid.pcid == (uint64_t)-1) {
			invltlb_globpcid();
			if (smp_tlb_pmap != NULL) {
				cpuid = PCPU_GET(cpuid);
				if (!CPU_ISSET(cpuid, &smp_tlb_pmap->pm_active))
					CPU_CLR_ATOMIC(cpuid,
					    &smp_tlb_pmap->pm_save);
			}
		} else if (invpcid_works) {
			d = smp_tlb_invpcid;
			do {
				invpcid(&d, INVPCID_ADDR);
				d.addr += PAGE_SIZE;
			} while (d.addr <= smp_tlb_addr2);
		} else {
			cr3 = rcr3();
			if (cr3 != pcid_cr3)
				load_cr3(pcid_cr3 | CR3_PCID_SAVE);
			invlpg_range(addr, smp_tlb_addr2);
			load_cr3(cr3 | CR3_PCID_SAVE);
		}
	} else {
		invlpg_range(addr, smp_tlb_addr2);
	}

	atomic_add_int(&smp_tlb_wait, 1);
}

void
invlcache_handler(void)
{
#ifdef COUNT_IPIS
	(*ipi_invlcache_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	wbinvd();
	atomic_add_int(&smp_tlb_wait, 1);
}

/*
 * This is called once the rest of the system is up and running and we're
 * ready to let the APs out of the pen.
 */
static void
release_aps(void *dummy __unused)
{

	if (mp_ncpus == 1)
		return;
	atomic_store_rel_int(&aps_ready, 1);
	while (smp_started == 0)
		ia32_pause();
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);

#ifdef COUNT_IPIS
/*
 * Setup interrupt counters for IPI handlers.
 */
static void
mp_ipi_intrcnt(void *dummy)
{
	char buf[64];
	int i;

	CPU_FOREACH(i) {
		snprintf(buf, sizeof(buf), "cpu%d:invltlb", i);
		intrcnt_add(buf, &ipi_invltlb_counts[i]);
		snprintf(buf, sizeof(buf), "cpu%d:invlrng", i);
		intrcnt_add(buf, &ipi_invlrng_counts[i]);
		snprintf(buf, sizeof(buf), "cpu%d:invlpg", i);
		intrcnt_add(buf, &ipi_invlpg_counts[i]);
		snprintf(buf, sizeof(buf), "cpu%d:invlcache", i);
		intrcnt_add(buf, &ipi_invlcache_counts[i]);
		snprintf(buf, sizeof(buf), "cpu%d:preempt", i);
		intrcnt_add(buf, &ipi_preempt_counts[i]);
		snprintf(buf, sizeof(buf), "cpu%d:ast", i);
		intrcnt_add(buf, &ipi_ast_counts[i]);
		snprintf(buf, sizeof(buf), "cpu%d:rendezvous", i);
		intrcnt_add(buf, &ipi_rendezvous_counts[i]);
		snprintf(buf, sizeof(buf), "cpu%d:hardclock", i);
		intrcnt_add(buf, &ipi_hardclock_counts[i]);
	}
}
SYSINIT(mp_ipi_intrcnt, SI_SUB_INTR, SI_ORDER_MIDDLE, mp_ipi_intrcnt, NULL);
#endif