/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1996, by Steve Passe
 * Copyright (c) 2003, by Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_acpi.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_sched.h"
#include "opt_smp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/domainset.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>

#include <x86/apicreg.h>
#include <machine/clock.h>
#include <machine/cputypes.h>
#include <machine/cpufunc.h>
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/tss.h>
#include <x86/ucode.h>
#include <machine/cpu.h>
#include <x86/init.h>

#ifdef DEV_ACPI
#include <contrib/dev/acpica/include/acpi.h>
#include <dev/acpica/acpivar.h>
#endif

#define WARMBOOT_TARGET		0
#define WARMBOOT_OFF		(KERNBASE + 0x0467)
#define WARMBOOT_SEG		(KERNBASE + 0x0469)

#define CMOS_REG		(0x70)
#define CMOS_DATA		(0x71)
#define BIOS_RESET		(0x0f)
#define BIOS_WARM		(0x0a)
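
/*
 * Warm-boot protocol: writing BIOS_WARM into the CMOS shutdown status
 * byte (offset 0x0f) makes a legacy BIOS, after a CPU reset, jump
 * through the real-mode far pointer stored at 0x40:0x67 (the
 * WARMBOOT_OFF/WARMBOOT_SEG locations) instead of doing a full POST.
 * start_all_aps() points that vector at the AP trampoline on
 * non-EFI systems.
 */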

#define GiB(v)			(v ## ULL << 30)

#define AP_BOOTPT_SZ		(PAGE_SIZE * 4)

/* Temporary variables for init_secondary() */
static char *doublefault_stack;
static char *mce_stack;
static char *nmi_stack;
static char *dbg_stack;
void *bootpcpu;

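/*
 * Parameters consumed by the real-mode MP trampoline (mptramp.S):
 * whether the starting AP should enable 5-level paging (LA57) and
 * the NX bit.  They are filled in by cpu_mp_start() below.
 */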
extern u_int mptramp_la57;
extern u_int mptramp_nx;

/*
 * Local data and functions.
 */

static int	start_ap(int apic_id, vm_paddr_t boot_address);

/*
 * Initialize the IPI handlers and start up the APs.
 */
void
cpu_mp_start(void)
{
	int i;

	/* Initialize the logical ID to APIC ID table. */
	for (i = 0; i < MAXCPU; i++) {
		cpu_apic_ids[i] = -1;
	}

	/* Install an inter-CPU IPI for cache and TLB invalidations. */
	setidt(IPI_INVLOP, pti ? IDTVEC(invlop_pti) : IDTVEC(invlop),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for all-CPU rendezvous */
	setidt(IPI_RENDEZVOUS, pti ? IDTVEC(rendezvous_pti) :
	    IDTVEC(rendezvous), SDT_SYSIGT, SEL_KPL, 0);

	/* Install generic inter-CPU IPI handler */
	setidt(IPI_BITMAP_VECTOR, pti ? IDTVEC(ipi_intr_bitmap_handler_pti) :
	    IDTVEC(ipi_intr_bitmap_handler), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for CPU stop/restart */
	setidt(IPI_STOP, pti ? IDTVEC(cpustop_pti) : IDTVEC(cpustop),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for CPU suspend/resume */
	setidt(IPI_SUSPEND, pti ? IDTVEC(cpususpend_pti) : IDTVEC(cpususpend),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Install an IPI for calling delayed SWI */
	setidt(IPI_SWI, pti ? IDTVEC(ipi_swi_pti) : IDTVEC(ipi_swi),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Set boot_cpu_id if needed. */
	if (boot_cpu_id == -1) {
		boot_cpu_id = PCPU_GET(apic_id);
		cpu_info[boot_cpu_id].cpu_bsp = 1;
	} else
		KASSERT(boot_cpu_id == PCPU_GET(apic_id),
		    ("BSP's APIC ID doesn't match boot_cpu_id"));

	/* Probe logical/physical core configuration. */
	topo_probe();

	assign_cpu_ids();

	mptramp_la57 = la57;
	mptramp_nx = pg_nx != 0;
	MPASS(kernel_pmap->pm_cr3 < (1UL << 32));
	mptramp_pagetables = kernel_pmap->pm_cr3;

	/* Start each Application Processor */
	start_all_aps();

	set_interrupt_apic_ids();

#if defined(DEV_ACPI) && MAXMEMDOM > 1
	acpi_pxm_set_cpu_locality();
#endif
}

/*
 * AP CPUs call this to initialize themselves.
 */
void
init_secondary(void)
{
	struct pcpu *pc;
	struct nmi_pcpu *np;
	struct user_segment_descriptor *gdt;
	struct region_descriptor ap_gdt;
	u_int64_t cr0;
	int cpu, gsel_tss, x;

	/* Set by the startup code for us to use */
	cpu = bootAP;

	/* Update microcode before doing anything else. */
	ucode_load_ap(cpu);

	/* Initialize the PCPU area. */
	pc = bootpcpu;
	pcpu_init(pc, cpu, sizeof(struct pcpu));
	dpcpu_init(dpcpu, cpu);
	pc->pc_apic_id = cpu_apic_ids[cpu];
	pc->pc_prvspace = pc;
	pc->pc_curthread = 0;
	pc->pc_tssp = &pc->pc_common_tss;
	pc->pc_rsp0 = 0;
	pc->pc_pti_rsp0 = (((vm_offset_t)&pc->pc_pti_stack +
	    PC_PTI_STACK_SZ * sizeof(uint64_t)) & ~0xful);
	gdt = pc->pc_gdt;
	pc->pc_tss = (struct system_segment_descriptor *)&gdt[GPROC0_SEL];
	pc->pc_fs32p = &gdt[GUFS32_SEL];
	pc->pc_gs32p = &gdt[GUGS32_SEL];
	pc->pc_ldt = (struct system_segment_descriptor *)&gdt[GUSERLDT_SEL];
	pc->pc_ucr3_load_mask = PMAP_UCR3_NOMASK;
	/* See comment in pmap_bootstrap(). */
	pc->pc_pcid_next = PMAP_PCID_KERN + 2;
	pc->pc_pcid_gen = 1;

	pc->pc_smp_tlb_gen = 1;

	/* Init tss */
	pc->pc_common_tss = __pcpu[0].pc_common_tss;
	pc->pc_common_tss.tss_iobase = sizeof(struct amd64tss) +
	    IOPERM_BITMAP_SIZE;
	pc->pc_common_tss.tss_rsp0 = 0;

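	/*
	 * Each of the special-purpose IST stacks below carries a
	 * struct nmi_pcpu at its top, so that handlers entered on
	 * these stacks (which cannot rely on %gs pointing at the
	 * kernel's PCPU) can still recover their PCPU area through
	 * the saved np_pcpu pointer.
	 */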
	/* The doublefault stack runs on IST1. */
	np = ((struct nmi_pcpu *)&doublefault_stack[DBLFAULT_STACK_SIZE]) - 1;
	np->np_pcpu = (register_t)pc;
	pc->pc_common_tss.tss_ist1 = (long)np;

	/* The NMI stack runs on IST2. */
	np = ((struct nmi_pcpu *)&nmi_stack[NMI_STACK_SIZE]) - 1;
	np->np_pcpu = (register_t)pc;
	pc->pc_common_tss.tss_ist2 = (long)np;

	/* The MC# stack runs on IST3. */
	np = ((struct nmi_pcpu *)&mce_stack[MCE_STACK_SIZE]) - 1;
	np->np_pcpu = (register_t)pc;
	pc->pc_common_tss.tss_ist3 = (long)np;

	/* The DB# stack runs on IST4. */
	np = ((struct nmi_pcpu *)&dbg_stack[DBG_STACK_SIZE]) - 1;
	np->np_pcpu = (register_t)pc;
	pc->pc_common_tss.tss_ist4 = (long)np;

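	/*
	 * In long mode, system descriptors (TSS, LDT) are 16 bytes
	 * wide and occupy two GDT slots each, which is why GPROC0_SEL
	 * and GUSERLDT_SEL and the slots following them are skipped
	 * in the loop below; the TSS descriptor is then installed
	 * separately with ssdtosyssd().
	 */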
	/* Prepare private GDT */
	gdt_segs[GPROC0_SEL].ssd_base = (long)&pc->pc_common_tss;
	for (x = 0; x < NGDT; x++) {
		if (x != GPROC0_SEL && x != GPROC0_SEL + 1 &&
		    x != GUSERLDT_SEL && x != GUSERLDT_SEL + 1)
			ssdtosd(&gdt_segs[x], &gdt[x]);
	}
	ssdtosyssd(&gdt_segs[GPROC0_SEL],
	    (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);
	ap_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	ap_gdt.rd_base = (u_long)gdt;
	lgdt(&ap_gdt);			/* does magic intra-segment return */

	wrmsr(MSR_FSBASE, 0);		/* User value */
	wrmsr(MSR_GSBASE, (uint64_t)pc);
	wrmsr(MSR_KGSBASE, 0);		/* User value */
	fix_cpuid();

	lidt(&r_idt);

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	ltr(gsel_tss);

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);

	amd64_conf_fast_syscall();

	/* Signal our startup to the BSP. */
	mp_naps++;

	/* Spin until the BSP releases the APs. */
	while (atomic_load_acq_int(&aps_ready) == 0)
		ia32_pause();

	init_secondary_tail();
}

/*******************************************************************
 * local functions and data
 */

#ifdef NUMA
static void
mp_realloc_pcpu(int cpuid, int domain)
{
	vm_page_t m;
	vm_offset_t oa, na;

	oa = (vm_offset_t)&__pcpu[cpuid];
	if (vm_phys_domain(pmap_kextract(oa)) == domain)
		return;
	m = vm_page_alloc_noobj_domain(domain, 0);
	if (m == NULL)
		return;
	na = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	pagecopy((void *)oa, (void *)na);
	pmap_qenter((vm_offset_t)&__pcpu[cpuid], &m, 1);
	/* XXX old pcpu page leaked. */
}
#endif

/*
 * start each AP in our list
 */
int
start_all_aps(void)
{
	vm_page_t m_boottramp, m_pml4, m_pdp, m_pd[4];
	pml5_entry_t old_pml45;
	pml4_entry_t *v_pml4;
	pdp_entry_t *v_pdp;
	pd_entry_t *v_pd;
	vm_paddr_t boot_address;
	u_int32_t mpbioswarmvec;
	int apic_id, cpu, domain, i;
	u_char mpbiosreason;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	MPASS(bootMP_size <= PAGE_SIZE);
	m_boottramp = vm_page_alloc_noobj_contig(0, 1, 0,
	    (1ULL << 20), /* Trampoline should be below 1M for real mode */
	    PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
	boot_address = VM_PAGE_TO_PHYS(m_boottramp);

	/* Create a transient 1:1 mapping of low 4G */
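	/*
	 * The trampoline enables paging while still executing at its
	 * low physical address, so the page tables it starts with
	 * must identity-map the low 4G.  The window of 2M superpage
	 * mappings built here is spliced into slot 0 of the kernel
	 * page table (through an extra PML4 page when LA57 is
	 * active) and is torn down once all APs have started.
	 */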
	if (la57) {
		m_pml4 = pmap_page_alloc_below_4g(true);
		v_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pml4));
	} else {
		v_pml4 = &kernel_pmap->pm_pmltop[0];
	}
	m_pdp = pmap_page_alloc_below_4g(true);
	v_pdp = (pdp_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pdp));
	m_pd[0] = pmap_page_alloc_below_4g(false);
	v_pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd[0]));
	for (i = 0; i < NPDEPG; i++)
		v_pd[i] = (i << PDRSHIFT) | X86_PG_V | X86_PG_RW | X86_PG_A |
		    X86_PG_M | PG_PS;
	m_pd[1] = pmap_page_alloc_below_4g(false);
	v_pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd[1]));
	for (i = 0; i < NPDEPG; i++)
		v_pd[i] = (NBPDP + (i << PDRSHIFT)) | X86_PG_V | X86_PG_RW |
		    X86_PG_A | X86_PG_M | PG_PS;
	m_pd[2] = pmap_page_alloc_below_4g(false);
	v_pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd[2]));
	for (i = 0; i < NPDEPG; i++)
		v_pd[i] = (2UL * NBPDP + (i << PDRSHIFT)) | X86_PG_V |
		    X86_PG_RW | X86_PG_A | X86_PG_M | PG_PS;
	m_pd[3] = pmap_page_alloc_below_4g(false);
	v_pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd[3]));
	for (i = 0; i < NPDEPG; i++)
		v_pd[i] = (3UL * NBPDP + (i << PDRSHIFT)) | X86_PG_V |
		    X86_PG_RW | X86_PG_A | X86_PG_M | PG_PS;
	v_pdp[0] = VM_PAGE_TO_PHYS(m_pd[0]) | X86_PG_V |
	    X86_PG_RW | X86_PG_A | X86_PG_M;
	v_pdp[1] = VM_PAGE_TO_PHYS(m_pd[1]) | X86_PG_V |
	    X86_PG_RW | X86_PG_A | X86_PG_M;
	v_pdp[2] = VM_PAGE_TO_PHYS(m_pd[2]) | X86_PG_V |
	    X86_PG_RW | X86_PG_A | X86_PG_M;
	v_pdp[3] = VM_PAGE_TO_PHYS(m_pd[3]) | X86_PG_V |
	    X86_PG_RW | X86_PG_A | X86_PG_M;
	old_pml45 = kernel_pmap->pm_pmltop[0];
	if (la57) {
		kernel_pmap->pm_pmltop[0] = VM_PAGE_TO_PHYS(m_pml4) |
		    X86_PG_V | X86_PG_RW | X86_PG_A | X86_PG_M;
	}
	v_pml4[0] = VM_PAGE_TO_PHYS(m_pdp) | X86_PG_V |
	    X86_PG_RW | X86_PG_A | X86_PG_M;
	pmap_invalidate_all(kernel_pmap);

	/* copy the AP 1st level boot code */
	bcopy(mptramp_start, (void *)PHYS_TO_DMAP(boot_address), bootMP_size);
	if (bootverbose)
		printf("AP boot address %#lx\n", boot_address);

	/* save the current value of the warm-start vector */
	if (!efi_boot)
		mpbioswarmvec = *((u_int32_t *)WARMBOOT_OFF);
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);

	/* setup a vector to our boot code */
	if (!efi_boot) {
		*((volatile u_short *)WARMBOOT_OFF) = WARMBOOT_TARGET;
		*((volatile u_short *)WARMBOOT_SEG) = (boot_address >> 4);
	}
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

	/* Relocate pcpu areas to the correct domain. */
#ifdef NUMA
	if (vm_ndomains > 1)
		for (cpu = 1; cpu < mp_ncpus; cpu++) {
			apic_id = cpu_apic_ids[cpu];
			domain = acpi_pxm_get_cpu_locality(apic_id);
			mp_realloc_pcpu(cpu, domain);
		}
#endif

	/* start each AP */
	domain = 0;
	for (cpu = 1; cpu < mp_ncpus; cpu++) {
		apic_id = cpu_apic_ids[cpu];
#ifdef NUMA
		if (vm_ndomains > 1)
			domain = acpi_pxm_get_cpu_locality(apic_id);
#endif
		/* allocate and set up an idle stack data page */
		bootstacks[cpu] = kmem_malloc(kstack_pages * PAGE_SIZE,
		    M_WAITOK | M_ZERO);
		doublefault_stack = kmem_malloc(DBLFAULT_STACK_SIZE,
		    M_WAITOK | M_ZERO);
		mce_stack = kmem_malloc(MCE_STACK_SIZE,
		    M_WAITOK | M_ZERO);
		nmi_stack = kmem_malloc_domainset(
		    DOMAINSET_PREF(domain), NMI_STACK_SIZE, M_WAITOK | M_ZERO);
		dbg_stack = kmem_malloc_domainset(
		    DOMAINSET_PREF(domain), DBG_STACK_SIZE, M_WAITOK | M_ZERO);
		dpcpu = kmem_malloc_domainset(DOMAINSET_PREF(domain),
		    DPCPU_SIZE, M_WAITOK | M_ZERO);

		bootpcpu = &__pcpu[cpu];
		bootSTK = (char *)bootstacks[cpu] +
		    kstack_pages * PAGE_SIZE - 8;
		bootAP = cpu;

		/* attempt to start the Application Processor */
		if (!start_ap(apic_id, boot_address)) {
			/* restore the warmstart vector */
			if (!efi_boot)
				*(u_int32_t *)WARMBOOT_OFF = mpbioswarmvec;
			panic("AP #%d (PHY# %d) failed!", cpu, apic_id);
		}

		CPU_SET(cpu, &all_cpus);	/* record AP in CPU map */
	}

	/* restore the warmstart vector */
	if (!efi_boot)
		*(u_int32_t *)WARMBOOT_OFF = mpbioswarmvec;

	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);

	/* Destroy transient 1:1 mapping */
	kernel_pmap->pm_pmltop[0] = old_pml45;
	invlpg(0);
	if (la57)
		vm_page_free(m_pml4);
	vm_page_free(m_pd[3]);
	vm_page_free(m_pd[2]);
	vm_page_free(m_pd[1]);
	vm_page_free(m_pd[0]);
	vm_page_free(m_pdp);
	vm_page_free(m_boottramp);

	/* number of APs actually started */
	return (mp_naps);
}

/*
 * This function starts the AP (application processor) identified by
 * the APIC ID 'apic_id'.  It does quite a "song and dance" (the local
 * APIC INIT/STARTUP IPI sequence) to accomplish this.  This is
 * necessary because of the nuances of the different hardware we might
 * encounter.  It isn't pretty, but it seems to work.
 */
static int
start_ap(int apic_id, vm_paddr_t boot_address)
{
	int vector, ms;
	int cpus;

	/* calculate the vector */
	vector = (boot_address >> 12) & 0xff;

	/* used as a watchpoint to signal AP startup */
	cpus = mp_naps;

	ipi_startup(apic_id, vector);

	/* Wait up to 5 seconds for it to start. */
	for (ms = 0; ms < 5000; ms++) {
		if (mp_naps > cpus)
			return (1);	/* return SUCCESS */
		DELAY(1000);
	}
	return (0);		/* return FAILURE */
}


/*
 * Flush the TLB on other CPUs.
 */

/*
 * Invalidation request.  PCPU pc_smp_tlb_op uses u_int instead of the
 * enum to avoid both namespace and ABI issues with enums.
 */
enum invl_op_codes {
	INVL_OP_TLB		= 1,
	INVL_OP_TLB_INVPCID	= 2,
	INVL_OP_TLB_INVPCID_PTI	= 3,
	INVL_OP_TLB_PCID	= 4,
	INVL_OP_PGRNG		= 5,
	INVL_OP_PGRNG_INVPCID	= 6,
	INVL_OP_PGRNG_PCID	= 7,
	INVL_OP_PG		= 8,
	INVL_OP_PG_INVPCID	= 9,
	INVL_OP_PG_PCID		= 10,
	INVL_OP_CACHE		= 11,
};

/*
 * These variables are initialized at startup to reflect how each of
 * the different kinds of invalidations should be performed on the
 * current machine and environment.
 */
static enum invl_op_codes invl_op_tlb;
static enum invl_op_codes invl_op_pgrng;
static enum invl_op_codes invl_op_pg;

/*
 * Scoreboard of IPI completion notifications from target to IPI initiator.
 *
 * Each CPU can initiate a shootdown IPI independently of other CPUs.
 * The initiator enters a critical section, fills its local PCPU
 * shootdown info (the pc_smp_tlb_ variables), and then clears the
 * scoreboard generation at location (cpu, my_cpuid) for each target
 * cpu.  After that, an IPI is sent to all targets, which scan for
 * zeroed scoreboard generation words.  Upon finding such a word, a
 * target reads the shootdown data from the corresponding CPU's pcpu
 * area and sets the generation.  Meanwhile, the initiator loops,
 * waiting for all zeroed generations in the scoreboard to be updated.
 */
static uint32_t *invl_scoreboard;

static void
invl_scoreboard_init(void *arg __unused)
{
	u_int i;

	invl_scoreboard = malloc(sizeof(uint32_t) * (mp_maxid + 1) *
	    (mp_maxid + 1), M_DEVBUF, M_WAITOK);
	for (i = 0; i < (mp_maxid + 1) * (mp_maxid + 1); i++)
		invl_scoreboard[i] = 1;

	if (pmap_pcid_enabled) {
		if (invpcid_works) {
			if (pti)
				invl_op_tlb = INVL_OP_TLB_INVPCID_PTI;
			else
				invl_op_tlb = INVL_OP_TLB_INVPCID;
			invl_op_pgrng = INVL_OP_PGRNG_INVPCID;
			invl_op_pg = INVL_OP_PG_INVPCID;
		} else {
			invl_op_tlb = INVL_OP_TLB_PCID;
			invl_op_pgrng = INVL_OP_PGRNG_PCID;
			invl_op_pg = INVL_OP_PG_PCID;
		}
	} else {
		invl_op_tlb = INVL_OP_TLB;
		invl_op_pgrng = INVL_OP_PGRNG;
		invl_op_pg = INVL_OP_PG;
	}
}
SYSINIT(invl_ops, SI_SUB_SMP - 1, SI_ORDER_ANY, invl_scoreboard_init, NULL);

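/*
 * The scoreboard is an (mp_maxid + 1) x (mp_maxid + 1) matrix of
 * generation words: the row index is the target CPU and the column
 * index is the initiator.  invl_scoreboard_getcpu() returns a
 * target's row; invl_scoreboard_slot() returns the slot within that
 * row for the calling CPU acting as initiator.
 */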
static uint32_t *
invl_scoreboard_getcpu(u_int cpu)
{
	return (invl_scoreboard + cpu * (mp_maxid + 1));
}

static uint32_t *
invl_scoreboard_slot(u_int cpu)
{
	return (invl_scoreboard_getcpu(cpu) + PCPU_GET(cpuid));
}

/*
 * Used by the pmap to request cache or TLB invalidation on local and
 * remote processors.  Mask provides the set of remote CPUs that are
 * to be signalled with the invalidation IPI.  As an optimization, the
 * curcpu_cb callback is invoked on the calling CPU in a critical
 * section while waiting for the remote CPUs to complete the operation.
 *
 * The callback function is called unconditionally on the caller's
 * underlying processor, even when this processor is not set in the
 * mask.  So, the callback function must be prepared to handle such
 * spurious invocations.
 *
 * Interrupts must be enabled when calling the function with smp
 * started, to avoid deadlock with other IPIs that are protected with
 * smp_ipi_mtx spinlock at the initiator side.
 *
 * Function must be called with the thread pinned, and it unpins on
 * completion.
 */
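/*
 * A typical pmap-side call looks like the following sketch (the
 * callback name follows the pmap_invalidate_*_curcpu_cb() convention
 * used in pmap.c):
 *
 *	sched_pin();
 *	smp_masked_invlpg(va, pmap, pmap_invalidate_page_curcpu_cb);
 *
 * entered with interrupts enabled; the callback runs locally inside
 * the critical section and the thread is unpinned before return.
 */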
static void
smp_targeted_tlb_shootdown(pmap_t pmap, vm_offset_t addr1, vm_offset_t addr2,
    smp_invl_cb_t curcpu_cb, enum invl_op_codes op)
{
	cpuset_t mask;
	uint32_t generation, *p_cpudone;
	int cpu;
	bool is_all;

	/*
	 * It is not necessary to signal other CPUs while booting or
	 * when in the debugger.
	 */
	if (__predict_false(kdb_active || KERNEL_PANICKED() || !smp_started))
		goto local_cb;

	KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));

	/*
	 * Make a stable copy of the set of CPUs on which the pmap is active.
	 * See if we have to interrupt other CPUs.
	 */
	CPU_COPY(pmap_invalidate_cpu_mask(pmap), &mask);
	is_all = CPU_CMP(&mask, &all_cpus) == 0;
	CPU_CLR(curcpu, &mask);
	if (CPU_EMPTY(&mask))
		goto local_cb;

	/*
	 * The initiator must have interrupts enabled, which prevents
	 * non-invalidation IPIs that take the smp_ipi_mtx spinlock
	 * from deadlocking with us.  On the other hand, preemption
	 * must be disabled to pin the initiator to its instance of
	 * the pcpu pc_smp_tlb data and scoreboard line.
	 */
	KASSERT((read_rflags() & PSL_I) != 0,
	    ("smp_targeted_tlb_shootdown: interrupts disabled"));
	critical_enter();

	PCPU_SET(smp_tlb_addr1, addr1);
	PCPU_SET(smp_tlb_addr2, addr2);
	PCPU_SET(smp_tlb_pmap, pmap);
	generation = PCPU_GET(smp_tlb_gen);
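	/*
	 * A zeroed scoreboard slot means "request pending", so
	 * generation 0 is reserved and skipped on wraparound.
	 */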
	if (++generation == 0)
		generation = 1;
	PCPU_SET(smp_tlb_gen, generation);
	PCPU_SET(smp_tlb_op, op);
	/* Fence between filling smp_tlb fields and clearing scoreboard. */
	atomic_thread_fence_rel();

	CPU_FOREACH_ISSET(cpu, &mask) {
		KASSERT(*invl_scoreboard_slot(cpu) != 0,
		    ("IPI scoreboard is zero, initiator %d target %d",
		    curcpu, cpu));
		*invl_scoreboard_slot(cpu) = 0;
	}

	/*
	 * IPI acts as a fence between writing to the scoreboard above
	 * (zeroing slot) and reading from it below (wait for
	 * acknowledgment).
	 */
	if (is_all) {
		ipi_all_but_self(IPI_INVLOP);
	} else {
		ipi_selected(mask, IPI_INVLOP);
	}
	curcpu_cb(pmap, addr1, addr2);
	CPU_FOREACH_ISSET(cpu, &mask) {
		p_cpudone = invl_scoreboard_slot(cpu);
		while (atomic_load_int(p_cpudone) != generation)
			ia32_pause();
	}

	/*
	 * Unpin before leaving critical section.  If the thread owes
	 * preemption, this allows scheduler to select thread on any
	 * CPU from its cpuset.
	 */
	sched_unpin();
	critical_exit();

	return;

local_cb:
	critical_enter();
	curcpu_cb(pmap, addr1, addr2);
	sched_unpin();
	critical_exit();
}

void
smp_masked_invltlb(pmap_t pmap, smp_invl_cb_t curcpu_cb)
{
	smp_targeted_tlb_shootdown(pmap, 0, 0, curcpu_cb, invl_op_tlb);
#ifdef COUNT_XINVLTLB_HITS
	ipi_global++;
#endif
}

void
smp_masked_invlpg(vm_offset_t addr, pmap_t pmap, smp_invl_cb_t curcpu_cb)
{
	smp_targeted_tlb_shootdown(pmap, addr, 0, curcpu_cb, invl_op_pg);
#ifdef COUNT_XINVLTLB_HITS
	ipi_page++;
#endif
}

void
smp_masked_invlpg_range(vm_offset_t addr1, vm_offset_t addr2, pmap_t pmap,
    smp_invl_cb_t curcpu_cb)
{
	smp_targeted_tlb_shootdown(pmap, addr1, addr2, curcpu_cb,
	    invl_op_pgrng);
#ifdef COUNT_XINVLTLB_HITS
	ipi_range++;
	ipi_range_size += (addr2 - addr1) / PAGE_SIZE;
#endif
}

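/*
 * The kernel pmap is active on every CPU, so passing it here targets
 * all of them with the cache flush.
 */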
void
smp_cache_flush(smp_invl_cb_t curcpu_cb)
{
	smp_targeted_tlb_shootdown(kernel_pmap, 0, 0, curcpu_cb, INVL_OP_CACHE);
}

/*
 * Handlers for TLB related IPIs
 */
static void
invltlb_handler(pmap_t smp_tlb_pmap)
{
#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	if (smp_tlb_pmap == kernel_pmap)
		invltlb_glob();
	else
		invltlb();
}

static void
invltlb_invpcid_handler(pmap_t smp_tlb_pmap)
{
	struct invpcid_descr d;

#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	d.pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
	d.pad = 0;
	d.addr = 0;
	invpcid(&d, smp_tlb_pmap == kernel_pmap ? INVPCID_CTXGLOB :
	    INVPCID_CTX);
}

static void
invltlb_invpcid_pti_handler(pmap_t smp_tlb_pmap)
{
	struct invpcid_descr d;

#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	d.pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
	d.pad = 0;
	d.addr = 0;
	if (smp_tlb_pmap == kernel_pmap) {
		/*
		 * This invalidation actually needs to clear kernel
		 * mappings from the TLB in the current pmap, but
		 * since we were asked for the flush in the kernel
		 * pmap, achieve it by performing global flush.
		 */
		invpcid(&d, INVPCID_CTXGLOB);
	} else {
		invpcid(&d, INVPCID_CTX);
		if (smp_tlb_pmap == PCPU_GET(curpmap) &&
		    smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3)
			PCPU_SET(ucr3_load_mask, ~CR3_PCID_SAVE);
	}
}

static void
invltlb_pcid_handler(pmap_t smp_tlb_pmap)
{
	uint32_t pcid;

#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	if (smp_tlb_pmap == kernel_pmap) {
		invltlb_glob();
	} else {
		/*
		 * The current pmap might not be equal to
		 * smp_tlb_pmap.  The clearing of the pm_gen in
		 * pmap_invalidate_all() takes care of TLB
		 * invalidation when switching to the pmap on this
		 * CPU.
		 */
		if (smp_tlb_pmap == PCPU_GET(curpmap)) {
			pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
			load_cr3(smp_tlb_pmap->pm_cr3 | pcid);
			if (smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3)
				PCPU_SET(ucr3_load_mask, ~CR3_PCID_SAVE);
		}
	}
}

static void
invlpg_handler(vm_offset_t smp_tlb_addr1)
{
#ifdef COUNT_XINVLTLB_HITS
	xhits_pg[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	invlpg(smp_tlb_addr1);
}

static void
invlpg_invpcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1)
{
	struct invpcid_descr d;

#ifdef COUNT_XINVLTLB_HITS
	xhits_pg[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	pmap_invlpg(smp_tlb_pmap, smp_tlb_addr1);
	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
	    smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3 &&
	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
		d.pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid |
		    PMAP_PCID_USER_PT;
		d.pad = 0;
		d.addr = smp_tlb_addr1;
		invpcid(&d, INVPCID_ADDR);
	}
}

static void
invlpg_pcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1)
{
	uint64_t kcr3, ucr3;
	uint32_t pcid;

#ifdef COUNT_XINVLTLB_HITS
	xhits_pg[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	invlpg(smp_tlb_addr1);
	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
	    (ucr3 = smp_tlb_pmap->pm_ucr3) != PMAP_NO_CR3 &&
	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
		pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
		kcr3 = smp_tlb_pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
		ucr3 |= pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
		pmap_pti_pcid_invlpg(ucr3, kcr3, smp_tlb_addr1);
	}
}

static void
invlrng_handler(vm_offset_t smp_tlb_addr1, vm_offset_t smp_tlb_addr2)
{
	vm_offset_t addr;

#ifdef COUNT_XINVLTLB_HITS
	xhits_rng[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	addr = smp_tlb_addr1;
	do {
		invlpg(addr);
		addr += PAGE_SIZE;
	} while (addr < smp_tlb_addr2);
}

static void
invlrng_invpcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1,
    vm_offset_t smp_tlb_addr2)
{
	struct invpcid_descr d;
	vm_offset_t addr;

#ifdef COUNT_XINVLTLB_HITS
	xhits_rng[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	addr = smp_tlb_addr1;
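	/*
	 * On CPUs with the pc_pcid_invlpg_workaround flag set, INVLPG
	 * is not reliable while PCID is enabled (a CPU erratum), so
	 * kernel ranges are flushed with a full global invalidation
	 * instead.
	 */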
	if (smp_tlb_pmap == kernel_pmap && PCPU_GET(pcid_invlpg_workaround)) {
		struct invpcid_descr d = { 0 };

		invpcid(&d, INVPCID_CTXGLOB);
	} else {
		do {
			invlpg(addr);
			addr += PAGE_SIZE;
		} while (addr < smp_tlb_addr2);
	}
	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
	    smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3 &&
	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
		d.pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid |
		    PMAP_PCID_USER_PT;
		d.pad = 0;
		d.addr = smp_tlb_addr1;
		do {
			invpcid(&d, INVPCID_ADDR);
			d.addr += PAGE_SIZE;
		} while (d.addr < smp_tlb_addr2);
	}
}

static void
invlrng_pcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1,
    vm_offset_t smp_tlb_addr2)
{
	vm_offset_t addr;
	uint64_t kcr3, ucr3;
	uint32_t pcid;

#ifdef COUNT_XINVLTLB_HITS
	xhits_rng[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	addr = smp_tlb_addr1;
	do {
		invlpg(addr);
		addr += PAGE_SIZE;
	} while (addr < smp_tlb_addr2);
	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
	    (ucr3 = smp_tlb_pmap->pm_ucr3) != PMAP_NO_CR3 &&
	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
		pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
		kcr3 = smp_tlb_pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
		ucr3 |= pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
		pmap_pti_pcid_invlrng(ucr3, kcr3, smp_tlb_addr1, smp_tlb_addr2);
	}
}

static void
invlcache_handler(void)
{
#ifdef COUNT_IPIS
	(*ipi_invlcache_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */
	wbinvd();
}

static void
invlop_handler_one_req(enum invl_op_codes smp_tlb_op, pmap_t smp_tlb_pmap,
    vm_offset_t smp_tlb_addr1, vm_offset_t smp_tlb_addr2)
{
	switch (smp_tlb_op) {
	case INVL_OP_TLB:
		invltlb_handler(smp_tlb_pmap);
		break;
	case INVL_OP_TLB_INVPCID:
		invltlb_invpcid_handler(smp_tlb_pmap);
		break;
	case INVL_OP_TLB_INVPCID_PTI:
		invltlb_invpcid_pti_handler(smp_tlb_pmap);
		break;
	case INVL_OP_TLB_PCID:
		invltlb_pcid_handler(smp_tlb_pmap);
		break;
	case INVL_OP_PGRNG:
		invlrng_handler(smp_tlb_addr1, smp_tlb_addr2);
		break;
	case INVL_OP_PGRNG_INVPCID:
		invlrng_invpcid_handler(smp_tlb_pmap, smp_tlb_addr1,
		    smp_tlb_addr2);
		break;
	case INVL_OP_PGRNG_PCID:
		invlrng_pcid_handler(smp_tlb_pmap, smp_tlb_addr1,
		    smp_tlb_addr2);
		break;
	case INVL_OP_PG:
		invlpg_handler(smp_tlb_addr1);
		break;
	case INVL_OP_PG_INVPCID:
		invlpg_invpcid_handler(smp_tlb_pmap, smp_tlb_addr1);
		break;
	case INVL_OP_PG_PCID:
		invlpg_pcid_handler(smp_tlb_pmap, smp_tlb_addr1);
		break;
	case INVL_OP_CACHE:
		invlcache_handler();
		break;
	default:
		__assert_unreachable();
		break;
	}
}

void
invlop_handler(void)
{
	struct pcpu *initiator_pc;
	pmap_t smp_tlb_pmap;
	vm_offset_t smp_tlb_addr1, smp_tlb_addr2;
	u_int initiator_cpu_id;
	enum invl_op_codes smp_tlb_op;
	uint32_t *scoreboard, smp_tlb_gen;

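	/*
	 * Scan this CPU's scoreboard row for zeroed slots (one per
	 * possible initiator) and service each pending request;
	 * repeat until a full pass finds no outstanding work.
	 */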
	scoreboard = invl_scoreboard_getcpu(PCPU_GET(cpuid));
	for (;;) {
		for (initiator_cpu_id = 0; initiator_cpu_id <= mp_maxid;
		    initiator_cpu_id++) {
			if (atomic_load_int(&scoreboard[initiator_cpu_id]) == 0)
				break;
		}
		if (initiator_cpu_id > mp_maxid)
			break;
		initiator_pc = cpuid_to_pcpu[initiator_cpu_id];

		/*
		 * This acquire fence and its corresponding release
		 * fence in smp_targeted_tlb_shootdown() order the
		 * read of the zeroed scoreboard slot before the
		 * reads of the initiator's PCPU pc_smp_tlb values.
		 */
		atomic_thread_fence_acq();
		smp_tlb_pmap = initiator_pc->pc_smp_tlb_pmap;
		smp_tlb_addr1 = initiator_pc->pc_smp_tlb_addr1;
		smp_tlb_addr2 = initiator_pc->pc_smp_tlb_addr2;
		smp_tlb_op = initiator_pc->pc_smp_tlb_op;
		smp_tlb_gen = initiator_pc->pc_smp_tlb_gen;

		/*
		 * Ensure that we do not make our scoreboard
		 * notification visible to the initiator until the
		 * pc_smp_tlb values are read.  The corresponding
		 * fence is implicitly provided by the barrier in the
		 * IPI send operation before the APIC ICR register
		 * write.
		 *
		 * As an optimization, the request is acknowledged
		 * before the actual invalidation is performed.  This
		 * is safe because the target CPU cannot return to
		 * userspace before the handler finishes.  Only an NMI
		 * can preempt the handler, and the NMI would see the
		 * kernel handler frame and so would not touch the
		 * not-yet-invalidated user page table.
		 */
		atomic_thread_fence_acq();
		atomic_store_int(&scoreboard[initiator_cpu_id], smp_tlb_gen);

		invlop_handler_one_req(smp_tlb_op, smp_tlb_pmap, smp_tlb_addr1,
		    smp_tlb_addr2);
	}
}