/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the pagetables themselves.  This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 * from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
 * from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
 * $FreeBSD$
 */

#ifdef __i386__
#include <i386/pmap.h>
#else /* !__i386__ */

#ifndef _MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

/*
 * Page-directory and page-table entries follow this format, with a few
 * of the fields not present here and there, depending on a lot of things.
 */
				/* ---- Intel Nomenclature ---- */
#define	X86_PG_V	0x001	/* P	Valid			*/
#define	X86_PG_RW	0x002	/* R/W	Read/Write		*/
#define	X86_PG_U	0x004	/* U/S	User/Supervisor		*/
#define	X86_PG_NC_PWT	0x008	/* PWT	Write through		*/
#define	X86_PG_NC_PCD	0x010	/* PCD	Cache disable		*/
#define	X86_PG_A	0x020	/* A	Accessed		*/
#define	X86_PG_M	0x040	/* D	Dirty			*/
#define	X86_PG_PS	0x080	/* PS	Page size (0=4k,1=2M)	*/
#define	X86_PG_PTE_PAT	0x080	/* PAT	PAT index		*/
#define	X86_PG_G	0x100	/* G	Global			*/
#define	X86_PG_AVAIL1	0x200	/*    /	Available for system	*/
#define	X86_PG_AVAIL2	0x400	/*   <	programmers use		*/
#define	X86_PG_AVAIL3	0x800	/*    \				*/
#define	X86_PG_PDE_PAT	0x1000	/* PAT	PAT index		*/
#define	X86_PG_PKU(idx)	((pt_entry_t)idx << 59)
#define	X86_PG_NX	(1ul<<63)	/* No-execute		*/
#define	X86_PG_AVAIL(x)	(1ul << (x))

/* Page level cache control fields used to determine the PAT type */
#define	X86_PG_PDE_CACHE (X86_PG_PDE_PAT | X86_PG_NC_PWT | X86_PG_NC_PCD)
#define	X86_PG_PTE_CACHE (X86_PG_PTE_PAT | X86_PG_NC_PWT | X86_PG_NC_PCD)

/* Protection keys indexes */
#define	PMAP_MAX_PKRU_IDX	0xf
#define	X86_PG_PKU_MASK		X86_PG_PKU(PMAP_MAX_PKRU_IDX)

/*
 * Intel extended page table (EPT) bit definitions.
 */
#define	EPT_PG_READ		0x001	/* R	Read		*/
#define	EPT_PG_WRITE		0x002	/* W	Write		*/
#define	EPT_PG_EXECUTE		0x004	/* X	Execute		*/
#define	EPT_PG_IGNORE_PAT	0x040	/* IPAT	Ignore PAT	*/
#define	EPT_PG_PS		0x080	/* PS	Page size	*/
#define	EPT_PG_A		0x100	/* A	Accessed	*/
#define	EPT_PG_M		0x200	/* D	Dirty		*/
#define	EPT_PG_MEMORY_TYPE(x)	((x) << 3) /* MT Memory Type */

/*
 * Define the PG_xx macros in terms of the bits on x86 PTEs.
 */
#define	PG_V		X86_PG_V
#define	PG_RW		X86_PG_RW
#define	PG_U		X86_PG_U
#define	PG_NC_PWT	X86_PG_NC_PWT
#define	PG_NC_PCD	X86_PG_NC_PCD
#define	PG_A		X86_PG_A
#define	PG_M		X86_PG_M
#define	PG_PS		X86_PG_PS
#define	PG_PTE_PAT	X86_PG_PTE_PAT
#define	PG_G		X86_PG_G
#define	PG_AVAIL1	X86_PG_AVAIL1
#define	PG_AVAIL2	X86_PG_AVAIL2
#define	PG_AVAIL3	X86_PG_AVAIL3
#define	PG_PDE_PAT	X86_PG_PDE_PAT
#define	PG_NX		X86_PG_NX
#define	PG_PDE_CACHE	X86_PG_PDE_CACHE
#define	PG_PTE_CACHE	X86_PG_PTE_CACHE

/* Our various interpretations of the above */
#define	PG_W		X86_PG_AVAIL3	/* "Wired" pseudoflag */
#define	PG_MANAGED	X86_PG_AVAIL2
#define	EPT_PG_EMUL_V	X86_PG_AVAIL(52)
#define	EPT_PG_EMUL_RW	X86_PG_AVAIL(53)
#define	PG_PROMOTED	X86_PG_AVAIL(54)	/* PDE only */
#define	PG_FRAME	(0x000ffffffffff000ul)
#define	PG_PS_FRAME	(0x000fffffffe00000ul)
#define	PG_PS_PDP_FRAME	(0x000fffffc0000000ul)
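
/*
 * For example, the physical frame of a 4KB mapping is extracted with
 * "pa = pte & PG_FRAME;".  For a 2MB superpage PDE (PG_PS set) the
 * corresponding mask is PG_PS_FRAME, and PG_PS_PDP_FRAME covers a 1GB PDPE.
 */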

/*
 * Promotion to a 2MB (PDE) page mapping requires that the corresponding 4KB
 * (PTE) page mappings have identical settings for the following fields:
 */
#define	PG_PTE_PROMOTE	(PG_NX | PG_MANAGED | PG_W | PG_G | PG_PTE_CACHE | \
	    PG_M | PG_A | PG_U | PG_RW | PG_V | PG_PKU_MASK)

/*
 * Page Protection Exception bits
 */

#define	PGEX_P		0x01	/* Protection violation vs. not present */
#define	PGEX_W		0x02	/* during a Write cycle */
#define	PGEX_U		0x04	/* access from User mode (UPL) */
#define	PGEX_RSV	0x08	/* reserved PTE field is non-zero */
#define	PGEX_I		0x10	/* during an instruction fetch */
#define	PGEX_PK		0x20	/* protection key violation */
#define	PGEX_SGX	0x8000	/* SGX-related */

/*
 * undef the PG_xx macros that define bits in the regular x86 PTEs that
 * have a different position in nested PTEs.  This is done when compiling
 * code that needs to be aware of the differences between regular x86 and
 * nested PTEs.
 *
 * The appropriate bitmask will be calculated at runtime based on the pmap
 * type.
 */
#ifdef AMD64_NPT_AWARE
#undef PG_AVAIL1		/* X86_PG_AVAIL1 aliases with EPT_PG_M */
#undef PG_G
#undef PG_A
#undef PG_M
#undef PG_PDE_PAT
#undef PG_PDE_CACHE
#undef PG_PTE_PAT
#undef PG_PTE_CACHE
#undef PG_RW
#undef PG_V
#endif

/*
 * Pte related macros.  This is complicated by having to deal with
 * the sign extension of the 48th bit.
 */
#define	KV4ADDR(l4, l3, l2, l1) ( \
	((unsigned long)-1 << 47) | \
	((unsigned long)(l4) << PML4SHIFT) | \
	((unsigned long)(l3) << PDPSHIFT) | \
	((unsigned long)(l2) << PDRSHIFT) | \
	((unsigned long)(l1) << PAGE_SHIFT))
#define	KV5ADDR(l5, l4, l3, l2, l1) ( \
	((unsigned long)-1 << 56) | \
	((unsigned long)(l5) << PML5SHIFT) | \
	((unsigned long)(l4) << PML4SHIFT) | \
	((unsigned long)(l3) << PDPSHIFT) | \
	((unsigned long)(l2) << PDRSHIFT) | \
	((unsigned long)(l1) << PAGE_SHIFT))

#define	UVADDR(l5, l4, l3, l2, l1) ( \
	((unsigned long)(l5) << PML5SHIFT) | \
	((unsigned long)(l4) << PML4SHIFT) | \
	((unsigned long)(l3) << PDPSHIFT) | \
	((unsigned long)(l2) << PDRSHIFT) | \
	((unsigned long)(l1) << PAGE_SHIFT))

/*
 * Number of kernel PML4 slots.  Can be anywhere from 1 to 64 or so,
 * but setting it larger than NDMPML4E makes no sense.
 *
 * Each slot provides .5 TB of kernel virtual space.
 */
#define	NKPML4E		4

/*
 * Number of PML4 slots for the KASAN shadow map.  It requires 1 byte of memory
 * for every 8 bytes of the kernel address space.
 */
#define	NKASANPML4E	((NKPML4E + 7) / 8)

/*
 * Number of PML4 slots for the KMSAN shadow and origin maps.  These are
 * one-to-one with the kernel map.
 */
#define	NKMSANSHADPML4E	NKPML4E
#define	NKMSANORIGPML4E	NKPML4E

/*
 * We use the same numbering of the page table pages for 5-level and
 * 4-level paging structures.
 */
#define	NUPML5E		(NPML5EPG / 2)		/* number of userland PML5 pages */
#define	NUPML4E		(NUPML5E * NPML4EPG)	/* number of userland PML4 pages */
#define	NUPDPE		(NUPML4E * NPDPEPG)	/* number of userland PDP pages */
#define	NUPDE		(NUPDPE * NPDEPG)	/* number of userland PD entries */
#define	NUP4ML4E	(NPML4EPG / 2)

/*
 * NDMPML4E is the maximum number of PML4 entries that will be
 * used to implement the direct map.  It must be a power of two,
 * and should generally exceed NKPML4E.  The maximum possible
 * value is 64; using 128 will make the direct map intrude into
 * the recursive page table map.
 */
#define	NDMPML4E	8

/*
 * These values control the layout of virtual memory.  The starting address
 * of the direct map, which is controlled by DMPML4I, must be a multiple of
 * its size.  (See the PHYS_TO_DMAP() and DMAP_TO_PHYS() macros.)
 *
 * Note: KPML4I is the index of the (single) level 4 page that maps
 * the KVA that holds KERNBASE, while KPML4BASE is the index of the
 * first level 4 page that maps VM_MIN_KERNEL_ADDRESS.  If NKPML4E
 * is 1, these are the same, otherwise KPML4BASE < KPML4I and extra
 * level 4 PDEs are needed to map from VM_MIN_KERNEL_ADDRESS up to
 * KERNBASE.
 *
 * (KPML4I combines with KPDPI to choose where KERNBASE starts.
 * Or, in other words, KPML4I provides bits 39..47 of KERNBASE,
 * and KPDPI provides bits 30..38.)
 */
#define	PML4PML4I	(NPML4EPG / 2)	/* Index of recursive pml4 mapping */
#define	PML5PML5I	(NPML5EPG / 2)	/* Index of recursive pml5 mapping */

#define	KPML4BASE	(NPML4EPG-NKPML4E) /* KVM at highest addresses */
#define	DMPML4I		rounddown(KPML4BASE-NDMPML4E, NDMPML4E) /* Below KVM */
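
/*
 * For example, vmparam.h derives DMAP_MIN_ADDRESS from DMPML4I (as
 * KV4ADDR(DMPML4I, 0, 0, 0)), and PHYS_TO_DMAP()/DMAP_TO_PHYS() simply
 * offset a physical address into and out of that window.
 */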

#define	KPML4I		(NPML4EPG-1)
#define	KPDPI		(NPDPEPG-2)	/* kernbase at -2GB */
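
/*
 * For example, vmparam.h builds KERNBASE as KV4ADDR(KPML4I, KPDPI, 0, 0),
 * which sign-extends to 0xffffffff80000000, i.e. the -2GB mark noted above.
 */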

#define	KASANPML4I	(DMPML4I - NKASANPML4E) /* Below the direct map */

#define	KMSANSHADPML4I	(KPML4BASE - NKMSANSHADPML4E)
#define	KMSANORIGPML4I	(DMPML4I - NKMSANORIGPML4E)

/* Large map: index of the first and max last pml4 entry */
#define	LMSPML4I	(PML4PML4I + 1)
#define	LMEPML4I	(KASANPML4I - 1)

/*
 * XXX doesn't really belong here I guess...
 */
#define	ISA_HOLE_START	  0xa0000
#define	ISA_HOLE_LENGTH	  (0x100000-ISA_HOLE_START)

#define	PMAP_PCID_NONE		0xffffffff
#define	PMAP_PCID_KERN		0
#define	PMAP_PCID_OVERMAX	0x1000
#define	PMAP_PCID_OVERMAX_KERN	0x800
#define	PMAP_PCID_USER_PT	0x800

#define	PMAP_NO_CR3		0xffffffffffffffff
#define	PMAP_UCR3_NOMASK	0xffffffffffffffff

#ifndef LOCORE

#include <sys/queue.h>
#include <sys/_cpuset.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>
#include <sys/_pctrie.h>
#include <sys/_pv_entry.h>
#include <sys/_rangeset.h>
#include <sys/_smr.h>

#include <vm/_vm_radix.h>

typedef u_int64_t pd_entry_t;
typedef u_int64_t pt_entry_t;
typedef u_int64_t pdp_entry_t;
typedef u_int64_t pml4_entry_t;
typedef u_int64_t pml5_entry_t;

/*
 * Address of current address space page table maps and directories.
 */
#ifdef _KERNEL
#define	addr_P4Tmap	(KV4ADDR(PML4PML4I, 0, 0, 0))
#define	addr_P4Dmap	(KV4ADDR(PML4PML4I, PML4PML4I, 0, 0))
#define	addr_P4DPmap	(KV4ADDR(PML4PML4I, PML4PML4I, PML4PML4I, 0))
#define	addr_P4ML4map	(KV4ADDR(PML4PML4I, PML4PML4I, PML4PML4I, PML4PML4I))
#define	addr_P4ML4pml4e	(addr_P4ML4map + (PML4PML4I * sizeof(pml4_entry_t)))
#define	P4Tmap		((pt_entry_t *)(addr_P4Tmap))
#define	P4Dmap		((pd_entry_t *)(addr_P4Dmap))

#define	addr_P5Tmap	(KV5ADDR(PML5PML5I, 0, 0, 0, 0))
#define	addr_P5Dmap	(KV5ADDR(PML5PML5I, PML5PML5I, 0, 0, 0))
#define	addr_P5DPmap	(KV5ADDR(PML5PML5I, PML5PML5I, PML5PML5I, 0, 0))
#define	addr_P5ML4map	(KV5ADDR(PML5PML5I, PML5PML5I, PML5PML5I, PML5PML5I, 0))
#define	addr_P5ML5map \
	(KV5ADDR(PML5PML5I, PML5PML5I, PML5PML5I, PML5PML5I, PML5PML5I))
#define	addr_P5ML5pml5e	(addr_P5ML5map + (PML5PML5I * sizeof(pml5_entry_t)))
#define	P5Tmap		((pt_entry_t *)(addr_P5Tmap))
#define	P5Dmap		((pd_entry_t *)(addr_P5Dmap))

extern int nkpt;		/* Initial number of kernel page tables */
extern u_int64_t KPML4phys;	/* physical address of kernel level 4 */
extern u_int64_t KPML5phys;	/* physical address of kernel level 5 */

/*
 * virtual address to page table entry and
 * to physical address.
 * Note: these work recursively, thus vtopte of a pte will give
 * the corresponding pde that in turn maps it.
 */
pt_entry_t *vtopte(vm_offset_t);
#define	vtophys(va)	pmap_kextract(((vm_offset_t) (va)))
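
/*
 * Illustrative sketch only (the authoritative implementation lives in pmap.c
 * and also handles 5-level paging): with the recursive slot, the PTE mapping
 * "va" sits at a fixed virtual address inside the P4Tmap window, roughly
 *
 *	P4Tmap + ((va >> PAGE_SHIFT) &
 *	    ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT + NPDPEPGSHIFT +
 *	    NPML4EPGSHIFT)) - 1))
 */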

#define	pte_load_store(ptep, pte)	atomic_swap_long(ptep, pte)
#define	pte_load_clear(ptep)		atomic_swap_long(ptep, 0)
#define	pte_store(ptep, pte) do { \
	*(u_long *)(ptep) = (u_long)(pte); \
} while (0)
#define	pte_clear(ptep)			pte_store(ptep, 0)

#define	pde_store(pdep, pde)		pte_store(pdep, pde)

extern pt_entry_t pg_nx;

#endif /* _KERNEL */

/*
 * Pmap stuff
 */

/*
 * Locks
 * (p) PV list lock
 */
struct md_page {
	TAILQ_HEAD(, pv_entry)	pv_list;  /* (p) */
	int			pv_gen;   /* (p) */
	int			pat_mode;
};

enum pmap_type {
	PT_X86,			/* regular x86 page tables */
	PT_EPT,			/* Intel's nested page tables */
	PT_RVI,			/* AMD's nested page tables */
};

struct pmap_pcids {
	uint32_t	pm_pcid;
	uint32_t	pm_gen;
};

/*
 * The kernel virtual address (KVA) of the level 4 page table page is always
 * within the direct map (DMAP) region.
 */
struct pmap {
	struct mtx		pm_mtx;
	pml4_entry_t		*pm_pmltop;	/* KVA of top level page table */
	pml4_entry_t		*pm_pmltopu;	/* KVA of user top page table */
	uint64_t		pm_cr3;
	uint64_t		pm_ucr3;
	TAILQ_HEAD(,pv_chunk)	pm_pvchunk;	/* list of mappings in pmap */
	cpuset_t		pm_active;	/* active on cpus */
	enum pmap_type		pm_type;	/* regular or nested tables */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	struct vm_radix		pm_root;	/* spare page table pages */
	long			pm_eptgen;	/* EPT pmap generation id */
	smr_t			pm_eptsmr;
	int			pm_flags;
	struct pmap_pcids	pm_pcids[MAXCPU];
	struct rangeset		pm_pkru;
};

/* flags */
#define	PMAP_NESTED_IPIMASK	0xff
#define	PMAP_PDE_SUPERPAGE	(1 << 8)	/* supports 2MB superpages */
#define	PMAP_EMULATE_AD_BITS	(1 << 9)	/* needs A/D bits emulation */
#define	PMAP_SUPPORTS_EXEC_ONLY	(1 << 10)	/* execute only mappings ok */

typedef struct pmap	*pmap_t;

#ifdef _KERNEL
extern struct pmap	kernel_pmap_store;
#define	kernel_pmap	(&kernel_pmap_store)

#define	PMAP_LOCK(pmap)		mtx_lock(&(pmap)->pm_mtx)
#define	PMAP_LOCK_ASSERT(pmap, type) \
				mtx_assert(&(pmap)->pm_mtx, (type))
#define	PMAP_LOCK_DESTROY(pmap)	mtx_destroy(&(pmap)->pm_mtx)
#define	PMAP_LOCK_INIT(pmap)	mtx_init(&(pmap)->pm_mtx, "pmap", \
				    NULL, MTX_DEF | MTX_DUPOK)
#define	PMAP_LOCKED(pmap)	mtx_owned(&(pmap)->pm_mtx)
#define	PMAP_MTX(pmap)		(&(pmap)->pm_mtx)
#define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
#define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)

int	pmap_pinit_type(pmap_t pmap, enum pmap_type pm_type, int flags);
int	pmap_emulate_accessed_dirty(pmap_t pmap, vm_offset_t va, int ftype);

extern caddr_t	CADDR1;
extern pt_entry_t *CMAP1;
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;
extern vm_paddr_t dmaplimit;
extern int pmap_pcid_enabled;
extern int invpcid_works;
extern int pmap_pcid_invlpg_workaround;
extern int pmap_pcid_invlpg_workaround_uena;

#define	pmap_page_get_memattr(m)	((vm_memattr_t)(m)->md.pat_mode)
#define	pmap_page_is_write_mapped(m)	(((m)->a.flags & PGA_WRITEABLE) != 0)
#define	pmap_unmapbios(va, sz)		pmap_unmapdev((va), (sz))

#define	pmap_vm_page_alloc_check(m)					\
	KASSERT(m->phys_addr < kernphys ||				\
	    m->phys_addr >= kernphys + (vm_offset_t)&_end - KERNSTART,	\
	    ("allocating kernel page %p pa %#lx kernphys %#lx end %p",	\
	    m, m->phys_addr, kernphys, &_end));

struct thread;

void	pmap_activate_boot(pmap_t pmap);
void	pmap_activate_sw(struct thread *);
void	pmap_allow_2m_x_ept_recalculate(void);
void	pmap_bootstrap(vm_paddr_t *);
int	pmap_cache_bits(pmap_t pmap, int mode, boolean_t is_pde);
int	pmap_change_attr(vm_offset_t, vm_size_t, int);
int	pmap_change_prot(vm_offset_t, vm_size_t, vm_prot_t);
void	pmap_demote_DMAP(vm_paddr_t base, vm_size_t len, boolean_t invalidate);
void	pmap_flush_cache_range(vm_offset_t, vm_offset_t);
void	pmap_flush_cache_phys_range(vm_paddr_t, vm_paddr_t, vm_memattr_t);
void	pmap_init_pat(void);
void	pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void	*pmap_kenter_temporary(vm_paddr_t pa, int i);
vm_paddr_t pmap_kextract(vm_offset_t);
void	pmap_kremove(vm_offset_t);
int	pmap_large_map(vm_paddr_t, vm_size_t, void **, vm_memattr_t);
void	pmap_large_map_wb(void *sva, vm_size_t len);
void	pmap_large_unmap(void *sva, vm_size_t len);
void	*pmap_mapbios(vm_paddr_t, vm_size_t);
void	*pmap_mapdev(vm_paddr_t, vm_size_t);
void	*pmap_mapdev_attr(vm_paddr_t, vm_size_t, int);
void	*pmap_mapdev_pciecfg(vm_paddr_t pa, vm_size_t size);
bool	pmap_not_in_di(void);
boolean_t pmap_page_is_mapped(vm_page_t m);
void	pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);
void	pmap_page_set_memattr_noflush(vm_page_t m, vm_memattr_t ma);
void	pmap_pinit_pml4(vm_page_t);
void	pmap_pinit_pml5(vm_page_t);
bool	pmap_ps_enabled(pmap_t pmap);
void	pmap_unmapdev(void *, vm_size_t);
void	pmap_invalidate_page(pmap_t, vm_offset_t);
void	pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t);
void	pmap_invalidate_all(pmap_t);
void	pmap_invalidate_cache(void);
void	pmap_invalidate_cache_pages(vm_page_t *pages, int count);
void	pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva);
void	pmap_force_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva);
void	pmap_get_mapping(pmap_t pmap, vm_offset_t va, uint64_t *ptr, int *num);
boolean_t pmap_map_io_transient(vm_page_t *, vm_offset_t *, int, boolean_t);
void	pmap_unmap_io_transient(vm_page_t *, vm_offset_t *, int, boolean_t);
void	pmap_pti_add_kva(vm_offset_t sva, vm_offset_t eva, bool exec);
void	pmap_pti_remove_kva(vm_offset_t sva, vm_offset_t eva);
void	pmap_pti_pcid_invalidate(uint64_t ucr3, uint64_t kcr3);
void	pmap_pti_pcid_invlpg(uint64_t ucr3, uint64_t kcr3, vm_offset_t va);
void	pmap_pti_pcid_invlrng(uint64_t ucr3, uint64_t kcr3, vm_offset_t sva,
	    vm_offset_t eva);
int	pmap_pkru_clear(pmap_t pmap, vm_offset_t sva, vm_offset_t eva);
int	pmap_pkru_set(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
	    u_int keyidx, int flags);
void	pmap_thread_init_invl_gen(struct thread *td);
int	pmap_vmspace_copy(pmap_t dst_pmap, pmap_t src_pmap);
void	pmap_page_array_startup(long count);
vm_page_t pmap_page_alloc_below_4g(bool zeroed);

#if defined(KASAN) || defined(KMSAN)
void	pmap_san_bootstrap(void);
void	pmap_san_enter(vm_offset_t);
#endif

/*
 * Returns a pointer to a set of CPUs on which the pmap is currently active.
 * Note that the set can be modified without any mutual exclusion, so a copy
 * must be made if a stable value is required.
 */
static __inline volatile cpuset_t *
pmap_invalidate_cpu_mask(pmap_t pmap)
{
	return (&pmap->pm_active);
}
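
/*
 * Illustrative use: callers that need a stable view snapshot the set first,
 * e.g.
 *
 *	cpuset_t active;
 *
 *	active = *pmap_invalidate_cpu_mask(pmap);
 *	if (CPU_ISSET(curcpu, &active))
 *		...
 */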

#if defined(_SYS_PCPU_H_) && defined(_MACHINE_CPUFUNC_H_)
/*
 * It seems that AlderLake+ small cores have some microarchitectural
 * bug, which results in the INVLPG instruction failing to flush all
 * global TLB entries when PCID is enabled.  Work around it for now,
 * by doing global invalidation on small cores instead of INVLPG.
 */
static __inline void
pmap_invlpg(pmap_t pmap, vm_offset_t va)
{
	if (pmap == kernel_pmap && PCPU_GET(pcid_invlpg_workaround)) {
		struct invpcid_descr d = { 0 };

		invpcid(&d, INVPCID_CTXGLOB);
	} else {
		invlpg(va);
	}
}
#endif /* sys/pcpu.h && machine/cpufunc.h */

#endif /* _KERNEL */

/* Return various clipped indexes for a given VA */
static __inline vm_pindex_t
pmap_pte_index(vm_offset_t va)
{

	return ((va >> PAGE_SHIFT) & ((1ul << NPTEPGSHIFT) - 1));
}

static __inline vm_pindex_t
pmap_pde_index(vm_offset_t va)
{

	return ((va >> PDRSHIFT) & ((1ul << NPDEPGSHIFT) - 1));
}

static __inline vm_pindex_t
pmap_pdpe_index(vm_offset_t va)
{

	return ((va >> PDPSHIFT) & ((1ul << NPDPEPGSHIFT) - 1));
}

static __inline vm_pindex_t
pmap_pml4e_index(vm_offset_t va)
{

	return ((va >> PML4SHIFT) & ((1ul << NPML4EPGSHIFT) - 1));
}

static __inline vm_pindex_t
pmap_pml5e_index(vm_offset_t va)
{

	return ((va >> PML5SHIFT) & ((1ul << NPML5EPGSHIFT) - 1));
}

#endif /* !LOCORE */

#endif /* !_MACHINE_PMAP_H_ */

#endif /* __i386__ */