/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the pagetables themselves. This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 * from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
 * from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
 * $FreeBSD: releng/12.0/sys/amd64/include/pmap.h 339432 2018-10-18 20:49:16Z kib $
 */

#ifndef _MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

/*
 * Page-directory and page-table entries follow this format, with a few
 * of the fields absent at particular paging levels or when the
 * corresponding CPU feature is disabled.
 */
/* ---- Intel Nomenclature ---- */
#define	X86_PG_V	0x001	/* P	Valid			*/
#define	X86_PG_RW	0x002	/* R/W	Read/Write		*/
#define	X86_PG_U	0x004	/* U/S	User/Supervisor		*/
#define	X86_PG_NC_PWT	0x008	/* PWT	Write through		*/
#define	X86_PG_NC_PCD	0x010	/* PCD	Cache disable		*/
#define	X86_PG_A	0x020	/* A	Accessed		*/
#define	X86_PG_M	0x040	/* D	Dirty			*/
#define	X86_PG_PS	0x080	/* PS	Page size (0=4k,1=2M)	*/
#define	X86_PG_PTE_PAT	0x080	/* PAT	PAT index		*/
#define	X86_PG_G	0x100	/* G	Global			*/
#define	X86_PG_AVAIL1	0x200	/*    /	Available for system	*/
#define	X86_PG_AVAIL2	0x400	/*   <	programmers use		*/
#define	X86_PG_AVAIL3	0x800	/*    \				*/
#define	X86_PG_PDE_PAT	0x1000	/* PAT	PAT index		*/
#define	X86_PG_NX	(1ul<<63) /* No-execute */
#define	X86_PG_AVAIL(x)	(1ul << (x))

/* Page level cache control fields used to determine the PAT type */
#define	X86_PG_PDE_CACHE (X86_PG_PDE_PAT | X86_PG_NC_PWT | X86_PG_NC_PCD)
#define	X86_PG_PTE_CACHE (X86_PG_PTE_PAT | X86_PG_NC_PWT | X86_PG_NC_PCD)
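
/*
 * For reference: the 3-bit PAT index of a 4KB mapping is assembled from
 * { PTE_PAT, PCD, PWT }, with the PAT bit as the most significant bit.
 * 2MB (PDE) mappings carry the PAT bit at bit 12 (X86_PG_PDE_PAT)
 * instead, because bit 7 is X86_PG_PS there.
 */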

/*
 * Intel extended page table (EPT) bit definitions.
 */
#define	EPT_PG_READ		0x001	/* R	Read		*/
#define	EPT_PG_WRITE		0x002	/* W	Write		*/
#define	EPT_PG_EXECUTE		0x004	/* X	Execute		*/
#define	EPT_PG_IGNORE_PAT	0x040	/* IPAT	Ignore PAT	*/
#define	EPT_PG_PS		0x080	/* PS	Page size	*/
#define	EPT_PG_A		0x100	/* A	Accessed	*/
#define	EPT_PG_M		0x200	/* D	Dirty		*/
#define	EPT_PG_MEMORY_TYPE(x)	((x) << 3) /* MT Memory Type */
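
/*
 * Illustrative example (memory type encodings per the Intel SDM): a
 * write-back guest mapping would include EPT_PG_MEMORY_TYPE(6), since
 * type 6 is WB and type 0 is UC.
 */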

/*
 * Define the PG_xx macros in terms of the bits on x86 PTEs.
 */
#define	PG_V		X86_PG_V
#define	PG_RW		X86_PG_RW
#define	PG_U		X86_PG_U
#define	PG_NC_PWT	X86_PG_NC_PWT
#define	PG_NC_PCD	X86_PG_NC_PCD
#define	PG_A		X86_PG_A
#define	PG_M		X86_PG_M
#define	PG_PS		X86_PG_PS
#define	PG_PTE_PAT	X86_PG_PTE_PAT
#define	PG_G		X86_PG_G
#define	PG_AVAIL1	X86_PG_AVAIL1
#define	PG_AVAIL2	X86_PG_AVAIL2
#define	PG_AVAIL3	X86_PG_AVAIL3
#define	PG_PDE_PAT	X86_PG_PDE_PAT
#define	PG_NX		X86_PG_NX
#define	PG_PDE_CACHE	X86_PG_PDE_CACHE
#define	PG_PTE_CACHE	X86_PG_PTE_CACHE

/* Our various interpretations of the above */
#define	PG_W		X86_PG_AVAIL3	/* "Wired" pseudoflag */
#define	PG_MANAGED	X86_PG_AVAIL2
#define	EPT_PG_EMUL_V	X86_PG_AVAIL(52)
#define	EPT_PG_EMUL_RW	X86_PG_AVAIL(53)
#define	PG_PROMOTED	X86_PG_AVAIL(54)	/* PDE only */
#define	PG_FRAME	(0x000ffffffffff000ul)
#define	PG_PS_FRAME	(0x000fffffffe00000ul)

/*
 * Promotion to a 2MB (PDE) page mapping requires that the corresponding 4KB
 * (PTE) page mappings have identical settings for the following fields:
 */
#define	PG_PTE_PROMOTE	(PG_NX | PG_MANAGED | PG_W | PG_G | PG_PTE_CACHE | \
	    PG_M | PG_A | PG_U | PG_RW | PG_V)

/*
 * Page Protection Exception bits
 */

#define	PGEX_P		0x01	/* Protection violation vs. not present */
#define	PGEX_W		0x02	/* during a Write cycle */
#define	PGEX_U		0x04	/* access from User mode (UPL) */
#define	PGEX_RSV	0x08	/* reserved PTE field is non-zero */
#define	PGEX_I		0x10	/* during an instruction fetch */

/*
 * undef the PG_xx macros that define bits in the regular x86 PTEs that
 * have a different position in nested PTEs. This is done when compiling
 * code that needs to be aware of the differences between regular x86 and
 * nested PTEs.
 *
 * The appropriate bitmask will be calculated at runtime based on the pmap
 * type.
 */
#ifdef AMD64_NPT_AWARE
#undef PG_AVAIL1		/* X86_PG_AVAIL1 aliases with EPT_PG_M */
#undef PG_G
#undef PG_A
#undef PG_M
#undef PG_PDE_PAT
#undef PG_PDE_CACHE
#undef PG_PTE_PAT
#undef PG_PTE_CACHE
#undef PG_RW
#undef PG_V
#endif

/*
 * Pte related macros.  This is complicated by having to deal with
 * the sign extension of the 48th bit.
 */
#define	KVADDR(l4, l3, l2, l1) ( \
	((unsigned long)-1 << 47) | \
	((unsigned long)(l4) << PML4SHIFT) | \
	((unsigned long)(l3) << PDPSHIFT) | \
	((unsigned long)(l2) << PDRSHIFT) | \
	((unsigned long)(l1) << PAGE_SHIFT))

#define	UVADDR(l4, l3, l2, l1) ( \
	((unsigned long)(l4) << PML4SHIFT) | \
	((unsigned long)(l3) << PDPSHIFT) | \
	((unsigned long)(l2) << PDRSHIFT) | \
	((unsigned long)(l1) << PAGE_SHIFT))
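
/*
 * Worked example: with NPML4EPG == 512, KVADDR(KPML4I, KPDPI, 0, 0) ==
 * KVADDR(511, 510, 0, 0) evaluates to 0xffffffff80000000 (KERNBASE).
 * The leading ((unsigned long)-1 << 47) term supplies the sign extension
 * through bits 48-63 that makes the result a canonical address.
 */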

/*
 * Number of kernel PML4 slots.  Can be anywhere from 1 to 64 or so,
 * but setting it larger than NDMPML4E makes no sense.
 *
 * Each slot provides .5 TB of kernel virtual space.
 */
#define	NKPML4E		4

#define	NUPML4E		(NPML4EPG/2)	/* number of userland PML4 pages */
#define	NUPDPE		(NUPML4E*NPDPEPG)/* number of userland PDP pages */
#define	NUPDE		(NUPDPE*NPDEPG)	/* number of userland PD entries */

/*
 * NDMPML4E is the maximum number of PML4 entries that will be
 * used to implement the direct map.  It must be a power of two,
 * and should generally exceed NKPML4E.  The maximum possible
 * value is 64; using 128 will make the direct map intrude into
 * the recursive page table map.
 */
#define	NDMPML4E	8
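
/*
 * With NDMPML4E == 8 and each PML4 slot covering 512GB, the direct map
 * can describe up to 4TB of physical address space.
 */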

/*
 * These values control the layout of virtual memory.  The starting address
 * of the direct map, which is controlled by DMPML4I, must be a multiple of
 * its size.  (See the PHYS_TO_DMAP() and DMAP_TO_PHYS() macros.)
 *
 * Note: KPML4I is the index of the (single) level 4 page that maps
 * the KVA that holds KERNBASE, while KPML4BASE is the index of the
 * first level 4 page that maps VM_MIN_KERNEL_ADDRESS.  If NKPML4E
 * is 1, these are the same, otherwise KPML4BASE < KPML4I and extra
 * level 4 PDEs are needed to map from VM_MIN_KERNEL_ADDRESS up to
 * KERNBASE.
 *
 * (KPML4I combines with KPDPI to choose where KERNBASE starts.
 * Or, in other words, KPML4I provides bits 39..47 of KERNBASE,
 * and KPDPI provides bits 30..38.)
 */
#define	PML4PML4I	(NPML4EPG/2)	/* Index of recursive pml4 mapping */

#define	KPML4BASE	(NPML4EPG-NKPML4E) /* KVM at highest addresses */
#define	DMPML4I		rounddown(KPML4BASE-NDMPML4E, NDMPML4E) /* Below KVM */

#define	KPML4I		(NPML4EPG-1)
#define	KPDPI		(NPDPEPG-2)	/* kernbase at -2GB */

/* Large map: index of the first and max last pml4 entry */
#define	LMSPML4I	(PML4PML4I + 1)
#define	LMEPML4I	(DMPML4I - 1)

/*
 * XXX doesn't really belong here I guess...
 */
#define	ISA_HOLE_START	0xa0000
#define	ISA_HOLE_LENGTH	(0x100000-ISA_HOLE_START)

#define	PMAP_PCID_NONE		0xffffffff
#define	PMAP_PCID_KERN		0
#define	PMAP_PCID_OVERMAX	0x1000
#define	PMAP_PCID_OVERMAX_KERN	0x800
#define	PMAP_PCID_USER_PT	0x800

#define	PMAP_NO_CR3		(~0UL)

#ifndef LOCORE

#include <sys/queue.h>
#include <sys/_cpuset.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>

#include <vm/_vm_radix.h>

typedef u_int64_t pd_entry_t;
typedef u_int64_t pt_entry_t;
typedef u_int64_t pdp_entry_t;
typedef u_int64_t pml4_entry_t;

/*
 * Address of current address space page table maps and directories.
 */
#ifdef _KERNEL
#define	addr_PTmap	(KVADDR(PML4PML4I, 0, 0, 0))
#define	addr_PDmap	(KVADDR(PML4PML4I, PML4PML4I, 0, 0))
#define	addr_PDPmap	(KVADDR(PML4PML4I, PML4PML4I, PML4PML4I, 0))
#define	addr_PML4map	(KVADDR(PML4PML4I, PML4PML4I, PML4PML4I, PML4PML4I))
#define	addr_PML4pml4e	(addr_PML4map + (PML4PML4I * sizeof(pml4_entry_t)))
#define	PTmap		((pt_entry_t *)(addr_PTmap))
#define	PDmap		((pd_entry_t *)(addr_PDmap))
#define	PDPmap		((pd_entry_t *)(addr_PDPmap))
#define	PML4map		((pd_entry_t *)(addr_PML4map))
#define	PML4pml4e	((pd_entry_t *)(addr_PML4pml4e))

extern int nkpt;		/* Initial number of kernel page tables */
extern u_int64_t KPDPphys;	/* physical address of kernel level 3 */
extern u_int64_t KPML4phys;	/* physical address of kernel level 4 */

/*
 * virtual address to page table entry and
 * to physical address.
 * Note: these work recursively, thus vtopte of a pte will give
 * the corresponding pde that in turn maps it.
 */
pt_entry_t *vtopte(vm_offset_t);
#define	vtophys(va)	pmap_kextract(((vm_offset_t) (va)))
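
/*
 * A minimal sketch of how vtopte() can be built on the recursive PTmap
 * window above; the authoritative definition lives in pmap.c:
 *
 *	pt_entry_t *
 *	vtopte(vm_offset_t va)
 *	{
 *		u_int64_t mask = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT +
 *		    NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);
 *
 *		return (PTmap + ((va >> PAGE_SHIFT) & mask));
 *	}
 */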

#define	pte_load_store(ptep, pte)	atomic_swap_long(ptep, pte)
#define	pte_load_clear(ptep)		atomic_swap_long(ptep, 0)
#define	pte_store(ptep, pte) do { \
	*(u_long *)(ptep) = (u_long)(pte); \
} while (0)
#define	pte_clear(ptep)			pte_store(ptep, 0)

#define	pde_store(pdep, pde)		pte_store(pdep, pde)

extern pt_entry_t pg_nx;

#endif /* _KERNEL */

/*
 * Pmap stuff
 */
struct pv_entry;
struct pv_chunk;

/*
 * Locks
 * (p) PV list lock
 */
struct md_page {
	TAILQ_HEAD(, pv_entry)	pv_list;  /* (p) */
	int			pv_gen;   /* (p) */
	int			pat_mode;
};

enum pmap_type {
	PT_X86,			/* regular x86 page tables */
	PT_EPT,			/* Intel's nested page tables */
	PT_RVI,			/* AMD's nested page tables */
};

struct pmap_pcids {
	uint32_t	pm_pcid;
	uint32_t	pm_gen;
};

/*
 * The kernel virtual address (KVA) of the level 4 page table page is always
 * within the direct map (DMAP) region.
 */
struct pmap {
	struct mtx		pm_mtx;
	pml4_entry_t		*pm_pml4;	/* KVA of level 4 page table */
	pml4_entry_t		*pm_pml4u;	/* KVA of user l4 page table */
	uint64_t		pm_cr3;
	uint64_t		pm_ucr3;
	TAILQ_HEAD(,pv_chunk)	pm_pvchunk;	/* list of mappings in pmap */
	cpuset_t		pm_active;	/* active on cpus */
	enum pmap_type		pm_type;	/* regular or nested tables */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	struct vm_radix		pm_root;	/* spare page table pages */
	long			pm_eptgen;	/* EPT pmap generation id */
	int			pm_flags;
	struct pmap_pcids	pm_pcids[MAXCPU];
};

/* flags */
#define	PMAP_NESTED_IPIMASK	0xff
#define	PMAP_PDE_SUPERPAGE	(1 << 8)	/* supports 2MB superpages */
#define	PMAP_EMULATE_AD_BITS	(1 << 9)	/* needs A/D bits emulation */
#define	PMAP_SUPPORTS_EXEC_ONLY	(1 << 10)	/* execute only mappings ok */

typedef struct pmap	*pmap_t;

#ifdef _KERNEL
extern struct pmap	kernel_pmap_store;
#define	kernel_pmap	(&kernel_pmap_store)

#define	PMAP_LOCK(pmap)		mtx_lock(&(pmap)->pm_mtx)
#define	PMAP_LOCK_ASSERT(pmap, type) \
				mtx_assert(&(pmap)->pm_mtx, (type))
#define	PMAP_LOCK_DESTROY(pmap)	mtx_destroy(&(pmap)->pm_mtx)
#define	PMAP_LOCK_INIT(pmap)	mtx_init(&(pmap)->pm_mtx, "pmap", \
				    NULL, MTX_DEF | MTX_DUPOK)
#define	PMAP_LOCKED(pmap)	mtx_owned(&(pmap)->pm_mtx)
#define	PMAP_MTX(pmap)		(&(pmap)->pm_mtx)
#define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
#define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)
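
/*
 * Typical usage (illustrative):
 *
 *	PMAP_LOCK(pmap);
 *	... walk or modify the pmap's page tables ...
 *	PMAP_UNLOCK(pmap);
 */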

int	pmap_pinit_type(pmap_t pmap, enum pmap_type pm_type, int flags);
int	pmap_emulate_accessed_dirty(pmap_t pmap, vm_offset_t va, int ftype);
#endif

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_list.
 */
typedef struct pv_entry {
	vm_offset_t	pv_va;		/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry)	pv_next;
} *pv_entry_t;

/*
 * pv_entries are allocated in chunks per-process.  This avoids the
 * need to track per-pmap assignments.
 */
#define	_NPCM	3
#define	_NPCPV	168
#define	PV_CHUNK_HEADER							\
	pmap_t			pc_pmap;				\
	TAILQ_ENTRY(pv_chunk)	pc_list;				\
	uint64_t		pc_map[_NPCM];	/* bitmap; 1 = free */	\
	TAILQ_ENTRY(pv_chunk)	pc_lru;

struct pv_chunk_header {
	PV_CHUNK_HEADER
};

struct pv_chunk {
	PV_CHUNK_HEADER
	struct pv_entry		pc_pventry[_NPCPV];
};
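
/*
 * Size check: a pv_chunk fills exactly one 4KB page.  The header is 64
 * bytes (8-byte pc_pmap, two 16-byte TAILQ_ENTRYs, 24-byte pc_map) and
 * the 168 pv_entries are 24 bytes each, so 64 + 168 * 24 == 4096.
 */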

#ifdef _KERNEL

extern caddr_t	CADDR1;
extern pt_entry_t *CMAP1;
extern vm_paddr_t phys_avail[];
extern vm_paddr_t dump_avail[];
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;
extern vm_paddr_t dmaplimit;
extern int pmap_pcid_enabled;
extern int invpcid_works;

#define	pmap_page_get_memattr(m)	((vm_memattr_t)(m)->md.pat_mode)
#define	pmap_page_is_write_mapped(m)	(((m)->aflags & PGA_WRITEABLE) != 0)
#define	pmap_unmapbios(va, sz)		pmap_unmapdev((va), (sz))

struct thread;

void	pmap_activate_boot(pmap_t pmap);
void	pmap_activate_sw(struct thread *);
void	pmap_bootstrap(vm_paddr_t *);
int	pmap_cache_bits(pmap_t pmap, int mode, boolean_t is_pde);
int	pmap_change_attr(vm_offset_t, vm_size_t, int);
void	pmap_demote_DMAP(vm_paddr_t base, vm_size_t len, boolean_t invalidate);
void	pmap_flush_cache_range(vm_offset_t, vm_offset_t);
void	pmap_flush_cache_phys_range(vm_paddr_t, vm_paddr_t, vm_memattr_t);
void	pmap_init_pat(void);
void	pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void	*pmap_kenter_temporary(vm_paddr_t pa, int i);
vm_paddr_t pmap_kextract(vm_offset_t);
void	pmap_kremove(vm_offset_t);
int	pmap_large_map(vm_paddr_t, vm_size_t, void **, vm_memattr_t);
void	pmap_large_map_wb(void *sva, vm_size_t len);
void	pmap_large_unmap(void *sva, vm_size_t len);
void	*pmap_mapbios(vm_paddr_t, vm_size_t);
void	*pmap_mapdev(vm_paddr_t, vm_size_t);
void	*pmap_mapdev_attr(vm_paddr_t, vm_size_t, int);
void	*pmap_mapdev_pciecfg(vm_paddr_t pa, vm_size_t size);
boolean_t pmap_page_is_mapped(vm_page_t m);
void	pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);
void	pmap_pinit_pml4(vm_page_t);
bool	pmap_ps_enabled(pmap_t pmap);
void	pmap_unmapdev(vm_offset_t, vm_size_t);
void	pmap_invalidate_page(pmap_t, vm_offset_t);
void	pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t);
void	pmap_invalidate_all(pmap_t);
void	pmap_invalidate_cache(void);
void	pmap_invalidate_cache_pages(vm_page_t *pages, int count);
void	pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva);
void	pmap_force_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva);
void	pmap_get_mapping(pmap_t pmap, vm_offset_t va, uint64_t *ptr, int *num);
boolean_t pmap_map_io_transient(vm_page_t *, vm_offset_t *, int, boolean_t);
void	pmap_unmap_io_transient(vm_page_t *, vm_offset_t *, int, boolean_t);
void	pmap_pti_add_kva(vm_offset_t sva, vm_offset_t eva, bool exec);
void	pmap_pti_remove_kva(vm_offset_t sva, vm_offset_t eva);
void	pmap_pti_pcid_invalidate(uint64_t ucr3, uint64_t kcr3);
void	pmap_pti_pcid_invlpg(uint64_t ucr3, uint64_t kcr3, vm_offset_t va);
void	pmap_pti_pcid_invlrng(uint64_t ucr3, uint64_t kcr3, vm_offset_t sva,
	    vm_offset_t eva);
#endif	/* _KERNEL */

/* Return various clipped indexes for a given VA */
static __inline vm_pindex_t
pmap_pte_index(vm_offset_t va)
{

	return ((va >> PAGE_SHIFT) & ((1ul << NPTEPGSHIFT) - 1));
}

static __inline vm_pindex_t
pmap_pde_index(vm_offset_t va)
{

	return ((va >> PDRSHIFT) & ((1ul << NPDEPGSHIFT) - 1));
}

static __inline vm_pindex_t
pmap_pdpe_index(vm_offset_t va)
{

	return ((va >> PDPSHIFT) & ((1ul << NPDPEPGSHIFT) - 1));
}

static __inline vm_pindex_t
pmap_pml4e_index(vm_offset_t va)
{

	return ((va >> PML4SHIFT) & ((1ul << NPML4EPGSHIFT) - 1));
}
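
/*
 * Example: for va == KERNBASE (0xffffffff80000000), pmap_pml4e_index()
 * returns 511 (KPML4I) and pmap_pdpe_index() returns 510 (KPDPI), the
 * indexes chosen above to place the kernel at -2GB.
 */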

#endif /* !LOCORE */

#endif /* !_MACHINE_PMAP_H_ */