/*-
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from the hp300 version by Mike Hibler.  This version, by William
 * Jolitz, uses a recursive map [a pde points to the page directory] to
 * map the page tables using the page tables themselves.  This is done to
 * reduce the impact on kernel virtual memory of large, sparse address
 * spaces, and to reduce the memory cost of each process.
 *
 *	from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
 *	from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
 * $FreeBSD: releng/11.0/sys/amd64/include/pmap.h 299350 2016-05-10 09:58:51Z kib $
 */

#ifndef _MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

/*
 * Page-directory and page-table entries follow this format.  Some fields
 * are absent or reinterpreted depending on the paging level and on which
 * processor features are enabled.
 */
/* ---- Intel Nomenclature ---- */
#define	X86_PG_V	0x001	/* P	Valid			*/
#define	X86_PG_RW	0x002	/* R/W	Read/Write		*/
#define	X86_PG_U	0x004	/* U/S	User/Supervisor		*/
#define	X86_PG_NC_PWT	0x008	/* PWT	Write through		*/
#define	X86_PG_NC_PCD	0x010	/* PCD	Cache disable		*/
#define	X86_PG_A	0x020	/* A	Accessed		*/
#define	X86_PG_M	0x040	/* D	Dirty			*/
#define	X86_PG_PS	0x080	/* PS	Page size (0=4k,1=2M)	*/
#define	X86_PG_PTE_PAT	0x080	/* PAT	PAT index		*/
#define	X86_PG_G	0x100	/* G	Global			*/
#define	X86_PG_AVAIL1	0x200	/*    /	Available for system	*/
#define	X86_PG_AVAIL2	0x400	/*   <	programmers use		*/
#define	X86_PG_AVAIL3	0x800	/*    \				*/
#define	X86_PG_PDE_PAT	0x1000	/* PAT	PAT index		*/
#define	X86_PG_NX	(1ul<<63) /* No-execute */
#define	X86_PG_AVAIL(x)	(1ul << (x))

/* Page level cache control fields used to determine the PAT type */
#define	X86_PG_PDE_CACHE (X86_PG_PDE_PAT | X86_PG_NC_PWT | X86_PG_NC_PCD)
#define	X86_PG_PTE_CACHE (X86_PG_PTE_PAT | X86_PG_NC_PWT | X86_PG_NC_PCD)
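
/*
 * Illustrative sketch (not part of the original header): composing a leaf
 * PTE for a global, writable kernel mapping from the bits above.  "pa" is
 * a hypothetical page-aligned physical address.
 *
 *	pt_entry_t pte;
 *
 *	pte = pa | X86_PG_V | X86_PG_RW | X86_PG_A | X86_PG_M | X86_PG_G;
 *
 * Pre-setting A and M spares the hardware the extra table update it would
 * otherwise perform to set the accessed and dirty bits on first touch.
 */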

/*
 * Intel extended page table (EPT) bit definitions.
 */
#define	EPT_PG_READ		0x001	/* R	Read		*/
#define	EPT_PG_WRITE		0x002	/* W	Write		*/
#define	EPT_PG_EXECUTE		0x004	/* X	Execute		*/
#define	EPT_PG_IGNORE_PAT	0x040	/* IPAT	Ignore PAT	*/
#define	EPT_PG_PS		0x080	/* PS	Page size	*/
#define	EPT_PG_A		0x100	/* A	Accessed	*/
#define	EPT_PG_M		0x200	/* D	Dirty		*/
#define	EPT_PG_MEMORY_TYPE(x)	((x) << 3) /* MT Memory Type */
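
/*
 * Illustrative sketch: an EPT leaf entry granting read, write, and execute
 * with write-back memory type.  The memory-type encoding (6 == write-back)
 * follows the Intel SDM; "pa" is a hypothetical guest-physical page frame.
 *
 *	pt_entry_t ept_pte;
 *
 *	ept_pte = pa | EPT_PG_READ | EPT_PG_WRITE | EPT_PG_EXECUTE |
 *	    EPT_PG_MEMORY_TYPE(6);
 */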

/*
 * Define the PG_xx macros in terms of the bits on x86 PTEs.
 */
#define	PG_V		X86_PG_V
#define	PG_RW		X86_PG_RW
#define	PG_U		X86_PG_U
#define	PG_NC_PWT	X86_PG_NC_PWT
#define	PG_NC_PCD	X86_PG_NC_PCD
#define	PG_A		X86_PG_A
#define	PG_M		X86_PG_M
#define	PG_PS		X86_PG_PS
#define	PG_PTE_PAT	X86_PG_PTE_PAT
#define	PG_G		X86_PG_G
#define	PG_AVAIL1	X86_PG_AVAIL1
#define	PG_AVAIL2	X86_PG_AVAIL2
#define	PG_AVAIL3	X86_PG_AVAIL3
#define	PG_PDE_PAT	X86_PG_PDE_PAT
#define	PG_NX		X86_PG_NX
#define	PG_PDE_CACHE	X86_PG_PDE_CACHE
#define	PG_PTE_CACHE	X86_PG_PTE_CACHE

/* Our various interpretations of the above */
#define	PG_W		X86_PG_AVAIL3	/* "Wired" pseudoflag */
#define	PG_MANAGED	X86_PG_AVAIL2
#define	EPT_PG_EMUL_V	X86_PG_AVAIL(52)
#define	EPT_PG_EMUL_RW	X86_PG_AVAIL(53)
#define	PG_FRAME	(0x000ffffffffff000ul)
#define	PG_PS_FRAME	(0x000fffffffe00000ul)
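
/*
 * Illustrative sketch: PG_FRAME and PG_PS_FRAME recover the physical frame
 * from a leaf entry.  For a 4KB PTE the byte offset comes from the low 12
 * bits of the virtual address; for a 2MB PDE (PG_PS set) it comes from the
 * low 21 bits:
 *
 *	pa = (pte & PG_FRAME) | (va & PAGE_MASK);
 *	pa = (pde & PG_PS_FRAME) | (va & PDRMASK);	(2MB case)
 */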

/*
 * Promotion to a 2MB (PDE) page mapping requires that the corresponding 4KB
 * (PTE) page mappings have identical settings for the following fields:
 */
#define	PG_PTE_PROMOTE	(PG_NX | PG_MANAGED | PG_W | PG_G | PG_PTE_CACHE | \
	    PG_M | PG_A | PG_U | PG_RW | PG_V)
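
/*
 * Illustrative sketch of the test this mask enables (the real logic lives
 * in pmap_promote_pde() in pmap.c): the 512 PTEs backing a 2MB-aligned
 * range must map contiguous frames and agree in every PG_PTE_PROMOTE bit.
 * Here "pte" hypothetically points at the first of those 512 PTEs:
 *
 *	for (i = 0; i < NPTEPG; i++)
 *		if ((pte[i] & PG_PTE_PROMOTE) != (pte[0] & PG_PTE_PROMOTE) ||
 *		    (pte[i] & PG_FRAME) !=
 *		    ((pte[0] & PG_FRAME) + i * PAGE_SIZE))
 *			return;		(cannot promote)
 */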

/*
 * Page Protection Exception bits
 */

#define	PGEX_P		0x01	/* Protection violation vs. not present */
#define	PGEX_W		0x02	/* during a Write cycle */
#define	PGEX_U		0x04	/* access from User mode (UPL) */
#define	PGEX_RSV	0x08	/* reserved PTE field is non-zero */
#define	PGEX_I		0x10	/* during an instruction fetch */
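
/*
 * Illustrative sketch: decoding the error code that the hardware pushes
 * for a page fault ("err" here is the hypothetical trap-frame value):
 *
 *	if ((err & PGEX_P) == 0)
 *		... fault on a not-present page (e.g. demand paging) ...
 *	else if (err & PGEX_RSV)
 *		... reserved bit set in a paging structure; always fatal ...
 *	else if (err & PGEX_W)
 *		... write to a read-only mapping (e.g. copy-on-write) ...
 *
 * PGEX_U and PGEX_I further distinguish user-mode accesses and instruction
 * fetches (the latter meaningful when PG_NX is in use).
 */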

/*
 * undef the PG_xx macros that define bits in the regular x86 PTEs that
 * have a different position in nested PTEs.  This is done when compiling
 * code that needs to be aware of the differences between regular x86 and
 * nested PTEs.
 *
 * The appropriate bitmask will be calculated at runtime based on the pmap
 * type.
 */
#ifdef AMD64_NPT_AWARE
#undef PG_AVAIL1		/* X86_PG_AVAIL1 aliases with EPT_PG_M */
#undef PG_G
#undef PG_A
#undef PG_M
#undef PG_PDE_PAT
#undef PG_PDE_CACHE
#undef PG_PTE_PAT
#undef PG_PTE_CACHE
#undef PG_RW
#undef PG_V
#endif

/*
 * PTE-related macros.  These are complicated by the need to sign-extend
 * bit 47 into bits 48..63 so that the result is a canonical address.
 */
#define	KVADDR(l4, l3, l2, l1) ( \
	((unsigned long)-1 << 47) | \
	((unsigned long)(l4) << PML4SHIFT) | \
	((unsigned long)(l3) << PDPSHIFT) | \
	((unsigned long)(l2) << PDRSHIFT) | \
	((unsigned long)(l1) << PAGE_SHIFT))

#define	UVADDR(l4, l3, l2, l1) ( \
	((unsigned long)(l4) << PML4SHIFT) | \
	((unsigned long)(l3) << PDPSHIFT) | \
	((unsigned long)(l2) << PDRSHIFT) | \
	((unsigned long)(l1) << PAGE_SHIFT))
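
/*
 * Illustrative examples: UVADDR(1, 0, 0, 0) is 1ul << 39, the base of the
 * second user PML4 slot, while KVADDR(KPML4I, KPDPI, 0, 0) works out to
 * 0xffffffff80000000 (-2GB), the address that holds KERNBASE.  The leading
 * ((unsigned long)-1 << 47) term in KVADDR supplies the copies of bit 47
 * in bits 48..63 that make the result canonical.
 */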

/*
 * Number of kernel PML4 slots.  Can be anywhere from 1 to 64, but setting
 * it larger than NDMPML4E makes no sense.
 *
 * Each slot provides 0.5 TB (512GB) of kernel virtual space.
 */
#define	NKPML4E		4

#define	NUPML4E		(NPML4EPG/2)	/* number of userland PML4 pages */
#define	NUPDPE		(NUPML4E*NPDPEPG)/* number of userland PDP pages */
#define	NUPDE		(NUPDPE*NPDEPG)	/* number of userland PD entries */

/*
 * NDMPML4E is the maximum number of PML4 entries that will be
 * used to implement the direct map.  It must be a power of two,
 * and should generally exceed NKPML4E.  The maximum possible
 * value is 64; using 128 will make the direct map intrude into
 * the recursive page table map.
 */
#define	NDMPML4E	8

/*
 * These values control the layout of virtual memory.  The starting address
 * of the direct map, which is controlled by DMPML4I, must be a multiple of
 * its size.  (See the PHYS_TO_DMAP() and DMAP_TO_PHYS() macros.)
 *
 * Note: KPML4I is the index of the (single) level 4 page that maps
 * the KVA that holds KERNBASE, while KPML4BASE is the index of the
 * first level 4 page that maps VM_MIN_KERNEL_ADDRESS.  If NKPML4E
 * is 1, these are the same, otherwise KPML4BASE < KPML4I and extra
 * level 4 PDEs are needed to map from VM_MIN_KERNEL_ADDRESS up to
 * KERNBASE.
 *
 * (KPML4I combines with KPDPI to choose where KERNBASE starts.
 * Or, in other words, KPML4I provides bits 39..47 of KERNBASE,
 * and KPDPI provides bits 30..38.)
 */
#define	PML4PML4I	(NPML4EPG/2)	/* Index of recursive pml4 mapping */

#define	KPML4BASE	(NPML4EPG-NKPML4E) /* KVM at highest addresses */
#define	DMPML4I		rounddown(KPML4BASE-NDMPML4E, NDMPML4E) /* Below KVM */

#define	KPML4I		(NPML4EPG-1)
#define	KPDPI		(NPDPEPG-2)	/* kernbase at -2GB */
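
/*
 * Worked consequence of the constants above (with NKPML4E == 4 and
 * NDMPML4E == 8): KPML4BASE == 508, so kernel VM occupies slots 508..511,
 * and DMPML4I == rounddown(500, 8) == 496, so the direct map occupies
 * slots 496..503.  Its base, KVADDR(DMPML4I, 0, 0, 0), is thereby aligned
 * to the direct map's 8 * 512GB size, as PHYS_TO_DMAP() requires.
 */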

/*
 * XXX doesn't really belong here I guess...
 */
#define	ISA_HOLE_START	  0xa0000
#define	ISA_HOLE_LENGTH	  (0x100000-ISA_HOLE_START)

#define	PMAP_PCID_NONE		0xffffffff
#define	PMAP_PCID_KERN		0
#define	PMAP_PCID_OVERMAX	0x1000

#ifndef LOCORE

#include <sys/queue.h>
#include <sys/_cpuset.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>

#include <vm/_vm_radix.h>

typedef u_int64_t pd_entry_t;
typedef u_int64_t pt_entry_t;
typedef u_int64_t pdp_entry_t;
typedef u_int64_t pml4_entry_t;

/*
 * Address of current address space page table maps and directories.
 */
#ifdef _KERNEL
#define	addr_PTmap	(KVADDR(PML4PML4I, 0, 0, 0))
#define	addr_PDmap	(KVADDR(PML4PML4I, PML4PML4I, 0, 0))
#define	addr_PDPmap	(KVADDR(PML4PML4I, PML4PML4I, PML4PML4I, 0))
#define	addr_PML4map	(KVADDR(PML4PML4I, PML4PML4I, PML4PML4I, PML4PML4I))
#define	addr_PML4pml4e	(addr_PML4map + (PML4PML4I * sizeof(pml4_entry_t)))
#define	PTmap		((pt_entry_t *)(addr_PTmap))
#define	PDmap		((pd_entry_t *)(addr_PDmap))
#define	PDPmap		((pd_entry_t *)(addr_PDPmap))
#define	PML4map		((pd_entry_t *)(addr_PML4map))
#define	PML4pml4e	((pd_entry_t *)(addr_PML4pml4e))

extern int nkpt;		/* Initial number of kernel page tables */
extern u_int64_t KPDPphys;	/* physical address of kernel level 3 */
extern u_int64_t KPML4phys;	/* physical address of kernel level 4 */

/*
 * Virtual address to page table entry and to physical address.
 * Note: these work recursively; thus vtopte of a pte will give
 * the corresponding pde that in turn maps it.
 */
pt_entry_t *vtopte(vm_offset_t);
#define	vtophys(va)	pmap_kextract(((vm_offset_t) (va)))
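
/*
 * Illustrative sketch of why the recursive map makes vtopte() cheap:
 * indexing PTmap by the upper bits of the virtual address lands on the
 * PTE that maps it, with no software walk (the real function is in
 * pmap.c):
 *
 *	pt_entry_t *
 *	vtopte(vm_offset_t va)
 *	{
 *		u_int64_t mask = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT +
 *		    NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);
 *
 *		return (PTmap + ((va >> PAGE_SHIFT) & mask));
 *	}
 */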

#define	pte_load_store(ptep, pte)	atomic_swap_long(ptep, pte)
#define	pte_load_clear(ptep)		atomic_swap_long(ptep, 0)
#define	pte_store(ptep, pte) do { \
	*(u_long *)(ptep) = (u_long)(pte); \
} while (0)
#define	pte_clear(ptep)			pte_store(ptep, 0)

#define	pde_store(pdep, pde)		pte_store(pdep, pde)
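
/*
 * Illustrative sketch: removing a mapping without losing a concurrent
 * hardware update of the dirty bit.  pte_load_clear() swaps in zero
 * atomically and returns the old entry, so no update can slip in between
 * the read and the clear ("m" is the hypothetical backing vm_page_t):
 *
 *	pt_entry_t old;
 *
 *	old = pte_load_clear(ptep);
 *	if ((old & (PG_M | PG_RW)) == (PG_M | PG_RW))
 *		vm_page_dirty(m);
 */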

extern pt_entry_t pg_nx;

#endif /* _KERNEL */

/*
 * Pmap stuff
 */
struct	pv_entry;
struct	pv_chunk;

/*
 * Locks
 * (p) PV list lock
 */
struct md_page {
	TAILQ_HEAD(, pv_entry)	pv_list;  /* (p) */
	int			pv_gen;   /* (p) */
	int			pat_mode;
};

enum pmap_type {
	PT_X86,			/* regular x86 page tables */
	PT_EPT,			/* Intel's nested page tables */
	PT_RVI,			/* AMD's nested page tables */
};

struct pmap_pcids {
	uint32_t	pm_pcid;
	uint32_t	pm_gen;
};

/*
 * The kernel virtual address (KVA) of the level 4 page table page is always
 * within the direct map (DMAP) region.
 */
struct pmap {
	struct mtx		pm_mtx;
	pml4_entry_t		*pm_pml4;	/* KVA of level 4 page table */
	uint64_t		pm_cr3;
	TAILQ_HEAD(,pv_chunk)	pm_pvchunk;	/* list of mappings in pmap */
	cpuset_t		pm_active;	/* active on cpus */
	enum pmap_type		pm_type;	/* regular or nested tables */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	struct vm_radix		pm_root;	/* spare page table pages */
	long			pm_eptgen;	/* EPT pmap generation id */
	int			pm_flags;
	struct pmap_pcids	pm_pcids[MAXCPU];
};

/* flags */
#define	PMAP_NESTED_IPIMASK	0xff
#define	PMAP_PDE_SUPERPAGE	(1 << 8)	/* supports 2MB superpages */
#define	PMAP_EMULATE_AD_BITS	(1 << 9)	/* needs A/D bits emulation */
#define	PMAP_SUPPORTS_EXEC_ONLY	(1 << 10)	/* execute only mappings ok */

typedef struct pmap	*pmap_t;

#ifdef _KERNEL
extern struct pmap	kernel_pmap_store;
#define	kernel_pmap	(&kernel_pmap_store)

#define	PMAP_LOCK(pmap)		mtx_lock(&(pmap)->pm_mtx)
#define	PMAP_LOCK_ASSERT(pmap, type) \
				mtx_assert(&(pmap)->pm_mtx, (type))
#define	PMAP_LOCK_DESTROY(pmap)	mtx_destroy(&(pmap)->pm_mtx)
#define	PMAP_LOCK_INIT(pmap)	mtx_init(&(pmap)->pm_mtx, "pmap", \
				    NULL, MTX_DEF | MTX_DUPOK)
#define	PMAP_LOCKED(pmap)	mtx_owned(&(pmap)->pm_mtx)
#define	PMAP_MTX(pmap)		(&(pmap)->pm_mtx)
#define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
#define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)
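
/*
 * Illustrative usage: code that walks or mutates a pmap's page tables
 * brackets the work with the macros above, and internal helpers assert
 * ownership:
 *
 *	PMAP_LOCK(pmap);
 *	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 *	... examine or modify the page tables ...
 *	PMAP_UNLOCK(pmap);
 */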

int	pmap_pinit_type(pmap_t pmap, enum pmap_type pm_type, int flags);
int	pmap_emulate_accessed_dirty(pmap_t pmap, vm_offset_t va, int ftype);
#endif

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_list.
 */
typedef struct pv_entry {
	vm_offset_t	pv_va;		/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry)	pv_next;
} *pv_entry_t;

/*
 * pv_entries are allocated in chunks per-process.  This avoids the
 * need to track per-pmap assignments.
 */
#define	_NPCM	3
#define	_NPCPV	168
struct pv_chunk {
	pmap_t			pc_pmap;
	TAILQ_ENTRY(pv_chunk)	pc_list;
	uint64_t		pc_map[_NPCM];	/* bitmap; 1 = free */
	TAILQ_ENTRY(pv_chunk)	pc_lru;
	struct pv_entry		pc_pventry[_NPCPV];
};
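
/*
 * The sizing is deliberate: one chunk fills a 4KB page (a 64-byte header
 * plus 168 entries of 24 bytes each).  The 168 free bits span the three
 * 64-bit pc_map words as 64 + 64 + 40, leaving the top 24 bits of the
 * last word always zero.  An illustrative allocation using the first set
 * bit as the free slot (the real code is get_pv_entry() in pmap.c):
 *
 *	field = 0;
 *	while (pc->pc_map[field] == 0)
 *		field++;
 *	bit = ffsl(pc->pc_map[field]) - 1;
 *	pc->pc_map[field] &= ~(1ul << bit);
 *	pv = &pc->pc_pventry[field * 64 + bit];
 */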

#ifdef _KERNEL

extern caddr_t	CADDR1;
extern pt_entry_t *CMAP1;
extern vm_paddr_t phys_avail[];
extern vm_paddr_t dump_avail[];
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;
extern vm_paddr_t dmaplimit;

#define	pmap_page_get_memattr(m)	((vm_memattr_t)(m)->md.pat_mode)
#define	pmap_page_is_write_mapped(m)	(((m)->aflags & PGA_WRITEABLE) != 0)
#define	pmap_unmapbios(va, sz)		pmap_unmapdev((va), (sz))

struct thread;

void	pmap_activate_sw(struct thread *);
void	pmap_bootstrap(vm_paddr_t *);
int	pmap_change_attr(vm_offset_t, vm_size_t, int);
void	pmap_demote_DMAP(vm_paddr_t base, vm_size_t len, boolean_t invalidate);
void	pmap_init_pat(void);
void	pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void	*pmap_kenter_temporary(vm_paddr_t pa, int i);
vm_paddr_t pmap_kextract(vm_offset_t);
void	pmap_kremove(vm_offset_t);
void	*pmap_mapbios(vm_paddr_t, vm_size_t);
void	*pmap_mapdev(vm_paddr_t, vm_size_t);
void	*pmap_mapdev_attr(vm_paddr_t, vm_size_t, int);
boolean_t pmap_page_is_mapped(vm_page_t m);
void	pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);
void	pmap_unmapdev(vm_offset_t, vm_size_t);
void	pmap_invalidate_page(pmap_t, vm_offset_t);
void	pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t);
void	pmap_invalidate_all(pmap_t);
void	pmap_invalidate_cache(void);
void	pmap_invalidate_cache_pages(vm_page_t *pages, int count);
void	pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva,
	    boolean_t force);
void	pmap_get_mapping(pmap_t pmap, vm_offset_t va, uint64_t *ptr, int *num);
boolean_t pmap_map_io_transient(vm_page_t *, vm_offset_t *, int, boolean_t);
void	pmap_unmap_io_transient(vm_page_t *, vm_offset_t *, int, boolean_t);
#endif /* _KERNEL */

#endif /* !LOCORE */

#endif /* !_MACHINE_PMAP_H_ */