/*-
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the pagetables themselves.  This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 * from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
 * from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
 * $FreeBSD: releng/7.4/sys/amd64/include/pmap.h 202970 2010-01-25 15:50:52Z jhb $
 */

#ifndef _MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

/*
 * Page-directory and page-table entries follow this format, with a few
 * of the fields not present here and there, depending on a lot of things.
 */
/* ---- Intel Nomenclature ---- */
#define	PG_V		0x001	/* P	Valid			*/
#define	PG_RW		0x002	/* R/W	Read/Write		*/
#define	PG_U		0x004	/* U/S	User/Supervisor		*/
#define	PG_NC_PWT	0x008	/* PWT	Write through		*/
#define	PG_NC_PCD	0x010	/* PCD	Cache disable		*/
#define	PG_A		0x020	/* A	Accessed		*/
#define	PG_M		0x040	/* D	Dirty			*/
#define	PG_PS		0x080	/* PS	Page size (0=4k,1=2M)	*/
#define	PG_PTE_PAT	0x080	/* PAT	PAT index		*/
#define	PG_G		0x100	/* G	Global			*/
#define	PG_AVAIL1	0x200	/*    /	Available for system	*/
#define	PG_AVAIL2	0x400	/*   <	programmers use		*/
#define	PG_AVAIL3	0x800	/*    \				*/
#define	PG_PDE_PAT	0x1000	/* PAT	PAT index		*/
#define	PG_NX		(1ul<<63) /* No-execute */

/* Our various interpretations of the above */
#define	PG_W		PG_AVAIL1	/* "Wired" pseudoflag */
#define	PG_MANAGED	PG_AVAIL2
#define	PG_FRAME	(0x000ffffffffff000ul)
#define	PG_PS_FRAME	(0x000fffffffe00000ul)
#define	PG_PROT		(PG_RW|PG_U)	/* all protection bits . */
#define	PG_N		(PG_NC_PWT|PG_NC_PCD)	/* Non-cacheable */
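
/*
 * Illustrative sketch (pa being a page-aligned physical address): a leaf
 * PTE is just the physical address masked by PG_FRAME, OR'ed with control
 * bits.  A writable, global kernel mapping might be built as
 *
 *	pt_entry_t pte = (pa & PG_FRAME) | PG_V | PG_RW | PG_G;
 *
 * while a 2MB superpage mapping uses PG_PS_FRAME and sets PG_PS:
 *
 *	pd_entry_t pde = (pa & PG_PS_FRAME) | PG_V | PG_RW | PG_PS;
 */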

/* Page level cache control fields used to determine the PAT type */
#define	PG_PDE_CACHE	(PG_PDE_PAT | PG_NC_PWT | PG_NC_PCD)
#define	PG_PTE_CACHE	(PG_PTE_PAT | PG_NC_PWT | PG_NC_PCD)
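
/*
 * Note that the PAT bit sits at bit 7 in a PTE but at bit 12 in a PDE
 * (bit 7 of a PDE is PG_PS).  A sketch of translating the cache control
 * bits of a 4KB mapping into their 2MB equivalents, as is needed when a
 * page table is promoted:
 *
 *	pd_entry_t cache = pte & (PG_NC_PWT | PG_NC_PCD);
 *	if (pte & PG_PTE_PAT)
 *		cache |= PG_PDE_PAT;
 */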

/*
 * Promotion to a 2MB (PDE) page mapping requires that the corresponding 4KB
 * (PTE) page mappings have identical settings for the following fields:
 */
#define	PG_PTE_PROMOTE	(PG_NX | PG_MANAGED | PG_W | PG_G | PG_PTE_PAT | \
	    PG_M | PG_A | PG_NC_PCD | PG_NC_PWT | PG_U | PG_RW | PG_V)
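
/*
 * A sketch of the promotion test (pmap.c performs further checks, e.g. on
 * PG_RW vs. PG_M consistency): all NPTEPG (512) PTEs in the page table
 * must agree with the first one in the PG_PTE_PROMOTE fields and must map
 * physically consecutive 4KB pages.
 *
 *	pa = *firstpte & PG_FRAME;
 *	for (pte = firstpte; pte < firstpte + NPTEPG; pte++) {
 *		if ((*pte & PG_FRAME) != pa ||
 *		    (*pte & PG_PTE_PROMOTE) != (*firstpte & PG_PTE_PROMOTE))
 *			return;		(cannot promote)
 *		pa += PAGE_SIZE;
 *	}
 */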

/*
 * Page Protection Exception bits
 */

#define	PGEX_P		0x01	/* Protection violation vs. not present */
#define	PGEX_W		0x02	/* during a Write cycle */
#define	PGEX_U		0x04	/* access from User mode (UPL) */
#define	PGEX_RSV	0x08	/* reserved PTE field is non-zero */
#define	PGEX_I		0x10	/* during an instruction fetch */
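
/*
 * For example, a trap handler can classify a page fault from the hardware
 * error code (the value the CPU pushes on the exception frame, "err"
 * below):
 *
 *	if ((err & PGEX_P) == 0) {
 *		... page not present: possible demand page-in
 *	} else if (err & PGEX_W) {
 *		... write to a read-only mapping: possible copy-on-write
 *	} else if (err & PGEX_I) {
 *		... instruction fetch from a PG_NX page
 *	}
 */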

/*
 * Pte related macros.  This is complicated by having to deal with
 * the sign extension of the 48th bit.
 */
#define	KVADDR(l4, l3, l2, l1) ( \
	((unsigned long)-1 << 47) | \
	((unsigned long)(l4) << PML4SHIFT) | \
	((unsigned long)(l3) << PDPSHIFT) | \
	((unsigned long)(l2) << PDRSHIFT) | \
	((unsigned long)(l1) << PAGE_SHIFT))

#define	UVADDR(l4, l3, l2, l1) ( \
	((unsigned long)(l4) << PML4SHIFT) | \
	((unsigned long)(l3) << PDPSHIFT) | \
	((unsigned long)(l2) << PDRSHIFT) | \
	((unsigned long)(l1) << PAGE_SHIFT))
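
/*
 * Only 48 bits of virtual address are implemented; bits 63-48 must equal
 * bit 47 ("canonical" form).  KVADDR builds kernel addresses, whose bit 47
 * is set, by OR-ing in the sign extension up front; UVADDR builds user
 * addresses, whose bit 47 is clear.  For example:
 *
 *	UVADDR(0, 0, 0, 1)   == 0x0000000000001000
 *	KVADDR(256, 0, 0, 0) == 0xffff800000000000
 */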

/* Initial number of kernel page tables. */
#ifndef NKPT
#define	NKPT		32
#endif

#define	NKPML4E		1		/* number of kernel PML4 slots */
#define	NKPDPE		howmany(NKPT, NPDEPG)	/* number of kernel PDP slots */

#define	NUPML4E		(NPML4EPG/2)	/* number of userland PML4 pages */
#define	NUPDPE		(NUPML4E*NPDPEPG) /* number of userland PDP pages */
#define	NUPDE		(NUPDPE*NPDEPG)	/* number of userland PD entries */

#define	NDMPML4E	1		/* number of dmap PML4 slots */

/*
 * The *PDI values control the layout of virtual memory
 */
#define	PML4PML4I	(NPML4EPG/2)	/* Index of recursive pml4 mapping */

#define	KPML4I		(NPML4EPG-1)	/* Top 512GB for KVM */
#define	DMPML4I		(KPML4I-1)	/* Next 512GB down for direct map */

#define	KPDPI		(NPDPEPG-2)	/* kernbase at -2GB */
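
/*
 * Substituting these indices (NPML4EPG == NPDPEPG == 512) into KVADDR
 * gives the fixed layout; a few worked values:
 *
 *	KVADDR(PML4PML4I, 0, 0, 0)  == 0xffff800000000000  recursive map
 *	KVADDR(DMPML4I, 0, 0, 0)    == 0xffffff0000000000  direct map base
 *	KVADDR(KPML4I, KPDPI, 0, 0) == 0xffffffff80000000  kernbase (-2GB)
 */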

/*
 * XXX doesn't really belong here I guess...
 */
#define	ISA_HOLE_START	0xa0000
#define	ISA_HOLE_LENGTH	(0x100000-ISA_HOLE_START)

#ifndef LOCORE

#include <sys/queue.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>

typedef u_int64_t pd_entry_t;
typedef u_int64_t pt_entry_t;
typedef u_int64_t pdp_entry_t;
typedef u_int64_t pml4_entry_t;

#define	PML4ESHIFT	(3)
#define	PDPESHIFT	(3)
#define	PTESHIFT	(3)
#define	PDESHIFT	(3)

/*
 * Address of current address space page table maps and directories.
 */
#ifdef _KERNEL
#define	addr_PTmap	(KVADDR(PML4PML4I, 0, 0, 0))
#define	addr_PDmap	(KVADDR(PML4PML4I, PML4PML4I, 0, 0))
#define	addr_PDPmap	(KVADDR(PML4PML4I, PML4PML4I, PML4PML4I, 0))
#define	addr_PML4map	(KVADDR(PML4PML4I, PML4PML4I, PML4PML4I, PML4PML4I))
#define	addr_PML4pml4e	(addr_PML4map + (PML4PML4I * sizeof(pml4_entry_t)))
#define	PTmap		((pt_entry_t *)(addr_PTmap))
#define	PDmap		((pd_entry_t *)(addr_PDmap))
#define	PDPmap		((pdp_entry_t *)(addr_PDPmap))
#define	PML4map		((pml4_entry_t *)(addr_PML4map))
#define	PML4pml4e	((pml4_entry_t *)(addr_PML4pml4e))

extern u_int64_t KPML4phys;	/* physical address of kernel level 4 */
#endif

#ifdef _KERNEL
/*
 * virtual address to page table entry and
 * to physical address.
 * Note: these work recursively, thus vtopte of a pte will give
 * the corresponding pde that in turn maps it.
 */
pt_entry_t *vtopte(vm_offset_t);
#define	vtophys(va)	pmap_kextract(((vm_offset_t) (va)))
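
/*
 * A sketch of how vtopte falls out of the recursive mapping (essentially
 * what pmap.c does): shifting the VA right by PAGE_SHIFT and keeping the
 * low 36 index bits turns it into an index into the 512GB window of PTEs
 * at PTmap.
 *
 *	pt_entry_t *
 *	vtopte(vm_offset_t va)
 *	{
 *		u_int64_t mask = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT +
 *		    NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);
 *
 *		return (PTmap + ((va >> PAGE_SHIFT) & mask));
 *	}
 */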

/* Ordinary (non-atomic) read of a PTE. */
static __inline pt_entry_t
pte_load(pt_entry_t *ptep)
{
	pt_entry_t r;

	r = *ptep;
	return (r);
}

/*
 * Atomically exchange a PTE: store the new value and return the old one.
 * The xchgq instruction is implicitly locked, so no "lock" prefix is
 * needed, and accessed/dirty bits that the CPU may be setting
 * concurrently cannot be lost.
 */
static __inline pt_entry_t
pte_load_store(pt_entry_t *ptep, pt_entry_t pte)
{
	pt_entry_t r;

	__asm __volatile(
	    "xchgq %0,%1"
	    : "=m" (*ptep),
	      "=r" (r)
	    : "1" (pte),
	      "m" (*ptep));
	return (r);
}

#define	pte_load_clear(ptep)	atomic_readandclear_long(ptep)

static __inline void
pte_store(pt_entry_t *ptep, pt_entry_t pte)
{

	*ptep = pte;
}

#define	pte_clear(ptep)		pte_store((ptep), (pt_entry_t)0ULL)

#define	pde_store(pdep, pde)	pte_store((pdep), (pde))
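
/*
 * A typical removal sequence (sketch): atomically clear the PTE so that
 * concurrently-set PG_M/PG_A bits are captured, then invalidate the TLB
 * entry before the physical page is reused.
 *
 *	oldpte = pte_load_clear(ptep);
 *	if (oldpte & PG_M)
 *		vm_page_dirty(m);	... propagate the dirty bit
 *	pmap_invalidate_page(pmap, va);
 */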

extern pt_entry_t pg_nx;

#endif /* _KERNEL */

/*
 * Pmap stuff
 */
struct pv_entry;
struct pv_chunk;

struct md_page {
	int			pat_mode;
	TAILQ_HEAD(,pv_entry)	pv_list;
};

/*
 * The kernel virtual address (KVA) of the level 4 page table page is always
 * within the direct map (DMAP) region.
 */
struct pmap {
	struct mtx		pm_mtx;
	pml4_entry_t		*pm_pml4;	/* KVA of level 4 page table */
	TAILQ_HEAD(,pv_chunk)	pm_pvchunk;	/* list of mappings in pmap */
	u_int			pm_active;	/* active on cpus */
	/* spare u_int here due to padding */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	vm_page_t		pm_root;	/* spare page table pages */
};

typedef struct pmap	*pmap_t;

#ifdef _KERNEL
extern struct pmap	kernel_pmap_store;
#define	kernel_pmap	(&kernel_pmap_store)

#define	PMAP_LOCK(pmap)		mtx_lock(&(pmap)->pm_mtx)
#define	PMAP_LOCK_ASSERT(pmap, type) \
				mtx_assert(&(pmap)->pm_mtx, (type))
#define	PMAP_LOCK_DESTROY(pmap)	mtx_destroy(&(pmap)->pm_mtx)
#define	PMAP_LOCK_INIT(pmap)	mtx_init(&(pmap)->pm_mtx, "pmap", \
				    NULL, MTX_DEF | MTX_DUPOK)
#define	PMAP_LOCKED(pmap)	mtx_owned(&(pmap)->pm_mtx)
#define	PMAP_MTX(pmap)		(&(pmap)->pm_mtx)
#define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
#define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)
#endif
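
/*
 * Usage sketch: the pmap mutex protects the page tables and statistics of
 * one address space, and every modification is bracketed by it:
 *
 *	PMAP_LOCK(pmap);
 *	... look up or modify this pmap's page table entries ...
 *	PMAP_UNLOCK(pmap);
 *
 * MTX_DUPOK is passed to mtx_init() because two "pmap" mutexes can be
 * held at once, for instance when copying mappings between two address
 * spaces.
 */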

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_list.
 */
typedef struct pv_entry {
	vm_offset_t		pv_va;		/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry)	pv_list;
} *pv_entry_t;

/*
 * pv_entries are allocated in chunks per-process.  This avoids the
 * need to track per-pmap assignments.  The sizes work out so that a
 * pv_chunk (a 64-byte header plus 168 24-byte entries) fills exactly
 * one 4KB page.
 */
#define	_NPCM	3
#define	_NPCPV	168
struct pv_chunk {
	pmap_t			pc_pmap;
	TAILQ_ENTRY(pv_chunk)	pc_list;
	uint64_t		pc_map[_NPCM];	/* bitmap; 1 = free */
	uint64_t		pc_spare[2];
	struct pv_entry		pc_pventry[_NPCPV];
};
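
/*
 * Allocation sketch: finding a free pv_entry means finding a set bit in
 * pc_map[] (1 = free) and clearing it; freeing an entry sets the bit
 * again.  With 3 x 64 = 192 bits for 168 entries, the top 24 bits of
 * pc_map[2] stay permanently zero.
 *
 *	for (field = 0; field < _NPCM; field++) {
 *		if (pc->pc_map[field] != 0) {
 *			bit = ffsl(pc->pc_map[field]) - 1;
 *			pc->pc_map[field] &= ~(1ul << bit);
 *			pv = &pc->pc_pventry[field * 64 + bit];
 *			break;
 *		}
 *	}
 */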

#ifdef _KERNEL

#define	NPPROVMTRR		8
#define	PPRO_VMTRRphysBase0	0x200
#define	PPRO_VMTRRphysMask0	0x201
struct ppro_vmtrr {
	u_int64_t base, mask;
};
extern struct ppro_vmtrr PPro_vmtrr[NPPROVMTRR];

extern caddr_t	CADDR1;
extern pt_entry_t *CMAP1;
extern vm_paddr_t phys_avail[];
extern vm_paddr_t dump_avail[];
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;

#define	pmap_page_get_memattr(m)	((vm_memattr_t)(m)->md.pat_mode)
#define	pmap_unmapbios(va, sz)		pmap_unmapdev((va), (sz))

void	pmap_bootstrap(vm_paddr_t *);
int	pmap_change_attr(vm_offset_t, vm_size_t, int);
void	pmap_init_pat(void);
void	pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void	*pmap_kenter_temporary(vm_paddr_t pa, int i);
vm_paddr_t pmap_kextract(vm_offset_t);
void	pmap_kremove(vm_offset_t);
void	*pmap_mapbios(vm_paddr_t, vm_size_t);
void	*pmap_mapdev(vm_paddr_t, vm_size_t);
void	*pmap_mapdev_attr(vm_paddr_t, vm_size_t, int);
boolean_t pmap_page_is_mapped(vm_page_t m);
void	pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);
void	pmap_unmapdev(vm_offset_t, vm_size_t);
void	pmap_invalidate_page(pmap_t, vm_offset_t);
void	pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t);
void	pmap_invalidate_all(pmap_t);
void	pmap_invalidate_cache(void);
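
/*
 * Driver-style usage sketch: map a device's register window, access it,
 * then tear the mapping down again (the address and size below are
 * hypothetical):
 *
 *	void *regs = pmap_mapdev(0xfebc0000, 0x1000);
 *	...
 *	pmap_unmapdev((vm_offset_t)regs, 0x1000);
 *
 * pmap_mapdev_attr() additionally takes a caching-mode argument, for
 * memory that should be mapped, e.g., write-combined rather than
 * uncacheable.
 */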

#endif /* _KERNEL */

#endif /* !LOCORE */

#endif /* !_MACHINE_PMAP_H_ */