/*-
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from the hp300 version by Mike Hibler; this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the page tables themselves.  This is done to
 * reduce the impact on kernel virtual memory of large amounts of sparse
 * address space, and to reduce the memory cost of each process.
 *
 *	from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
 *	from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
 * $FreeBSD$
 */

#ifndef	_MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

/*
 * Page-directory and page-table entries follow this format, with a few
 * of the fields not present here and there, depending on a lot of things.
 */
/* ---- Intel Nomenclature ---- */
#define	PG_V		0x001	/* P	Valid			*/
#define	PG_RW		0x002	/* R/W	Read/Write		*/
#define	PG_U		0x004	/* U/S	User/Supervisor		*/
#define	PG_NC_PWT	0x008	/* PWT	Write through		*/
#define	PG_NC_PCD	0x010	/* PCD	Cache disable		*/
#define	PG_A		0x020	/* A	Accessed		*/
#define	PG_M		0x040	/* D	Dirty			*/
#define	PG_PS		0x080	/* PS	Page size (0=4k,1=2M)	*/
#define	PG_PTE_PAT	0x080	/* PAT	PAT index		*/
#define	PG_G		0x100	/* G	Global			*/
#define	PG_AVAIL1	0x200	/*    /	Available for system	*/
#define	PG_AVAIL2	0x400	/*   <	programmers use		*/
#define	PG_AVAIL3	0x800	/*    \				*/
#define	PG_PDE_PAT	0x1000	/* PAT	PAT index		*/
#define	PG_NX		(1ul<<63)	/* No-execute */


/* Our various interpretations of the above */
#define	PG_W		PG_AVAIL1	/* "Wired" pseudoflag */
#define	PG_MANAGED	PG_AVAIL2
#define	PG_FRAME	(0x000ffffffffff000ul)
#define	PG_PS_FRAME	(0x000fffffffe00000ul)
#define	PG_PROT		(PG_RW|PG_U)	/* all protection bits */
#define	PG_N		(PG_NC_PWT|PG_NC_PCD)	/* Non-cacheable */
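
/*
 * For illustration: the physical frame behind a pte is recovered with
 * (pte & PG_FRAME), which keeps physical-address bits 12..51 and strips
 * both the low flag bits and the high bits, including PG_NX at bit 63.
 */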

/*
 * Page Protection Exception bits
 */

#define	PGEX_P		0x01	/* Protection violation vs. not present */
#define	PGEX_W		0x02	/* during a Write cycle */
#define	PGEX_U		0x04	/* access from User mode (UPL) */
#define	PGEX_RSV	0x08	/* reserved PTE field is non-zero */
#define	PGEX_I		0x10	/* during an instruction fetch */
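
/*
 * Example: a user-mode write to an unmapped page faults with error code
 * (PGEX_U | PGEX_W) == 0x06; PGEX_P is clear because the fault came from
 * a non-present entry rather than a protection violation.
 */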

/*
 * PTE-related macros.  These are complicated by the need to sign-extend
 * bit 47 (the 48th bit) into the upper half to form a canonical address.
 */
#define	KVADDR(l4, l3, l2, l1) ( \
	((unsigned long)-1 << 47) | \
	((unsigned long)(l4) << PML4SHIFT) | \
	((unsigned long)(l3) << PDPSHIFT) | \
	((unsigned long)(l2) << PDRSHIFT) | \
	((unsigned long)(l1) << PAGE_SHIFT))

#define	UVADDR(l4, l3, l2, l1) ( \
	((unsigned long)(l4) << PML4SHIFT) | \
	((unsigned long)(l3) << PDPSHIFT) | \
	((unsigned long)(l2) << PDRSHIFT) | \
	((unsigned long)(l1) << PAGE_SHIFT))
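
/*
 * Worked example: KVADDR(KPML4I, KPDPI, 0, 0), i.e. indices (511, 510,
 * 0, 0), evaluates to 0xffffffff80000000, the -2GB kernel base; the
 * (-1 << 47) term replicates bit 47 through bits 48-63.
 */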

/* Initial number of kernel page tables. */
#ifndef NKPT
/*
 * 240 page tables: enough to map the vm_page array for 16GB of RAM
 * (120-byte struct vm_page, 2MB mapped per page table).
 */
#define	NKPT		240
#endif
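
/*
 * The arithmetic behind NKPT (illustrative): 16GB / 4KB = 4M pages;
 * 4M pages * 120B of struct vm_page each = 480MB of vm_page array;
 * one page table maps 2MB, so 480MB / 2MB = 240 page tables.
 */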

#define	NKPML4E		1		/* number of kernel PML4 slots */
#define	NKPDPE		1		/* number of kernel PDP slots */
#define	NKPDE		(NKPDPE*NPDEPG)	/* number of kernel PD slots */

#define	NUPML4E		(NPML4EPG/2)	/* number of userland PML4 entries */
#define	NUPDPE		(NUPML4E*NPDPEPG) /* number of userland PDP entries */
#define	NUPDE		(NUPDPE*NPDEPG)	/* number of userland PD entries */

#define	NDMPML4E	1		/* number of dmap PML4 slots */

/*
 * The *PDI values control the layout of virtual memory.
 */
#define	PML4PML4I	(NPML4EPG/2)	/* Index of recursive pml4 mapping */

#define	KPML4I		(NPML4EPG-1)	/* Top 512GB for KVM */
#define	DMPML4I		(KPML4I-1)	/* Next 512GB down for direct map */

#define	KPDPI		(NPDPEPG-2)	/* kernbase at -2GB */
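
/*
 * For illustration, with 512 entries per level these indices put the
 * recursive map at KVADDR(256, 0, 0, 0) = 0xffff800000000000, the
 * direct map at KVADDR(510, 0, 0, 0) = 0xffffff0000000000, and the
 * kernel at KVADDR(511, 510, 0, 0) = 0xffffffff80000000.
 */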

/*
 * XXX doesn't really belong here I guess...
 */
#define	ISA_HOLE_START		0xa0000
#define	ISA_HOLE_LENGTH		(0x100000-ISA_HOLE_START)

#ifndef LOCORE

#include <sys/queue.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>

typedef u_int64_t pd_entry_t;
typedef u_int64_t pt_entry_t;
typedef u_int64_t pdp_entry_t;
typedef u_int64_t pml4_entry_t;

#define	PML4ESHIFT	(3)
#define	PDPESHIFT	(3)
#define	PTESHIFT	(3)
#define	PDESHIFT	(3)

/*
 * Address of current and alternate address space page table maps
 * and directories.
 * XXX it might be saner to just direct map all of physical memory
 * into the kernel using 2MB pages.  We have enough space to do
 * it (2^47 bytes of KVM, while current max physical addressability
 * is 2^40 bytes, i.e. 40 physical address bits).  Then we can get
 * rid of the evil hole in the page tables and the evil overlapping.
 */
#ifdef _KERNEL
#define	addr_PTmap	(KVADDR(PML4PML4I, 0, 0, 0))
#define	addr_PDmap	(KVADDR(PML4PML4I, PML4PML4I, 0, 0))
#define	addr_PDPmap	(KVADDR(PML4PML4I, PML4PML4I, PML4PML4I, 0))
#define	addr_PML4map	(KVADDR(PML4PML4I, PML4PML4I, PML4PML4I, PML4PML4I))
#define	addr_PML4pml4e	(addr_PML4map + (PML4PML4I * sizeof(pml4_entry_t)))
#define	PTmap		((pt_entry_t *)(addr_PTmap))
#define	PDmap		((pd_entry_t *)(addr_PDmap))
#define	PDPmap		((pdp_entry_t *)(addr_PDPmap))
#define	PML4map		((pml4_entry_t *)(addr_PML4map))
#define	PML4pml4e	((pml4_entry_t *)(addr_PML4pml4e))

extern u_int64_t KPML4phys;	/* physical address of kernel level 4 */
#endif

#ifdef _KERNEL
/*
 * Convert a virtual address to its page table entry and to its
 * physical address.
 * Note: these work recursively, thus vtopte of a pte will give
 * the corresponding pde that in turn maps it.
 */
pt_entry_t *vtopte(vm_offset_t);
#define	vtophys(va)	pmap_kextract(((vm_offset_t) (va)))
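
/*
 * Illustrative sketch only (the kernel's vtopte() is defined in pmap.c):
 * because of the recursive slot, the address of a va's pte is just the
 * va's four 9-bit table indices reused as an offset into PTmap.
 *
 *	static __inline pt_entry_t *
 *	vtopte_sketch(vm_offset_t va)	// hypothetical name
 *	{
 *		u_int64_t mask = (1ul << (9 + 9 + 9 + 9)) - 1;
 *
 *		return (PTmap + ((va >> PAGE_SHIFT) & mask));
 *	}
 */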

static __inline pt_entry_t
pte_load(pt_entry_t *ptep)
{
	pt_entry_t r;

	r = *ptep;
	return (r);
}

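/*
 * Atomically exchange a pte: xchgq with a memory operand carries an
 * implicit lock prefix, so the old entry is returned with no window in
 * which another CPU could observe a partial update.
 */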
static __inline pt_entry_t
pte_load_store(pt_entry_t *ptep, pt_entry_t pte)
{
	pt_entry_t r;

	__asm __volatile(
	    "xchgq %0,%1"
	    : "=m" (*ptep),
	      "=r" (r)
	    : "1" (pte),
	      "m" (*ptep));
	return (r);
}

#define	pte_load_clear(pte)	atomic_readandclear_long(pte)

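/*
 * Plain, non-atomic store; adequate when no other CPU can be walking
 * or updating the entry concurrently.
 */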
static __inline void
pte_store(pt_entry_t *ptep, pt_entry_t pte)
{

	*ptep = pte;
}

#define	pte_clear(ptep)		pte_store((ptep), (pt_entry_t)0ULL)

#define	pde_store(pdep, pde)	pte_store((pdep), (pde))

extern pt_entry_t pg_nx;

#endif /* _KERNEL */

/*
 * Pmap stuff
 */
struct	pv_entry;
struct	pv_chunk;

struct md_page {
	int			pv_list_count;
	TAILQ_HEAD(,pv_entry)	pv_list;
};

struct pmap {
	struct mtx		pm_mtx;
	pml4_entry_t		*pm_pml4;	/* KVA of level 4 page table */
	TAILQ_HEAD(,pv_chunk)	pm_pvchunk;	/* list of mappings in pmap */
	u_int			pm_active;	/* active on cpus */
	/* spare u_int here due to padding */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
};

typedef struct pmap	*pmap_t;

#ifdef _KERNEL
extern struct pmap	kernel_pmap_store;
#define	kernel_pmap	(&kernel_pmap_store)

#define	PMAP_LOCK(pmap)		mtx_lock(&(pmap)->pm_mtx)
#define	PMAP_LOCK_ASSERT(pmap, type) \
				mtx_assert(&(pmap)->pm_mtx, (type))
#define	PMAP_LOCK_DESTROY(pmap)	mtx_destroy(&(pmap)->pm_mtx)
#define	PMAP_LOCK_INIT(pmap)	mtx_init(&(pmap)->pm_mtx, "pmap", \
				    NULL, MTX_DEF | MTX_DUPOK)
#define	PMAP_LOCKED(pmap)	mtx_owned(&(pmap)->pm_mtx)
#define	PMAP_MTX(pmap)		(&(pmap)->pm_mtx)
#define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
#define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)
#endif
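
/*
 * Typical usage (illustrative): callers bracket page-table updates with
 * the per-pmap mutex, e.g.
 *
 *	PMAP_LOCK(pmap);
 *	... look up and modify entries, update pm_stats ...
 *	PMAP_UNLOCK(pmap);
 */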

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_list.
 */
typedef struct pv_entry {
	vm_offset_t		pv_va;	/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry)	pv_list;
} *pv_entry_t;

/*
 * pv_entries are allocated in per-process chunks, which avoids the
 * need to track per-pmap assignments.  See the size arithmetic below.
 */
#define	_NPCM	3
#define	_NPCPV	168
struct pv_chunk {
	pmap_t			pc_pmap;
	TAILQ_ENTRY(pv_chunk)	pc_list;
	uint64_t		pc_map[_NPCM];	/* bitmap; 1 = free */
	uint64_t		pc_spare[2];
	struct pv_entry		pc_pventry[_NPCPV];
};
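
/*
 * The sizing arithmetic (illustrative, assuming LP64 and 4KB pages):
 * each pv_entry is 24 bytes (8-byte pv_va plus two 8-byte TAILQ
 * pointers), the chunk header is 64 bytes (8 + 16 + 24 + 16), and
 * 64 + 168 * 24 = 4096, so a pv_chunk fills exactly one page.
 * _NPCM is 3 because 168 free bits need three 64-bit map words.
 */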

#ifdef _KERNEL

#define	NPPROVMTRR		8
#define	PPRO_VMTRRphysBase0	0x200
#define	PPRO_VMTRRphysMask0	0x201
struct ppro_vmtrr {
	u_int64_t base, mask;
};
extern struct ppro_vmtrr PPro_vmtrr[NPPROVMTRR];

extern caddr_t	CADDR1;
extern pt_entry_t *CMAP1;
extern vm_paddr_t phys_avail[];
extern vm_paddr_t dump_avail[];
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;

#define	pmap_page_is_mapped(m)	(!TAILQ_EMPTY(&(m)->md.pv_list))
#define	pmap_unmapbios(va, sz)	pmap_unmapdev((va), (sz))

void	pmap_bootstrap(vm_paddr_t *);
int	pmap_change_attr(vm_offset_t, vm_size_t, int);
void	pmap_init_pat(void);
void	pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void	pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
void	*pmap_kenter_temporary(vm_paddr_t pa, int i);
vm_paddr_t pmap_kextract(vm_offset_t);
void	pmap_kremove(vm_offset_t);
void	*pmap_mapbios(vm_paddr_t, vm_size_t);
void	*pmap_mapdev(vm_paddr_t, vm_size_t);
void	*pmap_mapdev_attr(vm_paddr_t, vm_size_t, int);
void	pmap_unmapdev(vm_offset_t, vm_size_t);
void	pmap_invalidate_page(pmap_t, vm_offset_t);
void	pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t);
void	pmap_invalidate_all(pmap_t);
void	pmap_invalidate_cache(void);

#endif /* _KERNEL */

#endif /* !LOCORE */

#endif /* !_MACHINE_PMAP_H_ */