/*
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the pagetables themselves.  This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 *	from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
 *	from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
 * $FreeBSD: releng/5.2/sys/amd64/include/pmap.h 122849 2003-11-17 08:58:16Z peter $
 */

#ifndef _MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

/*
 * Page-directory and page-table entries follow this format, with a few
 * of the fields not present here and there, depending on a lot of things.
 */
/* ---- Intel Nomenclature ---- */
#define	PG_V		0x001	/* P	Valid			*/
#define	PG_RW		0x002	/* R/W	Read/Write		*/
#define	PG_U		0x004	/* U/S	User/Supervisor		*/
#define	PG_NC_PWT	0x008	/* PWT	Write through		*/
#define	PG_NC_PCD	0x010	/* PCD	Cache disable		*/
#define	PG_A		0x020	/* A	Accessed		*/
#define	PG_M		0x040	/* D	Dirty			*/
#define	PG_PS		0x080	/* PS	Page size (0=4k,1=2M)	*/
#define	PG_G		0x100	/* G	Global			*/
#define	PG_AVAIL1	0x200	/*    /	Available for system	*/
#define	PG_AVAIL2	0x400	/*   <	programmers use		*/
#define	PG_AVAIL3	0x800	/*    \				*/


/* Our various interpretations of the above */
#define	PG_W		PG_AVAIL1	/* "Wired" pseudoflag */
#define	PG_MANAGED	PG_AVAIL2
#define	PG_FRAME	(~((vm_paddr_t)PAGE_MASK))
#define	PG_PROT		(PG_RW|PG_U)	/* all protection bits. */
#define	PG_N		(PG_NC_PWT|PG_NC_PCD)	/* Non-cacheable */
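
/*
 * Illustrative only: a leaf pte for a writable, user-visible mapping of
 * the physical page at 'pa' would be built by combining the frame
 * address with the bits above, roughly
 *
 *	(pa & PG_FRAME) | PG_V | PG_RW | PG_U
 *
 * with PG_A/PG_M either preset or left for the hardware to set, and
 * PG_N added when the mapping must bypass the cache.  The exact mix is
 * up to the caller; this is only a sketch of how the bits compose.
 */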

/*
 * Page Protection Exception bits
 */

#define	PGEX_P		0x01	/* Protection violation vs. not present */
#define	PGEX_W		0x02	/* during a Write cycle */
#define	PGEX_U		0x04	/* access from User mode (UPL) */

/*
 * Pte related macros.  This is complicated by having to deal with
 * the sign extension of the 48th bit.
 */
#define	KVADDR(l4, l3, l2, l1) ( \
	((unsigned long)-1 << 47) | \
	((unsigned long)(l4) << PML4SHIFT) | \
	((unsigned long)(l3) << PDPSHIFT) | \
	((unsigned long)(l2) << PDRSHIFT) | \
	((unsigned long)(l1) << PAGE_SHIFT))

#define	UVADDR(l4, l3, l2, l1) ( \
	((unsigned long)(l4) << PML4SHIFT) | \
	((unsigned long)(l3) << PDPSHIFT) | \
	((unsigned long)(l2) << PDRSHIFT) | \
	((unsigned long)(l1) << PAGE_SHIFT))
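
/*
 * Worked example (the index values follow from the definitions below):
 * for the kernel text region,
 *
 *	KVADDR(KPML4I, KPDPI, 0, 0) = KVADDR(511, 510, 0, 0)
 *	    = 0xffff800000000000 | (511ul << 39) | (510ul << 30)
 *	    = 0xffffffff80000000
 *
 * i.e. the canonical -2GB kernel base.  The ((unsigned long)-1 << 47)
 * term supplies the 1s in bits 48-63 that the hardware requires once
 * bit 47 is set, which is what the comment above means by sign
 * extension.
 */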

#ifndef NKPT
#define	NKPT		120	/* initial number of kernel page tables */
#endif

#define	NKPML4E		1	/* number of kernel PML4 slots */
#define	NKPDPE		1	/* number of kernel PDP slots */
#define	NKPDE		(NKPDPE*NPDEPG)	/* number of kernel PD slots */

#define	NUPML4E		(NPML4EPG/2)	/* number of userland PML4 slots */
#define	NUPDPE		(NUPML4E*NPDPEPG)	/* number of userland PDP entries */
#define	NUPDE		(NUPDPE*NPDEPG)	/* number of userland PD entries */

#define	NDMPML4E	1	/* number of dmap PML4 slots */

/*
 * The *PDI values control the layout of virtual memory
 */
#define	PML4PML4I	(NPML4EPG/2)	/* Index of recursive pml4 mapping */

#define	KPML4I		(NPML4EPG-1)	/* Top 512GB for KVM */
#define	DMPML4I		(KPML4I-1)	/* Next 512GB down for direct map */

#define	KPDPI		(NPDPEPG-2)	/* kernbase at -2GB */
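
/*
 * With the indices above, the top half of the address space works out
 * roughly as follows (addresses per the KVADDR() macro; see
 * <machine/vmparam.h> and pmap.c for the authoritative layout):
 *
 *	PML4PML4I (256)	0xffff800000000000	recursive page table map
 *	DMPML4I   (510)	0xffffff0000000000	direct map of physical memory
 *	KPML4I    (511)	0xffffff8000000000	kernel 512GB region
 *	KPML4I/KPDPI	0xffffffff80000000	kernel base (-2GB)
 */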

/*
 * XXX doesn't really belong here I guess...
 */
#define	ISA_HOLE_START	0xa0000
#define	ISA_HOLE_LENGTH	(0x100000-ISA_HOLE_START)

#ifndef LOCORE

#include <sys/queue.h>

typedef u_int64_t pd_entry_t;
typedef u_int64_t pt_entry_t;
typedef u_int64_t pdp_entry_t;
typedef u_int64_t pml4_entry_t;

#define	PML4ESHIFT	(3)
#define	PDPESHIFT	(3)
#define	PTESHIFT	(3)
#define	PDESHIFT	(3)

/*
 * Address of current address space page table maps and directories.
 * XXX it might be saner to just direct map all of physical memory
 * into the kernel using 2MB pages.  We have enough space to do
 * it (2^47 bytes of KVM, while the current maximum physical address
 * width is 40 bits).  Then we can get rid of the evil hole in the
 * page tables and the evil overlapping.
 */
#ifdef _KERNEL
#define	addr_PTmap	(KVADDR(PML4PML4I, 0, 0, 0))
#define	addr_PDmap	(KVADDR(PML4PML4I, PML4PML4I, 0, 0))
#define	addr_PDPmap	(KVADDR(PML4PML4I, PML4PML4I, PML4PML4I, 0))
#define	addr_PML4map	(KVADDR(PML4PML4I, PML4PML4I, PML4PML4I, PML4PML4I))
#define	addr_PML4pml4e	(addr_PML4map + (PML4PML4I * sizeof(pml4_entry_t)))
#define	PTmap		((pt_entry_t *)(addr_PTmap))
#define	PDmap		((pd_entry_t *)(addr_PDmap))
#define	PDPmap		((pd_entry_t *)(addr_PDPmap))
#define	PML4map		((pd_entry_t *)(addr_PML4map))
#define	PML4pml4e	((pd_entry_t *)(addr_PML4pml4e))

extern u_int64_t KPML4phys;	/* physical address of kernel level 4 */
#endif

#ifdef _KERNEL
/*
 * virtual address to page table entry and
 * to physical address.
 * Note: these work recursively, thus vtopte of a pte will give
 * the corresponding pde that in turn maps it.
 */
pt_entry_t *vtopte(vm_offset_t);
vm_paddr_t pmap_kextract(vm_offset_t);

#define	vtophys(va)	pmap_kextract(((vm_offset_t) (va)))
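
/*
 * A minimal sketch of what the recursive mapping buys us (the real
 * implementation lives in pmap.c): the pte that maps 'va' is itself
 * visible at a fixed offset inside PTmap, indexed by the upper bits of
 * 'va'.  Assuming the usual 9-bit index width per level from
 * <machine/param.h> (NPTEPGSHIFT and friends), vtopte() amounts to
 * something like
 *
 *	mask = (1ul << (NPTEPGSHIFT + NPDEPGSHIFT +
 *	    NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1;
 *	return (PTmap + ((va >> PAGE_SHIFT) & mask));
 *
 * and, as the comment above notes, feeding the result back through the
 * same computation yields the pde that maps that pte.
 */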

static __inline pt_entry_t
pte_load(pt_entry_t *ptep)
{
	pt_entry_t r;

	r = *ptep;
	return (r);
}

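/*
 * Replace *ptep with 'pte' and return the previous contents.  As
 * written this is a plain load followed by a store, not an atomic
 * exchange, so an update the hardware makes to the A or M bits between
 * the two operations would be lost.
 */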
static __inline pt_entry_t
pte_load_store(pt_entry_t *ptep, pt_entry_t pte)
{
	pt_entry_t r;

	r = *ptep;
	*ptep = pte;
	return (r);
}

#define	pte_load_clear(pte)	atomic_readandclear_long(pte)

#define	pte_clear(ptep)		pte_load_store((ptep), (pt_entry_t)0ULL)
#define	pte_store(ptep, pte)	pte_load_store((ptep), (pt_entry_t)pte)

#define	pde_store(pdep, pde)	pte_store((pdep), (pde))
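
/*
 * Illustrative use only (the real versions live in pmap.c): a kernel
 * mapping is established and torn down with roughly
 *
 *	pte_store(vtopte(va), pa | PG_RW | PG_V | PG_G);   (cf. pmap_kenter())
 *	pte_clear(vtopte(va));                             (cf. pmap_kremove())
 *
 * followed by pmap_invalidate_page() so that stale translations are
 * flushed from the TLBs of the cpus that may have cached them.
 */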

#endif /* _KERNEL */

/*
 * Pmap stuff
 */
struct	pv_entry;

struct md_page {
	int	pv_list_count;
	TAILQ_HEAD(,pv_entry)	pv_list;
};

struct pmap {
	pml4_entry_t	*pm_pml4;	/* KVA of level 4 page table */
	TAILQ_HEAD(,pv_entry)	pm_pvlist;	/* list of mappings in pmap */
	u_int		pm_active;	/* active on cpus */
	/* spare u_int here due to padding */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	LIST_ENTRY(pmap)	pm_list;	/* List of all pmaps */
};

#define	pmap_page_is_mapped(m)	(!TAILQ_EMPTY(&(m)->md.pv_list))

typedef struct pmap	*pmap_t;

#ifdef _KERNEL
extern struct pmap	kernel_pmap_store;
#define	kernel_pmap	(&kernel_pmap_store)
#endif

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
 */
typedef struct pv_entry {
	pmap_t		pv_pmap;	/* pmap where mapping lies */
	vm_offset_t	pv_va;		/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry)	pv_list;
	TAILQ_ENTRY(pv_entry)	pv_plist;
	vm_page_t	pv_ptem;	/* VM page for pte */
} *pv_entry_t;
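
/*
 * The two TAILQ linkages serve different owners: pv_list threads the
 * entry onto the per-page list (md_page.pv_list above) and pv_plist
 * threads it onto the owning pmap's pm_pvlist.  Illustrative traversal,
 * using only the fields declared here:
 *
 *	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list)	every mapping of page m
 *	TAILQ_FOREACH(pv, &pm->pm_pvlist, pv_plist)	every managed mapping in pm
 */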

#ifdef _KERNEL

#define	NPPROVMTRR		8
#define	PPRO_VMTRRphysBase0	0x200
#define	PPRO_VMTRRphysMask0	0x201
struct ppro_vmtrr {
	u_int64_t base, mask;
};
extern struct ppro_vmtrr PPro_vmtrr[NPPROVMTRR];

extern caddr_t	CADDR1;
extern pt_entry_t *CMAP1;
extern vm_paddr_t avail_end;
extern vm_paddr_t avail_start;
extern vm_offset_t clean_eva;
extern vm_offset_t clean_sva;
extern vm_paddr_t phys_avail[];
extern char *ptvmmap;		/* poor name! */
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;

void	pmap_bootstrap(vm_paddr_t *);
void	pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void	pmap_kremove(vm_offset_t);
void	*pmap_mapdev(vm_paddr_t, vm_size_t);
void	pmap_unmapdev(vm_offset_t, vm_size_t);
pt_entry_t *pmap_pte_quick(pmap_t, vm_offset_t) __pure2;
void	pmap_invalidate_page(pmap_t, vm_offset_t);
void	pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t);
void	pmap_invalidate_all(pmap_t);

#endif /* _KERNEL */

#endif /* !LOCORE */

#endif /* !_MACHINE_PMAP_H_ */