/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the pagetables themselves.  This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 * from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
 * from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
 * $FreeBSD$
 */

#ifndef _MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

/*
 * Page-directory and page-table entries follow this format.  Some of
 * the fields are meaningful only in certain kinds of entries (e.g.,
 * PG_PS applies only to page-directory entries) or in certain modes
 * (e.g., PG_NX exists only under PAE).
 */
/* ---- Intel Nomenclature ---- */
#define	PG_V		0x001	/* P	Valid			*/
#define	PG_RW		0x002	/* R/W	Read/Write		*/
#define	PG_U		0x004	/* U/S	User/Supervisor		*/
#define	PG_NC_PWT	0x008	/* PWT	Write through		*/
#define	PG_NC_PCD	0x010	/* PCD	Cache disable		*/
#define	PG_A		0x020	/* A	Accessed		*/
#define	PG_M		0x040	/* D	Dirty			*/
#define	PG_PS		0x080	/* PS	Page size (0=4k,1=4M)	*/
#define	PG_PTE_PAT	0x080	/* PAT	PAT index		*/
#define	PG_G		0x100	/* G	Global			*/
#define	PG_AVAIL1	0x200	/*    /	Available for system	*/
#define	PG_AVAIL2	0x400	/*   <	programmers use		*/
#define	PG_AVAIL3	0x800	/*    \				*/
#define	PG_PDE_PAT	0x1000	/* PAT	PAT index		*/
#ifdef PAE
#define	PG_NX		(1ull<<63)	/* No-execute */
#endif

/* Our various interpretations of the above */
#define	PG_W		PG_AVAIL1	/* "Wired" pseudoflag */
#define	PG_MANAGED	PG_AVAIL2
#ifdef PAE
#define	PG_FRAME	(0x000ffffffffff000ull)
#define	PG_PS_FRAME	(0x000fffffffe00000ull)
#else
#define	PG_FRAME	(~PAGE_MASK)
#define	PG_PS_FRAME	(0xffc00000)
#endif
#define	PG_PROT		(PG_RW|PG_U)	/* all protection bits. */
#define	PG_N		(PG_NC_PWT|PG_NC_PCD)	/* Non-cacheable */

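/*
 * Illustrative sketch (not part of this header): a leaf pte is the
 * physical page frame address OR'ed with the attribute bits above.
 * A writable, global kernel mapping of the 4K page at physical
 * address "pa" might be composed as:
 *
 *	pt_entry_t pte = (pa & PG_FRAME) | PG_V | PG_RW | PG_G;
 *
 * A 4M superpage mapping would instead set PG_PS in the pde and use
 * PG_PS_FRAME to mask the frame address.
 */
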
/*
 * Page Protection Exception bits
 */

#define	PGEX_P		0x01	/* Protection violation vs. not present */
#define	PGEX_W		0x02	/* during a Write cycle */
#define	PGEX_U		0x04	/* access from User mode (UPL) */
#define	PGEX_RSV	0x08	/* reserved PTE field is non-zero */
#define	PGEX_I		0x10	/* during an instruction fetch */

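/*
 * Illustrative sketch (not part of this header): the page-fault error
 * code pushed by the CPU is a combination of the PGEX_* bits.  For
 * example, a fault code of 0x06 (PGEX_W | PGEX_U) indicates a write
 * from user mode to a page that was not present:
 *
 *	if ((err & (PGEX_W | PGEX_U | PGEX_P)) == (PGEX_W | PGEX_U))
 *		... user-mode write to a non-present page ...
 */
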
/*
 * Size of Kernel address space.  This is the number of page table pages
 * (4MB each) to use for the kernel.  256 pages == 1 Gigabyte.
 * This **MUST** be a multiple of 4 (e.g., 252, 256, 260, etc.).
 * Under PAE each page table page maps only 2MB, so 512 pages == 1
 * Gigabyte; double the value, and it must be a multiple of 8.
 */
#ifndef KVA_PAGES
#ifdef PAE
#define KVA_PAGES	512
#else
#define KVA_PAGES	256
#endif
#endif

/*
 * Pte related macros
 */
#define	VADDR(pdi, pti)	((vm_offset_t)(((pdi)<<PDRSHIFT)|((pti)<<PAGE_SHIFT)))

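/*
 * Illustrative sketch (not part of this header): VADDR() rebuilds a
 * virtual address from a page-directory index and a page-table index.
 * With the non-PAE values PDRSHIFT == 22 and PAGE_SHIFT == 12:
 *
 *	VADDR(768, 0) == (768 << 22)             == 0xc0000000
 *	VADDR(768, 5) == (768 << 22) | (5 << 12) == 0xc0005000
 */
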
/* Initial number of kernel page tables. */
#ifndef NKPT
#ifdef PAE
/*
 * 152 page tables are needed to map 16G (76B "struct vm_page",
 * 2M page tables); NKPT leaves headroom above that minimum.
 */
#define	NKPT		240
#else
/*
 * 18 page tables are needed to map 4G (72B "struct vm_page",
 * 4M page tables); NKPT leaves headroom above that minimum.
 */
#define	NKPT		30
#endif
#endif

#ifndef NKPDE
#define	NKPDE	(KVA_PAGES)	/* number of page tables/pde's */
#endif

/*
 * The *PTDI values control the layout of virtual memory.
 *
 * XXX This works for now, but I am not really happy with it; I'll fix it
 * right after I fix locore.s and the magic 28K hole.
 */
#define	KPTDI		(NPDEPTD-NKPDE)	/* start of kernel virtual pde's */
#define	PTDPTDI		(KPTDI-NPGPTD)	/* ptd entry that points to ptd! */

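/*
 * Illustrative sketch (not part of this header), using the default
 * non-PAE values NPDEPTD == 1024, NKPDE == KVA_PAGES == 256, and
 * NPGPTD == 1:
 *
 *	KPTDI   == 1024 - 256 == 768	(kernel VA starts at VADDR(768, 0))
 *	PTDPTDI == 768 - 1    == 767
 *
 * PTD[PTDPTDI] points at the page directory itself; this is the
 * recursive mapping that makes PTmap and PTD below addressable.
 */
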
/*
 * XXX doesn't really belong here I guess...
 */
#define	ISA_HOLE_START	0xa0000
#define	ISA_HOLE_LENGTH	(0x100000-ISA_HOLE_START)

#ifndef LOCORE

#include <sys/queue.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>

#ifdef PAE

typedef uint64_t pdpt_entry_t;
typedef uint64_t pd_entry_t;
typedef uint64_t pt_entry_t;

#define	PTESHIFT	(3)
#define	PDESHIFT	(3)

#else

typedef uint32_t pd_entry_t;
typedef uint32_t pt_entry_t;

#define	PTESHIFT	(2)
#define	PDESHIFT	(2)

#endif

/*
 * Address of the current address space's page table map and
 * directory.
 */
#ifdef _KERNEL
extern pt_entry_t PTmap[];
extern pd_entry_t PTD[];
extern pd_entry_t PTDpde[];

#ifdef PAE
extern pdpt_entry_t *IdlePDPT;
#endif
extern pd_entry_t *IdlePTD;	/* physical address of "Idle" state directory */
#endif

#ifdef _KERNEL
/*
 * Translate a virtual address to its page table entry and to its
 * physical address.
 * Note: these work recursively, thus vtopte of a pte will give
 * the corresponding pde that in turn maps it.
 */
#define	vtopte(va)	(PTmap + i386_btop(va))
#define	vtophys(va)	pmap_kextract((vm_offset_t)(va))

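/*
 * Illustrative sketch (not part of this header): thanks to the
 * recursive slot PTD[PTDPTDI], all page tables appear as one linear
 * array of ptes at PTmap == VADDR(PTDPTDI, 0), so
 *
 *	vtopte(va) == &PTmap[va >> PAGE_SHIFT]
 *
 * Applying vtopte() to an address inside PTmap itself lands in the
 * page directory, which is how "vtopte of a pte gives its pde".
 */
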
/*
 * Routine:	pmap_kextract
 * Function:
 *	Extract the physical page address associated with the given
 *	kernel virtual address.
 */
static __inline vm_paddr_t
pmap_kextract(vm_offset_t va)
{
	vm_paddr_t pa;

	if ((pa = PTD[va >> PDRSHIFT]) & PG_PS) {
		/* 4M (2M under PAE) superpage: frame plus offset within it. */
		pa = (pa & PG_PS_FRAME) | (va & PDRMASK);
	} else {
		/* Regular 4K page: fetch the pte through the recursive map. */
		pa = *vtopte(va);
		pa = (pa & PG_FRAME) | (va & PAGE_MASK);
	}
	return pa;
}

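/*
 * Illustrative usage (not part of this header): vtophys() turns a
 * kernel virtual address into a physical address, e.g. when handing
 * a buffer to a device:
 *
 *	vm_paddr_t pa = vtophys(buf);
 *
 * This only works for addresses mapped by the kernel pmap.
 */
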
#ifdef PAE

/*
 * Atomically read a 64-bit pte.  A lock'ed cmpxchg8b with a zero
 * comparand and a zero exchange value either stores 0 over an
 * already-zero pte (harmless) or fails and leaves the old value in
 * edx:eax; either way it returns the current contents atomically.
 */
static __inline pt_entry_t
pte_load(pt_entry_t *ptep)
{
	pt_entry_t r;

	__asm __volatile(
	    "lock; cmpxchg8b %1"
	    : "=A" (r)
	    : "m" (*ptep), "a" (0), "d" (0), "b" (0), "c" (0));
	return (r);
}

/*
 * Atomically replace a 64-bit pte, returning the previous contents.
 * The cmpxchg8b loop retries until the exchange succeeds.
 */
static __inline pt_entry_t
pte_load_store(pt_entry_t *ptep, pt_entry_t v)
{
	pt_entry_t r;

	r = *ptep;
	__asm __volatile(
	    "1:\n"
	    "\tlock; cmpxchg8b %1\n"
	    "\tjnz 1b"
	    : "+A" (r)
	    : "m" (*ptep), "b" ((uint32_t)v), "c" ((uint32_t)(v >> 32)));
	return (r);
}


/* XXXRU move to atomic.h? */
/*
 * Compare-and-set on a 64-bit word: if *dst == exp, atomically set
 * *dst = src and return non-zero; otherwise return zero.  setz
 * captures the cmpxchg8b ZF result as the return value.
 */
static __inline int
atomic_cmpset_64(volatile uint64_t *dst, uint64_t exp, uint64_t src)
{
	int64_t res = exp;

	__asm __volatile (
	"	lock ;			"
	"	cmpxchg8b %2 ;		"
	"	setz	%%al ;		"
	"	movzbl	%%al,%0 ;	"
	"# atomic_cmpset_64"
	: "+A" (res),			/* 0 (result) */
	  "=m" (*dst)			/* 1 */
	: "m" (*dst),			/* 2 */
	  "b" ((uint32_t)src),
	  "c" ((uint32_t)(src >> 32)));

	return (res);
}
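
/*
 * Illustrative usage (not part of this header): a lock-free
 * read-modify-write of a 64-bit pte-sized word retries until no
 * other CPU has changed it in between:
 *
 *	do {
 *		old = pte_load(ptep);
 *		new = old | PG_A;
 *	} while (!atomic_cmpset_64(ptep, old, new));
 */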

#define	pte_load_clear(ptep)	pte_load_store((ptep), (pt_entry_t)0ULL)

#define	pte_store(ptep, pte)	pte_load_store((ptep), (pt_entry_t)pte)

extern pt_entry_t pg_nx;

#else /* PAE */

/*
 * Without PAE, ptes are 32 bits wide and a plain load is atomic.
 */
static __inline pt_entry_t
pte_load(pt_entry_t *ptep)
{
	pt_entry_t r;

	r = *ptep;
	return (r);
}

/*
 * Atomically replace a 32-bit pte with xchgl, returning the previous
 * contents (xchg with a memory operand is implicitly locked).
 */
static __inline pt_entry_t
pte_load_store(pt_entry_t *ptep, pt_entry_t pte)
{
	pt_entry_t r;

	__asm __volatile(
	    "xchgl %0,%1"
	    : "=m" (*ptep),
	      "=r" (r)
	    : "1" (pte),
	      "m" (*ptep));
	return (r);
}

#define	pte_load_clear(pte)	atomic_readandclear_int(pte)

static __inline void
pte_store(pt_entry_t *ptep, pt_entry_t pte)
{

	*ptep = pte;
}

#endif /* PAE */


#define	pte_clear(ptep)		pte_store((ptep), (pt_entry_t)0ULL)

#define	pde_store(pdep, pde)	pte_store((pdep), (pde))

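/*
 * Illustrative usage (not part of this header): establishing a wired
 * kernel mapping boils down to storing a pte and flushing the TLB
 * entry; the assumed flow (pmap_kenter() is the real interface) is:
 *
 *	pte_store(vtopte(va), pa | PG_RW | PG_V);
 *	pmap_invalidate_page(kernel_pmap, va);
 */
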
#endif /* _KERNEL */

/*
 * Pmap stuff
 */
struct	pv_entry;
struct	pv_chunk;

struct md_page {
	int			pv_list_count;
	TAILQ_HEAD(,pv_entry)	pv_list;
};


struct pmap {
	struct mtx		pm_mtx;
	pd_entry_t		*pm_pdir;	/* KVA of page directory */
	TAILQ_HEAD(,pv_chunk)	pm_pvchunk;	/* list of mappings in pmap */
	u_int			pm_active;	/* active on cpus */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	LIST_ENTRY(pmap)	pm_list;	/* List of all pmaps */
#ifdef PAE
	pdpt_entry_t		*pm_pdpt;	/* KVA of page directory pointer
						   table */
#endif
};

typedef struct pmap	*pmap_t;

#ifdef _KERNEL
extern struct pmap	kernel_pmap_store;
#define	kernel_pmap	(&kernel_pmap_store)

#define	PMAP_LOCK(pmap)		mtx_lock(&(pmap)->pm_mtx)
#define	PMAP_LOCK_ASSERT(pmap, type) \
				mtx_assert(&(pmap)->pm_mtx, (type))
#define	PMAP_LOCK_DESTROY(pmap)	mtx_destroy(&(pmap)->pm_mtx)
#define	PMAP_LOCK_INIT(pmap)	mtx_init(&(pmap)->pm_mtx, "pmap", \
				    NULL, MTX_DEF | MTX_DUPOK)
#define	PMAP_LOCKED(pmap)	mtx_owned(&(pmap)->pm_mtx)
#define	PMAP_MTX(pmap)		(&(pmap)->pm_mtx)
#define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
#define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)
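
/*
 * Illustrative usage (not part of this header): code that walks or
 * modifies a pmap brackets the work with the lock macros:
 *
 *	PMAP_LOCK(pmap);
 *	pte = pmap_pte(pmap, va);
 *	...
 *	PMAP_UNLOCK(pmap);
 */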
#endif

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_list.
 */
typedef struct pv_entry {
	vm_offset_t		pv_va;		/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry)	pv_list;
} *pv_entry_t;

/*
 * pv_entries are allocated in chunks per-process.  This avoids the
 * need to track per-pmap assignments.
 */
#define	_NPCM	11
#define	_NPCPV	336
struct pv_chunk {
	pmap_t			pc_pmap;
	TAILQ_ENTRY(pv_chunk)	pc_list;
	uint32_t		pc_map[_NPCM];	/* bitmap; 1 = free */
	uint32_t		pc_spare[2];
	struct pv_entry		pc_pventry[_NPCPV];
};
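
/*
 * Illustrative arithmetic (not part of this header): the chunk is
 * sized to fill exactly one 4K page on ILP32 i386.  With a 12-byte
 * pv_entry (4-byte pv_va plus two 4-byte list pointers):
 *
 *	336 * 12 = 4032 bytes of entries
 *	4 + 8 + 11*4 + 2*4 = 64 bytes of header
 *	4032 + 64 = 4096 = PAGE_SIZE
 *
 * and the 336 free bits need ceil(336 / 32) = 11 bitmap words.
 */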

#ifdef	_KERNEL

#define	NPPROVMTRR		8
#define	PPRO_VMTRRphysBase0	0x200
#define	PPRO_VMTRRphysMask0	0x201
struct ppro_vmtrr {
	u_int64_t base, mask;
};
extern struct ppro_vmtrr PPro_vmtrr[NPPROVMTRR];

extern caddr_t	CADDR1;
extern pt_entry_t *CMAP1;
extern vm_paddr_t phys_avail[];
extern vm_paddr_t dump_avail[];
extern int pseflag;
extern int pgeflag;
extern char *ptvmmap;		/* poor name! */
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;

#define	pmap_page_is_mapped(m)	(!TAILQ_EMPTY(&(m)->md.pv_list))
#define	pmap_unmapbios(va, sz)	pmap_unmapdev((va), (sz))

void	pmap_bootstrap(vm_paddr_t);
int	pmap_change_attr(vm_offset_t, vm_size_t, int);
void	pmap_init_pat(void);
void	pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void	pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
void	*pmap_kenter_temporary(vm_paddr_t pa, int i);
void	pmap_kremove(vm_offset_t);
void	*pmap_mapbios(vm_paddr_t, vm_size_t);
void	*pmap_mapdev(vm_paddr_t, vm_size_t);
void	*pmap_mapdev_attr(vm_paddr_t, vm_size_t, int);
void	pmap_unmapdev(vm_offset_t, vm_size_t);
pt_entry_t *pmap_pte(pmap_t, vm_offset_t) __pure2;
void	pmap_set_pg(void);
void	pmap_invalidate_page(pmap_t, vm_offset_t);
void	pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t);
void	pmap_invalidate_all(pmap_t);
void	pmap_invalidate_cache(void);

#endif /* _KERNEL */

#endif /* !LOCORE */

#endif /* !_MACHINE_PMAP_H_ */