/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the pagetables themselves. This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 * from: hp300: @(#)pmap.h      7.2 (Berkeley) 12/16/90
 * from: @(#)pmap.h     7.4 (Berkeley) 5/12/91
 * $FreeBSD: releng/8.1/sys/i386/include/pmap.h 203182 2010-01-30 06:23:28Z alc $
 */

#ifndef _MACHINE_PMAP_H_
#define _MACHINE_PMAP_H_

/*
 * Page-directory and page-table entries follow this format.  A few of the
 * fields are absent or interpreted differently depending on the entry type
 * (PDE vs. PTE) and on processor features such as PAE and PAT.
 */
/* ---- Intel Nomenclature ---- */
#define PG_V            0x001   /* P     Valid                  */
#define PG_RW           0x002   /* R/W   Read/Write             */
#define PG_U            0x004   /* U/S   User/Supervisor        */
#define PG_NC_PWT       0x008   /* PWT   Write through          */
#define PG_NC_PCD       0x010   /* PCD   Cache disable          */
#define PG_A            0x020   /* A     Accessed               */
#define PG_M            0x040   /* D     Dirty                  */
#define PG_PS           0x080   /* PS    Page size (0=4k,1=4M)  */
#define PG_PTE_PAT      0x080   /* PAT   PAT index              */
#define PG_G            0x100   /* G     Global                 */
#define PG_AVAIL1       0x200   /*    /  Available for system   */
#define PG_AVAIL2       0x400   /*   <   programmers use        */
#define PG_AVAIL3       0x800   /*    \                         */
#define PG_PDE_PAT      0x1000  /* PAT   PAT index              */
#ifdef PAE
#define PG_NX           (1ull<<63) /* No-execute */
#endif

/* Our various interpretations of the above */
#define PG_W            PG_AVAIL1       /* "Wired" pseudoflag */
#define PG_MANAGED      PG_AVAIL2
#ifdef PAE
#define PG_FRAME        (0x000ffffffffff000ull)
#define PG_PS_FRAME     (0x000fffffffe00000ull)
#else
#define PG_FRAME        (~PAGE_MASK)
#define PG_PS_FRAME     (0xffc00000)
#endif
#define PG_PROT         (PG_RW|PG_U)    /* all protection bits */
#define PG_N            (PG_NC_PWT|PG_NC_PCD)   /* Non-cacheable */

/* Page level cache control fields used to determine the PAT type */
#define PG_PDE_CACHE    (PG_PDE_PAT | PG_NC_PWT | PG_NC_PCD)
#define PG_PTE_CACHE    (PG_PTE_PAT | PG_NC_PWT | PG_NC_PCD)
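
/*
 * Illustrative only (not something this header itself constructs): a valid,
 * writable, global kernel mapping of the physical page at "pa" would combine
 * the bits above roughly as
 *
 *	pt_entry_t pte = (pa & PG_FRAME) | PG_V | PG_RW | PG_G;
 *
 * PG_A and PG_M are then set by the MMU on the first access and the first
 * write, respectively.
 */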

/*
 * Promotion to a 2 or 4MB (PDE) page mapping requires that the corresponding
 * 4KB (PTE) page mappings have identical settings for the following fields:
 */
#define PG_PTE_PROMOTE  (PG_MANAGED | PG_W | PG_G | PG_PTE_PAT | \
            PG_M | PG_A | PG_NC_PCD | PG_NC_PWT | PG_U | PG_RW | PG_V)
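
/*
 * Sketch of how this mask is used (the authoritative code is
 * pmap_promote_pde() in sys/i386/i386/pmap.c): a promotion scan checks that
 * every PTE in the page table matches the first one in the fields above and
 * that the physical frames are contiguous, along the lines of
 *
 *	pt_entry_t expect = *firstpte;
 *	for (pte = firstpte + 1; pte < firstpte + NPTEPG; pte++) {
 *		expect += PAGE_SIZE;	// next 4KB frame
 *		if ((*pte ^ expect) & (PG_FRAME | PG_PTE_PROMOTE))
 *			return;		// cannot promote
 *	}
 */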

/*
 * Page Protection Exception bits
 */

#define PGEX_P          0x01    /* Protection violation vs. not present */
#define PGEX_W          0x02    /* during a Write cycle */
#define PGEX_U          0x04    /* access from User mode (UPL) */
#define PGEX_RSV        0x08    /* reserved PTE field is non-zero */
#define PGEX_I          0x10    /* during an instruction fetch */
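
/*
 * Example (hypothetical fragment modeled on trap_pfault() in
 * sys/i386/i386/trap.c): the error code the CPU pushes on a page fault is
 * decoded against these bits to pick the fault type:
 *
 *	if (frame->tf_err & PGEX_W)
 *		ftype = VM_PROT_WRITE;
 *	else
 *		ftype = VM_PROT_READ;
 */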

/*
 * Size of Kernel address space.  This is the number of page table pages
 * (4MB each) to use for the kernel.  256 pages == 1 Gigabyte.
 * This **MUST** be a multiple of 4 (e.g., 252, 256, 260, etc.).
 * For PAE, the page table page unit size is 2MB.  This means that 512 pages
 * is 1 Gigabyte.  Double everything.  It must be a multiple of 8 for PAE.
 */
#ifndef KVA_PAGES
#ifdef PAE
#define KVA_PAGES       512
#else
#define KVA_PAGES       256
#endif
#endif

/*
 * Pte related macros
 */
#define VADDR(pdi, pti) ((vm_offset_t)(((pdi)<<PDRSHIFT)|((pti)<<PAGE_SHIFT)))
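
/*
 * For example, on non-PAE i386 (PDRSHIFT == 22, PAGE_SHIFT == 12),
 * VADDR(pdi, pti) places the page-directory index in bits 31-22 and the
 * page-table index in bits 21-12, so VADDR(KPTDI, 0) yields the base of the
 * kernel's portion of the virtual address space.
 */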

/* Initial number of kernel page tables. */
#ifndef NKPT
#ifdef PAE
/* 152 page tables needed to map 16G (76B "struct vm_page", 2M page tables). */
#define NKPT            240
#else
/* 18 page tables needed to map 4G (72B "struct vm_page", 4M page tables). */
#define NKPT            30
#endif
#endif
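
/*
 * Where those minima come from (illustrative arithmetic): with PAE, 16G of
 * RAM is 16G / 4K = 4M pages, whose 76-byte vm_page structures occupy about
 * 304MB, i.e. ~152 page table pages at 2MB mapped apiece.  Without PAE,
 * 4G / 4K = 1M pages * 72 bytes is ~72MB, or 18 page table pages at 4MB
 * apiece.  The NKPT values above include slack beyond those minima.
 */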

#ifndef NKPDE
#define NKPDE   (KVA_PAGES)     /* number of page tables/pde's */
#endif

/*
 * The *PTDI values control the layout of virtual memory
 *
 * XXX This works for now, but I am not real happy with it, I'll fix it
 * right after I fix locore.s and the magic 28K hole
 */
#define KPTDI           (NPDEPTD-NKPDE) /* start of kernel virtual pde's */
#define PTDPTDI         (KPTDI-NPGPTD)  /* ptd entry that points to ptd! */

/*
 * XXX doesn't really belong here I guess...
 */
#define ISA_HOLE_START    0xa0000
#define ISA_HOLE_LENGTH (0x100000-ISA_HOLE_START)

#ifndef LOCORE

#include <sys/queue.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>

#ifdef PAE

typedef uint64_t pdpt_entry_t;
typedef uint64_t pd_entry_t;
typedef uint64_t pt_entry_t;

#define PTESHIFT        (3)
#define PDESHIFT        (3)

#else

typedef uint32_t pd_entry_t;
typedef uint32_t pt_entry_t;

#define PTESHIFT        (2)
#define PDESHIFT        (2)

#endif

/*
 * Address of current address space page table maps and directories.
 */
#ifdef _KERNEL
extern pt_entry_t PTmap[];
extern pd_entry_t PTD[];
extern pd_entry_t PTDpde[];

#ifdef PAE
extern pdpt_entry_t *IdlePDPT;
#endif
extern pd_entry_t *IdlePTD;     /* physical address of "Idle" state directory */

/*
 * Translate a virtual address to its page table entry and to its
 * physical address.
 * Note: these work recursively, thus vtopte of a pte will give
 * the corresponding pde that in turn maps it.
 */
#define vtopte(va)      (PTmap + i386_btop(va))
#define vtophys(va)     pmap_kextract((vm_offset_t)(va))
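
/*
 * Example: reading the PTE for a kernel virtual address and translating it
 * by hand (illustrative; assumes va is backed by a 4KB mapping):
 *
 *	pt_entry_t pte = *vtopte(va);
 *	vm_paddr_t pa = (pte & PG_FRAME) | (va & PAGE_MASK);
 *
 * vtophys() is the general form, since pmap_kextract() also copes with
 * 2/4MB (PG_PS) mappings.
 */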

#ifdef XEN
#include <sys/param.h>
#include <machine/xen/xen-os.h>
#include <machine/xen/xenvar.h>
#include <machine/xen/xenpmap.h>

extern pt_entry_t pg_nx;

#define PG_KERNEL       (PG_V | PG_A | PG_RW | PG_M)

#define MACH_TO_VM_PAGE(ma)     PHYS_TO_VM_PAGE(xpmap_mtop((ma)))
#define VM_PAGE_TO_MACH(m)      xpmap_ptom(VM_PAGE_TO_PHYS((m)))
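
/*
 * Under Xen, "machine addresses" (MA) are real hardware addresses, while the
 * kernel's vm_paddr_t values are pseudo-physical (PA); xpmap_mtop() and
 * xpmap_ptom() translate between the two, and the *_ma variants below
 * operate on machine addresses directly.
 */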

static __inline vm_paddr_t
pmap_kextract_ma(vm_offset_t va)
{
        vm_paddr_t ma;

        if ((ma = PTD[va >> PDRSHIFT]) & PG_PS) {
                ma = (ma & ~(NBPDR - 1)) | (va & (NBPDR - 1));
        } else {
                ma = (*vtopte(va) & PG_FRAME) | (va & PAGE_MASK);
        }
        return (ma);
}

static __inline vm_paddr_t
pmap_kextract(vm_offset_t va)
{

        return (xpmap_mtop(pmap_kextract_ma(va)));
}
#define vtomach(va)     pmap_kextract_ma(((vm_offset_t) (va)))

vm_paddr_t pmap_extract_ma(struct pmap *pmap, vm_offset_t va);

void    pmap_kenter_ma(vm_offset_t va, vm_paddr_t pa);
void    pmap_map_readonly(struct pmap *pmap, vm_offset_t va, int len);
void    pmap_map_readwrite(struct pmap *pmap, vm_offset_t va, int len);

static __inline pt_entry_t
pte_load_store(pt_entry_t *ptep, pt_entry_t v)
{
        pt_entry_t r;

        v = xpmap_ptom(v);
        r = *ptep;
        PT_SET_VA(ptep, v, TRUE);
        return (r);
}

static __inline pt_entry_t
pte_load_store_ma(pt_entry_t *ptep, pt_entry_t v)
{
        pt_entry_t r;

        r = *ptep;
        PT_SET_VA_MA(ptep, v, TRUE);
        return (r);
}

#define pte_load_clear(ptep)    pte_load_store((ptep), (pt_entry_t)0ULL)

#define pte_store(ptep, pte)    pte_load_store((ptep), (pt_entry_t)pte)
#define pte_store_ma(ptep, pte) pte_load_store_ma((ptep), (pt_entry_t)pte)
#define pde_store_ma(ptep, pte) pte_load_store_ma((ptep), (pt_entry_t)pte)

#elif !defined(XEN)

/*
 * KPTmap is a linear mapping of the kernel page table.  It differs from the
 * recursive mapping in two ways: (1) it only provides access to kernel page
 * table pages, and not user page table pages, and (2) it provides access to
 * a kernel page table page after the corresponding virtual addresses have
 * been promoted to a 2/4MB page mapping.
 */
extern pt_entry_t *KPTmap;

/*
 * Routine:     pmap_kextract
 * Function:
 *      Extract the physical page address associated with the given
 *      kernel virtual address.
 */
static __inline vm_paddr_t
pmap_kextract(vm_offset_t va)
{
        vm_paddr_t pa;

        if ((pa = PTD[va >> PDRSHIFT]) & PG_PS) {
                pa = (pa & PG_PS_FRAME) | (va & PDRMASK);
        } else {
                /*
                 * Beware of a concurrent promotion that changes the PDE at
                 * this point!  For example, vtopte() must not be used to
                 * access the PTE because it would use the new PDE.  It is,
                 * however, safe to use the old PDE because the page table
                 * page is preserved by the promotion.
                 */
                pa = KPTmap[i386_btop(va)];
                pa = (pa & PG_FRAME) | (va & PAGE_MASK);
        }
        return (pa);
}
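
/*
 * Usage sketch: pmap_kextract() is the workhorse behind vtophys() above,
 * e.g. translating a kernel buffer address to a physical one:
 *
 *	vm_paddr_t pa = pmap_kextract((vm_offset_t)buf);
 *
 * The PG_PS branch handles addresses inside a promoted 2/4MB mapping, where
 * the PDE itself supplies the frame and PDRMASK keeps the page offset.
 */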

#define PT_UPDATES_FLUSH()
#endif

#if defined(PAE) && !defined(XEN)

#define pde_cmpset(pdep, old, new) \
                                atomic_cmpset_64((pdep), (old), (new))

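/*
 * With PAE a PTE is 64 bits wide, and i386 has no plain 64-bit load that is
 * atomic against the hardware page-table walker.  cmpxchg8b provides one:
 * with an expected value of 0, it either harmlessly stores 0 back into an
 * empty PTE or fails the compare and returns the current contents in
 * EDX:EAX.
 */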
static __inline pt_entry_t
pte_load(pt_entry_t *ptep)
{
        pt_entry_t r;

        __asm __volatile(
            "lock; cmpxchg8b %1"
            : "=A" (r)
            : "m" (*ptep), "a" (0), "d" (0), "b" (0), "c" (0));
        return (r);
}

static __inline pt_entry_t
pte_load_store(pt_entry_t *ptep, pt_entry_t v)
{
        pt_entry_t r;

        r = *ptep;
        __asm __volatile(
            "1:\n"
            "\tlock; cmpxchg8b %1\n"
            "\tjnz 1b"
            : "+A" (r)
            : "m" (*ptep), "b" ((uint32_t)v), "c" ((uint32_t)(v >> 32)));
        return (r);
}

/* XXXRU move to atomic.h? */
static __inline int
atomic_cmpset_64(volatile uint64_t *dst, uint64_t exp, uint64_t src)
{
        int64_t res = exp;

        __asm __volatile (
        "       lock ;                  "
        "       cmpxchg8b %2 ;          "
        "       setz    %%al ;          "
        "       movzbl  %%al,%0 ;       "
        "# atomic_cmpset_64"
        : "+A" (res),                   /* 0 (result) */
          "=m" (*dst)                   /* 1 */
        : "m" (*dst),                   /* 2 */
          "b" ((uint32_t)src),
          "c" ((uint32_t)(src >> 32)));

        return (res);
}

#define pte_load_clear(ptep)    pte_load_store((ptep), (pt_entry_t)0ULL)

#define pte_store(ptep, pte)    pte_load_store((ptep), (pt_entry_t)pte)

extern pt_entry_t pg_nx;

#elif !defined(PAE) && !defined(XEN)

#define pde_cmpset(pdep, old, new) \
                                atomic_cmpset_int((pdep), (old), (new))

static __inline pt_entry_t
pte_load(pt_entry_t *ptep)
{
        pt_entry_t r;

        r = *ptep;
        return (r);
}

static __inline pt_entry_t
pte_load_store(pt_entry_t *ptep, pt_entry_t pte)
{

        __asm __volatile("xchgl %0, %1" : "+m" (*ptep), "+r" (pte));
        return (pte);
}

#define pte_load_clear(ptep)    atomic_readandclear_int(ptep)

static __inline void
pte_store(pt_entry_t *ptep, pt_entry_t pte)
{

        *ptep = pte;
}

#endif /* PAE */

#define pte_clear(ptep)         pte_store((ptep), (pt_entry_t)0ULL)

#define pde_store(pdep, pde)    pte_store((pdep), (pde))

#endif /* _KERNEL */

/*
 * Pmap stuff
 */
struct pv_entry;
struct pv_chunk;

struct md_page {
        TAILQ_HEAD(,pv_entry)   pv_list;
        int                     pat_mode;
};

struct pmap {
        struct mtx              pm_mtx;
        pd_entry_t              *pm_pdir;       /* KVA of page directory */
        TAILQ_HEAD(,pv_chunk)   pm_pvchunk;     /* list of mappings in pmap */
        u_int                   pm_active;      /* active on cpus */
        struct pmap_statistics  pm_stats;       /* pmap statistics */
        LIST_ENTRY(pmap)        pm_list;        /* List of all pmaps */
#ifdef PAE
        pdpt_entry_t            *pm_pdpt;       /* KVA of page directory
                                                   pointer table */
#endif
        vm_page_t               pm_root;        /* spare page table pages */
};

typedef struct pmap *pmap_t;

#ifdef _KERNEL
extern struct pmap      kernel_pmap_store;
#define kernel_pmap     (&kernel_pmap_store)

#define PMAP_LOCK(pmap)         mtx_lock(&(pmap)->pm_mtx)
#define PMAP_LOCK_ASSERT(pmap, type) \
                                mtx_assert(&(pmap)->pm_mtx, (type))
#define PMAP_LOCK_DESTROY(pmap) mtx_destroy(&(pmap)->pm_mtx)
#define PMAP_LOCK_INIT(pmap)    mtx_init(&(pmap)->pm_mtx, "pmap", \
                                    NULL, MTX_DEF | MTX_DUPOK)
#define PMAP_LOCKED(pmap)       mtx_owned(&(pmap)->pm_mtx)
#define PMAP_MTX(pmap)          (&(pmap)->pm_mtx)
#define PMAP_TRYLOCK(pmap)      mtx_trylock(&(pmap)->pm_mtx)
#define PMAP_UNLOCK(pmap)       mtx_unlock(&(pmap)->pm_mtx)
#endif
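
/*
 * Typical locking pattern (illustrative): a pmap's page tables and
 * statistics are only examined or modified with its mutex held, e.g.
 *
 *	PMAP_LOCK(pmap);
 *	pte = pmap_pte(pmap, va);
 *	...
 *	PMAP_UNLOCK(pmap);
 *
 * MTX_DUPOK is required above because two pmaps may be locked at once,
 * for example the source and destination pmaps in pmap_copy().
 */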

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_list.
 */
typedef struct pv_entry {
        vm_offset_t     pv_va;          /* virtual address for mapping */
        TAILQ_ENTRY(pv_entry)   pv_list;
} *pv_entry_t;

/*
 * pv_entries are allocated in chunks per-process.  This avoids the
 * need to track per-pmap assignments.
 */
#define _NPCM   11
#define _NPCPV  336
struct pv_chunk {
        pmap_t                  pc_pmap;
        TAILQ_ENTRY(pv_chunk)   pc_list;
        uint32_t                pc_map[_NPCM];  /* bitmap; 1 = free */
        uint32_t                pc_spare[2];
        struct pv_entry         pc_pventry[_NPCPV];
};
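
/*
 * Sizing check (illustrative arithmetic for 32-bit i386): the chunk header
 * is 4 (pc_pmap) + 8 (pc_list) + 44 (pc_map) + 8 (pc_spare) = 64 bytes and
 * each pv_entry is 4 + 8 = 12 bytes, so 64 + 336 * 12 = 4096: a pv_chunk
 * fills exactly one 4KB page.  The 11 bitmap words provide 352 bits, enough
 * to cover the 336 entries.
 */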

#ifdef  _KERNEL

extern caddr_t  CADDR1;
extern pt_entry_t *CMAP1;
extern vm_paddr_t phys_avail[];
extern vm_paddr_t dump_avail[];
extern int pseflag;
extern int pgeflag;
extern char *ptvmmap;           /* poor name! */
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;

#define pmap_page_get_memattr(m)        ((vm_memattr_t)(m)->md.pat_mode)
#define pmap_unmapbios(va, sz)  pmap_unmapdev((va), (sz))

void    pmap_bootstrap(vm_paddr_t);
int     pmap_cache_bits(int mode, boolean_t is_pde);
int     pmap_change_attr(vm_offset_t, vm_size_t, int);
void    pmap_init_pat(void);
void    pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void    *pmap_kenter_temporary(vm_paddr_t pa, int i);
void    pmap_kremove(vm_offset_t);
void    *pmap_mapbios(vm_paddr_t, vm_size_t);
void    *pmap_mapdev(vm_paddr_t, vm_size_t);
void    *pmap_mapdev_attr(vm_paddr_t, vm_size_t, int);
boolean_t pmap_page_is_mapped(vm_page_t m);
void    pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);
void    pmap_unmapdev(vm_offset_t, vm_size_t);
pt_entry_t *pmap_pte(pmap_t, vm_offset_t) __pure2;
void    pmap_set_pg(void);
void    pmap_invalidate_page(pmap_t, vm_offset_t);
void    pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t);
void    pmap_invalidate_all(pmap_t);
void    pmap_invalidate_cache(void);
void    pmap_invalidate_cache_range(vm_offset_t, vm_offset_t);

#endif /* _KERNEL */

#endif /* !LOCORE */

#endif /* !_MACHINE_PMAP_H_ */