/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from the hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the page tables themselves.  This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 * from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
 * from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
 * $FreeBSD$
 */

#ifndef _MACHINE_PMAP_H_
#define _MACHINE_PMAP_H_

/*
 * Page-directory and page-table entries follow this format.  Some of the
 * fields are meaningful only in particular entry types or paging modes.
 */
/* ---- Intel Nomenclature ---- */
#define	PG_V		0x001	/* P	Valid			*/
#define	PG_RW		0x002	/* R/W	Read/Write		*/
#define	PG_U		0x004	/* U/S	User/Supervisor		*/
#define	PG_NC_PWT	0x008	/* PWT	Write through		*/
#define	PG_NC_PCD	0x010	/* PCD	Cache disable		*/
#define	PG_A		0x020	/* A	Accessed		*/
#define	PG_M		0x040	/* D	Dirty			*/
#define	PG_PS		0x080	/* PS	Page size (0=4k,1=4M)	*/
#define	PG_PTE_PAT	0x080	/* PAT	PAT index		*/
#define	PG_G		0x100	/* G	Global			*/
#define	PG_AVAIL1	0x200	/*    /	Available for system	*/
#define	PG_AVAIL2	0x400	/*   <	programmers use		*/
#define	PG_AVAIL3	0x800	/*    \				*/
#define	PG_PDE_PAT	0x1000	/* PAT	PAT index		*/
#if defined(PAE) || defined(PAE_TABLES)
#define	PG_NX		(1ull<<63) /* No-execute */
#endif
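
/*
 * Illustrative use of the above (not from this file): a writable, global,
 * valid 4KB kernel mapping would be composed roughly as
 *
 *	pte_store(pte, (pa & PG_FRAME) | PG_RW | PG_G | PG_V);
 *
 * pmap.c builds the real values, folding in cache control bits as well.
 */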
/* Our various interpretations of the above */
#define	PG_W		PG_AVAIL1	/* "Wired" pseudoflag */
#define	PG_MANAGED	PG_AVAIL2
#define	PG_PROMOTED	PG_AVAIL3	/* PDE only */
#if defined(PAE) || defined(PAE_TABLES)
#define	PG_FRAME	(0x000ffffffffff000ull)
#define	PG_PS_FRAME	(0x000fffffffe00000ull)
#else
#define	PG_FRAME	(~PAGE_MASK)
#define	PG_PS_FRAME	(0xffc00000)
#endif
#define	PG_PROT		(PG_RW|PG_U)	/* all protection bits */
#define	PG_N		(PG_NC_PWT|PG_NC_PCD)	/* Non-cacheable */

/* Page level cache control fields used to determine the PAT type */
#define	PG_PDE_CACHE	(PG_PDE_PAT | PG_NC_PWT | PG_NC_PCD)
#define	PG_PTE_CACHE	(PG_PTE_PAT | PG_NC_PWT | PG_NC_PCD)
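
/*
 * These three bits form an index into the eight-entry PAT MSR: the PAT bit
 * is the high-order bit and PCD and PWT the low two, so, for example,
 * PG_NC_PCD | PG_NC_PWT alone selects PAT entry 3.
 */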

/*
 * Promotion to a 2 or 4MB (PDE) page mapping requires that the corresponding
 * 4KB (PTE) page mappings have identical settings for the following fields:
 */
#define	PG_PTE_PROMOTE	(PG_MANAGED | PG_W | PG_G | PG_PTE_PAT | \
	    PG_M | PG_A | PG_NC_PCD | PG_NC_PWT | PG_U | PG_RW | PG_V)
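
/*
 * A minimal sketch of such a comparison (pmap.c's real promotion logic is
 * considerably more involved):
 *
 *	if ((pte_load(a) & PG_PTE_PROMOTE) != (pte_load(b) & PG_PTE_PROMOTE))
 *		return;		// mappings differ; cannot promote
 */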

/*
 * Page Protection Exception bits
 */

#define	PGEX_P		0x01	/* Protection violation vs. not present */
#define	PGEX_W		0x02	/* during a Write cycle */
#define	PGEX_U		0x04	/* access from User mode (UPL) */
#define	PGEX_RSV	0x08	/* reserved PTE field is non-zero */
#define	PGEX_I		0x10	/* during an instruction fetch */
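
/*
 * These bits arrive in the error code the CPU pushes on a page fault.
 * An illustrative decode (the real handling lives in trap.c):
 *
 *	if ((err & PGEX_P) == 0)	// fault on a not-present page
 *		...
 *	else if ((err & PGEX_W) != 0)	// write to a read-only mapping
 *		...
 */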

/*
 * Size of Kernel address space.  This is the number of page table pages
 * (4MB each) to use for the kernel.  256 pages == 1 Gigabyte.
 * This **MUST** be a multiple of 4 (eg: 252, 256, 260, etc).
 * For PAE, the page table page unit size is 2MB, so 512 pages == 1 Gigabyte;
 * double everything.  It must be a multiple of 8 for PAE.
 */
#if defined(PAE) || defined(PAE_TABLES)
#define	KVA_PAGES	(512*4)
#else
#define	KVA_PAGES	(256*4)
#endif
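
/*
 * Worked out: non-PAE, 256*4 = 1024 page table pages at 4MB apiece covers
 * the full 4GB of kernel address space; under PAE, 512*4 = 2048 pages at
 * 2MB apiece likewise covers 4GB.
 */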

/*
 * Pte related macros
 */
#define	VADDR(pdi, pti)	((vm_offset_t)(((pdi)<<PDRSHIFT)|((pti)<<PAGE_SHIFT)))
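
/*
 * For example, assuming the non-PAE PDRSHIFT of 22 and PAGE_SHIFT of 12,
 * VADDR(KERNPTDI, 0) == 2 << 22 == 0x00800000, the first address mapped
 * by the kernel text page directory entry.
 */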
125
126 /*
127 * The initial number of kernel page table pages that are constructed
128 * by pmap_cold() must be sufficient to map vm_page_array[]. That number can
129 * be calculated as follows:
130 * max_phys / PAGE_SIZE * sizeof(struct vm_page) / NBPDR
131 * PAE: max_phys 16G, sizeof(vm_page) 76, NBPDR 2M, 152 page table pages.
132 * PAE_TABLES: max_phys 4G, sizeof(vm_page) 68, NBPDR 2M, 36 page table pages.
133 * Non-PAE: max_phys 4G, sizeof(vm_page) 68, NBPDR 4M, 18 page table pages.
134 */
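/*
 * Spelling out the PAE case: 16G / 4K = 4M vm_page structures, times 76
 * bytes = 304MB of vm_page_array[], divided by the 2MB mapped per page
 * table page = 152 pages.  The NKPT defaults below leave headroom beyond
 * these minimums.
 */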
#ifndef NKPT
#if defined(PAE)
#define	NKPT		240
#elif defined(PAE_TABLES)
#define	NKPT		60
#else
#define	NKPT		30
#endif
#endif

#ifndef NKPDE
#define	NKPDE	(KVA_PAGES)	/* number of page tables/pde's */
#endif

/*
 * The *PTDI values control the layout of virtual memory
 */
#define	KPTDI		0	/* start of kernel virtual pde's */
#define	LOWPTDI		1	/* low memory map pde */
#define	KERNPTDI	2	/* start of kernel text pde */
#define	PTDPTDI		(NPDEPTD - 1 - NPGPTD)	/* ptd entry that points
						   to ptd! */
#define	TRPTDI		(NPDEPTD - 1)	/* u/k trampoline ptd */
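
/*
 * Concretely, assuming the non-PAE values NPDEPTD == 1024 and NPGPTD == 1,
 * PTDPTDI == 1022 and TRPTDI == 1023: the recursive self-map and the
 * user/kernel trampoline occupy the top of the page directory.
 */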

/*
 * XXX doesn't really belong here I guess...
 */
#define	ISA_HOLE_START		0xa0000
#define	ISA_HOLE_LENGTH		(0x100000-ISA_HOLE_START)

#ifndef LOCORE

#include <sys/queue.h>
#include <sys/_cpuset.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>

#include <vm/_vm_radix.h>

#if defined(PAE) || defined(PAE_TABLES)

typedef uint64_t pdpt_entry_t;
typedef uint64_t pd_entry_t;
typedef uint64_t pt_entry_t;

#define	PTESHIFT	(3)
#define	PDESHIFT	(3)

#else

typedef uint32_t pd_entry_t;
typedef uint32_t pt_entry_t;

#define	PTESHIFT	(2)
#define	PDESHIFT	(2)

#endif
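
/*
 * PTESHIFT/PDESHIFT are log2(sizeof(entry)): entries are 8 bytes under
 * PAE and 4 bytes otherwise, so an entry index converts to a byte offset
 * as, e.g., (i << PTESHIFT).
 */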

/*
 * Address of current address space page table maps and directories.
 */
#ifdef _KERNEL
#include <machine/atomic.h>

extern pt_entry_t PTmap[];
extern pd_entry_t PTD[];
extern pd_entry_t PTDpde[];

#if defined(PAE) || defined(PAE_TABLES)
extern pdpt_entry_t *IdlePDPT;
#endif
extern pd_entry_t *IdlePTD;	/* physical address of "Idle" state directory */

/*
 * Translate a virtual address to the kernel virtual address of its page table
 * entry (PTE).  This can be used recursively.  If the address of a PTE as
 * previously returned by this macro is itself given as the argument, then the
 * address of the page directory entry (PDE) that maps the PTE will be
 * returned.
 *
 * This macro may be used before pmap_bootstrap() is called.
 */
#define	vtopte(va)	(PTmap + i386_btop(va))
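
/*
 * Illustrating the recursion described above:
 *
 *	pt_entry_t *pte = vtopte(va);
 *	pd_entry_t *pde = (pd_entry_t *)vtopte((vm_offset_t)pte);
 *
 * pde now points at the PDE that maps va's page table page.
 */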

/*
 * Translate a virtual address to its physical address.
 *
 * This macro may be used before pmap_bootstrap() is called.
 */
#define	vtophys(va)	pmap_kextract((vm_offset_t)(va))

/*
 * KPTmap is a linear mapping of the kernel page table.  It differs from the
 * recursive mapping in two ways: (1) it only provides access to kernel page
 * table pages, and not user page table pages, and (2) it provides access to
 * a kernel page table page even after the corresponding virtual addresses
 * have been promoted to a 2/4MB page mapping.
 *
 * KPTmap is first initialized by pmap_cold() to support just NKPT page table
 * pages.  Later, it is reinitialized by pmap_bootstrap() to allow for
 * expansion of the kernel page table.
 */
extern pt_entry_t *KPTmap;

#if (defined(PAE) || defined(PAE_TABLES))

#define	pde_cmpset(pdep, old, new)	atomic_cmpset_64_i586(pdep, old, new)
#define	pte_load_store(ptep, pte)	atomic_swap_64_i586(ptep, pte)
#define	pte_load_clear(ptep)		atomic_swap_64_i586(ptep, 0)
#define	pte_store(ptep, pte)		atomic_store_rel_64_i586(ptep, pte)
#define	pte_load(ptep)			atomic_load_acq_64_i586(ptep)

extern pt_entry_t pg_nx;

#else /* !(PAE || PAE_TABLES) */

#define	pde_cmpset(pdep, old, new)	atomic_cmpset_int(pdep, old, new)
#define	pte_load_store(ptep, pte)	atomic_swap_int(ptep, pte)
#define	pte_load_clear(ptep)		atomic_swap_int(ptep, 0)
#define	pte_store(ptep, pte) do { \
	*(u_int *)(ptep) = (u_int)(pte); \
} while (0)
#define	pte_load(ptep)			atomic_load_acq_int(ptep)

#endif /* !(PAE || PAE_TABLES) */

#define	pte_clear(ptep)			pte_store(ptep, 0)

#define	pde_store(pdep, pde)		pte_store(pdep, pde)
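
/*
 * A typical use of these accessors when tearing down a mapping (sketch
 * only; see pmap_remove() and friends in pmap.c for the real sequences):
 *
 *	pt_entry_t old = pte_load_clear(pte);	// atomically fetch and zero
 *	if ((old & PG_V) != 0)
 *		pmap_invalidate_page(pmap, va);	// flush the stale TLB entry
 */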

/*
 * Extract from the kernel page table the physical address that is mapped by
 * the given virtual address "va".
 *
 * This function may be used before pmap_bootstrap() is called.
 */
static __inline vm_paddr_t
pmap_kextract(vm_offset_t va)
{
	vm_paddr_t pa;

	if ((pa = pte_load(&PTD[va >> PDRSHIFT])) & PG_PS) {
		pa = (pa & PG_PS_FRAME) | (va & PDRMASK);
	} else {
		/*
		 * Beware of a concurrent promotion that changes the PDE at
		 * this point!  For example, vtopte() must not be used to
		 * access the PTE because it would use the new PDE.  It is,
		 * however, safe to use the old PDE because the page table
		 * page is preserved by the promotion.
		 */
		pa = KPTmap[i386_btop(va)];
		pa = (pa & PG_FRAME) | (va & PAGE_MASK);
	}
	return (pa);
}
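
/*
 * Typical use, via the vtophys() wrapper above: given any address in the
 * kernel map, e.g. a buffer handed to a device, vtophys(buf) yields the
 * physical address backing it.
 */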

#endif /* _KERNEL */

/*
 * Pmap stuff
 */
struct pv_entry;
struct pv_chunk;

struct md_page {
	TAILQ_HEAD(,pv_entry)	pv_list;
	int			pat_mode;
};

struct pmap {
	struct mtx		pm_mtx;
	pd_entry_t		*pm_pdir;	/* KVA of page directory */
	TAILQ_HEAD(,pv_chunk)	pm_pvchunk;	/* list of mappings in pmap */
	cpuset_t		pm_active;	/* active on cpus */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	LIST_ENTRY(pmap)	pm_list;	/* List of all pmaps */
#if defined(PAE) || defined(PAE_TABLES)
	pdpt_entry_t		*pm_pdpt;	/* KVA of page directory
						   pointer table */
#endif
	struct vm_radix		pm_root;	/* spare page table pages */
	vm_page_t		pm_ptdpg[NPGPTD];
};

typedef struct pmap	*pmap_t;

#ifdef _KERNEL
extern struct pmap	kernel_pmap_store;
#define	kernel_pmap	(&kernel_pmap_store)

#define	PMAP_LOCK(pmap)		mtx_lock(&(pmap)->pm_mtx)
#define	PMAP_LOCK_ASSERT(pmap, type) \
				mtx_assert(&(pmap)->pm_mtx, (type))
#define	PMAP_LOCK_DESTROY(pmap)	mtx_destroy(&(pmap)->pm_mtx)
#define	PMAP_LOCK_INIT(pmap)	mtx_init(&(pmap)->pm_mtx, "pmap", \
				    NULL, MTX_DEF | MTX_DUPOK)
#define	PMAP_LOCKED(pmap)	mtx_owned(&(pmap)->pm_mtx)
#define	PMAP_MTX(pmap)		(&(pmap)->pm_mtx)
#define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
#define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)
#endif

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_list.
 */
typedef struct pv_entry {
	vm_offset_t		pv_va;	/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry)	pv_next;
} *pv_entry_t;

/*
 * pv_entries are allocated in chunks per-process.  This avoids the
 * need to track per-pmap assignments.
 */
#define	_NPCM	11
#define	_NPCPV	336
struct pv_chunk {
	pmap_t			pc_pmap;
	TAILQ_ENTRY(pv_chunk)	pc_list;
	uint32_t		pc_map[_NPCM];	/* bitmap; 1 = free */
	TAILQ_ENTRY(pv_chunk)	pc_lru;
	struct pv_entry		pc_pventry[_NPCPV];
};
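
/*
 * The sizing math: tracking _NPCPV (336) entries takes 336 bits, or
 * ceil(336 / 32) == 11 uint32_t words, hence _NPCM.  On ILP32 i386 the
 * whole chunk works out to exactly one 4KB page: 336 * 12 bytes of
 * pv_entry plus 64 bytes of header.
 */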

#ifdef	_KERNEL

extern caddr_t CADDR3;
extern pt_entry_t *CMAP3;
extern vm_paddr_t phys_avail[];
extern vm_paddr_t dump_avail[];
extern char *ptvmmap;		/* poor name! */
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;

#define	pmap_page_get_memattr(m)	((vm_memattr_t)(m)->md.pat_mode)
#define	pmap_page_is_write_mapped(m)	(((m)->aflags & PGA_WRITEABLE) != 0)
#define	pmap_unmapbios(va, sz)		pmap_unmapdev((va), (sz))

static inline int
pmap_vmspace_copy(pmap_t dst_pmap __unused, pmap_t src_pmap __unused)
{

	return (0);
}

/*
 * Only the following functions or macros may be used before pmap_bootstrap()
 * is called: pmap_kenter(), pmap_kextract(), pmap_kremove(), vtophys(), and
 * vtopte().
 */
void	pmap_activate_boot(pmap_t pmap);
void	pmap_bootstrap(vm_paddr_t);
int	pmap_cache_bits(pmap_t, int mode, boolean_t is_pde);
int	pmap_change_attr(vm_offset_t, vm_size_t, int);
void	pmap_init_pat(void);
void	pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void	*pmap_kenter_temporary(vm_paddr_t pa, int i);
void	pmap_kremove(vm_offset_t);
void	*pmap_mapbios(vm_paddr_t, vm_size_t);
void	*pmap_mapdev(vm_paddr_t, vm_size_t);
void	*pmap_mapdev_attr(vm_paddr_t, vm_size_t, int);
boolean_t pmap_page_is_mapped(vm_page_t m);
void	pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);
bool	pmap_ps_enabled(pmap_t pmap);
void	pmap_unmapdev(vm_offset_t, vm_size_t);
pt_entry_t *pmap_pte(pmap_t, vm_offset_t) __pure2;
void	pmap_invalidate_page(pmap_t, vm_offset_t);
void	pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t);
void	pmap_invalidate_all(pmap_t);
void	pmap_invalidate_cache(void);
void	pmap_invalidate_cache_pages(vm_page_t *pages, int count);
void	pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva);
void	pmap_force_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva);
void	*pmap_trm_alloc(size_t size, int flags);
void	pmap_trm_free(void *addr, size_t size);

void	invltlb_glob(void);
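
/*
 * A sketch of the common pmap_kenter()/pmap_kremove() pattern for a
 * transient kernel mapping (illustrative only; the caller must supply a
 * va reserved from the kernel map):
 *
 *	pmap_kenter(va, pa);
 *	... access the page through va ...
 *	pmap_kremove(va);
 *	pmap_invalidate_page(kernel_pmap, va);
 */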

#endif /* _KERNEL */

#endif /* !LOCORE */

#endif /* !_MACHINE_PMAP_H_ */