FreeBSD/Linux Kernel Cross Reference
sys/arm/include/pmap.h
1 /*-
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * the Systems Programming Group of the University of Utah Computer
7 * Science Department and William Jolitz of UUNET Technologies Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by the University of
20 * California, Berkeley and its contributors.
21 * 4. Neither the name of the University nor the names of its contributors
22 * may be used to endorse or promote products derived from this software
23 * without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 *
37 * Derived from hp300 version by Mike Hibler, this version by William
38 * Jolitz uses a recursive map [a pde points to the page directory] to
39 * map the page tables using the pagetables themselves. This is done to
40 * reduce the impact on kernel virtual memory for lots of sparse address
41 * space, and to reduce the cost of memory to each process.
42 *
43 * from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90
44 * from: @(#)pmap.h 7.4 (Berkeley) 5/12/91
45 * from: FreeBSD: src/sys/i386/include/pmap.h,v 1.70 2000/11/30
46 *
47 * $FreeBSD$
48 */
49
50 #ifndef _MACHINE_PMAP_H_
51 #define _MACHINE_PMAP_H_
52
53 #include <machine/pte.h>
54 #include <machine/cpuconf.h>
55 /*
56 * Pte related macros
57 */
58 #define PTE_NOCACHE 0
59 #define PTE_CACHE 1
60 #define PTE_PAGETABLE 2
61
62 #ifndef LOCORE
63
64 #include <sys/queue.h>
65 #include <sys/_lock.h>
66 #include <sys/_mutex.h>
67
68 #define PDESIZE sizeof(pd_entry_t) /* for assembly files */
69 #define PTESIZE sizeof(pt_entry_t) /* for assembly files */
70
71 #ifdef _KERNEL
72
73 #define vtophys(va) pmap_extract(pmap_kernel(), (vm_offset_t)(va))
74 #define pmap_kextract(va) pmap_extract(pmap_kernel(), (vm_offset_t)(va))
75
76 #endif
77
78 #define pmap_page_get_memattr(m) VM_MEMATTR_DEFAULT
79 #define pmap_page_is_mapped(m) (!TAILQ_EMPTY(&(m)->md.pv_list))
80 #define pmap_page_set_memattr(m, ma) (void)0
81
82 /*
83 * Pmap stuff
84 */
85
86 /*
87 * This structure is used to hold a virtual<->physical address
88 * association and is used mostly by bootstrap code
89 */
90 struct pv_addr {
91 SLIST_ENTRY(pv_addr) pv_list;
92 vm_offset_t pv_va;
93 vm_paddr_t pv_pa;
94 };
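/*
 * Illustrative sketch (not from the original file): early ARM bootstrap
 * code conventionally carves page-table pages out of physical memory,
 * records each one in a struct pv_addr, and later hands them to
 * pmap_link_l2pt()/pmap_bootstrap() (declared further below).  The names
 * kernel_l1pt, kernel_pt_table, NUM_KERNEL_PTS and KERNBASE follow the
 * usual initarm() convention but are assumptions here, not part of this
 * header.
 */
#if 0
static struct pv_addr kernel_l1pt;		/* the kernel L1 table */
static struct pv_addr kernel_pt_table[NUM_KERNEL_PTS];

	/* Wire one of the L2 tables into the L1 table for kernel VA. */
	pmap_link_l2pt(kernel_l1pt.pv_va, KERNBASE, &kernel_pt_table[0]);
#endif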
95
96 struct pv_entry;
97
98 struct md_page {
99 int pvh_attrs;
100 u_int uro_mappings;
101 u_int urw_mappings;
102 union {
103 u_short s_mappings[2]; /* Assume kernel count <= 65535 */
104 u_int i_mappings;
105 } k_u;
106 #define kro_mappings k_u.s_mappings[0]
107 #define krw_mappings k_u.s_mappings[1]
108 #define k_mappings k_u.i_mappings
109 int pv_list_count;
110 TAILQ_HEAD(,pv_entry) pv_list;
111 };
112
113 #define VM_MDPAGE_INIT(pg) \
114 do { \
115 	TAILQ_INIT(&(pg)->md.pv_list);					\
116 	mtx_init(&(pg)->md.pvh_mtx, "MDPAGE Mutex", NULL, MTX_DEF);	\
117 	(pg)->md.pvh_attrs = 0;						\
118 	(pg)->md.uro_mappings = 0;					\
119 	(pg)->md.urw_mappings = 0;					\
120 	(pg)->md.k_mappings = 0;					\
121 } while (/*CONSTCOND*/0)
122
123 struct l1_ttable;
124 struct l2_dtable;
125
126
127 /*
128 * The number of L2 descriptor tables which can be tracked by an l2_dtable.
129 * A bucket size of 16 provides for 16MB of contiguous virtual address
130 * space per l2_dtable. Most processes will, therefore, require only two or
131 * three of these to map their whole working set.
132 */
133 #define L2_BUCKET_LOG2 4
134 #define L2_BUCKET_SIZE (1 << L2_BUCKET_LOG2)
135 /*
136 * Given the above "L2-descriptors-per-l2_dtable" constant, the number
137 * of l2_dtable structures required to track all possible page descriptors
138 * mappable by an L1 translation table is given by the following constants:
139 */
140 #define L2_LOG2 ((32 - L1_S_SHIFT) - L2_BUCKET_LOG2)
141 #define L2_SIZE (1 << L2_LOG2)
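/*
 * Worked example (sketch, not from the original file): <machine/pte.h>
 * defines L1_S_SHIFT as 20 (1 MB L1 sections), so L2_LOG2 = (32 - 20) - 4
 * = 8 and L2_SIZE = 256.  Each l2_dtable covers L2_BUCKET_SIZE sections
 * (16 MB), and 256 of them span the whole 4 GB address space.  The
 * identity below holds by construction and could be checked at compile
 * time:
 */
#if (L2_SIZE * L2_BUCKET_SIZE) != (1 << (32 - L1_S_SHIFT))
#error "l2_dtable bookkeeping does not cover the 4 GB address space"
#endif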
142
143 struct pmap {
144 struct mtx pm_mtx;
145 u_int8_t pm_domain;
146 struct l1_ttable *pm_l1;
147 struct l2_dtable *pm_l2[L2_SIZE];
148 pd_entry_t *pm_pdir; /* KVA of page directory */
149 int pm_count; /* reference count */
150 int pm_active; /* active on cpus */
151 	struct pmap_statistics	pm_stats;	/* pmap statistics */
152 TAILQ_HEAD(,pv_entry) pm_pvlist; /* list of mappings in pmap */
153 };
154
155 typedef struct pmap *pmap_t;
156
157 #ifdef _KERNEL
158 extern pmap_t kernel_pmap;
159 #define pmap_kernel() kernel_pmap
160
161 #define PMAP_ASSERT_LOCKED(pmap) \
162 mtx_assert(&(pmap)->pm_mtx, MA_OWNED)
163 #define PMAP_LOCK(pmap) mtx_lock(&(pmap)->pm_mtx)
164 #define PMAP_LOCK_DESTROY(pmap) mtx_destroy(&(pmap)->pm_mtx)
165 #define PMAP_LOCK_INIT(pmap) mtx_init(&(pmap)->pm_mtx, "pmap", \
166 NULL, MTX_DEF | MTX_DUPOK)
167 #define PMAP_OWNED(pmap) mtx_owned(&(pmap)->pm_mtx)
168 #define PMAP_MTX(pmap) (&(pmap)->pm_mtx)
169 #define PMAP_TRYLOCK(pmap) mtx_trylock(&(pmap)->pm_mtx)
170 #define PMAP_UNLOCK(pmap) mtx_unlock(&(pmap)->pm_mtx)
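/*
 * Illustrative sketch (not from the original file): the usual locking
 * pattern around a pmap operation.  pmap_example_op() and
 * pmap_do_something() are hypothetical; only the PMAP_* macros above
 * are provided by this header.
 */
#if 0
static void
pmap_example_op(pmap_t pm, vm_offset_t va)
{

	PMAP_LOCK(pm);
	PMAP_ASSERT_LOCKED(pm);		/* pm_mtx is held from here on */
	pmap_do_something(pm, va);	/* some per-pmap work */
	PMAP_UNLOCK(pm);
}
#endif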
171 #endif
172
173
174 /*
175 * For each vm_page_t, there is a list of all currently valid virtual
176 * mappings of that page. An entry is a pv_entry_t, the list is pv_list.
177 */
178 typedef struct pv_entry {
179 pmap_t pv_pmap; /* pmap where mapping lies */
180 vm_offset_t pv_va; /* virtual address for mapping */
181 TAILQ_ENTRY(pv_entry) pv_list;
182 TAILQ_ENTRY(pv_entry) pv_plist;
183 int pv_flags; /* flags (wired, etc...) */
184 } *pv_entry_t;
185
186 #define PV_ENTRY_NULL ((pv_entry_t) 0)
187
188 #ifdef _KERNEL
189
190 boolean_t pmap_get_pde_pte(pmap_t, vm_offset_t, pd_entry_t **, pt_entry_t **);
191
192 /*
193  * Translate a virtual address to a page table entry and to a
194  * physical address.  Note: unlike the i386 original this file was
195  * derived from, the ARM vtopte() below walks the page tables through
196  * pmap_get_pde_pte() rather than relying on a recursive mapping.
197 */
198
199 /*
200 * The current top of kernel VM.
201 */
202 extern vm_offset_t pmap_curmaxkvaddr;
203
204 struct pcb;
205
206 void pmap_set_pcb_pagedir(pmap_t, struct pcb *);
207 /* Virtual address to page table entry */
208 static __inline pt_entry_t *
209 vtopte(vm_offset_t va)
210 {
211 pd_entry_t *pdep;
212 pt_entry_t *ptep;
213
214 if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep) == FALSE)
215 return (NULL);
216 return (ptep);
217 }
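/*
 * Illustrative sketch (not from the original file): two ways to learn
 * about a kernel virtual address.  vtophys() yields the physical address
 * via pmap_extract(); vtopte() yields the PTE itself so the caller can
 * inspect or rewrite the mapping.  "va" is a hypothetical kernel VA, and
 * l2pte_valid()/l2pte_pa() are defined further below in this file.
 */
#if 0
	vm_paddr_t pa = vtophys(va);		/* VA -> PA */
	pt_entry_t *pte = vtopte(va);		/* VA -> PTE, NULL if unmapped */

	if (pte != NULL && l2pte_valid(*pte))
		pa = l2pte_pa(*pte);		/* same PA, read from the PTE */
#endif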
218
219 extern vm_offset_t phys_avail[];
220 extern vm_offset_t virtual_avail;
221 extern vm_offset_t virtual_end;
222
223 void pmap_bootstrap(vm_offset_t, vm_offset_t, struct pv_addr *);
224 void pmap_kenter(vm_offset_t va, vm_paddr_t pa);
225 void pmap_kenter_nocache(vm_offset_t va, vm_paddr_t pa);
226 void pmap_kenter_user(vm_offset_t va, vm_paddr_t pa);
227 void pmap_kremove(vm_offset_t);
228 void *pmap_mapdev(vm_offset_t, vm_size_t);
229 void pmap_unmapdev(vm_offset_t, vm_size_t);
230 vm_page_t pmap_use_pt(pmap_t, vm_offset_t);
231 void pmap_debug(int);
232 void pmap_map_section(vm_offset_t, vm_offset_t, vm_offset_t, int, int);
233 void pmap_link_l2pt(vm_offset_t, vm_offset_t, struct pv_addr *);
234 vm_size_t pmap_map_chunk(vm_offset_t, vm_offset_t, vm_offset_t, vm_size_t, int, int);
235 void
236 pmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, int prot,
237 int cache);
238 int pmap_fault_fixup(pmap_t, vm_offset_t, vm_prot_t, int);
239
240 /*
241 * Definitions for MMU domains
242 */
243 #define PMAP_DOMAINS 15 /* 15 'user' domains (1-15) */
244 #define PMAP_DOMAIN_KERNEL 0 /* The kernel uses domain #0 */
245
246 /*
247  * The pmap code ensures that page tables are always mapped write-through.
248 * Thus, on some platforms we can run fast and loose and avoid syncing PTEs
249 * on every change.
250 *
251 * Unfortunately, not all CPUs have a write-through cache mode. So we
252 * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs,
253 * and if there is the chance for PTE syncs to be needed, we define
254 * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run)
255 * the code.
256 */
257 extern int pmap_needs_pte_sync;
258
259 /*
260 * These macros define the various bit masks in the PTE.
261 *
262 * We use these macros since we use different bits on different processor
263 * models.
264 */
265 #define L1_S_PROT_U (L1_S_AP(AP_U))
266 #define L1_S_PROT_W (L1_S_AP(AP_W))
267 #define L1_S_PROT_MASK (L1_S_PROT_U|L1_S_PROT_W)
268
269 #define L1_S_CACHE_MASK_generic (L1_S_B|L1_S_C)
270 #define L1_S_CACHE_MASK_xscale (L1_S_B|L1_S_C|L1_S_XSCALE_TEX(TEX_XSCALE_X)|\
271 L1_S_XSCALE_TEX(TEX_XSCALE_T))
272
273 #define L2_L_PROT_U (L2_AP(AP_U))
274 #define L2_L_PROT_W (L2_AP(AP_W))
275 #define L2_L_PROT_MASK (L2_L_PROT_U|L2_L_PROT_W)
276
277 #define L2_L_CACHE_MASK_generic (L2_B|L2_C)
278 #define L2_L_CACHE_MASK_xscale (L2_B|L2_C|L2_XSCALE_L_TEX(TEX_XSCALE_X) | \
279 L2_XSCALE_L_TEX(TEX_XSCALE_T))
280
281 #define L2_S_PROT_U_generic (L2_AP(AP_U))
282 #define L2_S_PROT_W_generic (L2_AP(AP_W))
283 #define L2_S_PROT_MASK_generic (L2_S_PROT_U|L2_S_PROT_W)
284
285 #define L2_S_PROT_U_xscale (L2_AP0(AP_U))
286 #define L2_S_PROT_W_xscale (L2_AP0(AP_W))
287 #define L2_S_PROT_MASK_xscale (L2_S_PROT_U|L2_S_PROT_W)
288
289 #define L2_S_CACHE_MASK_generic (L2_B|L2_C)
290 #define L2_S_CACHE_MASK_xscale	(L2_B|L2_C| \
291 				 L2_XSCALE_T_TEX(TEX_XSCALE_X))
292
293 #define L1_S_PROTO_generic (L1_TYPE_S | L1_S_IMP)
294 #define L1_S_PROTO_xscale (L1_TYPE_S)
295
296 #define L1_C_PROTO_generic (L1_TYPE_C | L1_C_IMP2)
297 #define L1_C_PROTO_xscale (L1_TYPE_C)
298
299 #define L2_L_PROTO (L2_TYPE_L)
300
301 #define L2_S_PROTO_generic (L2_TYPE_S)
302 #define L2_S_PROTO_xscale (L2_TYPE_XSCALE_XS)
303
304 /*
305 * User-visible names for the ones that vary with MMU class.
306 */
307
308 #if ARM_NMMUS > 1
309 /* More than one MMU class configured; use variables. */
310 #define L2_S_PROT_U pte_l2_s_prot_u
311 #define L2_S_PROT_W pte_l2_s_prot_w
312 #define L2_S_PROT_MASK pte_l2_s_prot_mask
313
314 #define L1_S_CACHE_MASK pte_l1_s_cache_mask
315 #define L2_L_CACHE_MASK pte_l2_l_cache_mask
316 #define L2_S_CACHE_MASK pte_l2_s_cache_mask
317
318 #define L1_S_PROTO pte_l1_s_proto
319 #define L1_C_PROTO pte_l1_c_proto
320 #define L2_S_PROTO pte_l2_s_proto
321
322 #elif (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
323 #define L2_S_PROT_U L2_S_PROT_U_generic
324 #define L2_S_PROT_W L2_S_PROT_W_generic
325 #define L2_S_PROT_MASK L2_S_PROT_MASK_generic
326
327 #define L1_S_CACHE_MASK L1_S_CACHE_MASK_generic
328 #define L2_L_CACHE_MASK L2_L_CACHE_MASK_generic
329 #define L2_S_CACHE_MASK L2_S_CACHE_MASK_generic
330
331 #define L1_S_PROTO L1_S_PROTO_generic
332 #define L1_C_PROTO L1_C_PROTO_generic
333 #define L2_S_PROTO L2_S_PROTO_generic
334
335 #elif ARM_MMU_XSCALE == 1
336 #define L2_S_PROT_U L2_S_PROT_U_xscale
337 #define L2_S_PROT_W L2_S_PROT_W_xscale
338 #define L2_S_PROT_MASK L2_S_PROT_MASK_xscale
339
340 #define L1_S_CACHE_MASK L1_S_CACHE_MASK_xscale
341 #define L2_L_CACHE_MASK L2_L_CACHE_MASK_xscale
342 #define L2_S_CACHE_MASK L2_S_CACHE_MASK_xscale
343
344 #define L1_S_PROTO L1_S_PROTO_xscale
345 #define L1_C_PROTO L1_C_PROTO_xscale
346 #define L2_S_PROTO L2_S_PROTO_xscale
347
348 #endif /* ARM_NMMUS > 1 */
349
350 #ifdef SKYEYE_WORKAROUNDS
351 #define PMAP_NEEDS_PTE_SYNC 1
352 #define PMAP_INCLUDE_PTE_SYNC
353 #else
354 #if (ARM_MMU_SA1 == 1) && (ARM_NMMUS == 1)
355 #define PMAP_NEEDS_PTE_SYNC 1
356 #define PMAP_INCLUDE_PTE_SYNC
357 #elif defined(CPU_XSCALE_81342)
358 #define PMAP_NEEDS_PTE_SYNC 1
359 #define PMAP_INCLUDE_PTE_SYNC
360 #elif (ARM_MMU_SA1 == 0)
361 #define PMAP_NEEDS_PTE_SYNC 0
362 #endif
363 #endif
364
365 /*
366 * These macros return various bits based on kernel/user and protection.
367 * Note that the compiler will usually fold these at compile time.
368 */
369 #define L1_S_PROT(ku, pr) ((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \
370 (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0))
371
372 #define L2_L_PROT(ku, pr) ((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \
373 (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : 0))
374
375 #define L2_S_PROT(ku, pr) ((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \
376 (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0))
377
378 /*
379 * Macros to test if a mapping is mappable with an L1 Section mapping
380 * or an L2 Large Page mapping.
381 */
382 #define L1_S_MAPPABLE_P(va, pa, size) \
383 ((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)
384
385 #define L2_L_MAPPABLE_P(va, pa, size) \
386 ((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)
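/*
 * Illustrative sketch (not from the original file): the size decision a
 * chunk-mapping routine such as pmap_map_chunk() makes with the macros
 * above.  "va", "pa", "resid" and "size" are hypothetical locals, and
 * the real code also installs the corresponding L1/L2 entries.
 */
#if 0
	while (resid > 0) {
		if (L1_S_MAPPABLE_P(va, pa, resid))
			size = L1_S_SIZE;	/* 1 MB section mapping */
		else if (L2_L_MAPPABLE_P(va, pa, resid))
			size = L2_L_SIZE;	/* 64 KB large-page mapping */
		else
			size = PAGE_SIZE;	/* 4 KB small-page mapping */
		va += size;
		pa += size;
		resid -= size;
	}
#endif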
387
388 /*
389 * Provide a fallback in case we were not able to determine it at
390 * compile-time.
391 */
392 #ifndef PMAP_NEEDS_PTE_SYNC
393 #define PMAP_NEEDS_PTE_SYNC pmap_needs_pte_sync
394 #define PMAP_INCLUDE_PTE_SYNC
395 #endif
396
397 #define PTE_SYNC(pte) \
398 do { \
399 if (PMAP_NEEDS_PTE_SYNC) { \
400 cpu_dcache_wb_range((vm_offset_t)(pte), sizeof(pt_entry_t));\
401 cpu_l2cache_wb_range((vm_offset_t)(pte), sizeof(pt_entry_t));\
402 }\
403 } while (/*CONSTCOND*/0)
404
405 #define PTE_SYNC_RANGE(pte, cnt) \
406 do { \
407 if (PMAP_NEEDS_PTE_SYNC) { \
408 cpu_dcache_wb_range((vm_offset_t)(pte), \
409 (cnt) << 2); /* * sizeof(pt_entry_t) */ \
410 cpu_l2cache_wb_range((vm_offset_t)(pte), \
411 (cnt) << 2); /* * sizeof(pt_entry_t) */ \
412 } \
413 } while (/*CONSTCOND*/0)
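/*
 * Illustrative sketch (not from the original file): a PTE store is
 * paired with PTE_SYNC()/PTE_SYNC_RANGE() so that the MMU table walker
 * sees the update even on CPUs without a write-through cache mode.
 * "ptep", "npte", "count" and "i" are hypothetical.
 */
#if 0
	*ptep = npte;				/* install the new PTE */
	PTE_SYNC(ptep);				/* push it past the caches */

	for (i = 0; i < count; i++)		/* a run of consecutive pages */
		ptep[i] = npte + (i << PAGE_SHIFT);
	PTE_SYNC_RANGE(ptep, count);		/* one writeback for the run */
#endif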
414
415 extern pt_entry_t pte_l1_s_cache_mode;
416 extern pt_entry_t pte_l1_s_cache_mask;
417
418 extern pt_entry_t pte_l2_l_cache_mode;
419 extern pt_entry_t pte_l2_l_cache_mask;
420
421 extern pt_entry_t pte_l2_s_cache_mode;
422 extern pt_entry_t pte_l2_s_cache_mask;
423
424 extern pt_entry_t pte_l1_s_cache_mode_pt;
425 extern pt_entry_t pte_l2_l_cache_mode_pt;
426 extern pt_entry_t pte_l2_s_cache_mode_pt;
427
428 extern pt_entry_t pte_l2_s_prot_u;
429 extern pt_entry_t pte_l2_s_prot_w;
430 extern pt_entry_t pte_l2_s_prot_mask;
431
432 extern pt_entry_t pte_l1_s_proto;
433 extern pt_entry_t pte_l1_c_proto;
434 extern pt_entry_t pte_l2_s_proto;
435
436 extern void (*pmap_copy_page_func)(vm_paddr_t, vm_paddr_t);
437 extern void (*pmap_zero_page_func)(vm_paddr_t, int, int);
438
439 #if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 || defined(CPU_XSCALE_81342)
440 void pmap_copy_page_generic(vm_paddr_t, vm_paddr_t);
441 void pmap_zero_page_generic(vm_paddr_t, int, int);
442
443 void pmap_pte_init_generic(void);
444 #if defined(CPU_ARM8)
445 void pmap_pte_init_arm8(void);
446 #endif
447 #if defined(CPU_ARM9)
448 void pmap_pte_init_arm9(void);
449 #endif /* CPU_ARM9 */
450 #if defined(CPU_ARM10)
451 void pmap_pte_init_arm10(void);
452 #endif /* CPU_ARM10 */
453 #endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 || CPU_XSCALE_81342 */
454
455 #if /* ARM_MMU_SA1 == */1
456 void pmap_pte_init_sa1(void);
457 #endif /* ARM_MMU_SA1 == 1 */
458
459 #if ARM_MMU_XSCALE == 1
460 void pmap_copy_page_xscale(vm_paddr_t, vm_paddr_t);
461 void pmap_zero_page_xscale(vm_paddr_t, int, int);
462
463 void pmap_pte_init_xscale(void);
464
465 void xscale_setup_minidata(vm_offset_t, vm_offset_t, vm_offset_t);
466
467 void pmap_use_minicache(vm_offset_t, vm_size_t);
468 #endif /* ARM_MMU_XSCALE == 1 */
469 #if defined(CPU_XSCALE_81342)
470 #define ARM_HAVE_SUPERSECTIONS
471 #endif
472
473 #define PTE_KERNEL 0
474 #define PTE_USER 1
475 #define l1pte_valid(pde) ((pde) != 0)
476 #define l1pte_section_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_S)
477 #define l1pte_page_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_C)
478 #define l1pte_fpage_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_F)
479
480 #define l2pte_index(v) (((v) & L2_ADDR_BITS) >> L2_S_SHIFT)
481 #define l2pte_valid(pte) ((pte) != 0)
482 #define l2pte_pa(pte) ((pte) & L2_S_FRAME)
483 #define l2pte_minidata(pte) (((pte) & \
484 (L2_B | L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))\
485 == (L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))
486
487 /* L1 and L2 page table macros */
488 #define pmap_pde_v(pde) l1pte_valid(*(pde))
489 #define pmap_pde_section(pde) l1pte_section_p(*(pde))
490 #define pmap_pde_page(pde) l1pte_page_p(*(pde))
491 #define pmap_pde_fpage(pde) l1pte_fpage_p(*(pde))
492
493 #define pmap_pte_v(pte) l2pte_valid(*(pte))
494 #define pmap_pte_pa(pte) l2pte_pa(*(pte))
495
496 /*
497 * Flags that indicate attributes of pages or mappings of pages.
498 *
499 * The PVF_MOD and PVF_REF flags are stored in the mdpage for each
500 * page. PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual
501 * pv_entry's for each page. They live in the same "namespace" so
502 * that we can clear multiple attributes at a time.
503 *
504 * Note the "non-cacheable" flag generally means the page has
505 * multiple mappings in a given address space.
506 */
507 #define PVF_MOD 0x01 /* page is modified */
508 #define PVF_REF 0x02 /* page is referenced */
509 #define PVF_WIRED 0x04 /* mapping is wired */
510 #define PVF_WRITE 0x08 /* mapping is writable */
511 #define PVF_EXEC 0x10 /* mapping is executable */
512 #define PVF_UNC 0x20 /* mapping is 'user' non-cacheable */
513 #define PVF_KNC 0x40 /* mapping is 'kernel' non-cacheable */
514 #define PVF_NC (PVF_UNC|PVF_KNC)
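/*
 * Illustrative sketch (not from the original file): because the PVF_*
 * bits share one namespace, several attributes can be cleared in a
 * single mask operation.  "m" (a vm_page_t) and "pv" (a pv_entry_t)
 * are hypothetical.
 */
#if 0
	m->md.pvh_attrs &= ~(PVF_MOD | PVF_REF);	/* per-page bits */
	pv->pv_flags &= ~(PVF_WRITE | PVF_NC);		/* per-mapping bits */
#endif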
515
516 void vector_page_setprot(int);
517
518 void pmap_update(pmap_t);
519
520 /*
521 * This structure is used by machine-dependent code to describe
522 * static mappings of devices, created at bootstrap time.
523 */
524 struct pmap_devmap {
525 vm_offset_t pd_va; /* virtual address */
526 vm_paddr_t pd_pa; /* physical address */
527 vm_size_t pd_size; /* size of region */
528 vm_prot_t pd_prot; /* protection code */
529 int pd_cache; /* cache attributes */
530 };
531
532 const struct pmap_devmap *pmap_devmap_find_pa(vm_paddr_t, vm_size_t);
533 const struct pmap_devmap *pmap_devmap_find_va(vm_offset_t, vm_size_t);
534
535 void pmap_devmap_bootstrap(vm_offset_t, const struct pmap_devmap *);
536 void pmap_devmap_register(const struct pmap_devmap *);
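/*
 * Illustrative sketch (not from the original file): a board's static
 * device map is conventionally a zero-terminated array of struct
 * pmap_devmap handed to pmap_devmap_bootstrap() or
 * pmap_devmap_register() early in the MD startup code.  The addresses
 * and sizes below are made up, and VM_PROT_* come from <vm/vm.h>.
 */
#if 0
static const struct pmap_devmap example_devmap[] = {
	{
		.pd_va    = 0xd0000000,			/* chosen KVA */
		.pd_pa    = 0x10000000,			/* device physical base */
		.pd_size  = 0x00100000,			/* 1 MB region */
		.pd_prot  = VM_PROT_READ | VM_PROT_WRITE,
		.pd_cache = PTE_NOCACHE,		/* device memory: uncached */
	},
	{ 0, 0, 0, 0, 0 }				/* zero size ends the table */
};

	pmap_devmap_register(example_devmap);
#endif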
537
538 #define SECTION_CACHE 0x1
539 #define SECTION_PT 0x2
540 void pmap_kenter_section(vm_offset_t, vm_paddr_t, int flags);
541 #ifdef ARM_HAVE_SUPERSECTIONS
542 void pmap_kenter_supersection(vm_offset_t, uint64_t, int flags);
543 #endif
544
545 extern char *_tmppt;
546
547 void pmap_postinit(void);
548
549 #ifdef ARM_USE_SMALL_ALLOC
550 void arm_add_smallalloc_pages(void *, void *, int, int);
551 vm_offset_t arm_ptovirt(vm_paddr_t);
552 void arm_init_smallalloc(void);
553 struct arm_small_page {
554 void *addr;
555 TAILQ_ENTRY(arm_small_page) pg_list;
556 };
557
558 #endif
559
560 #define ARM_NOCACHE_KVA_SIZE 0x1000000
561 extern vm_offset_t arm_nocache_startaddr;
562 void *arm_remap_nocache(void *, vm_size_t);
563 void arm_unmap_nocache(void *, vm_size_t);
564
565 extern vm_paddr_t dump_avail[];
566 #endif /* _KERNEL */
567
568 #endif /* !LOCORE */
569
570 #endif /* !_MACHINE_PMAP_H_ */