/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from the hp300 version by Mike Hibler; this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the page tables themselves.  This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 *	from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
 *	from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
 *	from: FreeBSD: src/sys/i386/include/pmap.h,v 1.70 2000/11/30
 *
 * $FreeBSD: releng/11.0/sys/arm/include/pmap-v4.h 295801 2016-02-19 09:23:32Z skra $
 */

#ifndef _MACHINE_PMAP_V4_H_
#define _MACHINE_PMAP_V4_H_

#include <machine/pte-v4.h>
#include <machine/cpuconf.h>
/*
 * Pte related macros
 */
#define	PTE_NOCACHE	1
#define	PTE_CACHE	2
#define	PTE_DEVICE	PTE_NOCACHE
#define	PTE_PAGETABLE	3

enum mem_type {
	STRONG_ORD = 0,
	DEVICE_NOSHARE,
	DEVICE_SHARE,
	NRML_NOCACHE,
	NRML_IWT_OWT,
	NRML_IWB_OWB,
	NRML_IWBA_OWBA
};

#ifndef LOCORE

#include <sys/queue.h>
#include <sys/_cpuset.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>

#define	PDESIZE		sizeof(pd_entry_t)	/* for assembly files */
#define	PTESIZE		sizeof(pt_entry_t)	/* for assembly files */

#define	pmap_page_get_memattr(m)	((m)->md.pv_memattr)
#define	pmap_page_is_mapped(m)		(!TAILQ_EMPTY(&(m)->md.pv_list))

/*
 * Pmap stuff
 */

/*
 * This structure is used to hold a virtual<->physical address
 * association and is used mostly by bootstrap code
 */
struct pv_addr {
	SLIST_ENTRY(pv_addr)	pv_list;
	vm_offset_t		pv_va;
	vm_paddr_t		pv_pa;
};
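
/*
 * Example (sketch only; kernel_l1pt and freemempos are illustrative names,
 * not defined in this header): bootstrap code typically records where it
 * placed a page table by filling in a pv_addr before handing it to
 * pmap_bootstrap() or pmap_link_l2pt():
 *
 *	struct pv_addr kernel_l1pt;
 *
 *	kernel_l1pt.pv_pa = <physical address of the L1 table>;
 *	kernel_l1pt.pv_va = <virtual address it is mapped at>;
 *	pmap_bootstrap(freemempos, &kernel_l1pt);
 */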

struct pv_entry;
struct pv_chunk;

struct md_page {
	int			pvh_attrs;
	vm_memattr_t		pv_memattr;
	vm_offset_t		pv_kva;		/* first kernel VA mapping */
	TAILQ_HEAD(,pv_entry)	pv_list;
};

struct l1_ttable;
struct l2_dtable;

/*
 * The number of L2 descriptor tables which can be tracked by an l2_dtable.
 * A bucket size of 16 provides for 16MB of contiguous virtual address
 * space per l2_dtable. Most processes will, therefore, require only two or
 * three of these to map their whole working set.
 */
#define	L2_BUCKET_LOG2	4
#define	L2_BUCKET_SIZE	(1 << L2_BUCKET_LOG2)
/*
 * Given the above "L2-descriptors-per-l2_dtable" constant, the number
 * of l2_dtable structures required to track all possible page descriptors
 * mappable by an L1 translation table is given by the following constants:
 */
#define	L2_LOG2		((32 - L1_S_SHIFT) - L2_BUCKET_LOG2)
#define	L2_SIZE		(1 << L2_LOG2)
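
/*
 * Worked example: with 1MB L1 sections (L1_S_SHIFT == 20), the constants
 * above give
 *
 *	L2_LOG2 = (32 - 20) - 4 = 8
 *	L2_SIZE = 1 << 8        = 256
 *
 * i.e. each l2_dtable bucket of 16 L2 tables spans 16 * 1MB = 16MB of
 * virtual address space, and 256 l2_dtable slots are enough to cover the
 * entire 4GB address space mapped by one L1 translation table.
 */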

struct pmap {
	struct mtx		pm_mtx;
	u_int8_t		pm_domain;
	struct l1_ttable	*pm_l1;
	struct l2_dtable	*pm_l2[L2_SIZE];
	cpuset_t		pm_active;	/* active on cpus */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	TAILQ_HEAD(,pv_entry)	pm_pvlist;	/* list of mappings in pmap */
};

typedef struct pmap *pmap_t;

#ifdef _KERNEL
extern struct pmap	kernel_pmap_store;
#define	kernel_pmap	(&kernel_pmap_store)

#define	PMAP_ASSERT_LOCKED(pmap) \
				mtx_assert(&(pmap)->pm_mtx, MA_OWNED)
#define	PMAP_LOCK(pmap)		mtx_lock(&(pmap)->pm_mtx)
#define	PMAP_LOCK_DESTROY(pmap)	mtx_destroy(&(pmap)->pm_mtx)
#define	PMAP_LOCK_INIT(pmap)	mtx_init(&(pmap)->pm_mtx, "pmap", \
				    NULL, MTX_DEF | MTX_DUPOK)
#define	PMAP_OWNED(pmap)	mtx_owned(&(pmap)->pm_mtx)
#define	PMAP_MTX(pmap)		(&(pmap)->pm_mtx)
#define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
#define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)
#endif
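
/*
 * Usage sketch: code that inspects or modifies a pmap's mappings takes the
 * per-pmap mutex around the operation, e.g.
 *
 *	PMAP_LOCK(pmap);
 *	... examine or update pmap->pm_l2[] / pm_pvlist ...
 *	PMAP_UNLOCK(pmap);
 *
 * PMAP_ASSERT_LOCKED() is the matching assertion for helper routines that
 * require the caller to already hold the lock.
 */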

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_list.
 */
typedef struct pv_entry {
	vm_offset_t		pv_va;		/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry)	pv_list;
	int			pv_flags;	/* flags (wired, etc...) */
	pmap_t			pv_pmap;	/* pmap where mapping lies */
	TAILQ_ENTRY(pv_entry)	pv_plist;
} *pv_entry_t;
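
/*
 * Example (sketch; m and pv are illustrative locals): all mappings of a
 * page can be enumerated by walking the page's pv_list in its md_page:
 *
 *	struct pv_entry *pv;
 *
 *	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list)
 *		printf("mapped at va %#x in pmap %p\n",
 *		    pv->pv_va, pv->pv_pmap);
 */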

/*
 * pv_entries are allocated in chunks per-process.  This avoids the
 * need to track per-pmap assignments.
 */
#define	_NPCM	8
#define	_NPCPV	252

struct pv_chunk {
	pmap_t			pc_pmap;
	TAILQ_ENTRY(pv_chunk)	pc_list;
	uint32_t		pc_map[_NPCM];	/* bitmap; 1 = free */
	uint32_t		pc_dummy[3];	/* aligns pv_chunk to 4KB */
	TAILQ_ENTRY(pv_chunk)	pc_lru;
	struct pv_entry		pc_pventry[_NPCPV];
};
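
/*
 * Example (sketch; pc is an illustrative pointer to a chunk): allocating a
 * pv_entry from a chunk amounts to finding a set bit in pc_map[] (1 = free),
 * clearing it, and indexing pc_pventry[] with the bit's position:
 *
 *	int field, bit;
 *
 *	for (field = 0; field < _NPCM; field++) {
 *		if (pc->pc_map[field] == 0)
 *			continue;
 *		bit = ffs(pc->pc_map[field]) - 1;
 *		pc->pc_map[field] &= ~(1u << bit);
 *		return (&pc->pc_pventry[field * 32 + bit]);
 *	}
 */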

#ifdef _KERNEL

boolean_t pmap_get_pde_pte(pmap_t, vm_offset_t, pd_entry_t **, pt_entry_t **);

/*
 * virtual address to page table entry and
 * to physical address. Likewise for alternate address space.
 * Note: these work recursively, thus vtopte of a pte will give
 * the corresponding pde that in turn maps it.
 */

/*
 * The current top of kernel VM.
 */
extern vm_offset_t pmap_curmaxkvaddr;

/* Virtual address to page table entry */
static __inline pt_entry_t *
vtopte(vm_offset_t va)
{
	pd_entry_t *pdep;
	pt_entry_t *ptep;

	if (pmap_get_pde_pte(kernel_pmap, va, &pdep, &ptep) == FALSE)
		return (NULL);
	return (ptep);
}
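
/*
 * Example (sketch; va and pa are illustrative locals, L2_S_OFFSET comes
 * from pte-v4.h): vtopte() can be combined with the l2pte_* macros defined
 * later in this file to translate a kernel virtual address by hand:
 *
 *	pt_entry_t *ptep = vtopte(va);
 *
 *	if (ptep != NULL && l2pte_valid(*ptep))
 *		pa = l2pte_pa(*ptep) | (va & L2_S_OFFSET);
 */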

void pmap_bootstrap(vm_offset_t firstaddr, struct pv_addr *l1pt);
int pmap_change_attr(vm_offset_t, vm_size_t, int);
void pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void pmap_kenter_nocache(vm_offset_t va, vm_paddr_t pa);
void pmap_kenter_user(vm_offset_t va, vm_paddr_t pa);
vm_paddr_t pmap_dump_kextract(vm_offset_t, pt2_entry_t *);
void pmap_kremove(vm_offset_t);
vm_page_t pmap_use_pt(pmap_t, vm_offset_t);
void pmap_debug(int);
void pmap_map_section(vm_offset_t, vm_offset_t, vm_offset_t, int, int);
void pmap_link_l2pt(vm_offset_t, vm_offset_t, struct pv_addr *);
vm_size_t pmap_map_chunk(vm_offset_t, vm_offset_t, vm_offset_t, vm_size_t, int, int);
void
pmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, int prot,
    int cache);
int pmap_fault_fixup(pmap_t, vm_offset_t, vm_prot_t, int);
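
/*
 * Example (sketch; l1pt_va, physaddr, regs_va and regs_pa are illustrative
 * names): board initialization code typically builds its static kernel
 * mappings with the helpers above, passing one of the PTE_* cache
 * attributes defined at the top of this file:
 *
 *	map 1MB of ordinary RAM, cacheable:
 *	pmap_map_chunk(l1pt_va, KERNBASE, physaddr, 0x00100000,
 *	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE);
 *
 *	map one device register page, uncached:
 *	pmap_map_entry(l1pt_va, regs_va, regs_pa,
 *	    VM_PROT_READ | VM_PROT_WRITE, PTE_DEVICE);
 */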

/*
 * Definitions for MMU domains
 */
#define	PMAP_DOMAINS		15	/* 15 'user' domains (1-15) */
#define	PMAP_DOMAIN_KERNEL	0	/* The kernel uses domain #0 */

/*
 * The new pmap ensures that page-tables are always mapping Write-Thru.
 * Thus, on some platforms we can run fast and loose and avoid syncing PTEs
 * on every change.
 *
 * Unfortunately, not all CPUs have a write-through cache mode.  So we
 * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs,
 * and if there is the chance for PTE syncs to be needed, we define
 * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run)
 * the code.
 */
extern int pmap_needs_pte_sync;

/*
 * These macros define the various bit masks in the PTE.
 *
 * We use these macros since we use different bits on different processor
 * models.
 */

#define	L1_S_CACHE_MASK_generic	(L1_S_B|L1_S_C)
#define	L1_S_CACHE_MASK_xscale	(L1_S_B|L1_S_C|L1_S_XSCALE_TEX(TEX_XSCALE_X)|\
				 L1_S_XSCALE_TEX(TEX_XSCALE_T))

#define	L2_L_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_L_CACHE_MASK_xscale	(L2_B|L2_C|L2_XSCALE_L_TEX(TEX_XSCALE_X) | \
				 L2_XSCALE_L_TEX(TEX_XSCALE_T))

#define	L2_S_PROT_U_generic	(L2_AP(AP_U))
#define	L2_S_PROT_W_generic	(L2_AP(AP_W))
#define	L2_S_PROT_MASK_generic	(L2_S_PROT_U|L2_S_PROT_W)

#define	L2_S_PROT_U_xscale	(L2_AP0(AP_U))
#define	L2_S_PROT_W_xscale	(L2_AP0(AP_W))
#define	L2_S_PROT_MASK_xscale	(L2_S_PROT_U|L2_S_PROT_W)

#define	L2_S_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_S_CACHE_MASK_xscale	(L2_B|L2_C|L2_XSCALE_T_TEX(TEX_XSCALE_X)| \
				 L2_XSCALE_T_TEX(TEX_XSCALE_X))

#define	L1_S_PROTO_generic	(L1_TYPE_S | L1_S_IMP)
#define	L1_S_PROTO_xscale	(L1_TYPE_S)

#define	L1_C_PROTO_generic	(L1_TYPE_C | L1_C_IMP2)
#define	L1_C_PROTO_xscale	(L1_TYPE_C)

#define	L2_L_PROTO		(L2_TYPE_L)

#define	L2_S_PROTO_generic	(L2_TYPE_S)
#define	L2_S_PROTO_xscale	(L2_TYPE_XSCALE_XS)

/*
 * User-visible names for the ones that vary with MMU class.
 */
#define	L2_AP(x)	(L2_AP0(x) | L2_AP1(x) | L2_AP2(x) | L2_AP3(x))

#if ARM_NMMUS > 1
/* More than one MMU class configured; use variables. */
#define	L2_S_PROT_U		pte_l2_s_prot_u
#define	L2_S_PROT_W		pte_l2_s_prot_w
#define	L2_S_PROT_MASK		pte_l2_s_prot_mask

#define	L1_S_CACHE_MASK		pte_l1_s_cache_mask
#define	L2_L_CACHE_MASK		pte_l2_l_cache_mask
#define	L2_S_CACHE_MASK		pte_l2_s_cache_mask

#define	L1_S_PROTO		pte_l1_s_proto
#define	L1_C_PROTO		pte_l1_c_proto
#define	L2_S_PROTO		pte_l2_s_proto

#elif ARM_MMU_GENERIC != 0
#define	L2_S_PROT_U		L2_S_PROT_U_generic
#define	L2_S_PROT_W		L2_S_PROT_W_generic
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_generic

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_generic
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_generic
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_generic

#define	L1_S_PROTO		L1_S_PROTO_generic
#define	L1_C_PROTO		L1_C_PROTO_generic
#define	L2_S_PROTO		L2_S_PROTO_generic

#elif ARM_MMU_XSCALE == 1
#define	L2_S_PROT_U		L2_S_PROT_U_xscale
#define	L2_S_PROT_W		L2_S_PROT_W_xscale
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_xscale

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_xscale
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_xscale
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_xscale

#define	L1_S_PROTO		L1_S_PROTO_xscale
#define	L1_C_PROTO		L1_C_PROTO_xscale
#define	L2_S_PROTO		L2_S_PROTO_xscale

#endif /* ARM_NMMUS > 1 */

#if defined(CPU_XSCALE_81342)
#define	PMAP_NEEDS_PTE_SYNC	1
#define	PMAP_INCLUDE_PTE_SYNC
#else
#define	PMAP_NEEDS_PTE_SYNC	0
#endif

/*
 * These macros return various bits based on kernel/user and protection.
 * Note that the compiler will usually fold these at compile time.
 */
#define	L1_S_PROT_U		(L1_S_AP(AP_U))
#define	L1_S_PROT_W		(L1_S_AP(AP_W))
#define	L1_S_PROT_MASK		(L1_S_PROT_U|L1_S_PROT_W)
#define	L1_S_WRITABLE(pd)	((pd) & L1_S_PROT_W)

#define	L1_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0))

#define	L2_L_PROT_U		(L2_AP(AP_U))
#define	L2_L_PROT_W		(L2_AP(AP_W))
#define	L2_L_PROT_MASK		(L2_L_PROT_U|L2_L_PROT_W)

#define	L2_L_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : 0))

#define	L2_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0))
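
/*
 * Example (sketch): the macros above fold to constants for the common
 * cases.  With the generic MMU class, a writable user small-page mapping
 * gets
 *
 *	L2_S_PROT(PTE_USER, VM_PROT_READ | VM_PROT_WRITE)
 *	    == L2_S_PROT_U | L2_S_PROT_W
 *	    == L2_AP(AP_U) | L2_AP(AP_W)
 *
 * while a read-only kernel mapping contributes no extra AP bits at all.
 */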

/*
 * Macros to test if a mapping is mappable with an L1 Section mapping
 * or an L2 Large Page mapping.
 */
#define	L1_S_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)

#define	L2_L_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)
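
/*
 * Example (sketch): a region is section-mappable only when both addresses
 * are 1MB aligned and at least 1MB of the region remains, e.g.
 *
 *	L1_S_MAPPABLE_P(0xc0000000, 0x80000000, 0x00200000)  -> true
 *	L1_S_MAPPABLE_P(0xc0100000, 0x80040000, 0x00200000)  -> false
 *
 * (0x80040000 is only 256KB aligned).  pmap_map_chunk() uses these tests
 * to pick the largest mapping size for each piece of a chunk.
 */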

/*
 * Provide a fallback in case we were not able to determine it at
 * compile-time.
 */
#ifndef PMAP_NEEDS_PTE_SYNC
#define	PMAP_NEEDS_PTE_SYNC	pmap_needs_pte_sync
#define	PMAP_INCLUDE_PTE_SYNC
#endif

#ifdef ARM_L2_PIPT
#define	_sync_l2(pte, size)	cpu_l2cache_wb_range(vtophys(pte), size)
#else
#define	_sync_l2(pte, size)	cpu_l2cache_wb_range(pte, size)
#endif

#define	PTE_SYNC(pte)							\
do {									\
	if (PMAP_NEEDS_PTE_SYNC) {					\
		cpu_dcache_wb_range((vm_offset_t)(pte), sizeof(pt_entry_t));\
		cpu_drain_writebuf();					\
		_sync_l2((vm_offset_t)(pte), sizeof(pt_entry_t));	\
	} else								\
		cpu_drain_writebuf();					\
} while (/*CONSTCOND*/0)

#define	PTE_SYNC_RANGE(pte, cnt)					\
do {									\
	if (PMAP_NEEDS_PTE_SYNC) {					\
		cpu_dcache_wb_range((vm_offset_t)(pte),			\
		    (cnt) << 2); /* * sizeof(pt_entry_t) */		\
		cpu_drain_writebuf();					\
		_sync_l2((vm_offset_t)(pte),				\
		    (cnt) << 2); /* * sizeof(pt_entry_t) */		\
	} else								\
		cpu_drain_writebuf();					\
} while (/*CONSTCOND*/0)

extern pt_entry_t		pte_l1_s_cache_mode;
extern pt_entry_t		pte_l1_s_cache_mask;

extern pt_entry_t		pte_l2_l_cache_mode;
extern pt_entry_t		pte_l2_l_cache_mask;

extern pt_entry_t		pte_l2_s_cache_mode;
extern pt_entry_t		pte_l2_s_cache_mask;

extern pt_entry_t		pte_l1_s_cache_mode_pt;
extern pt_entry_t		pte_l2_l_cache_mode_pt;
extern pt_entry_t		pte_l2_s_cache_mode_pt;

extern pt_entry_t		pte_l2_s_prot_u;
extern pt_entry_t		pte_l2_s_prot_w;
extern pt_entry_t		pte_l2_s_prot_mask;

extern pt_entry_t		pte_l1_s_proto;
extern pt_entry_t		pte_l1_c_proto;
extern pt_entry_t		pte_l2_s_proto;

extern void (*pmap_copy_page_func)(vm_paddr_t, vm_paddr_t);
extern void (*pmap_copy_page_offs_func)(vm_paddr_t a_phys,
    vm_offset_t a_offs, vm_paddr_t b_phys, vm_offset_t b_offs, int cnt);
extern void (*pmap_zero_page_func)(vm_paddr_t, int, int);

#if ARM_MMU_GENERIC != 0 || defined(CPU_XSCALE_81342)
void	pmap_copy_page_generic(vm_paddr_t, vm_paddr_t);
void	pmap_zero_page_generic(vm_paddr_t, int, int);

void	pmap_pte_init_generic(void);
#endif /* ARM_MMU_GENERIC != 0 || CPU_XSCALE_81342 */

#if ARM_MMU_XSCALE == 1
void	pmap_copy_page_xscale(vm_paddr_t, vm_paddr_t);
void	pmap_zero_page_xscale(vm_paddr_t, int, int);

void	pmap_pte_init_xscale(void);

void	xscale_setup_minidata(vm_offset_t, vm_offset_t, vm_offset_t);

void	pmap_use_minicache(vm_offset_t, vm_size_t);
#endif /* ARM_MMU_XSCALE == 1 */

#if defined(CPU_XSCALE_81342)
#define	ARM_HAVE_SUPERSECTIONS
#endif

#define	PTE_KERNEL	0
#define	PTE_USER	1
#define	l1pte_valid(pde)	((pde) != 0)
#define	l1pte_section_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_S)
#define	l1pte_page_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_C)
#define	l1pte_fpage_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_F)

#define	l2pte_index(v)		(((v) & L1_S_OFFSET) >> L2_S_SHIFT)
#define	l2pte_valid(pte)	((pte) != 0)
#define	l2pte_pa(pte)		((pte) & L2_S_FRAME)
#define	l2pte_minidata(pte)	(((pte) & \
				 (L2_B | L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X))) \
				 == (L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))

/* L1 and L2 page table macros */
#define	pmap_pde_v(pde)		l1pte_valid(*(pde))
#define	pmap_pde_section(pde)	l1pte_section_p(*(pde))
#define	pmap_pde_page(pde)	l1pte_page_p(*(pde))
#define	pmap_pde_fpage(pde)	l1pte_fpage_p(*(pde))

#define	pmap_pte_v(pte)		l2pte_valid(*(pte))
#define	pmap_pte_pa(pte)	l2pte_pa(*(pte))

/*
 * Flags that indicate attributes of pages or mappings of pages.
 *
 * The PVF_MOD and PVF_REF flags are stored in the mdpage for each
 * page.  PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual
 * pv_entry's for each page.  They live in the same "namespace" so
 * that we can clear multiple attributes at a time.
 *
 * Note the "non-cacheable" flag generally means the page has
 * multiple mappings in a given address space.
 */
#define	PVF_MOD		0x01		/* page is modified */
#define	PVF_REF		0x02		/* page is referenced */
#define	PVF_WIRED	0x04		/* mapping is wired */
#define	PVF_WRITE	0x08		/* mapping is writable */
#define	PVF_EXEC	0x10		/* mapping is executable */
#define	PVF_NC		0x20		/* mapping is non-cacheable */
#define	PVF_MWC		0x40		/* mapping is used multiple times in userland */
#define	PVF_UNMAN	0x80		/* mapping is unmanaged */
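
/*
 * Example (sketch): because these flags share one namespace, code in the
 * style of the pmap's clear-bit helper can strip several attributes from
 * every mapping of a page in one pass, e.g. revoking write access while
 * marking the page clean by clearing
 *
 *	PVF_WRITE | PVF_MOD
 *
 * across the page's pv_list.
 */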

void vector_page_setprot(int);

#define	SECTION_CACHE	0x1
#define	SECTION_PT	0x2
void	pmap_kenter_section(vm_offset_t, vm_paddr_t, int flags);
#ifdef ARM_HAVE_SUPERSECTIONS
void	pmap_kenter_supersection(vm_offset_t, uint64_t, int flags);
#endif

void	pmap_postinit(void);

#endif	/* _KERNEL */

#endif	/* !LOCORE */

#endif	/* !_MACHINE_PMAP_V4_H_ */