/*-
 * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
 * Copyright 2014 Michal Meloun <meloun@miracle.cz>
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * The ARM version of this file was more or less based on the i386 version,
 * which has the following provenance...
 *
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the pagetables themselves. This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 * from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
 * from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
 * from: FreeBSD: src/sys/i386/include/pmap.h,v 1.70 2000/11/30
 *
 * $FreeBSD: releng/12.0/sys/arm/include/pmap-v6.h 337463 2018-08-08 16:55:01Z alc $
 */

#ifndef _MACHINE_PMAP_V6_H_
#define _MACHINE_PMAP_V6_H_

#include <sys/queue.h>
#include <sys/_cpuset.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>

typedef uint32_t pt1_entry_t;		/* L1 table entry */
typedef uint32_t pt2_entry_t;		/* L2 table entry */
typedef uint32_t ttb_entry_t;		/* TTB entry */

#ifdef _KERNEL

#if 0
#define PMAP_PTE_NOCACHE	/* Use uncached page tables */
#endif

/*
 * (1) During pmap bootstrap, physical pages for L2 page tables are
 *     allocated in advance; they back the contiguous KVA mapping that
 *     starts at KERNBASE. This keeps things simple.
 * (2) During vm subsystem initialization, only the vm subsystem itself can
 *     allocate physical memory safely. As pmap_map() is called during this
 *     initialization, we must be prepared for it and have some physical
 *     pages for L2 page tables preallocated.
 *
 * Note that a few more pages for L2 page tables are preallocated as well,
 * for mappings lying above VM_MAX_KERNEL_ADDRESS.
 */
#ifndef NKPT2PG
/*
 * Ideally this is defined in the board configuration, because the
 * definition here must be safe enough, i.e. quite large.
 *
 * 1 GB KVA <=> 256 kernel L2 page table pages
 *
 * From real platforms:
 *	1 GB physical memory <=> 10 pages is enough
 *	2 GB physical memory <=> 21 pages is enough
 */
#define NKPT2PG		32
#endif
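
/*
 * A worked sizing sketch (added for illustration, not part of the original
 * interface): with 4 KiB pages, an ARMv6/v7 L2 page table holds 256 32-bit
 * entries and maps 1 MiB of VA, so one 4 KiB page carries four such tables
 * and maps 4 MiB of KVA.  The figures quoted above follow from that; the
 * macro names below are hypothetical and exist only to show the arithmetic.
 */
#if 0
#define	KVA_PER_PT2PG		(4 * 1024 * 1024)	/* 4 MiB of KVA per L2 pt page */
#define	PT2PG_PER_1GB_KVA	((1024 * 1024 * 1024) / KVA_PER_PT2PG)	/* 256 */
#define	KVA_PREMAPPED		(NKPT2PG * KVA_PER_PT2PG)	/* 32 pages -> 128 MiB */
#endif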
#endif /* _KERNEL */

/*
 * Pmap stuff
 */
struct pv_entry;
struct pv_chunk;

struct md_page {
	TAILQ_HEAD(,pv_entry)	pv_list;
	uint16_t		pt2_wirecount[4];
	vm_memattr_t		pat_mode;
};

struct pmap {
	struct mtx		pm_mtx;
	pt1_entry_t		*pm_pt1;	/* KVA of pt1 */
	pt2_entry_t		*pm_pt2tab;	/* KVA of pt2 pages table */
	TAILQ_HEAD(,pv_chunk)	pm_pvchunk;	/* list of mappings in pmap */
	cpuset_t		pm_active;	/* active on cpus */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	LIST_ENTRY(pmap)	pm_list;	/* List of all pmaps */
};

typedef struct pmap *pmap_t;

#ifdef _KERNEL
extern struct pmap	kernel_pmap_store;
#define kernel_pmap	(&kernel_pmap_store)

#define	PMAP_LOCK(pmap)		mtx_lock(&(pmap)->pm_mtx)
#define	PMAP_LOCK_ASSERT(pmap, type) \
				mtx_assert(&(pmap)->pm_mtx, (type))
#define	PMAP_LOCK_DESTROY(pmap)	mtx_destroy(&(pmap)->pm_mtx)
#define	PMAP_LOCK_INIT(pmap)	mtx_init(&(pmap)->pm_mtx, "pmap", \
				    NULL, MTX_DEF | MTX_DUPOK)
#define	PMAP_LOCKED(pmap)	mtx_owned(&(pmap)->pm_mtx)
#define	PMAP_MTX(pmap)		(&(pmap)->pm_mtx)
#define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
#define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)
#endif
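
/*
 * A minimal usage sketch for the locking macros above (illustration only;
 * pmap_try_inspect() is a hypothetical helper, not declared by this header):
 */
#if 0
static int
pmap_try_inspect(pmap_t pmap)
{

	if (!PMAP_TRYLOCK(pmap))
		return (0);		/* mutex busy, caller may retry */
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	/* ... safe to examine pmap->pm_pt1 and pmap->pm_pt2tab here ... */
	PMAP_UNLOCK(pmap);
	return (1);
}
#endif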

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page. An entry is a pv_entry_t, the list is pv_list.
 */
typedef struct pv_entry {
	vm_offset_t	pv_va;		/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry)	pv_next;
} *pv_entry_t;

/*
 * pv_entries are allocated in chunks per-process. This avoids the
 * need to track per-pmap assignments.
 */
#define	_NPCM	11
#define	_NPCPV	336
struct pv_chunk {
	pmap_t			pc_pmap;
	TAILQ_ENTRY(pv_chunk)	pc_list;
	uint32_t		pc_map[_NPCM];	/* bitmap; 1 = free */
	TAILQ_ENTRY(pv_chunk)	pc_lru;
	struct pv_entry		pc_pventry[_NPCPV];
};
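
/*
 * Sizing sketch (added for illustration, assuming ILP32 as on 32-bit ARM):
 * each pv_entry is 12 bytes, so a pv_chunk occupies
 * 4 + 8 + 44 + 8 + 336 * 12 = 4096 bytes, i.e. exactly one 4 KiB page,
 * and the 11 * 32 = 352 bits of pc_map are enough to cover all 336 entries.
 * CTASSERT() is the compile-time assertion macro from <sys/systm.h>.
 */
#if 0
CTASSERT(_NPCM * 32 >= _NPCPV);
CTASSERT(sizeof(struct pv_chunk) == 4096);
#endif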

#ifdef _KERNEL
extern ttb_entry_t pmap_kern_ttb; 	/* TTB for kernel pmap */

#define	pmap_page_get_memattr(m)	((m)->md.pat_mode)

/*
 * Only the following functions or macros may be used before pmap_bootstrap()
 * is called: pmap_kenter(), pmap_kextract(), pmap_kremove(), vtophys(), and
 * vtopte2().
 */
void pmap_bootstrap(vm_offset_t);
void pmap_kenter(vm_offset_t, vm_paddr_t);
void pmap_kremove(vm_offset_t);
boolean_t pmap_page_is_mapped(vm_page_t);
bool pmap_ps_enabled(pmap_t pmap);
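
/*
 * An early-boot usage sketch for the helpers above (illustration only;
 * early_map_one_page() is hypothetical, and the caller is assumed to own
 * both the KVA and the physical page):
 */
#if 0
static void
early_map_one_page(vm_offset_t va, vm_paddr_t pa)
{

	pmap_kenter(va, pa);	/* wire the physical page at va */
	/* ... use the mapping, even before pmap_bootstrap() has run ... */
	pmap_kremove(va);	/* and remove it again */
}
#endif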

void pmap_tlb_flush(pmap_t, vm_offset_t);
void pmap_tlb_flush_range(pmap_t, vm_offset_t, vm_size_t);

vm_paddr_t pmap_dump_kextract(vm_offset_t, pt2_entry_t *);

int pmap_fault(pmap_t, vm_offset_t, uint32_t, int, bool);

void pmap_set_tex(void);

/*
 * Functions for the pre-bootstrap epoch.
 */
void pmap_bootstrap_prepare(vm_paddr_t);
vm_paddr_t pmap_preboot_get_pages(u_int);
void pmap_preboot_map_pages(vm_paddr_t, vm_offset_t, u_int);
vm_offset_t pmap_preboot_reserve_pages(u_int);
vm_offset_t pmap_preboot_get_vpages(u_int);
void pmap_preboot_map_attr(vm_paddr_t, vm_offset_t, vm_size_t, vm_prot_t,
    vm_memattr_t);
void pmap_remap_vm_attr(vm_memattr_t old_attr, vm_memattr_t new_attr);
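
/*
 * A possible pre-bootstrap sequence using the functions above (a sketch
 * under assumptions, not code from this header; the physical pages returned
 * by pmap_preboot_get_pages() are assumed to be contiguous):
 */
#if 0
static vm_offset_t
preboot_alloc_mapped(vm_offset_t va, u_int npages)
{
	vm_paddr_t pa;

	pa = pmap_preboot_get_pages(npages);	/* grab physical pages */
	pmap_preboot_map_pages(pa, va, npages);	/* enter them at va */
	return (va);
}
#endif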

#endif /* _KERNEL */
#endif /* !_MACHINE_PMAP_V6_H_ */