/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from the hp300 version by Mike Hibler; this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the page tables themselves.  This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 *	from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
 *	from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
 * $FreeBSD: releng/5.0/sys/i386/include/pmap.h 101349 2002-08-05 03:40:28Z alc $
 */
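
/*
 * Illustrative sketch of the recursive trick described above (not part
 * of this header; the real setup is done in locore.s): installing the
 * page directory's own physical address as one of its entries makes
 * the directory show up in the virtual address space as an ordinary
 * page table, so the page tables map themselves.  ptd_phys is a
 * hypothetical variable holding the physical address of the page
 * directory:
 *
 *	PTD[PTDPTDI] = (pd_entry_t)ptd_phys | PG_V | PG_RW;
 */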

#ifndef _MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

/*
 * Page-directory and page-table entries follow this format, with a few
 * of the fields not present here and there, depending on a lot of things.
 */
/* ---- Intel Nomenclature ---- */
#define	PG_V		0x001	/* P	Valid			*/
#define	PG_RW		0x002	/* R/W	Read/Write		*/
#define	PG_U		0x004	/* U/S	User/Supervisor		*/
#define	PG_NC_PWT	0x008	/* PWT	Write through		*/
#define	PG_NC_PCD	0x010	/* PCD	Cache disable		*/
#define	PG_A		0x020	/* A	Accessed		*/
#define	PG_M		0x040	/* D	Dirty			*/
#define	PG_PS		0x080	/* PS	Page size (0=4k,1=4M)	*/
#define	PG_G		0x100	/* G	Global			*/
#define	PG_AVAIL1	0x200	/*    /	Available for system	*/
#define	PG_AVAIL2	0x400	/*   <	programmers use		*/
#define	PG_AVAIL3	0x800	/*    \				*/
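
/*
 * Illustrative sketch (an assumption, not part of this header): a
 * valid, writable, user-accessible 4K mapping is composed by OR-ing a
 * page-aligned physical address with the bits above.  pa is a
 * hypothetical page-aligned physical address:
 *
 *	pt_entry_t pte = pa | PG_V | PG_RW | PG_U;
 */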

/* Our various interpretations of the above */
#define	PG_W		PG_AVAIL1	/* "Wired" pseudoflag */
#define	PG_MANAGED	PG_AVAIL2
#define	PG_FRAME	(~PAGE_MASK)
#define	PG_PROT		(PG_RW|PG_U)	/* all protection bits */
#define	PG_N		(PG_NC_PWT|PG_NC_PCD)	/* Non-cacheable */
/*
 * Page Protection Exception bits
 */

#define	PGEX_P		0x01	/* Protection violation vs. not present */
#define	PGEX_W		0x02	/* during a Write cycle */
#define	PGEX_U		0x04	/* access from User mode (UPL) */

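/*
 * Illustrative sketch: a trap handler decodes the page-fault error
 * code with these bits (err is a hypothetical code taken from the
 * trap frame): (err & PGEX_P) == 0 means the page was not present,
 * (err & PGEX_W) means the faulting access was a write, and
 * (err & PGEX_U) means the access came from user mode.
 */
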
/*
 * Size of Kernel address space.  This is the number of page table
 * pages (4MB each) to use for the kernel.  256 pages == 1 Gigabyte.
 * This **MUST** be a multiple of 4 (eg: 252, 256, 260, etc).
 */
#ifndef KVA_PAGES
#define KVA_PAGES	256
#endif

/*
 * Pte related macros
 */
#define	VADDR(pdi, pti)	((vm_offset_t)(((pdi)<<PDRSHIFT)|((pti)<<PAGE_SHIFT)))
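
/*
 * Worked example (values assume the i386 defaults PDRSHIFT == 22 and
 * PAGE_SHIFT == 12): VADDR(1, 1) == (1 << 22) | (1 << 12) == 0x401000,
 * i.e. the second 4K page inside the second 4MB page-directory slot.
 */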

#ifndef NKPT
#define	NKPT		30	/* actual number of kernel page tables */
#endif
#ifndef NKPDE
#ifdef SMP
#define NKPDE	(KVA_PAGES - 2)	/* addressable number of page tables/pde's */
#else
#define NKPDE	(KVA_PAGES - 1)	/* addressable number of page tables/pde's */
#endif
#endif

/*
 * The *PTDI values control the layout of virtual memory
 *
 * XXX This works for now, but I am not really happy with it, I'll fix it
 * right after I fix locore.s and the magic 28K hole
 *
 * SMP_PRIVPAGES: The per-cpu address space is 0xff800000 -> 0xffbfffff
 */
#define	APTDPTDI	(NPDEPG-1)	/* alt ptd entry that points to APTD */
#ifdef SMP
#define MPPTDI		(APTDPTDI-1)	/* per cpu ptd entry */
#define	KPTDI		(MPPTDI-NKPDE)	/* start of kernel virtual pde's */
#else
#define	KPTDI		(APTDPTDI-NKPDE)	/* start of kernel virtual pde's */
#endif	/* SMP */
#define	PTDPTDI		(KPTDI-1)	/* ptd entry that points to ptd! */
#define	UMAXPTDI	(PTDPTDI-1)	/* ptd entry for user space end */
#define	UMAXPTEOFF	(NPTEPG)	/* pte entry for user space end */
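
/*
 * Worked example with the defaults (KVA_PAGES == 256, NPDEPG == 1024):
 * without SMP, NKPDE == 255, so APTDPTDI == 1023 (alternate map at
 * 0xffc00000), KPTDI == 768 (kernel virtual base 0xc0000000), and
 * PTDPTDI == 767 (recursive map at 0xbfc00000).  With SMP, NKPDE == 254
 * and MPPTDI == 1022, which yields the same KPTDI of 768.
 */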

/*
 * XXX doesn't really belong here I guess...
 */
#define ISA_HOLE_START    0xa0000
#define ISA_HOLE_LENGTH (0x100000-ISA_HOLE_START)

#ifndef LOCORE

#include <sys/queue.h>

typedef u_int32_t pd_entry_t;
typedef u_int32_t pt_entry_t;

#define	PDESIZE		sizeof(pd_entry_t) /* for assembly files */
#define	PTESIZE		sizeof(pt_entry_t) /* for assembly files */

/*
 * Address of current and alternate address space page table maps
 * and directories.
 */
#ifdef _KERNEL
extern pt_entry_t PTmap[], APTmap[];
extern pd_entry_t PTD[], APTD[];
extern pd_entry_t PTDpde, APTDpde;

extern pd_entry_t *IdlePTD;	/* physical address of "Idle" state directory */
#endif

#ifdef _KERNEL
/*
 * Translate a virtual address to its page table entry and to a
 * physical address.  Likewise for the alternate address space.
 * Note: these work recursively, thus vtopte of a pte will give
 * the corresponding pde that in turn maps it.
 */
#define	vtopte(va)	(PTmap + i386_btop(va))
#define	avtopte(va)	(APTmap + i386_btop(va))
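
/*
 * Illustrative sketch of the recursion noted above (va is a
 * hypothetical kernel virtual address): because PTmap is the recursive
 * window onto the page tables, applying vtopte() twice lands on the
 * page-directory entry that maps the original address:
 *
 *	pt_entry_t *pte = vtopte(va);
 *	pd_entry_t *pde = (pd_entry_t *)vtopte((vm_offset_t)pte);
 */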

/*
 * Routine:	pmap_kextract
 * Function:
 *	Extract the physical page address associated with the
 *	given kernel virtual address.
 */
static __inline vm_offset_t
pmap_kextract(vm_offset_t va)
{
	vm_offset_t pa;

	if ((pa = (vm_offset_t) PTD[va >> PDRSHIFT]) & PG_PS) {
		/* 4MB superpage: the pde itself holds the frame. */
		pa = (pa & ~(NBPDR - 1)) | (va & (NBPDR - 1));
	} else {
		/* 4K page: fetch the pte through the recursive map. */
		pa = *(vm_offset_t *)vtopte(va);
		pa = (pa & PG_FRAME) | (va & PAGE_MASK);
	}
	return pa;
}

#define	vtophys(va)	pmap_kextract(((vm_offset_t) (va)))
#endif

/*
 * Pmap stuff
 */
struct	pv_entry;

struct md_page {
	int			pv_list_count;	/* number of entries on pv_list */
	TAILQ_HEAD(,pv_entry)	pv_list;	/* mappings of this page */
};

struct pmap {
	pd_entry_t		*pm_pdir;	/* KVA of page directory */
	vm_object_t		pm_pteobj;	/* Container for pte's */
	TAILQ_HEAD(,pv_entry)	pm_pvlist;	/* list of mappings in pmap */
	int			pm_active;	/* active on cpus */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	struct vm_page		*pm_ptphint;	/* pmap ptp hint */
	LIST_ENTRY(pmap)	pm_list;	/* List of all pmaps */
};

#define	pmap_page_is_mapped(m)	(!TAILQ_EMPTY(&(m)->md.pv_list))
#define	pmap_resident_count(pmap) (pmap)->pm_stats.resident_count
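
/*
 * Illustrative sketch (m and pm are hypothetical): the macros above
 * let a caller ask whether a page has any mappings, and how many
 * pages a pmap currently holds:
 *
 *	if (pmap_page_is_mapped(m) && pmap_resident_count(pm) > 0)
 *		...;
 */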

typedef struct pmap	*pmap_t;

#ifdef _KERNEL
extern struct pmap	kernel_pmap_store;
#define kernel_pmap	(&kernel_pmap_store)
#endif

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t; the list lives in
 * the page's md_page (see pmap_page_is_mapped() above).
 */
typedef struct pv_entry {
	pmap_t		pv_pmap;	/* pmap where mapping lies */
	vm_offset_t	pv_va;		/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry)	pv_list;	/* per-page list linkage */
	TAILQ_ENTRY(pv_entry)	pv_plist;	/* per-pmap list linkage */
	vm_page_t	pv_ptem;	/* VM page for pte */
} *pv_entry_t;
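
/*
 * Illustrative sketch (m is a hypothetical vm_page_t; the pmap module
 * walks such lists under its own locking): every mapping of a page can
 * be visited through its pv list:
 *
 *	pv_entry_t pv;
 *	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list)
 *		... use pv->pv_pmap and pv->pv_va ...
 */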

#define	PV_ENTRY_NULL	((pv_entry_t) 0)

#define	PV_CI		0x01	/* all entries must be cache inhibited */
#define	PV_PTPAGE	0x02	/* entry maps a page table page */

#ifdef	_KERNEL

#define NPPROVMTRR		8
#define PPRO_VMTRRphysBase0	0x200
#define PPRO_VMTRRphysMask0	0x201
struct ppro_vmtrr {
	u_int64_t base, mask;
};
extern struct ppro_vmtrr PPro_vmtrr[NPPROVMTRR];
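
/*
 * Illustrative sketch (an assumption; rdmsr() is from
 * <machine/cpufunc.h>): the variable-range MTRR MSRs come in
 * base/mask pairs spaced two MSRs apart, so pair i would be read as:
 *
 *	PPro_vmtrr[i].base = rdmsr(PPRO_VMTRRphysBase0 + i * 2);
 *	PPro_vmtrr[i].mask = rdmsr(PPRO_VMTRRphysMask0 + i * 2);
 */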

extern caddr_t	CADDR1;
extern pt_entry_t *CMAP1;
extern vm_offset_t avail_end;
extern vm_offset_t avail_start;
extern vm_offset_t clean_eva;
extern vm_offset_t clean_sva;
extern vm_offset_t phys_avail[];
extern char *ptvmmap;		/* poor name! */
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;

void	pmap_bootstrap(vm_offset_t, vm_offset_t);
void	*pmap_mapdev(vm_offset_t, vm_size_t);
void	pmap_unmapdev(vm_offset_t, vm_size_t);
pt_entry_t *pmap_pte(pmap_t, vm_offset_t) __pure2;
vm_page_t pmap_use_pt(pmap_t, vm_offset_t);
void	pmap_set_opt(void);
void	pmap_invalidate_page(pmap_t, vm_offset_t);
void	pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t);
void	pmap_invalidate_all(pmap_t);
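
/*
 * Illustrative sketch (addresses hypothetical): device registers are
 * brought into KVA with pmap_mapdev() and released again with
 * pmap_unmapdev():
 *
 *	void *regs = pmap_mapdev(0xfee00000, PAGE_SIZE);
 *	...
 *	pmap_unmapdev((vm_offset_t)regs, PAGE_SIZE);
 */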

#endif /* _KERNEL */

#endif /* !LOCORE */

#endif /* !_MACHINE_PMAP_H_ */