/* FreeBSD/Linux Kernel Cross Reference: sys/intel/pmap.h */
1 /*
2 * Mach Operating System
3 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26 /*
27 * HISTORY
28 * $Log: pmap.h,v $
29 * Revision 2.7 91/12/10 16:32:23 jsb
30 * Fixes from Intel
31 * [91/12/10 15:51:47 jsb]
32 *
33 * Revision 2.6 91/08/28 11:13:15 jsb
34 * From Intel SSD: turn off caching for i860 for now.
35 * [91/08/26 18:31:19 jsb]
36 *
37 * Revision 2.5 91/05/14 16:30:44 mrt
38 * Correcting copyright
39 *
40 * Revision 2.4 91/05/08 12:46:54 dbg
41 * Add volatile declarations. Load CR3 when switching to kernel
42 * pmap in PMAP_ACTIVATE_USER. Fix PMAP_ACTIVATE_KERNEL.
43 * [91/04/26 14:41:54 dbg]
44 *
45 * Revision 2.3 91/02/05 17:20:49 mrt
46 * Changed to new Mach copyright
47 * [91/01/31 18:17:51 mrt]
48 *
49 * Revision 2.2 90/12/04 14:50:35 jsb
50 * First checkin (for intel directory).
51 * [90/12/03 21:55:40 jsb]
52 *
53 * Revision 2.4 90/08/06 15:07:23 rwd
54 * Remove ldt (not used).
55 * [90/07/17 dbg]
56 *
57 * Revision 2.3 90/06/02 14:48:53 rpd
58 * Added PMAP_CONTEXT definition.
59 * [90/06/02 rpd]
60 *
61 * Revision 2.2 90/05/03 15:37:16 dbg
62 * Move page-table definitions into i386/pmap.h.
63 * [90/04/05 dbg]
64 *
65 * Define separate Write and User bits in pte instead of protection
66 * code.
67 * [90/03/25 dbg]
68 *
69 * Load dirbase directly from pmap. Split PMAP_ACTIVATE and
70 * PMAP_DEACTIVATE into separate user and kernel versions.
71 * [90/02/08 dbg]
72 *
73 * Revision 1.6 89/09/25 12:25:50 rvb
74 * seg_desc -> fakedesc
75 * [89/09/23 rvb]
76 *
77 * Revision 1.5 89/09/05 20:41:38 jsb
78 * Added pmap_phys_to_frame definition.
79 * [89/09/05 18:47:08 jsb]
80 *
81 * Revision 1.4 89/03/09 20:03:34 rpd
82 * More cleanup.
83 *
84 * Revision 1.3 89/02/26 12:33:18 gm0w
85 * Changes for cleanup.
86 *
87 * 31-Dec-88 Robert Baron (rvb) at Carnegie-Mellon University
88 * Derived from MACH2.0 vax release.
89 *
90 * 17-Jan-88 David Golub (dbg) at Carnegie-Mellon University
91 * MARK_CPU_IDLE and MARK_CPU_ACTIVE must manipulate a separate
92 * cpu_idle set. The scheduler's cpu_idle indication is NOT
93 * synchronized with these calls. MARK_CPU_ACTIVE also needs spls.
94 *
95 */
96
97 /*
98 * File: pmap.h
99 *
100 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
101 * Date: 1985
102 *
103 * Machine-dependent structures for the physical map module.
104 */
105
106 #ifndef _PMAP_MACHINE_
107 #define _PMAP_MACHINE_ 1
108
109 #ifndef ASSEMBLER
110
111 #include <kern/zalloc.h>
112 #include <kern/lock.h>
113 #include <mach/machine/vm_param.h>
114 #include <mach/vm_statistics.h>
115 #include <mach/kern_return.h>
116
117 /*
118 * Define the generic in terms of the specific
119 */
120
121 #if i386
122 #define INTEL_PGBYTES I386_PGBYTES
123 #define INTEL_PGSHIFT I386_PGSHIFT
124 #define intel_btop(x) i386_btop(x)
125 #define intel_ptob(x) i386_ptob(x)
126 #define intel_round_page(x) i386_round_page(x)
127 #define intel_trunc_page(x) i386_trunc_page(x)
128 #define trunc_intel_to_vm(x) trunc_i386_to_vm(x)
129 #define round_intel_to_vm(x) round_i386_to_vm(x)
130 #define vm_to_intel(x) vm_to_i386(x)
131 #endif i386
132 #if i860
133 #define INTEL_PGBYTES I860_PGBYTES
134 #define INTEL_PGSHIFT I860_PGSHIFT
135 #define intel_btop(x) i860_btop(x)
136 #define intel_ptob(x) i860_ptob(x)
137 #define intel_round_page(x) i860_round_page(x)
138 #define intel_trunc_page(x) i860_trunc_page(x)
139 #define trunc_intel_to_vm(x) trunc_i860_to_vm(x)
140 #define round_intel_to_vm(x) round_i860_to_vm(x)
141 #define vm_to_intel(x) vm_to_i860(x)
142 #endif i860
143
144 /*
145 * i386/i486/i860 Page Table Entry
146 */
147
148 typedef unsigned int pt_entry_t;
149 #define PT_ENTRY_NULL ((pt_entry_t *) 0)
150
151 #endif ASSEMBLER
152
/*
 *	Two-level i386 address decomposition: the top 10 bits of a
 *	virtual address select a page-directory entry, the next 10
 *	bits select a page-table entry, and the low 12 bits are the
 *	byte offset within the page.
 */
#define	INTEL_OFFMASK	0xfff	/* low 12 bits: byte offset in page */
#define	PDESHIFT	22	/* shift down to the directory index */
#define	PDEMASK		0x3ff	/* 10-bit directory index mask */
#define	PTESHIFT	12	/* shift down to the page-table index */
#define	PTEMASK		0x3ff	/* 10-bit page-table index mask */

/*
 *	Page-directory index for an address offset.
 */
#define	pdenum(a)	(((a) >> PDESHIFT) & PDEMASK)

/*
 *	First user virtual address mapped by directory slot 'a'.
 */
#define	pdetova(a)	((vm_offset_t)(a) << PDESHIFT)

/*
 *	Page-table index for an address offset.
 */
#define	ptenum(a)	(((a) >> PTESHIFT) & PTEMASK)

/*
 *	A page table and a page directory each occupy exactly one
 *	page, so both hold one page's worth of entries.
 */
#define	NPTES	(intel_ptob(1)/sizeof(pt_entry_t))
#define	NPDES	(intel_ptob(1)/sizeof(pt_entry_t))
176
/*
 *	Hardware pte bit definitions (to be used directly on the ptes
 *	without using the bit fields).
 */

#if	i860
#define	INTEL_PTE_valid		0x00000001
#else
#define	INTEL_PTE_VALID		0x00000001
#endif
#define	INTEL_PTE_WRITE		0x00000002
#define	INTEL_PTE_USER		0x00000004
#define	INTEL_PTE_WTHRU		0x00000008
#define	INTEL_PTE_NCACHE	0x00000010
#define	INTEL_PTE_REF		0x00000020
#define	INTEL_PTE_MOD		0x00000040
#define	INTEL_PTE_WIRED		0x00000200
#define	INTEL_PTE_PFN		0xfffff000

#if	i860
/*
 * On i860 a "valid" pte pre-sets REF and MOD - presumably because
 * the hardware does not maintain them; TODO confirm against the
 * i860 pmap code.  With NOCACHE, caching is also disabled
 * (see HISTORY rev 2.6: "turn off caching for i860 for now").
 * (Fix: stray identifiers after #else/#endif wrapped in comments.)
 */
#if	NOCACHE
#define	INTEL_PTE_VALID	(INTEL_PTE_valid \
			|INTEL_PTE_WTHRU \
			|INTEL_PTE_NCACHE \
			|INTEL_PTE_REF \
			|INTEL_PTE_MOD \
			)
#else	/* NOCACHE */
#define	INTEL_PTE_VALID	(INTEL_PTE_valid \
			|INTEL_PTE_REF \
			|INTEL_PTE_MOD \
			)
#endif	/* NOCACHE */
#endif	/* i860 */

#define	pa_to_pte(a)		((a) & INTEL_PTE_PFN)
#define	pte_to_pa(p)		((p) & INTEL_PTE_PFN)
#define	pte_increment_pa(p)	((p) += INTEL_OFFMASK+1)

/*
 *	Convert page table entry to kernel virtual address
 */
#define	ptetokv(a)	(phystokv(pte_to_pa(a)))
220
#ifndef	ASSEMBLER
/*
 * Set of cpus, one bit per cpu; the comment below says the set must
 * fit in 32 bits.  Volatile: changed by other processors.
 */
typedef	volatile long	cpu_set;	/* set of CPUs - must be <= 32 */
					/* changed by other processors */

/*
 * Machine-dependent physical map structure.
 */
struct pmap {
	pt_entry_t	*dirbase;	/* page directory pointer register */
	int		ref_count;	/* reference count */
	decl_simple_lock_data(,lock)
					/* lock on map */
	struct pmap_statistics	stats;	/* map statistics */
	cpu_set		cpus_using;	/* bitmap of cpus using pmap */
};

typedef struct pmap	*pmap_t;

#define	PMAP_NULL	((pmap_t) 0)

#if	i860
/*
 * NOTE(review): on i860 set_dirbase is commented out (per "akp"),
 * leaving it undefined even though PMAP_ACTIVATE_USER below uses it -
 * confirm how i860 builds resolve this.
 */
/*#define set_dirbase(dirbase) flush_and_ctxsw(dirbase)*//*akp*/
#else
/* Load CR3 with the (physical) page-directory base; flushes the TLB. */
#define set_dirbase(dirbase) set_cr3(dirbase)
#endif
243
244 #if NCPUS > 1
245 /*
246 * List of cpus that are actively using mapped memory. Any
247 * pmap update operation must wait for all cpus in this list.
248 * Update operations must still be queued to cpus not in this
249 * list.
250 */
251 cpu_set cpus_active;
252
253 /*
254 * List of cpus that are idle, but still operating, and will want
255 * to see any kernel pmap updates when they become active.
256 */
257 cpu_set cpus_idle;
258
259 /*
260 * Quick test for pmap update requests.
261 */
262 volatile
263 boolean_t cpu_update_needed[NCPUS];
264
265 /*
266 * External declarations for PMAP_ACTIVATE.
267 */
268
269 void process_pmap_updates();
270 void pmap_update_interrupt();
271 extern pmap_t kernel_pmap;
272
273 #endif NCPUS > 1
274
275 /*
276 * Machine dependent routines that are used only for i386/i486/i860.
277 */
278
279 pt_entry_t *pmap_pte();
280
281 /*
282 * Macros for speed.
283 */
284
#if	NCPUS > 1

/*
 *	For multiple CPUS, PMAP_ACTIVATE and PMAP_DEACTIVATE must manage
 *	fields to control TLB invalidation on other CPUS.
 */

/*
 * Bring the kernel pmap into use on this cpu: leave cpus_active so
 * pending updates can proceed, take the kernel pmap lock, drain any
 * queued invalidate requests for this cpu, join the pmap's
 * cpus_using set, and rejoin cpus_active - all before unlocking.
 * The in-macro comment notes IPL is lowered later by load_context().
 */
#define	PMAP_ACTIVATE_KERNEL(my_cpu)	{ \
\
	/* \
	 *	Let pmap updates proceed while we wait for this pmap. \
	 */ \
	i_bit_clear((my_cpu), &cpus_active); \
\
	/* \
	 *	Lock the pmap to put this cpu in its active set. \
	 *	Wait for updates here. \
	 */ \
	simple_lock(&kernel_pmap->lock); \
\
	/* \
	 *	Process invalidate requests for the kernel pmap. \
	 */ \
	if (cpu_update_needed[(my_cpu)]) \
	    process_pmap_updates(kernel_pmap); \
\
	/* \
	 *	Mark that this cpu is using the pmap. \
	 */ \
	i_bit_set((my_cpu), &kernel_pmap->cpus_using); \
\
	/* \
	 *	Mark this cpu active - IPL will be lowered by \
	 *	load_context(). \
	 */ \
	i_bit_set((my_cpu), &cpus_active); \
\
	simple_unlock(&kernel_pmap->lock); \
}

/*
 * Stop using the kernel pmap on this cpu.  Only clears this cpu's
 * bit in cpus_using; per the in-macro comment this is safe even
 * while the pmap is locked against updates.
 */
#define	PMAP_DEACTIVATE_KERNEL(my_cpu)	{ \
	/* \
	 *	Mark pmap no longer in use by this cpu even if \
	 *	pmap is locked against updates. \
	 */ \
	i_bit_clear((my_cpu), &kernel_pmap->cpus_using); \
}
332
/*
 * Switch this cpu to pmap's address space.  The dirbase (CR3 on
 * i386) is reloaded even when switching to the kernel pmap (see
 * HISTORY rev 2.4).  For a user pmap, this cpu additionally joins
 * the pmap's cpus_using set under the pmap lock; no explicit TLB
 * invalidation is needed because reloading dirbase invalidates the
 * entire user mapping (per the in-macro comment).
 * 'th' is unused here - presumably kept for interface symmetry
 * with other architectures; confirm against machine-independent
 * callers.
 */
#define	PMAP_ACTIVATE_USER(pmap, th, my_cpu)	{ \
	register pmap_t		tpmap = (pmap); \
\
	if (tpmap == kernel_pmap) { \
	    /* \
	     *	If this is the kernel pmap, switch to its page tables. \
	     */ \
	    set_dirbase(kvtophys(tpmap->dirbase)); \
	} \
	else { \
	    /* \
	     *	Let pmap updates proceed while we wait for this pmap. \
	     */ \
	    i_bit_clear((my_cpu), &cpus_active); \
\
	    /* \
	     *	Lock the pmap to put this cpu in its active set. \
	     *	Wait for updates here. \
	     */ \
	    simple_lock(&tpmap->lock); \
\
	    /* \
	     *	No need to invalidate the TLB - the entire user pmap \
	     *	will be invalidated by reloading dirbase. \
	     */ \
	    set_dirbase(kvtophys(tpmap->dirbase)); \
\
	    /* \
	     *	Mark that this cpu is using the pmap. \
	     */ \
	    i_bit_set((my_cpu), &tpmap->cpus_using); \
\
	    /* \
	     *	Mark this cpu active - IPL will be lowered by \
	     *	load_context(). \
	     */ \
	    i_bit_set((my_cpu), &cpus_active); \
\
	    simple_unlock(&tpmap->lock); \
	} \
}
374
/*
 * Stop using a user pmap on this cpu: clear this cpu's bit in the
 * pmap's cpus_using set.  The kernel pmap is never marked this way,
 * so it is skipped.
 * (Fix: the clear now goes through tpmap, which was computed
 * precisely so that the 'pmap' argument is evaluated only once;
 * the original re-expanded '(pmap)' here.)
 */
#define PMAP_DEACTIVATE_USER(pmap, thread, my_cpu)	{ \
	register pmap_t		tpmap = (pmap); \
\
	/* \
	 *	Do nothing if this is the kernel pmap. \
	 */ \
	if (tpmap != kernel_pmap) { \
	    /* \
	     *	Mark pmap no longer in use by this cpu even if \
	     *	pmap is locked against updates. \
	     */ \
	    i_bit_clear((my_cpu), &tpmap->cpus_using); \
	} \
}
389
/*
 * Mark this cpu idle for the pmap module.  Per the HISTORY note
 * (17-Jan-88), this cpu_idle set is separate from the scheduler's
 * and is NOT synchronized with it.  Done at splvm so that a pmap
 * update interrupt cannot observe a half-updated idle/active state.
 */
#define	MARK_CPU_IDLE(my_cpu)	{ \
	/* \
	 *	Mark this cpu idle, and remove it from the active set, \
	 *	since it is not actively using any pmap.  Signal_cpus \
	 *	will notice that it is idle, and avoid signaling it, \
	 *	but will queue the update request for when the cpu \
	 *	becomes active. \
	 */ \
	int	s = splvm(); \
	i_bit_set((my_cpu), &cpus_idle); \
	i_bit_clear((my_cpu), &cpus_active); \
	splx(s); \
}

/*
 * Transition this cpu from idle back to active.  Ordering matters:
 * leave the idle set first, then drain any queued update, then join
 * the active set - the in-macro comment explains why.
 */
#define MARK_CPU_ACTIVE(my_cpu)	{ \
\
	int	s = splvm(); \
	/* \
	 *	If a kernel_pmap update was requested while this cpu \
	 *	was idle, process it as if we got the interrupt. \
	 *	Before doing so, remove this cpu from the idle set. \
	 *	Since we do not grab any pmap locks while we flush \
	 *	our TLB, another cpu may start an update operation \
	 *	before we finish.  Removing this cpu from the idle \
	 *	set assures that we will receive another update \
	 *	interrupt if this happens. \
	 */ \
	i_bit_clear((my_cpu), &cpus_idle); \
\
	if (cpu_update_needed[(my_cpu)]) \
	    pmap_update_interrupt(); \
\
	/* \
	 *	Mark that this cpu is now active. \
	 */ \
	i_bit_set((my_cpu), &cpus_active); \
	splx(s); \
}
428
429 #else NCPUS > 1
430
431 /*
432 * With only one CPU, we just have to indicate whether the pmap is
433 * in use.
434 */
435
436 #define PMAP_ACTIVATE_KERNEL(my_cpu) { \
437 kernel_pmap->cpus_using = TRUE; \
438 }
439
440 #define PMAP_DEACTIVATE_KERNEL(my_cpu) { \
441 kernel_pmap->cpus_using = FALSE; \
442 }
443
444 #define PMAP_ACTIVATE_USER(pmap, th, my_cpu) { \
445 register pmap_t tpmap = (pmap); \
446 \
447 set_dirbase(kvtophys(tpmap->dirbase)); \
448 if (tpmap != kernel_pmap) { \
449 tpmap->cpus_using = TRUE; \
450 } \
451 }
452
453 #define PMAP_DEACTIVATE_USER(pmap, thread, cpu) { \
454 if ((pmap) != kernel_pmap) \
455 (pmap)->cpus_using = FALSE; \
456 }
457
458 #endif NCPUS > 1
459
460 #define PMAP_CONTEXT(pmap, thread)
461
462 #define pmap_kernel() (kernel_pmap)
463 #define pmap_resident_count(pmap) ((pmap)->stats.resident_count)
464 #define pmap_phys_address(frame) ((vm_offset_t) (intel_ptob(frame)))
465 #define pmap_phys_to_frame(phys) ((int) (intel_btop(phys)))
466 #define pmap_copy(dst_pmap,src_pmap,dst_addr,len,src_addr)
467 #define pmap_attribute(pmap,addr,size,attr,value) \
468 (KERN_INVALID_ADDRESS)
469
470 #endif ASSEMBLER
471
472 #endif _PMAP_MACHINE_
/* Cache object: 94d70fe5b3502de505e6c8dab47562cb */