sys/intel/pmap.h
/* 
 * Mach Operating System
 * Copyright (c) 1993,1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 * 
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 * 
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 * 
 * Carnegie Mellon requests users of this software to return to
 * 
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 * 
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * HISTORY
 * $Log:	pmap.h,v $
 * Revision 2.8  93/11/17  16:55:24  dbg
 * 	Added ANSI function prototypes.
 * 	[93/10/01  dbg]
 * 
 * Revision 2.7  91/12/10  16:32:23  jsb
 * 	Fixes from Intel
 * 	[91/12/10  15:51:47  jsb]
 * 
 * Revision 2.6  91/08/28  11:13:15  jsb
 * 	From Intel SSD: turn off caching for i860 for now.
 * 	[91/08/26  18:31:19  jsb]
 * 
 * Revision 2.5  91/05/14  16:30:44  mrt
 * 	Correcting copyright
 * 
 * Revision 2.4  91/05/08  12:46:54  dbg
 * 	Add volatile declarations.  Load CR3 when switching to kernel
 * 	pmap in PMAP_ACTIVATE_USER.  Fix PMAP_ACTIVATE_KERNEL.
 * 	[91/04/26  14:41:54  dbg]
 * 
 * Revision 2.3  91/02/05  17:20:49  mrt
 * 	Changed to new Mach copyright
 * 	[91/01/31  18:17:51  mrt]
 * 
 * Revision 2.2  90/12/04  14:50:35  jsb
 * 	First checkin (for intel directory).
 * 	[90/12/03  21:55:40  jsb]
 * 
 * Revision 2.4  90/08/06  15:07:23  rwd
 * 	Remove ldt (not used).
 * 	[90/07/17  dbg]
 * 
 * Revision 2.3  90/06/02  14:48:53  rpd
 * 	Added PMAP_CONTEXT definition.
 * 	[90/06/02  rpd]
 * 
 * Revision 2.2  90/05/03  15:37:16  dbg
 * 	Move page-table definitions into i386/pmap.h.
 * 	[90/04/05  dbg]
 * 
 * 	Define separate Write and User bits in pte instead of protection
 * 	code.
 * 	[90/03/25  dbg]
 * 
 * 	Load dirbase directly from pmap.  Split PMAP_ACTIVATE and
 * 	PMAP_DEACTIVATE into separate user and kernel versions.
 * 	[90/02/08  dbg]
 * 
 * Revision 1.6  89/09/25  12:25:50  rvb
 * 	seg_desc -> fakedesc
 * 	[89/09/23  rvb]
 * 
 * Revision 1.5  89/09/05  20:41:38  jsb
 * 	Added pmap_phys_to_frame definition.
 * 	[89/09/05  18:47:08  jsb]
 * 
 * Revision 1.4  89/03/09  20:03:34  rpd
 * 	More cleanup.
 * 
 * Revision 1.3  89/02/26  12:33:18  gm0w
 * 	Changes for cleanup.
 * 
 * 31-Dec-88  Robert Baron (rvb) at Carnegie-Mellon University
 *	Derived from MACH2.0 vax release.
 *
 * 17-Jan-88  David Golub (dbg) at Carnegie-Mellon University
 *	MARK_CPU_IDLE and MARK_CPU_ACTIVE must manipulate a separate
 *	cpu_idle set.  The scheduler's cpu_idle indication is NOT
 *	synchronized with these calls.  MARK_CPU_ACTIVE also needs spls.
 *
 */

/*
 *	File:	pmap.h
 *
 *	Authors:  Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Machine-dependent structures for the physical map module.
 */

#ifndef	_PMAP_MACHINE_
#define	_PMAP_MACHINE_	1

#ifndef	ASSEMBLER

#include <kern/zalloc.h>
#include <kern/lock.h>
#include <mach/machine/vm_param.h>
#include <mach/vm_statistics.h>
#include <mach/kern_return.h>

#ifdef	i386
#include <i386/proc_reg.h>
#endif

/*
 *	Define the generic in terms of the specific
 */

#if	i386
#define	INTEL_PGBYTES		I386_PGBYTES
#define	INTEL_PGSHIFT		I386_PGSHIFT
#define	intel_btop(x)		i386_btop(x)
#define	intel_ptob(x)		i386_ptob(x)
#define	intel_round_page(x)	i386_round_page(x)
#define	intel_trunc_page(x)	i386_trunc_page(x)
#define	trunc_intel_to_vm(x)	trunc_i386_to_vm(x)
#define	round_intel_to_vm(x)	round_i386_to_vm(x)
#define	vm_to_intel(x)		vm_to_i386(x)
#endif	/* i386 */
#if	i860
#define	INTEL_PGBYTES		I860_PGBYTES
#define	INTEL_PGSHIFT		I860_PGSHIFT
#define	intel_btop(x)		i860_btop(x)
#define	intel_ptob(x)		i860_ptob(x)
#define	intel_round_page(x)	i860_round_page(x)
#define	intel_trunc_page(x)	i860_trunc_page(x)
#define	trunc_intel_to_vm(x)	trunc_i860_to_vm(x)
#define	round_intel_to_vm(x)	round_i860_to_vm(x)
#define	vm_to_intel(x)		vm_to_i860(x)
#endif	/* i860 */

/*
 *	i386/i486/i860 Page Table Entry
 */

typedef unsigned int	pt_entry_t;
#define	PT_ENTRY_NULL	((pt_entry_t *) 0)

#endif	/* ASSEMBLER */

#define	INTEL_OFFMASK	0xfff	/* offset within page */
#define	PDESHIFT	22	/* page descriptor shift */
#define	PDEMASK		0x3ff	/* mask for page descriptor index */
#define	PTESHIFT	12	/* page table shift */
#define	PTEMASK		0x3ff	/* mask for page table index */

/*
 *	Convert address offset to page descriptor index
 */
#define	pdenum(a)	(((a) >> PDESHIFT) & PDEMASK)

/*
 *	Convert page descriptor index to user virtual address
 */
#define	pdetova(a)	((vm_offset_t)(a) << PDESHIFT)

/*
 *	Convert address offset to page table index
 */
#define	ptenum(a)	(((a) >> PTESHIFT) & PTEMASK)

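/*
 * Worked example (added for illustration; the address is arbitrary):
 * decomposing the 32-bit linear address 0x08049abc under the
 * two-level, 4KB-page layout above:
 *
 *	pdenum(0x08049abc) == (0x08049abc >> 22) & 0x3ff == 0x020
 *	ptenum(0x08049abc) == (0x08049abc >> 12) & 0x3ff == 0x049
 *	0x08049abc & INTEL_OFFMASK                       == 0xabc
 *
 * i.e. directory entry 0x20, page table entry 0x49, byte offset 0xabc
 * within the page.  pdetova(0x20) recovers 0x08000000, the base of the
 * 4MB region mapped by that directory entry.
 */
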
/*
 *	Number of ptes per page table, and of pdes per page directory:
 *	each occupies exactly one page (4096/4 == 1024 entries here).
 */
#define	NPTES	(intel_ptob(1)/sizeof(pt_entry_t))
#define	NPDES	(intel_ptob(1)/sizeof(pt_entry_t))

/*
 *	Hardware pte bit definitions (to be used directly on the ptes
 *	without using the bit fields).
 */

#if	i860
#define	INTEL_PTE_valid		0x00000001
#else
#define	INTEL_PTE_VALID		0x00000001
#endif
#define	INTEL_PTE_WRITE		0x00000002
#define	INTEL_PTE_USER		0x00000004
#define	INTEL_PTE_WTHRU		0x00000008
#define	INTEL_PTE_NCACHE	0x00000010
#define	INTEL_PTE_REF		0x00000020
#define	INTEL_PTE_MOD		0x00000040
#define	INTEL_PTE_WIRED		0x00000200
#define	INTEL_PTE_PFN		0xfffff000

#if	i860
#if	NOCACHE
#define	INTEL_PTE_VALID		(INTEL_PTE_valid \
				|INTEL_PTE_WTHRU \
				|INTEL_PTE_NCACHE \
				|INTEL_PTE_REF \
				|INTEL_PTE_MOD \
				)
#else	/* NOCACHE */
#define	INTEL_PTE_VALID		(INTEL_PTE_valid \
				|INTEL_PTE_REF \
				|INTEL_PTE_MOD \
				)
#endif	/* NOCACHE */
#endif	/* i860 */

#define	pa_to_pte(a)		((a) & INTEL_PTE_PFN)
#define	pte_to_pa(p)		((p) & INTEL_PTE_PFN)
#define	pte_increment_pa(p)	((p) += INTEL_OFFMASK+1)

/*
 *	Convert page table entry to kernel virtual address
 */
#define	ptetokv(a)	(phystokv(pte_to_pa(a)))

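/*
 * Example (added for illustration; the physical address is arbitrary):
 * a valid, writable, supervisor-only mapping of the physical page at
 * 0x00123000 is built as
 *
 *	pt_entry_t pte = pa_to_pte(0x00123000)
 *			 | INTEL_PTE_VALID | INTEL_PTE_WRITE;
 *
 * pte_to_pa(pte) recovers 0x00123000, and pte_increment_pa(pte)
 * advances the frame by one page, since INTEL_OFFMASK+1 == 0x1000.
 */
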
#ifndef	ASSEMBLER
typedef	volatile long	cpu_set;	/* set of CPUs - must be <= 32 */
					/* changed by other processors */

struct pmap {
	pt_entry_t	*dirbase;	/* page directory pointer register */
	int		ref_count;	/* reference count */
	decl_simple_lock_data(,lock)
					/* lock on map */
	struct pmap_statistics	stats;	/* map statistics */
	cpu_set		cpus_using;	/* bitmap of cpus using pmap */
};

typedef struct pmap	*pmap_t;

#define	PMAP_NULL	((pmap_t) 0)

#if	i860
/*#define set_dirbase(dirbase)	flush_and_ctxsw(dirbase)*/	/*akp*/
#else
#define	set_dirbase(dirbase)	(set_cr3(dirbase))
#define	flush_tlb()		(set_cr3(get_cr3()))
#endif

extern pmap_t		kernel_pmap;

#if	NCPUS > 1
/*
 *	List of cpus that are actively using mapped memory.  Any
 *	pmap update operation must wait for all cpus in this list.
 *	Update operations must still be queued to cpus not in this
 *	list.
 */
extern cpu_set		cpus_active;

/*
 *	List of cpus that are idle, but still operating, and will want
 *	to see any kernel pmap updates when they become active.
 */
extern cpu_set		cpus_idle;

/*
 *	Quick test for pmap update requests.
 */
extern volatile boolean_t	cpu_update_needed[NCPUS];

/*
 *	External declarations for PMAP_ACTIVATE.
 */

extern void		process_pmap_updates(pmap_t);
extern void		pmap_update_interrupt(void);

#endif	/* NCPUS > 1 */

/*
 *	Machine dependent routines that are used only for i386/i486/i860.
 */

extern pt_entry_t	*pmap_pte(
	pmap_t		pmap,
	vm_offset_t	addr);

extern void		pmap_pageable(
	pmap_t		pmap,
	vm_offset_t	start,
	vm_offset_t	end,
	boolean_t	pageable);

extern vm_offset_t	kvtophys(vm_offset_t);

/*
 *	Macros for speed.
 */

#if	NCPUS > 1

/*
 *	For multiple CPUS, PMAP_ACTIVATE and PMAP_DEACTIVATE must manage
 *	fields to control TLB invalidation on other CPUS.
 */

#define	PMAP_ACTIVATE_KERNEL(my_cpu)	{				\
									\
	/*								\
	 *	Let pmap updates proceed while we wait for this pmap.	\
	 */								\
	i_bit_clear((my_cpu), &cpus_active);				\
									\
	/*								\
	 *	Lock the pmap to put this cpu in its active set.	\
	 *	Wait for updates here.					\
	 */								\
	simple_lock(&kernel_pmap->lock);				\
									\
	/*								\
	 *	Process invalidate requests for the kernel pmap.	\
	 */								\
	if (cpu_update_needed[(my_cpu)])				\
	    process_pmap_updates(kernel_pmap);				\
									\
	/*								\
	 *	Mark that this cpu is using the pmap.			\
	 */								\
	i_bit_set((my_cpu), &kernel_pmap->cpus_using);			\
									\
	/*								\
	 *	Mark this cpu active - IPL will be lowered by		\
	 *	load_context().						\
	 */								\
	i_bit_set((my_cpu), &cpus_active);				\
									\
	simple_unlock(&kernel_pmap->lock);				\
}

#define	PMAP_DEACTIVATE_KERNEL(my_cpu)	{				\
	/*								\
	 *	Mark pmap no longer in use by this cpu even if		\
	 *	pmap is locked against updates.				\
	 */								\
	i_bit_clear((my_cpu), &kernel_pmap->cpus_using);		\
}

#define	PMAP_ACTIVATE_USER(pmap, th, my_cpu)	{			\
	register pmap_t		tpmap = (pmap);				\
									\
	if (tpmap == kernel_pmap) {					\
	    /*								\
	     *	If this is the kernel pmap, switch to its page tables.	\
	     */								\
	    set_dirbase(kvtophys((vm_offset_t)tpmap->dirbase));		\
	}								\
	else {								\
	    /*								\
	     *	Let pmap updates proceed while we wait for this pmap.	\
	     */								\
	    i_bit_clear((my_cpu), &cpus_active);			\
									\
	    /*								\
	     *	Lock the pmap to put this cpu in its active set.	\
	     *	Wait for updates here.					\
	     */								\
	    simple_lock(&tpmap->lock);					\
									\
	    /*								\
	     *	No need to invalidate the TLB - the entire user pmap	\
	     *	will be invalidated by reloading dirbase.		\
	     */								\
	    set_dirbase(kvtophys((vm_offset_t)tpmap->dirbase));		\
									\
	    /*								\
	     *	Mark that this cpu is using the pmap.			\
	     */								\
	    i_bit_set((my_cpu), &tpmap->cpus_using);			\
									\
	    /*								\
	     *	Mark this cpu active - IPL will be lowered by		\
	     *	load_context().						\
	     */								\
	    i_bit_set((my_cpu), &cpus_active);				\
									\
	    simple_unlock(&tpmap->lock);				\
	}								\
}

#define	PMAP_DEACTIVATE_USER(pmap, thread, my_cpu)	{		\
	register pmap_t		tpmap = (pmap);				\
									\
	/*								\
	 *	Do nothing if this is the kernel pmap.			\
	 */								\
	if (tpmap != kernel_pmap) {					\
	    /*								\
	     *	Mark pmap no longer in use by this cpu even if		\
	     *	pmap is locked against updates.				\
	     */								\
	    i_bit_clear((my_cpu), &tpmap->cpus_using);			\
	}								\
}

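/*
 * Usage sketch (added for illustration, not from the original source;
 * `old' and `new' stand for the outgoing and incoming threads):
 *
 *	PMAP_DEACTIVATE_USER(vm_map_pmap(old->task->map), old, mycpu);
 *	PMAP_ACTIVATE_USER(vm_map_pmap(new->task->map), new, mycpu);
 *
 * Deactivation drops this cpu from the old pmap's cpus_using set, so
 * TLB shootdowns stop waiting on it; activation reloads dirbase (CR3
 * on i386), which implicitly flushes the stale user TLB entries.
 */
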
#define	MARK_CPU_IDLE(my_cpu)	{					\
	/*								\
	 *	Mark this cpu idle, and remove it from the active set,	\
	 *	since it is not actively using any pmap.  Signal_cpus	\
	 *	will notice that it is idle, and avoid signaling it,	\
	 *	but will queue the update request for when the cpu	\
	 *	becomes active.						\
	 */								\
	int	s = splvm();						\
	i_bit_set((my_cpu), &cpus_idle);				\
	i_bit_clear((my_cpu), &cpus_active);				\
	splx(s);							\
}

#define	MARK_CPU_ACTIVE(my_cpu)	{					\
									\
	int	s = splvm();						\
	/*								\
	 *	If a kernel_pmap update was requested while this cpu	\
	 *	was idle, process it as if we got the interrupt.	\
	 *	Before doing so, remove this cpu from the idle set.	\
	 *	Since we do not grab any pmap locks while we flush	\
	 *	our TLB, another cpu may start an update operation	\
	 *	before we finish.  Removing this cpu from the idle	\
	 *	set assures that we will receive another update		\
	 *	interrupt if this happens.				\
	 */								\
	i_bit_clear((my_cpu), &cpus_idle);				\
									\
	if (cpu_update_needed[(my_cpu)])				\
	    pmap_update_interrupt();					\
									\
	/*								\
	 *	Mark that this cpu is now active.			\
	 */								\
	i_bit_set((my_cpu), &cpus_active);				\
	splx(s);							\
}

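/*
 * Usage sketch (added for illustration; no_runnable_threads() is a
 * hypothetical stand-in for the scheduler's idle test): the idle loop
 * brackets its wait with these macros so that pmap update shootdowns
 * never block waiting on a cpu that is merely idling:
 *
 *	MARK_CPU_IDLE(mycpu);
 *	while (no_runnable_threads(mycpu))
 *		continue;
 *	MARK_CPU_ACTIVE(mycpu);
 *
 * MARK_CPU_ACTIVE replays any kernel pmap update that was queued
 * while this cpu sat in the idle set.
 */
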
#else	/* NCPUS > 1 */

/*
 *	With only one CPU, we just have to indicate whether the pmap is
 *	in use.
 */

#define	PMAP_ACTIVATE_KERNEL(my_cpu)	{				\
	kernel_pmap->cpus_using = TRUE;					\
}

#define	PMAP_DEACTIVATE_KERNEL(my_cpu)	{				\
	kernel_pmap->cpus_using = FALSE;				\
}

#define	PMAP_ACTIVATE_USER(pmap, th, my_cpu)	{			\
	register pmap_t		tpmap = (pmap);				\
									\
	set_dirbase(kvtophys((vm_offset_t)tpmap->dirbase));		\
	if (tpmap != kernel_pmap) {					\
	    tpmap->cpus_using = TRUE;					\
	}								\
}

#define	PMAP_DEACTIVATE_USER(pmap, thread, cpu)	{			\
	if ((pmap) != kernel_pmap)					\
	    (pmap)->cpus_using = FALSE;					\
}

#endif	/* NCPUS > 1 */

#define	PMAP_CONTEXT(pmap, thread)

#define	pmap_kernel()			(kernel_pmap)
#define	pmap_resident_count(pmap)	((pmap)->stats.resident_count)
#define	pmap_phys_address(frame)	((vm_offset_t) (intel_ptob(frame)))
#define	pmap_phys_to_frame(phys)	((int) (intel_btop(phys)))
#define	pmap_copy(dst_pmap,src_pmap,dst_addr,len,src_addr)
#define	pmap_attribute(pmap,addr,size,attr,value) \
					(KERN_INVALID_ADDRESS)

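/*
 * Example (added for illustration; the frame number is arbitrary):
 * with 4KB pages, pmap_phys_address(0x123) == 0x123000 and
 * pmap_phys_to_frame(0x123abc) == 0x123, so the two macros are
 * inverses up to the page offset.  pmap_copy and PMAP_CONTEXT
 * deliberately expand to nothing on this architecture.
 */
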
#endif	/* ASSEMBLER */

#endif	/* _PMAP_MACHINE_ */