/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the pagetables themselves.  This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 *	from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
 *	from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
 *	from: src/sys/i386/include/pmap.h,v 1.65.2.2 2000/11/30 01:54:42 peter
 *	JNPR: pmap.h,v 1.7.2.1 2007/09/10 07:44:12 girish
 * $FreeBSD: releng/8.0/sys/mips/include/pmap.h 195649 2009-07-12 23:31:20Z alc $
 */

#ifndef _MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

#include <machine/vmparam.h>
#include <machine/pte.h>

#define	VADDR(pdi, pti)	((vm_offset_t)(((pdi)<<PDRSHIFT)|((pti)<<PAGE_SHIFT)))
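
/*
 * Illustrative only: VADDR() composes a kernel virtual address from a
 * page-directory index and a page-table index.  Assuming 4K pages
 * (PAGE_SHIFT == 12) and PDRSHIFT == 22, VADDR(1, 3) evaluates to
 * (1 << 22) | (3 << 12) == 0x00403000, i.e. the fourth page mapped by
 * the second page table.  Both shift constants come from the machine
 * headers, so the actual values depend on the configuration.
 */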

#define	NKPT		120	/* actual number of kernel page tables */

#ifndef NKPDE
#define	NKPDE		255	/* addressable number of page tables/pde's */
#endif

#define	KPTDI		(VM_MIN_KERNEL_ADDRESS >> SEGSHIFT)
#define	NUSERPGTBLS	(VM_MAXUSER_ADDRESS >> SEGSHIFT)

#ifndef LOCORE

#include <sys/queue.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>

/*
 * Pmap stuff
 */
struct pv_entry;

struct md_page {
	int pv_list_count;
	int pv_flags;
	TAILQ_HEAD(, pv_entry) pv_list;
};

#define	PV_TABLE_MOD		0x01	/* modified */
#define	PV_TABLE_REF		0x02	/* referenced */

#define	ASID_BITS		8
#define	ASIDGEN_BITS		(32 - ASID_BITS)
#define	ASIDGEN_MASK		((1 << ASIDGEN_BITS) - 1)
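
/*
 * Worked example: with ASID_BITS == 8, ASIDGEN_BITS is 24 and
 * ASIDGEN_MASK is 0x00ffffff, so each per-CPU pm_asid entry below packs
 * an 8-bit TLB address space ID together with its 24-bit generation
 * number into a single 32-bit word.
 */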

struct pmap {
	pd_entry_t *pm_segtab;		/* KVA of segment table */
	TAILQ_HEAD(, pv_entry) pm_pvlist;	/* list of mappings in
						 * pmap */
	int pm_active;			/* active on cpus */
	struct {
		u_int32_t asid:ASID_BITS;	/* TLB address space tag */
		u_int32_t gen:ASIDGEN_BITS;	/* its generation number */
	} pm_asid[MAXSMPCPU];
	struct pmap_statistics pm_stats;	/* pmap statistics */
	struct vm_page *pm_ptphint;	/* pmap ptp hint */
	struct mtx pm_mtx;
};

typedef struct pmap *pmap_t;

#ifdef _KERNEL

pt_entry_t *pmap_pte(pmap_t, vm_offset_t);
pd_entry_t pmap_segmap(pmap_t pmap, vm_offset_t va);
vm_offset_t pmap_kextract(vm_offset_t va);

#define	vtophys(va)	pmap_kextract(((vm_offset_t) (va)))

extern struct pmap kernel_pmap_store;
#define	kernel_pmap	(&kernel_pmap_store)

#define	PMAP_LOCK(pmap)		mtx_lock(&(pmap)->pm_mtx)
#define	PMAP_LOCK_ASSERT(pmap, type)	mtx_assert(&(pmap)->pm_mtx, (type))
#define	PMAP_LOCK_DESTROY(pmap)	mtx_destroy(&(pmap)->pm_mtx)
#define	PMAP_LOCK_INIT(pmap)	mtx_init(&(pmap)->pm_mtx, "pmap", \
				    NULL, MTX_DEF)
#define	PMAP_LOCKED(pmap)	mtx_owned(&(pmap)->pm_mtx)
#define	PMAP_MTX(pmap)		(&(pmap)->pm_mtx)
#define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
#define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)

#define	PMAP_LGMEM_LOCK_INIT(sysmap)	mtx_init(&(sysmap)->lock, "pmap-lgmem", \
				    "per-cpu-map", (MTX_DEF | MTX_DUPOK))
#define	PMAP_LGMEM_LOCK(sysmap)		mtx_lock(&(sysmap)->lock)
#define	PMAP_LGMEM_UNLOCK(sysmap)	mtx_unlock(&(sysmap)->lock)
#define	PMAP_LGMEM_DESTROY(sysmap)	mtx_destroy(&(sysmap)->lock)
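
/*
 * Usage sketch (illustrative, not taken from this file): pmap updates
 * are bracketed by the per-pmap mutex, e.g.
 *
 *	PMAP_LOCK(pmap);
 *	pte = pmap_pte(pmap, va);
 *	(update the mapping)
 *	PMAP_UNLOCK(pmap);
 *
 * Helpers that require the lock to already be held can document that
 * with PMAP_LOCK_ASSERT(pmap, MA_OWNED).
 */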

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
 */
typedef struct pv_entry {
	pmap_t pv_pmap;			/* pmap where mapping lies */
	vm_offset_t pv_va;		/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry) pv_list;	/* link on the vm_page's md.pv_list */
	TAILQ_ENTRY(pv_entry) pv_plist;	/* link on the pmap's pm_pvlist */
	vm_page_t pv_ptem;		/* VM page for pte */
	boolean_t pv_wired;		/* whether this entry is wired */
} *pv_entry_t;

#if defined(DIAGNOSTIC)
#define	PMAP_DIAGNOSTIC
#endif

extern vm_offset_t phys_avail[];
extern char *ptvmmap;		/* poor name! */
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;
extern pd_entry_t *segbase;

extern vm_paddr_t mips_wired_tlb_physmem_start;
extern vm_paddr_t mips_wired_tlb_physmem_end;
extern u_int need_wired_tlb_page_pool;

#define	pmap_page_get_memattr(m)	VM_MEMATTR_DEFAULT
#define	pmap_page_is_mapped(m)	(!TAILQ_EMPTY(&(m)->md.pv_list))
#define	pmap_page_set_memattr(m, ma)	(void)0

void pmap_bootstrap(void);
void *pmap_mapdev(vm_offset_t, vm_size_t);
void pmap_unmapdev(vm_offset_t, vm_size_t);
vm_offset_t pmap_steal_memory(vm_size_t size);
void pmap_set_modified(vm_offset_t pa);
int page_is_managed(vm_offset_t pa);
void pmap_page_is_free(vm_page_t m);
void pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void pmap_kremove(vm_offset_t va);
void *pmap_kenter_temporary(vm_paddr_t pa, int i);
void pmap_kenter_temporary_free(vm_paddr_t pa);
int pmap_compute_pages_to_dump(void);
void pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte);

/*
 * floating virtual pages (FPAGES)
 *
 * These are the reserved virtual memory areas which can be
 * mapped to any physical memory.
 */
#define	FPAGES			2
#define	FPAGES_SHARED		2
#define	FSPACE			((FPAGES * MAXCPU + FPAGES_SHARED) * PAGE_SIZE)
#define	PMAP_FPAGE1		0x00	/* Used by pmap_zero_page &
					 * pmap_copy_page */
#define	PMAP_FPAGE2		0x01	/* Used by pmap_copy_page */

#define	PMAP_FPAGE3		0x00	/* Used by pmap_zero_page_idle */
#define	PMAP_FPAGE_KENTER_TEMP	0x01	/* Used by coredump */
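
/*
 * Worked example (illustrative values): with MAXCPU == 32 and
 * PAGE_SIZE == 4096, FSPACE is (2 * 32 + 2) * 4096 == 270336 bytes of
 * reserved KVA: FPAGES private slots per CPU (indexed by PMAP_FPAGE1
 * and PMAP_FPAGE2) plus FPAGES_SHARED slots shared system-wide.
 */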

struct fpage {
	vm_offset_t kva;
	u_int state;
};

struct sysmaps {
	struct mtx lock;
	struct fpage fp[FPAGES];
};

vm_offset_t pmap_map_fpage(vm_paddr_t pa, struct fpage *fp,
    boolean_t check_unmaped);
void pmap_unmap_fpage(vm_paddr_t pa, struct fpage *fp);
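
/*
 * Sketch (assumptions noted, not code from this file): a caller that
 * needs a temporary kernel mapping of an arbitrary physical page picks
 * an fpage slot from its CPU's sysmaps, maps, uses, then unmaps it:
 *
 *	struct fpage *fp = &sysmaps->fp[PMAP_FPAGE1];
 *	vm_offset_t kva;
 *
 *	PMAP_LGMEM_LOCK(sysmaps);
 *	kva = pmap_map_fpage(pa, fp, FALSE);
 *	bzero((void *)kva, PAGE_SIZE);
 *	pmap_unmap_fpage(pa, fp);
 *	PMAP_LGMEM_UNLOCK(sysmaps);
 *
 * This mirrors how the pmap_zero_page()/pmap_copy_page() helpers are
 * expected to use the floating pages; the implementation lives in pmap.c.
 */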

#endif /* _KERNEL */

#endif /* !LOCORE */

#endif /* !_MACHINE_PMAP_H_ */