FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_map.h
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.h	8.9 (Berkeley) 5/17/95
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: releng/5.1/sys/vm/vm_map.h 112167 2003-03-12 23:13:16Z das $
 */

/*
 *	Virtual memory map module definitions.
 */
#ifndef _VM_MAP_
#define _VM_MAP_

#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <sys/_mutex.h>

/*
 *	Types defined:
 *
 *	vm_map_t		the high-level address map data structure.
 *	vm_map_entry_t		an entry in an address map.
 */

typedef u_int vm_eflags_t;

/*
 *	Objects which live in maps may be either VM objects, or
 *	another map (called a "sharing map") which denotes read-write
 *	sharing with other maps.
 */
union vm_map_object {
        struct vm_object *vm_object;    /* object object */
        struct vm_map *sub_map;         /* belongs to another map */
};

/*
 *	Address map entries consist of start and end addresses,
 *	a VM object (or sharing map) and offset into that object,
 *	and user-exported inheritance and protection information.
 *	Also included is control information for virtual copy operations.
 */
struct vm_map_entry {
        struct vm_map_entry *prev;      /* previous entry */
        struct vm_map_entry *next;      /* next entry */
        struct vm_map_entry *left;      /* left child in binary search tree */
        struct vm_map_entry *right;     /* right child in binary search tree */
        vm_offset_t start;              /* start address */
        vm_offset_t end;                /* end address */
        vm_offset_t avail_ssize;        /* amt can grow if this is a stack */
        union vm_map_object object;     /* object I point to */
        vm_ooffset_t offset;            /* offset into object */
        vm_eflags_t eflags;             /* map entry flags */
        /* Only in task maps: */
        vm_prot_t protection;           /* protection code */
        vm_prot_t max_protection;       /* maximum protection */
        vm_inherit_t inheritance;       /* inheritance */
        int wired_count;                /* can be paged if = 0 */
        vm_pindex_t lastr;              /* last read */
};

#define MAP_ENTRY_NOSYNC                0x0001
#define MAP_ENTRY_IS_SUB_MAP            0x0002
#define MAP_ENTRY_COW                   0x0004
#define MAP_ENTRY_NEEDS_COPY            0x0008
#define MAP_ENTRY_NOFAULT               0x0010
#define MAP_ENTRY_USER_WIRED            0x0020

#define MAP_ENTRY_BEHAV_NORMAL          0x0000  /* default behavior */
#define MAP_ENTRY_BEHAV_SEQUENTIAL      0x0040  /* expect sequential access */
#define MAP_ENTRY_BEHAV_RANDOM          0x0080  /* expect random access */
#define MAP_ENTRY_BEHAV_RESERVED        0x00C0  /* future use */

#define MAP_ENTRY_BEHAV_MASK            0x00C0

#define MAP_ENTRY_IN_TRANSITION         0x0100  /* entry being changed */
#define MAP_ENTRY_NEEDS_WAKEUP          0x0200  /* waiters in transition */
#define MAP_ENTRY_NOCOREDUMP            0x0400  /* don't include in a core */

#ifdef _KERNEL
static __inline u_char
vm_map_entry_behavior(vm_map_entry_t entry)
{
        return (entry->eflags & MAP_ENTRY_BEHAV_MASK);
}
#endif  /* _KERNEL */
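
/*
 * Illustrative sketch, not part of the original header: eflags carries both
 * independent boolean flags and the two-bit access-pattern hint extracted by
 * vm_map_entry_behavior() above.  The helper name below is hypothetical; it
 * only shows how MAP_ENTRY_IS_SUB_MAP decides which member of the object
 * union is valid for a given entry.
 */
#ifdef _KERNEL
static __inline struct vm_object *
example_entry_object(vm_map_entry_t entry)
{
        /* A submap entry refers to another vm_map, not to a VM object. */
        if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
                return (NULL);  /* entry->object.sub_map is the live member */
        return (entry->object.vm_object);
}
#endif  /* _KERNEL */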

/*
 *	A map is a set of map entries.  These map entries are
 *	organized both as a binary search tree and as a doubly-linked
 *	list.  Both structures are ordered based upon the start and
 *	end addresses contained within each map entry.  Sleator and
 *	Tarjan's top-down splay algorithm is employed to control
 *	height imbalance in the binary search tree.
 *
 *	Note: the lock structure cannot be the first element of vm_map
 *	because this can result in a running lockup between two or more
 *	system processes trying to kmem_alloc_wait() due to kmem_alloc_wait()
 *	and free tsleep/waking up 'map' and the underlying lockmgr also
 *	sleeping and waking up on 'map'.  The lockup occurs when the map fills
 *	up.  The 'exec' map, for example.
 *
 * List of locks
 *	(c)	const until freed
 */
struct vm_map {
        struct vm_map_entry header;     /* List of entries */
        struct lock lock;               /* Lock for map data */
        struct mtx system_mtx;
        int nentries;                   /* Number of entries */
        vm_size_t size;                 /* virtual size */
        u_char needs_wakeup;
        u_char system_map;              /* Am I a system map? */
        u_char infork;                  /* Am I in fork processing? */
        vm_map_entry_t root;            /* Root of a binary search tree */
        unsigned int timestamp;         /* Version number */
        vm_map_entry_t first_free;      /* First free space hint */
        pmap_t pmap;                    /* (c) Physical map */
#define min_offset      header.start    /* (c) */
#define max_offset      header.end      /* (c) */
};
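
/*
 * Illustrative sketch, not part of the original header: the map's entries
 * form a circular doubly-linked list threaded through "header", sorted by
 * address, so a full scan is the loop below.  Address lookups normally use
 * the splay tree instead (vm_map_lookup_entry(), declared later in this
 * file).  The helper name is hypothetical and assumes the caller already
 * holds the map lock.
 */
#ifdef _KERNEL
static __inline int
example_count_wired_entries(vm_map_t map)
{
        vm_map_entry_t entry;
        int count;

        count = 0;
        for (entry = map->header.next; entry != &map->header;
            entry = entry->next)
                if (entry->wired_count > 0)
                        count++;
        return (count);
}
#endif  /* _KERNEL */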

#ifdef _KERNEL
static __inline vm_offset_t
vm_map_max(vm_map_t map)
{
        return (map->max_offset);
}

static __inline vm_offset_t
vm_map_min(vm_map_t map)
{
        return (map->min_offset);
}

static __inline pmap_t
vm_map_pmap(vm_map_t map)
{
        return (map->pmap);
}
#endif  /* _KERNEL */
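
/*
 * Illustrative sketch, not part of the original header: min_offset and
 * max_offset alias header.start and header.end, so the accessors above
 * bound every address a map can contain.  Hypothetical range check:
 */
#ifdef _KERNEL
static __inline boolean_t
example_range_in_map(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
        return (start <= end && start >= vm_map_min(map) &&
            end <= vm_map_max(map));
}
#endif  /* _KERNEL */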

/*
 * Shareable process virtual address space.
 *
 * List of locks
 *	(c)	const until freed
 */
struct vmspace {
        struct vm_map vm_map;           /* VM address map */
        struct pmap vm_pmap;            /* private physical map */
        int vm_refcnt;                  /* number of references */
        struct shmmap_state *vm_shm;    /* SYS5 shared memory private data XXX */
/* we copy from vm_startcopy to the end of the structure on fork */
#define vm_startcopy vm_rssize
        segsz_t vm_rssize;              /* current resident set size in pages */
        segsz_t vm_swrss;               /* resident set size before last swap */
        segsz_t vm_tsize;               /* text size (pages) XXX */
        segsz_t vm_dsize;               /* data size (pages) XXX */
        segsz_t vm_ssize;               /* stack size (pages) */
        caddr_t vm_taddr;               /* (c) user virtual address of text */
        caddr_t vm_daddr;               /* (c) user virtual address of data */
        caddr_t vm_maxsaddr;            /* user VA at max stack growth */
#define vm_endcopy vm_exitingcnt
        int vm_exitingcnt;              /* several processes zombied in exit1 */
};
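
/*
 * Illustrative sketch, not part of the original header: vm_startcopy and
 * vm_endcopy bracket the statistics that can be copied wholesale when a
 * vmspace is duplicated for fork, while the members above vm_startcopy
 * (map, pmap, reference count, SysV shm state) must be rebuilt instead.
 * A copy over that region might look like this; the helper name is
 * hypothetical and bcopy() comes from <sys/systm.h>.
 */
#ifdef _KERNEL
static __inline void
example_vmspace_copy_stats(struct vmspace *dst, struct vmspace *src)
{
        bcopy(&src->vm_startcopy, &dst->vm_startcopy,
            (caddr_t)&dst->vm_endcopy - (caddr_t)&dst->vm_startcopy);
}
#endif  /* _KERNEL */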

#ifdef _KERNEL
static __inline pmap_t
vmspace_pmap(struct vmspace *vmspace)
{
        return &vmspace->vm_pmap;
}
#endif  /* _KERNEL */
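
/*
 * Illustrative sketch, not part of the original header: a thread reaches
 * the physical map of its own address space through its proc structure.
 * The helper name is hypothetical and assumes the 5.x curthread, td_proc,
 * and p_vmspace definitions from <sys/proc.h>.
 */
#ifdef _KERNEL
static __inline pmap_t
example_curproc_pmap(void)
{
        return (vmspace_pmap(curthread->td_proc->p_vmspace));
}
#endif  /* _KERNEL */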

#ifdef _KERNEL
/*
 *	Macros:		vm_map_lock, etc.
 *	Function:
 *		Perform locking on the data portion of a map.  Note that
 *		these macros mimic procedure calls returning void.  The
 *		semicolon is supplied by the user of these macros, not
 *		by the macros themselves.  The macros can safely be used
 *		as unbraced elements in a higher level statement.
 */

void _vm_map_lock(vm_map_t map, const char *file, int line);
void _vm_map_unlock(vm_map_t map, const char *file, int line);
void _vm_map_lock_read(vm_map_t map, const char *file, int line);
void _vm_map_unlock_read(vm_map_t map, const char *file, int line);
int _vm_map_trylock(vm_map_t map, const char *file, int line);
int _vm_map_trylock_read(vm_map_t map, const char *file, int line);
int _vm_map_lock_upgrade(vm_map_t map, const char *file, int line);
void _vm_map_lock_downgrade(vm_map_t map, const char *file, int line);
int vm_map_unlock_and_wait(vm_map_t map, boolean_t user_wait);
void vm_map_wakeup(vm_map_t map);

#define vm_map_lock(map)        _vm_map_lock(map, LOCK_FILE, LOCK_LINE)
#define vm_map_unlock(map)      _vm_map_unlock(map, LOCK_FILE, LOCK_LINE)
#define vm_map_lock_read(map)   _vm_map_lock_read(map, LOCK_FILE, LOCK_LINE)
#define vm_map_unlock_read(map) _vm_map_unlock_read(map, LOCK_FILE, LOCK_LINE)
#define vm_map_trylock(map)     _vm_map_trylock(map, LOCK_FILE, LOCK_LINE)
#define vm_map_trylock_read(map)        \
                        _vm_map_trylock_read(map, LOCK_FILE, LOCK_LINE)
#define vm_map_lock_upgrade(map)        \
                        _vm_map_lock_upgrade(map, LOCK_FILE, LOCK_LINE)
#define vm_map_lock_downgrade(map)      \
                        _vm_map_lock_downgrade(map, LOCK_FILE, LOCK_LINE)

long vmspace_resident_count(struct vmspace *vmspace);
#endif  /* _KERNEL */
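
/*
 * Illustrative sketch, not part of the original header: the usual
 * discipline is to take the exclusive lock around anything that changes
 * the entry list and the read lock around pure queries, upgrading via
 * vm_map_lock_upgrade() when a reader discovers it must modify.  The
 * hypothetical helper below samples nentries under the read lock; the
 * value may be stale as soon as the lock is dropped.
 */
#ifdef _KERNEL
static __inline int
example_map_entry_count(vm_map_t map)
{
        int n;

        vm_map_lock_read(map);
        n = map->nentries;
        vm_map_unlock_read(map);
        return (n);
}
#endif  /* _KERNEL */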


/* XXX: number of kernel maps and entries to statically allocate */
#define MAX_KMAP        10
#define MAX_KMAPENT     128
#define MAX_MAPENT      128

/*
 * Copy-on-write flags for vm_map operations
 */
#define MAP_UNUSED_01           0x0001
#define MAP_COPY_ON_WRITE       0x0002
#define MAP_NOFAULT             0x0004
#define MAP_PREFAULT            0x0008
#define MAP_PREFAULT_PARTIAL    0x0010
#define MAP_DISABLE_SYNCER      0x0020
#define MAP_DISABLE_COREDUMP    0x0100
#define MAP_PREFAULT_MADVISE    0x0200  /* from (user) madvise request */
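
/*
 * Illustrative sketch, not part of the original header: these bits are
 * OR'ed together into the "cow" argument of vm_map_insert() and
 * vm_map_find(), declared below.  A hypothetical combination for a private
 * copy-on-write mapping that is pre-faulted where cheap and excluded from
 * core dumps:
 */
#define EXAMPLE_PRIVATE_COW_FLAGS \
        (MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL | MAP_DISABLE_COREDUMP)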

/*
 * vm_fault option flags
 */
#define VM_FAULT_NORMAL 0               /* Nothing special */
#define VM_FAULT_CHANGE_WIRING 1        /* Change the wiring as appropriate */
#define VM_FAULT_USER_WIRE 2            /* Likewise, but for user purposes */
#define VM_FAULT_WIRE_MASK (VM_FAULT_CHANGE_WIRING|VM_FAULT_USER_WIRE)
#define VM_FAULT_DIRTY 8                /* Dirty the page */

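/*
 * Illustrative sketch, not part of the original header: these values form
 * the fault_flags argument of vm_fault() (declared in <vm/vm_extern.h> in
 * this release; that location is an assumption of this note).  The two
 * wiring variants are usually tested together through the mask, as in this
 * hypothetical helper:
 */
#ifdef _KERNEL
static __inline int
example_fault_is_wiring(int fault_flags)
{
        return ((fault_flags & VM_FAULT_WIRE_MASK) != 0);
}
#endif  /* _KERNEL */
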
#ifdef _KERNEL
boolean_t vm_map_check_protection (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t);
struct pmap;
vm_map_t vm_map_create (struct pmap *, vm_offset_t, vm_offset_t);
int vm_map_delete (vm_map_t, vm_offset_t, vm_offset_t);
int vm_map_find (vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t *, vm_size_t, boolean_t, vm_prot_t, vm_prot_t, int);
int vm_map_findspace (vm_map_t, vm_offset_t, vm_size_t, vm_offset_t *);
int vm_map_inherit (vm_map_t, vm_offset_t, vm_offset_t, vm_inherit_t);
void vm_map_init (struct vm_map *, vm_offset_t, vm_offset_t);
int vm_map_insert (vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t, vm_offset_t, vm_prot_t, vm_prot_t, int);
int vm_map_lookup (vm_map_t *, vm_offset_t, vm_prot_t, vm_map_entry_t *, vm_object_t *,
    vm_pindex_t *, vm_prot_t *, boolean_t *);
void vm_map_lookup_done (vm_map_t, vm_map_entry_t);
boolean_t vm_map_lookup_entry (vm_map_t, vm_offset_t, vm_map_entry_t *);
int vm_map_clean (vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t);
int vm_map_protect (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t);
int vm_map_remove (vm_map_t, vm_offset_t, vm_offset_t);
void vm_map_startup (void);
int vm_map_submap (vm_map_t, vm_offset_t, vm_offset_t, vm_map_t);
int vm_map_madvise (vm_map_t, vm_offset_t, vm_offset_t, int);
void vm_map_simplify_entry (vm_map_t, vm_map_entry_t);
void vm_init2 (void);
int vm_map_stack (vm_map_t, vm_offset_t, vm_size_t, vm_prot_t, vm_prot_t, int);
int vm_map_growstack (struct proc *p, vm_offset_t addr);
int vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
    boolean_t user_unwire);
int vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
    boolean_t user_wire);
int vmspace_swap_count (struct vmspace *vmspace);
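
/*
 * Illustrative sketch, not part of the original header: a caller typically
 * pairs vm_map_find() with vm_map_remove() to reserve and later release a
 * range of virtual address space, much as the pageable kmem allocators do.
 * The helper below is hypothetical; kernel_map is normally obtained from
 * <vm/vm_kern.h>, VM_PROT_ALL comes from <vm/vm.h>, and the reserved range
 * would be torn down again with vm_map_remove(kernel_map, addr, addr + size).
 */
extern vm_map_t kernel_map;     /* normally declared in <vm/vm_kern.h> */

static __inline int
example_reserve_kva(vm_size_t size, vm_offset_t *addrp)
{
        /* Let vm_map_find() search upward from the bottom of the map. */
        *addrp = vm_map_min(kernel_map);
        return (vm_map_find(kernel_map, NULL, 0, addrp, size, TRUE,
            VM_PROT_ALL, VM_PROT_ALL, 0));
}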
#endif  /* _KERNEL */
#endif  /* _VM_MAP_ */