sys/vm/vm_map.h
/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California. All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)vm_map.h 8.9 (Berkeley) 5/17/95
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD$
 */

/*
 * Virtual memory map module definitions.
 */
#ifndef _VM_MAP_
#define _VM_MAP_

#include <sys/lock.h>
#include <sys/sx.h>
#include <sys/_mutex.h>

/*
 * Types defined:
 *
 *	vm_map_t	the high-level address map data structure.
 *	vm_map_entry_t	an entry in an address map.
 */

typedef u_char vm_flags_t;
typedef u_int vm_eflags_t;

/*
 * Objects which live in maps may be either VM objects, or
 * another map (called a "sharing map") which denotes read-write
 * sharing with other maps.
 */
union vm_map_object {
	struct vm_object *vm_object;	/* VM object */
	struct vm_map *sub_map;		/* belongs to another map */
};

/*
 * Address map entries consist of start and end addresses,
 * a VM object (or sharing map) and offset into that object,
 * and user-exported inheritance and protection information.
 * Also included is control information for virtual copy operations.
 */
struct vm_map_entry {
	struct vm_map_entry *prev;	/* previous entry */
	struct vm_map_entry *next;	/* next entry */
	struct vm_map_entry *left;	/* left child in binary search tree */
	struct vm_map_entry *right;	/* right child in binary search tree */
	vm_offset_t start;		/* start address */
	vm_offset_t end;		/* end address */
	vm_offset_t next_read;		/* vaddr of the next sequential read */
	vm_size_t pad_adj_free;		/* pad */
	vm_size_t max_free;		/* max free space in subtree */
	union vm_map_object object;	/* object I point to */
	vm_ooffset_t offset;		/* offset into object */
	vm_eflags_t eflags;		/* map entry flags */
	vm_prot_t protection;		/* protection code */
	vm_prot_t max_protection;	/* maximum protection */
	vm_inherit_t inheritance;	/* inheritance */
	uint8_t read_ahead;		/* pages in the read-ahead window */
	int wired_count;		/* can be paged if = 0 */
	struct ucred *cred;		/* tmp storage for creator ref */
	struct thread *wiring_thread;
};

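/*
 * Example: translating a virtual address inside an entry to an offset in
 * the backing object.  A minimal sketch (compiled out) of a hypothetical
 * helper, assuming the entry maps a plain VM object rather than a sub map
 * and that "va" lies in [start, end); the object offset advances linearly
 * with the mapped address.
 */
#if 0
static vm_ooffset_t
vm_map_entry_object_offset(vm_map_entry_t entry, vm_offset_t va)
{
	/* "offset" is where entry->start lands in the object. */
	return (entry->offset + (va - entry->start));
}
#endif
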
#define MAP_ENTRY_NOSYNC		0x00000001
#define MAP_ENTRY_IS_SUB_MAP		0x00000002
#define MAP_ENTRY_COW			0x00000004
#define MAP_ENTRY_NEEDS_COPY		0x00000008
#define MAP_ENTRY_NOFAULT		0x00000010
#define MAP_ENTRY_USER_WIRED		0x00000020

#define MAP_ENTRY_BEHAV_NORMAL		0x00000000	/* default behavior */
#define MAP_ENTRY_BEHAV_SEQUENTIAL	0x00000040	/* expect sequential access */
#define MAP_ENTRY_BEHAV_RANDOM		0x00000080	/* expect random access */
#define MAP_ENTRY_BEHAV_RESERVED	0x000000c0	/* future use */
#define MAP_ENTRY_BEHAV_MASK		0x000000c0
#define MAP_ENTRY_IN_TRANSITION		0x00000100	/* entry being changed */
#define MAP_ENTRY_NEEDS_WAKEUP		0x00000200	/* waiters in transition */
#define MAP_ENTRY_NOCOREDUMP		0x00000400	/* don't include in a core */
#define MAP_ENTRY_VN_EXEC		0x00000800	/* text vnode mapping */
#define MAP_ENTRY_GROWS_DOWN		0x00001000	/* top-down stacks */
#define MAP_ENTRY_GROWS_UP		0x00002000	/* bottom-up stacks */

#define MAP_ENTRY_WIRE_SKIPPED		0x00004000
#define MAP_ENTRY_WRITECNT		0x00008000	/* tracked writeable mapping */
#define MAP_ENTRY_GUARD			0x00010000
#define MAP_ENTRY_STACK_GAP_DN		0x00020000
#define MAP_ENTRY_STACK_GAP_UP		0x00040000
#define MAP_ENTRY_HEADER		0x00080000

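/*
 * Example: reading an entry's eflags.  A minimal sketch (compiled out) of a
 * hypothetical helper showing how MAP_ENTRY_IS_SUB_MAP selects the active
 * union member and how the behavior bits are isolated by
 * MAP_ENTRY_BEHAV_MASK; assumes the caller holds the map lock.
 */
#if 0
static bool
entry_expects_sequential(vm_map_entry_t entry)
{
	/* A sub map entry carries a map, not a VM object, in its union. */
	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
		return (false);		/* entry->object.sub_map is active */
	return ((entry->eflags & MAP_ENTRY_BEHAV_MASK) ==
	    MAP_ENTRY_BEHAV_SEQUENTIAL);
}
#endif
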
#ifdef _KERNEL
static __inline u_char
vm_map_entry_behavior(vm_map_entry_t entry)
{
	return (entry->eflags & MAP_ENTRY_BEHAV_MASK);
}

static __inline int
vm_map_entry_user_wired_count(vm_map_entry_t entry)
{
	if (entry->eflags & MAP_ENTRY_USER_WIRED)
		return (1);
	return (0);
}

static __inline int
vm_map_entry_system_wired_count(vm_map_entry_t entry)
{
	return (entry->wired_count - vm_map_entry_user_wired_count(entry));
}
#endif	/* _KERNEL */

/*
 * A map is a set of map entries.  These map entries are
 * organized both as a binary search tree and as a doubly-linked
 * list.  Both structures are ordered based upon the start and
 * end addresses contained within each map entry.
 *
 * Sleator and Tarjan's top-down splay algorithm is employed to
 * control height imbalance in the binary search tree.
 *
 * The map's min offset value is stored in map->header.end, and
 * its max offset value is stored in map->header.start.  These
 * values act as sentinels for any forward or backward address
 * scan of the list.  The map header has a special value for the
 * eflags field, MAP_ENTRY_HEADER, that is set initially, is
 * never changed, and prevents an eflags match of the header
 * with any other map entry.
 *
 * List of locks
 * (c) const until freed
 */
struct vm_map {
	struct vm_map_entry header;	/* List of entries */
	struct sx lock;			/* Lock for map data */
	struct mtx system_mtx;
	int nentries;			/* Number of entries */
	vm_size_t size;			/* virtual size */
	u_int timestamp;		/* Version number */
	u_char needs_wakeup;
	u_char system_map;		/* (c) Am I a system map? */
	vm_flags_t flags;		/* flags for this vm_map */
	vm_map_entry_t root;		/* Root of a binary search tree */
	pmap_t pmap;			/* (c) Physical map */
	vm_offset_t anon_loc;
	int busy;
};

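/*
 * Example: walking every entry in a map.  A minimal sketch (compiled out)
 * using the header sentinel described above; assumes the caller holds at
 * least a read lock on the map.
 */
#if 0
static int
count_map_entries(vm_map_t map)
{
	vm_map_entry_t entry;
	int n;

	n = 0;
	/* The list is circular; reaching &map->header ends the scan. */
	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next)
		n++;
	return (n);	/* matches map->nentries */
}
#endif
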
/*
 * vm_flags_t values
 */
#define MAP_WIREFUTURE		0x01	/* wire all future pages */
#define MAP_BUSY_WAKEUP		0x02
#define MAP_IS_SUB_MAP		0x04	/* has parent */
#define MAP_ASLR		0x08	/* ASLR is enabled */
#define MAP_ASLR_IGNSTART	0x10

#ifdef _KERNEL
#if defined(KLD_MODULE) && !defined(KLD_TIED)
#define vm_map_max(map)		vm_map_max_KBI((map))
#define vm_map_min(map)		vm_map_min_KBI((map))
#define vm_map_pmap(map)	vm_map_pmap_KBI((map))
#define vm_map_range_valid(map, start, end) \
	vm_map_range_valid_KBI((map), (start), (end))
#else
static __inline vm_offset_t
vm_map_max(const struct vm_map *map)
{

	return (map->header.start);
}

static __inline vm_offset_t
vm_map_min(const struct vm_map *map)
{

	return (map->header.end);
}

static __inline pmap_t
vm_map_pmap(vm_map_t map)
{
	return (map->pmap);
}

static __inline void
vm_map_modflags(vm_map_t map, vm_flags_t set, vm_flags_t clear)
{
	map->flags = (map->flags | set) & ~clear;
}

static inline bool
vm_map_range_valid(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	if (end < start)
		return (false);
	if (start < vm_map_min(map) || end > vm_map_max(map))
		return (false);
	return (true);
}

#endif /* KLD_MODULE */
#endif /* _KERNEL */

/*
 * Shareable process virtual address space.
 *
 * List of locks
 * (c) const until freed
 */
struct vmspace {
	struct vm_map vm_map;		/* VM address map */
	struct shmmap_state *vm_shm;	/* SYS5 shared memory private data XXX */
	segsz_t vm_swrss;		/* resident set size before last swap */
	segsz_t vm_tsize;		/* text size (pages) XXX */
	segsz_t vm_dsize;		/* data size (pages) XXX */
	segsz_t vm_ssize;		/* stack size (pages) */
	caddr_t vm_taddr;		/* (c) user virtual address of text */
	caddr_t vm_daddr;		/* (c) user virtual address of data */
	caddr_t vm_maxsaddr;		/* user VA at max stack growth */
	volatile int vm_refcnt;		/* number of references */
	/*
	 * Keep the PMAP last, so that CPU-specific variations of that
	 * structure on a single architecture don't result in offset
	 * variations of the machine-independent fields in the vmspace.
	 */
	struct pmap vm_pmap;		/* private physical map */
};

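/*
 * Example: computing the end of the data segment from a vmspace.  A
 * minimal sketch (compiled out) of a hypothetical helper; vm_dsize is
 * kept in pages, so ctob() converts it to bytes before the base address
 * is added.
 */
#if 0
static vm_offset_t
vmspace_data_end(struct vmspace *vm)
{
	return ((vm_offset_t)vm->vm_daddr + ctob(vm->vm_dsize));
}
#endif
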
#ifdef _KERNEL
static __inline pmap_t
vmspace_pmap(struct vmspace *vmspace)
{
	return (&vmspace->vm_pmap);
}
#endif /* _KERNEL */

#ifdef _KERNEL
/*
 * Macros: vm_map_lock, etc.
 * Function:
 *	Perform locking on the data portion of a map.  Note that
 *	these macros mimic procedure calls returning void.  The
 *	semicolon is supplied by the user of these macros, not
 *	by the macros themselves.  The macros can safely be used
 *	as unbraced elements in a higher level statement.
 */

void _vm_map_lock(vm_map_t map, const char *file, int line);
void _vm_map_unlock(vm_map_t map, const char *file, int line);
int _vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line);
void _vm_map_lock_read(vm_map_t map, const char *file, int line);
void _vm_map_unlock_read(vm_map_t map, const char *file, int line);
int _vm_map_trylock(vm_map_t map, const char *file, int line);
int _vm_map_trylock_read(vm_map_t map, const char *file, int line);
int _vm_map_lock_upgrade(vm_map_t map, const char *file, int line);
void _vm_map_lock_downgrade(vm_map_t map, const char *file, int line);
int vm_map_locked(vm_map_t map);
void vm_map_wakeup(vm_map_t map);
void vm_map_busy(vm_map_t map);
void vm_map_unbusy(vm_map_t map);
void vm_map_wait_busy(vm_map_t map);
vm_offset_t vm_map_max_KBI(const struct vm_map *map);
vm_offset_t vm_map_min_KBI(const struct vm_map *map);
pmap_t vm_map_pmap_KBI(vm_map_t map);
bool vm_map_range_valid_KBI(vm_map_t map, vm_offset_t start, vm_offset_t end);

#define vm_map_lock(map)	_vm_map_lock(map, LOCK_FILE, LOCK_LINE)
#define vm_map_unlock(map)	_vm_map_unlock(map, LOCK_FILE, LOCK_LINE)
#define vm_map_unlock_and_wait(map, timo) \
	_vm_map_unlock_and_wait(map, timo, LOCK_FILE, LOCK_LINE)
#define vm_map_lock_read(map)	_vm_map_lock_read(map, LOCK_FILE, LOCK_LINE)
#define vm_map_unlock_read(map)	_vm_map_unlock_read(map, LOCK_FILE, LOCK_LINE)
#define vm_map_trylock(map)	_vm_map_trylock(map, LOCK_FILE, LOCK_LINE)
#define vm_map_trylock_read(map) \
	_vm_map_trylock_read(map, LOCK_FILE, LOCK_LINE)
#define vm_map_lock_upgrade(map) \
	_vm_map_lock_upgrade(map, LOCK_FILE, LOCK_LINE)
#define vm_map_lock_downgrade(map) \
	_vm_map_lock_downgrade(map, LOCK_FILE, LOCK_LINE)

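/*
 * Example: sleeping on a map entry that is being changed.  A minimal
 * sketch (compiled out) of a hypothetical helper built around
 * vm_map_unlock_and_wait(): record the map's version number, sleep, and
 * after relocking compare versions to decide whether lookups must be
 * redone.
 */
#if 0
static void
wait_for_entry(vm_map_t map, vm_map_entry_t entry)
{
	u_int last_timestamp;

	/* Ask whoever clears MAP_ENTRY_IN_TRANSITION to wake us. */
	entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
	last_timestamp = map->timestamp;
	(void)vm_map_unlock_and_wait(map, 0);
	vm_map_lock(map);
	if (last_timestamp != map->timestamp) {
		/* The map changed while we slept; redo the lookup. */
	}
}
#endif
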
long vmspace_resident_count(struct vmspace *vmspace);
#endif /* _KERNEL */


/* XXX: number of kernel maps to statically allocate */
#define MAX_KMAP	10

/*
 * Copy-on-write flags for vm_map operations
 */
#define MAP_INHERIT_SHARE	0x00000001
#define MAP_COPY_ON_WRITE	0x00000002
#define MAP_NOFAULT		0x00000004
#define MAP_PREFAULT		0x00000008
#define MAP_PREFAULT_PARTIAL	0x00000010
#define MAP_DISABLE_SYNCER	0x00000020
#define MAP_CHECK_EXCL		0x00000040
#define MAP_CREATE_GUARD	0x00000080
#define MAP_DISABLE_COREDUMP	0x00000100
#define MAP_PREFAULT_MADVISE	0x00000200	/* from (user) madvise request */
#define MAP_WRITECOUNT		0x00000400
#define MAP_REMAP		0x00000800
#define MAP_STACK_GROWS_DOWN	0x00001000
#define MAP_STACK_GROWS_UP	0x00002000
#define MAP_ACC_CHARGED		0x00004000
#define MAP_ACC_NO_CHARGE	0x00008000
#define MAP_CREATE_STACK_GAP_UP	0x00010000
#define MAP_CREATE_STACK_GAP_DN	0x00020000
#define MAP_VN_EXEC		0x00040000

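/*
 * Example: requesting copy-on-write semantics from vm_map_insert()
 * (declared below).  A minimal sketch (compiled out) of a hypothetical
 * wrapper; the flags above travel in the final "cow" argument, and the
 * caller is assumed to hold the map lock.
 */
#if 0
static int
map_object_cow(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_offset_t end)
{
	return (vm_map_insert(map, object, offset, start, end,
	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_ALL,
	    MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL));
}
#endif
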
/*
 * vm_fault option flags
 */
#define VM_FAULT_NORMAL	0	/* Nothing special */
#define VM_FAULT_WIRE	1	/* Wire the mapped page */
#define VM_FAULT_DIRTY	2	/* Dirty the page; use w/VM_PROT_COPY */

/*
 * Initially, mappings are slightly sequential.  The maximum window size must
 * account for the map entry's "read_ahead" field being defined as a uint8_t.
 */
#define VM_FAULT_READ_AHEAD_MIN		7
#define VM_FAULT_READ_AHEAD_INIT	15
#define VM_FAULT_READ_AHEAD_MAX		min(atop(MAXPHYS) - 1, UINT8_MAX)

/*
 * The following "find_space" options are supported by vm_map_find().
 *
 * For VMFS_ALIGNED_SPACE, the desired alignment is passed as the macro
 * argument, expressed as log base 2 of the alignment.
 */
#define VMFS_NO_SPACE		0	/* don't find; use the given range */
#define VMFS_ANY_SPACE		1	/* find a range with any alignment */
#define VMFS_OPTIMAL_SPACE	2	/* find a range with optimal alignment */
#define VMFS_SUPER_SPACE	3	/* find a superpage-aligned range */
#define VMFS_ALIGNED_SPACE(x)	((x) << 8) /* find a range with fixed alignment */

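/*
 * Example: asking vm_map_find() (declared below) for an aligned range.  A
 * minimal sketch (compiled out) of a hypothetical wrapper; the argument to
 * VMFS_ALIGNED_SPACE() is a log2 value, so 16 requests a 64KB boundary,
 * and *addr is taken as the starting hint.
 */
#if 0
static int
find_aligned_range(vm_map_t map, vm_offset_t *addr, vm_size_t length)
{
	return (vm_map_find(map, NULL, 0, addr, length, vm_map_max(map),
	    VMFS_ALIGNED_SPACE(16), VM_PROT_ALL, VM_PROT_ALL, 0));
}
#endif
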
/*
 * vm_map_wire and vm_map_unwire option flags
 */
#define VM_MAP_WIRE_SYSTEM	0	/* wiring in a kernel map */
#define VM_MAP_WIRE_USER	1	/* wiring in a user map */

#define VM_MAP_WIRE_NOHOLES	0	/* region must not have holes */
#define VM_MAP_WIRE_HOLESOK	2	/* region may have holes */

#define VM_MAP_WIRE_WRITE	4	/* Validate writable. */

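/*
 * Example: wiring a user range while tolerating unmapped gaps.  A minimal
 * sketch (compiled out) of a hypothetical wrapper; the system/user choice
 * and the holes policy are OR'ed together into the flags argument of
 * vm_map_wire(), declared below.
 */
#if 0
static int
wire_user_range(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	return (vm_map_wire(map, start, end,
	    VM_MAP_WIRE_USER | VM_MAP_WIRE_HOLESOK));
}
#endif
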
#ifdef _KERNEL
boolean_t vm_map_check_protection (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t);
vm_map_t vm_map_create(pmap_t, vm_offset_t, vm_offset_t);
int vm_map_delete(vm_map_t, vm_offset_t, vm_offset_t);
int vm_map_find(vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t *, vm_size_t,
    vm_offset_t, int, vm_prot_t, vm_prot_t, int);
int vm_map_find_min(vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t *,
    vm_size_t, vm_offset_t, vm_offset_t, int, vm_prot_t, vm_prot_t, int);
int vm_map_find_aligned(vm_map_t map, vm_offset_t *addr, vm_size_t length,
    vm_offset_t max_addr, vm_offset_t alignment);
int vm_map_fixed(vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t, vm_size_t,
    vm_prot_t, vm_prot_t, int);
vm_offset_t vm_map_findspace(vm_map_t, vm_offset_t, vm_size_t);
int vm_map_inherit (vm_map_t, vm_offset_t, vm_offset_t, vm_inherit_t);
void vm_map_init(vm_map_t, pmap_t, vm_offset_t, vm_offset_t);
int vm_map_insert (vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t, vm_offset_t, vm_prot_t, vm_prot_t, int);
int vm_map_lookup (vm_map_t *, vm_offset_t, vm_prot_t, vm_map_entry_t *, vm_object_t *,
    vm_pindex_t *, vm_prot_t *, boolean_t *);
int vm_map_lookup_locked(vm_map_t *, vm_offset_t, vm_prot_t, vm_map_entry_t *, vm_object_t *,
    vm_pindex_t *, vm_prot_t *, boolean_t *);
void vm_map_lookup_done (vm_map_t, vm_map_entry_t);
boolean_t vm_map_lookup_entry (vm_map_t, vm_offset_t, vm_map_entry_t *);
int vm_map_protect (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t);
int vm_map_remove (vm_map_t, vm_offset_t, vm_offset_t);
void vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry);
void vm_map_startup (void);
int vm_map_submap (vm_map_t, vm_offset_t, vm_offset_t, vm_map_t);
int vm_map_sync(vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t);
int vm_map_madvise (vm_map_t, vm_offset_t, vm_offset_t, int);
int vm_map_stack (vm_map_t, vm_offset_t, vm_size_t, vm_prot_t, vm_prot_t, int);
int vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
    int flags);
int vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags);
int vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm_offset_t end,
    int flags);
long vmspace_swap_count(struct vmspace *vmspace);
void vm_map_entry_set_vnode_text(vm_map_entry_t entry, bool add);
#endif /* _KERNEL */
#endif /* _VM_MAP_ */