/*	$OpenBSD: uvm_map.h,v 1.83 2023/01/31 15:18:55 deraadt Exp $	*/
/*	$NetBSD: uvm_map.h,v 1.24 2001/02/18 21:19:08 chs Exp $	*/

/*
 * Copyright (c) 2011 Ariane van der Steldt <ariane@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.h	8.3 (Berkeley) 3/15/94
 * from: Id: uvm_map.h,v 1.1.2.3 1998/02/07 01:16:55 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#ifndef _UVM_UVM_MAP_H_
#define _UVM_UVM_MAP_H_

#include <sys/mutex.h>
#include <sys/rwlock.h>

#ifdef _KERNEL

/*
 * UVM_MAP_CLIP_START: ensure that the entry begins at or after
 * the starting address; if it doesn't, we split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_START(_map, _entry, _addr)				\
	do {								\
		KASSERT((_entry)->end + (_entry)->fspace > (_addr));	\
		if ((_entry)->start < (_addr))				\
			uvm_map_clip_start((_map), (_entry), (_addr));	\
	} while (0)

/*
 * UVM_MAP_CLIP_END: ensure that the entry ends at or before
 * the ending address; if it doesn't, we split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_END(_map, _entry, _addr)				\
	do {								\
		KASSERT((_entry)->start < (_addr));			\
		if ((_entry)->end > (_addr))				\
			uvm_map_clip_end((_map), (_entry), (_addr));	\
	} while (0)
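
/*
 * Example (illustrative sketch, not part of the original header):
 * clip the entry covering [start, end) so that entry boundaries land
 * exactly at start and end, assuming a single entry spans the range.
 * uvm_map_lookup_entry() is declared later in this header; the map
 * must be locked, as noted above.
 *
 *	vm_map_lock(map);
 *	if (uvm_map_lookup_entry(map, start, &entry)) {
 *		UVM_MAP_CLIP_START(map, entry, start);
 *		UVM_MAP_CLIP_END(map, entry, end);
 *	}
 *	vm_map_unlock(map);
 */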

/*
 * extract flags
 */
#define UVM_EXTRACT_FIXPROT	0x8	/* set prot to maxprot as we go */

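/*
 * Example (sketch with hypothetical variable names): extract a range
 * from a source map into a fresh kernel mapping, raising the new
 * mapping's protection to maxprot via UVM_EXTRACT_FIXPROT.
 * uvm_map_extract() is declared later in this header.
 *
 *	vaddr_t dstaddr;
 *	int error;
 *
 *	error = uvm_map_extract(srcmap, srcaddr, len, &dstaddr,
 *	    UVM_EXTRACT_FIXPROT);
 *	if (error == 0)
 *		... dstaddr now maps the extracted range ...
 */
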
#endif /* _KERNEL */

#include <uvm/uvm_anon.h>

/*
 * Address map entries consist of start and end addresses,
 * a VM object (or sharing map) and offset into that object,
 * and user-exported inheritance and protection information.
 * Also included is control information for virtual copy operations.
 */
struct vm_map_entry {
	union {
		RBT_ENTRY(vm_map_entry)	addr_entry; /* address tree */
		SLIST_ENTRY(vm_map_entry) addr_kentry;
	} daddrs;

	union {
		RBT_ENTRY(vm_map_entry)	rbtree;	/* Link freespace tree. */
		TAILQ_ENTRY(vm_map_entry) tailq;/* Link freespace queue. */
		TAILQ_ENTRY(vm_map_entry) deadq;/* dead entry queue */
	} dfree;

#define uvm_map_entry_start_copy start
	vaddr_t			start;		/* start address */
	vaddr_t			end;		/* end address */

	vsize_t			guard;		/* bytes in guard */
	vsize_t			fspace;		/* free space */

	union {
		struct uvm_object *uvm_obj;	/* uvm object */
		struct vm_map	*sub_map;	/* belongs to another map */
	} object;				/* object I point to */
	voff_t			offset;		/* offset into object */
	struct vm_aref		aref;		/* anonymous overlay */
	int			etype;		/* entry type */
	vm_prot_t		protection;	/* protection code */
	vm_prot_t		max_protection;	/* maximum protection */
	vm_inherit_t		inheritance;	/* inheritance */
	int			wired_count;	/* can be paged if == 0 */
	int			advice;		/* madvise advice */
#define uvm_map_entry_stop_copy flags
	u_int8_t		flags;		/* flags */

#define	UVM_MAP_STATIC		0x01		/* static map entry */
#define	UVM_MAP_KMEM		0x02		/* from kmem entry pool */

	vsize_t			fspace_augment;	/* max(fspace) in subtree */
};

#define VM_MAPENT_ISWIRED(entry)	((entry)->wired_count != 0)
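
/*
 * Example (illustrative sketch): wired entries must stay resident, so
 * a hypothetical pageout-style walk over a map would skip them.
 * RBT_FOREACH is from <sys/tree.h>; the map is locked by the caller.
 *
 *	RBT_FOREACH(entry, uvm_map_addr, &map->addr) {
 *		if (VM_MAPENT_ISWIRED(entry))
 *			continue;	   wired_count != 0: not pageable
 *		... entry's pages may be paged out ...
 *	}
 */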

TAILQ_HEAD(uvm_map_deadq, vm_map_entry);	/* dead entry queue */
RBT_HEAD(uvm_map_addr, vm_map_entry);
#ifdef _KERNEL
RBT_PROTOTYPE(uvm_map_addr, vm_map_entry, daddrs.addr_entry,
    uvm_mapentry_addrcmp);
#endif

/*
 * A map is an rbtree of map entries, kept sorted by address.
 * In addition, free space entries are also kept in an rbtree,
 * indexed by free size.
 *
 *
 *
 * LOCKING PROTOCOL NOTES:
 * -----------------------
 *
 * VM map locking is a little complicated.  There are both shared
 * and exclusive locks on maps.  It is sometimes required to
 * downgrade an exclusive lock to a shared lock, and later upgrade
 * to an exclusive lock again (to perform error recovery).  However,
 * another thread *must not* queue itself to receive an exclusive
 * lock before we upgrade back to exclusive; otherwise the error
 * recovery becomes extremely difficult, if not impossible.
 *
 * In order to prevent this scenario, we introduce the notion of
 * a `busy' map.  A `busy' map is read-locked, but other threads
 * attempting to write-lock wait for this flag to clear before
 * entering the lock manager.  A map may only be marked busy
 * when the map is write-locked (and then the map must be downgraded
 * to read-locked), and may only be marked unbusy by the thread
 * which marked it busy (holding *either* a read-lock or a
 * write-lock, the latter being gained by an upgrade).
 *
 * Access to the map `flags' member is controlled by the `flags_lock'
 * simple lock.  Note that some flags are static (set once at map
 * creation time, and never changed), and thus require no locking
 * to check those flags.  All flags which are r/w must be set or
 * cleared while the `flags_lock' is asserted.  Additional locking
 * requirements are:
 *
 *	VM_MAP_PAGEABLE		r/o static flag; no locking required
 *
 *	VM_MAP_INTRSAFE		r/o static flag; no locking required
 *
 *	VM_MAP_WIREFUTURE	r/w; may only be set or cleared when
 *				map is write-locked.  may be tested
 *				without asserting `flags_lock'.
 *
 *	VM_MAP_BUSY		r/w; may only be set when map is
 *				write-locked, may only be cleared by
 *				thread which set it, map read-locked
 *				or write-locked.  must be tested
 *				while `flags_lock' is asserted.
 *
 *	VM_MAP_WANTLOCK		r/w; may only be set when the map
 *				is busy, and thread is attempting
 *				to write-lock.  must be tested
 *				while `flags_lock' is asserted.
 *
 *	VM_MAP_GUARDPAGES	r/o; must be specified at map
 *				initialization time.
 *				If set, guards will appear between
 *				automatic allocations.
 *				No locking required.
 *
 *	VM_MAP_ISVMSPACE	r/o; set by uvmspace_alloc.
 *				Signifies that this map is a vmspace.
 *				(The implementation treats all maps
 *				without this bit as kernel maps.)
 *				No locking required.
 *
 *
 * All automatic allocations (uvm_map without MAP_FIXED) will allocate
 * from vm_map.free.
 * If that allocation fails:
 * - vmspace maps will spill over into vm_map.bfree,
 * - all other maps will call uvm_map_kmem_grow() to increase the arena.
 *
 * vmspace maps have their data, brk() and stack arenas automatically
 * updated when uvm_map() is invoked without MAP_FIXED.
 * The spill-over arena (vm_map.bfree) will contain the space in the brk()
 * and stack ranges.
 * Kernel maps never have a bfree arena and this tree will always be empty.
 *
 *
 * read_locks and write_locks are used in lock debugging code.
 *
 * Locks used to protect struct members in this file:
 *	a	atomic operations
 *	I	immutable after creation or exec(2)
 *	v	`vm_map_lock' (this map `lock' or `mtx')
 */
struct vm_map {
	struct pmap		*pmap;		/* [I] Physical map */
	u_long			sserial;	/* [v] # stack changes */
	u_long			wserial;	/* [v] # PROT_WRITE increases */

	struct uvm_map_addr	addr;		/* [v] Entry tree, by addr */

	vsize_t			size;		/* virtual size */
	int			ref_count;	/* [a] Reference count */
	int			flags;		/* flags */
	unsigned int		timestamp;	/* Version number */

	vaddr_t			min_offset;	/* [I] First address in map. */
	vaddr_t			max_offset;	/* [I] Last address in map. */

	/*
	 * Allocation overflow regions.
	 */
	vaddr_t			b_start;	/* [v] Start for brk() alloc. */
	vaddr_t			b_end;		/* [v] End for brk() alloc. */
	vaddr_t			s_start;	/* [v] Start for stack alloc. */
	vaddr_t			s_end;		/* [v] End for stack alloc. */

	/*
	 * Special address selectors.
	 *
	 * The uaddr_exe mapping is used if:
	 * - protX is selected
	 * - the pointer is not NULL
	 *
	 * If uaddr_exe is not used, the other mappings are checked in
	 * order of appearance.
	 * If a hint is given, the selection will only be used if the hint
	 * falls in the range described by the mapping.
	 *
	 * The states are pointers because:
	 * - they may not all be in use
	 * - the struct size for different schemes is variable
	 *
	 * The uaddr_brk_stack selector will select addresses that are in
	 * the brk/stack area of the map.
	 */
	struct uvm_addr_state	*uaddr_exe;	/* Executable selector. */
	struct uvm_addr_state	*uaddr_any[4];	/* More selectors. */
	struct uvm_addr_state	*uaddr_brk_stack; /* Brk/stack selector. */

#define UVM_MAP_CHECK_COPYIN_MAX 4	/* main, sigtramp, ld.so, libc.so */
	struct uvm_check_copyin {
		vaddr_t	start, end;
	} check_copyin[UVM_MAP_CHECK_COPYIN_MAX];
	int			check_copyin_count;

	/*
	 * XXX struct mutex changes size because of compile options, so
	 * place after fields which are inspected by libkvm / procmap(8)
	 */
	struct rwlock		lock;		/* Non-intrsafe lock */
	struct mutex		mtx;		/* Intrsafe lock */
	struct mutex		flags_lock;	/* flags lock */
};

/* vm_map flags */
#define	VM_MAP_PAGEABLE		0x01	/* ro: entries are pageable */
#define	VM_MAP_INTRSAFE		0x02	/* ro: interrupt safe map */
#define	VM_MAP_WIREFUTURE	0x04	/* rw: wire future mappings */
#define	VM_MAP_BUSY		0x08	/* rw: map is busy */
#define	VM_MAP_WANTLOCK		0x10	/* rw: want to write-lock */
#define	VM_MAP_GUARDPAGES	0x20	/* rw: add guard pgs to map */
#define	VM_MAP_ISVMSPACE	0x40	/* ro: map is a vmspace */
#define	VM_MAP_SYSCALL_ONCE	0x80	/* rw: libc syscall registered */

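/*
 * Example (illustrative sketch of the busy-map protocol described in
 * the locking notes above, using the vm_map_* wrappers declared later
 * in this header):
 *
 *	vm_map_lock(map);		   write-lock the map
 *	vm_map_busy(map);		   set VM_MAP_BUSY
 *	vm_map_downgrade(map);		   drop to a read lock; would-be
 *					   writers wait for busy to clear
 *	... lengthy operation / error recovery ...
 *	vm_map_upgrade(map);		   regain the exclusive lock
 *	vm_map_unbusy(map);		   clear VM_MAP_BUSY
 *	vm_map_unlock(map);
 */
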
/* Number of kernel maps and entries to statically allocate */
#define	MAX_KMAPENT	1024	/* Sufficient to make it to the scheduler. */

#ifdef _KERNEL
/*
 * globals:
 */

extern vaddr_t	uvm_maxkaddr;

/*
 * protos: the following prototypes define the interface to vm_map
 */

void		uvm_map_deallocate(struct vm_map *);

int		uvm_map_clean(struct vm_map *, vaddr_t, vaddr_t, int);
void		uvm_map_clip_start(struct vm_map *, struct vm_map_entry *,
		    vaddr_t);
void		uvm_map_clip_end(struct vm_map *, struct vm_map_entry *,
		    vaddr_t);
int		uvm_map_extract(struct vm_map *, vaddr_t, vsize_t,
		    vaddr_t *, int);
struct vm_map	*uvm_map_create(pmap_t, vaddr_t, vaddr_t, int);
vaddr_t		uvm_map_pie(vaddr_t);
vaddr_t		uvm_map_hint(struct vmspace *, vm_prot_t, vaddr_t, vaddr_t);
int		uvm_map_check_copyin_add(struct vm_map *, vaddr_t, vaddr_t);
int		uvm_map_syscall(struct vm_map *, vaddr_t, vaddr_t);
int		uvm_map_immutable(struct vm_map *, vaddr_t, vaddr_t, int);
int		uvm_map_inherit(struct vm_map *, vaddr_t, vaddr_t, vm_inherit_t);
int		uvm_map_advice(struct vm_map *, vaddr_t, vaddr_t, int);
void		uvm_map_init(void);
boolean_t	uvm_map_lookup_entry(struct vm_map *, vaddr_t, vm_map_entry_t *);
boolean_t	uvm_map_is_stack_remappable(struct vm_map *, vaddr_t, vsize_t, int);
int		uvm_map_remap_as_stack(struct proc *, vaddr_t, vsize_t);
int		uvm_map_replace(struct vm_map *, vaddr_t, vaddr_t,
		    vm_map_entry_t, int);
int		uvm_map_reserve(struct vm_map *, vsize_t, vaddr_t, vsize_t,
		    vaddr_t *);
void		uvm_map_setup(struct vm_map *, pmap_t, vaddr_t, vaddr_t, int);
int		uvm_map_submap(struct vm_map *, vaddr_t, vaddr_t,
		    struct vm_map *);
void		uvm_unmap(struct vm_map *, vaddr_t, vaddr_t);
void		uvm_unmap_detach(struct uvm_map_deadq *, int);
int		uvm_unmap_remove(struct vm_map *, vaddr_t, vaddr_t,
		    struct uvm_map_deadq *, boolean_t, boolean_t, boolean_t);
void		uvm_map_set_uaddr(struct vm_map *, struct uvm_addr_state **,
		    struct uvm_addr_state *);
int		uvm_map_mquery(struct vm_map *, vaddr_t *, vsize_t, voff_t, int);

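/*
 * Example (illustrative sketch): find the entry containing an address.
 * On success, addr lies in [entry->start, entry->end).
 *
 *	struct vm_map_entry *entry;
 *
 *	vm_map_lock_read(map);
 *	if (uvm_map_lookup_entry(map, addr, &entry)) {
 *		... inspect entry->protection, entry->etype, ... ...
 *	}
 *	vm_map_unlock_read(map);
 */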

struct p_inentry;

int		uvm_map_inentry_sp(vm_map_entry_t);
int		uvm_map_inentry_pc(vm_map_entry_t);
boolean_t	uvm_map_inentry(struct proc *, struct p_inentry *, vaddr_t addr,
		    const char *fmt, int (*fn)(vm_map_entry_t), u_long serial);

struct kinfo_vmentry;

int		uvm_map_fill_vmmap(struct vm_map *, struct kinfo_vmentry *,
		    size_t *);

/*
 * VM map locking operations:
 *
 *	These operations perform locking on the data portion of the
 *	map.
 *
 *	vm_map_lock_try: try to lock a map, failing if it is already locked.
 *
 *	vm_map_lock: acquire an exclusive (write) lock on a map.
 *
 *	vm_map_lock_read: acquire a shared (read) lock on a map.
 *
 *	vm_map_unlock: release an exclusive lock on a map.
 *
 *	vm_map_unlock_read: release a shared lock on a map.
 *
 *	vm_map_downgrade: downgrade an exclusive lock to a shared lock.
 *
 *	vm_map_upgrade: upgrade a shared lock to an exclusive lock.
 *
 *	vm_map_busy: mark a map as busy.
 *
 *	vm_map_unbusy: clear busy status on a map.
 *
 */

boolean_t	vm_map_lock_try_ln(struct vm_map *, char *, int);
void		vm_map_lock_ln(struct vm_map *, char *, int);
void		vm_map_lock_read_ln(struct vm_map *, char *, int);
void		vm_map_unlock_ln(struct vm_map *, char *, int);
void		vm_map_unlock_read_ln(struct vm_map *, char *, int);
void		vm_map_downgrade_ln(struct vm_map *, char *, int);
void		vm_map_upgrade_ln(struct vm_map *, char *, int);
void		vm_map_busy_ln(struct vm_map *, char *, int);
void		vm_map_unbusy_ln(struct vm_map *, char *, int);
void		vm_map_assert_anylock_ln(struct vm_map *, char *, int);
void		vm_map_assert_wrlock_ln(struct vm_map *, char *, int);

#ifdef DIAGNOSTIC
#define vm_map_lock_try(map)	vm_map_lock_try_ln(map, __FILE__, __LINE__)
#define vm_map_lock(map)	vm_map_lock_ln(map, __FILE__, __LINE__)
#define vm_map_lock_read(map)	vm_map_lock_read_ln(map, __FILE__, __LINE__)
#define vm_map_unlock(map)	vm_map_unlock_ln(map, __FILE__, __LINE__)
#define vm_map_unlock_read(map)	vm_map_unlock_read_ln(map, __FILE__, __LINE__)
#define vm_map_downgrade(map)	vm_map_downgrade_ln(map, __FILE__, __LINE__)
#define vm_map_upgrade(map)	vm_map_upgrade_ln(map, __FILE__, __LINE__)
#define vm_map_busy(map)	vm_map_busy_ln(map, __FILE__, __LINE__)
#define vm_map_unbusy(map)	vm_map_unbusy_ln(map, __FILE__, __LINE__)
#define vm_map_assert_anylock(map)	\
		vm_map_assert_anylock_ln(map, __FILE__, __LINE__)
#define vm_map_assert_wrlock(map)	\
		vm_map_assert_wrlock_ln(map, __FILE__, __LINE__)
#else
#define vm_map_lock_try(map)	vm_map_lock_try_ln(map, NULL, 0)
#define vm_map_lock(map)	vm_map_lock_ln(map, NULL, 0)
#define vm_map_lock_read(map)	vm_map_lock_read_ln(map, NULL, 0)
#define vm_map_unlock(map)	vm_map_unlock_ln(map, NULL, 0)
#define vm_map_unlock_read(map)	vm_map_unlock_read_ln(map, NULL, 0)
#define vm_map_downgrade(map)	vm_map_downgrade_ln(map, NULL, 0)
#define vm_map_upgrade(map)	vm_map_upgrade_ln(map, NULL, 0)
#define vm_map_busy(map)	vm_map_busy_ln(map, NULL, 0)
#define vm_map_unbusy(map)	vm_map_unbusy_ln(map, NULL, 0)
#define vm_map_assert_anylock(map)	vm_map_assert_anylock_ln(map, NULL, 0)
#define vm_map_assert_wrlock(map)	vm_map_assert_wrlock_ln(map, NULL, 0)
#endif

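/*
 * Example (illustrative sketch): opportunistic write-lock, falling
 * back to a blocking lock when contended.
 *
 *	if (!vm_map_lock_try(map))
 *		vm_map_lock(map);	   blocks until available
 *	vm_map_assert_wrlock(map);	   DIAGNOSTIC-aware assertion
 *	... modify the entry tree ...
 *	vm_map_unlock(map);
 */
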
void		uvm_map_lock_entry(struct vm_map_entry *);
void		uvm_map_unlock_entry(struct vm_map_entry *);

#endif /* _KERNEL */

/*
 * Functions implemented as macros
 */
#define	vm_map_min(map)		((map)->min_offset)
#define	vm_map_max(map)		((map)->max_offset)
#define	vm_map_pmap(map)	((map)->pmap)

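/*
 * Example (sketch): the span of addresses a map manages, using the
 * accessors above.
 *
 *	vsize_t span = vm_map_max(map) - vm_map_min(map);
 */
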
#endif /* _UVM_UVM_MAP_H_ */