/*
 * FreeBSD/Linux Kernel Cross Reference
 * sys/vm/vm_map.h
 */
1 /*
2 * Mach Operating System
3 * Copyright (c) 1987-1993 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26 /*
27 * HISTORY
28 * $Log: vm_map.h,v $
29 * Revision 2.18 93/08/10 15:13:47 mrt
30 * Included the projected_on field in VM map entries, to support
31 * projected buffers.
32 * [93/02/16 09:46:01 jcb]
33 *
34 * Revision 2.17 93/01/14 18:01:34 danner
35 * 64bit cleanup.
36 * [92/12/10 20:49:22 af]
37 *
38 * Revision 2.16 92/05/21 17:26:14 jfriedl
39 * Added cast to call of assert_wait in vm_map_entry_wait.
40 * [92/05/16 jfriedl]
41 *
42 * Revision 2.15 92/03/31 15:18:41 rpd
43 * Add extend_cont continuation invocation macro to invoke
44 * continuation without affecting current copy. Declare
45 * vm_map_copy_discard_cont.
46 * [92/03/20 14:15:53 dlb]
47 *
48 * Revision 2.14 92/02/23 19:51:00 elf
49 * Change unused wiring_allowed field to wiring_required
50 * in vm_map data structure.
51 * [92/02/20 15:19:12 dlb]
52 *
53 * Add is_shared bit to map entry to detect sharing.
54 * [92/02/19 14:26:45 dlb]
55 *
56 * Remove all sharing map structure elements.
57 * Make vm_map_verify_done() a macro.
58 * [92/01/07 11:14:16 dlb]
59 *
60 * Revision 2.13 91/12/10 13:27:03 jsb
61 * Simplify page list continuation abort logic. Temporarily
62 * increase size of page lists for NORMA_IPC until it supports
63 * page list continuations.
64 * [91/12/10 12:50:01 dlb]
65 *
66 * Fix type of null pointer in continuation invocation in
67 * vm_map_copy_abort_cont.
68 *
69 * Revision 2.12 91/08/28 11:18:33 jsb
70 * Supplied missing argument to thread_block in vm_map_entry_wait.
71 * [91/08/16 10:36:21 jsb]
72 *
73 * Minor cleanups.
74 * [91/08/06 17:26:24 dlb]
75 *
76 * Discard pages before invoking or aborting a continuation.
77 * [91/08/05 17:51:55 dlb]
78 *
79 * Add declarations for in transition map entries and vm_map_copy
80 * continuations.
81 * [91/07/30 14:18:14 dlb]
82 *
83 * Revision 2.11 91/07/01 08:27:43 jsb
84 * Declarations for multiple-format vm map copy support.
85 * [91/06/29 14:36:42 dlb]
86 *
87 * Revision 2.10 91/05/18 14:41:09 rpd
88 * Added kentry_data and friends, for vm_map_init.
89 * [91/03/22 rpd]
90 *
91 * Revision 2.9 91/05/14 17:50:06 mrt
92 * Correcting copyright
93 *
94 * Revision 2.8 91/03/16 15:06:07 rpd
95 * Removed vm_map_find. Added vm_map_find_entry.
96 * [91/03/03 rpd]
97 *
98 * Revision 2.7 91/02/05 17:59:07 mrt
99 * Changed to new Mach copyright
100 * [91/02/01 16:33:15 mrt]
101 *
102 * Revision 2.6 90/10/12 13:06:08 rpd
103 * Removed copy_on_write field.
104 * [90/10/08 rpd]
105 *
106 * Revision 2.5 90/06/19 23:02:32 rpd
107 * Picked up vm_submap_object.
108 * [90/06/08 rpd]
109 *
110 * Revision 2.4 90/06/02 15:11:24 rpd
111 * New vm_map_pageable, with user_wired_count.
112 * [90/03/26 23:15:58 rpd]
113 *
114 * Revision 2.3 90/02/22 20:06:19 dbg
115 * Combine fields in vm_map and vm_map_copy into a vm_map_header
116 * structure.
117 * [90/01/29 dbg]
118 *
119 * Add changes from mainline:
120 *
121 * Added documentation for exported routines.
122 * Add vm_map_t->wait_for_space field.
123 * Add vm_map_copy_t type, associated routine declarations, and
124 * documentation.
125 * Introduced vm_map_links, which contains those map entry fields
126 * used in the map structure.
127 * [89/08/31 21:13:56 rpd]
128 *
129 * Optimization from NeXT: is_a_map, is_sub_map, copy_on_write,
130 * needs_copy are now bit-fields.
131 * [89/08/19 23:44:53 rpd]
132 *
133 * Revision 2.2 90/01/22 23:09:35 af
134 * Added vm_map_machine_attribute() decl.
135 *
136 * Changes for MACH_KERNEL:
137 * . Added wiring_allowed to map.
138 * [89/04/29 dbg]
139 *
140 * Revision 2.1 89/08/03 16:45:29 rwd
141 * Created.
142 *
143 * Revision 2.9 89/04/18 21:26:14 mwyoung
144 * Reset history. All relevant material is in the documentation
145 * here, and in the implementation file ("vm/vm_map.c").
146 * [89/04/18 mwyoung]
147 *
148 */
149 /*
150 * File: vm/vm_map.h
151 * Author: Avadis Tevanian, Jr., Michael Wayne Young
152 * Date: 1985
153 *
154 * Virtual memory map module definitions.
155 *
156 * Contributors:
157 * avie, dlb, mwyoung
158 */
159
160 #ifndef _VM_VM_MAP_H_
161 #define _VM_VM_MAP_H_
162
163 #include <mach/kern_return.h>
164 #include <mach/boolean.h>
165 #include <mach/machine/vm_types.h>
166 #include <mach/vm_prot.h>
167 #include <mach/vm_inherit.h>
168 #include <vm/pmap.h>
169 #include <vm/vm_object.h>
170 #include <vm/vm_page.h>
171 #include <kern/lock.h>
172 #include <kern/macro_help.h>
173
174 /*
175 * Types defined:
176 *
177 * vm_map_t the high-level address map data structure.
178 * vm_map_entry_t an entry in an address map.
179 * vm_map_version_t a timestamp of a map, for use with vm_map_lookup
180 * vm_map_copy_t represents memory copied from an address map,
181 * used for inter-map copy operations
182 */
183
184 /*
185 * Type: vm_map_object_t [internal use only]
186 *
187 * Description:
188 * The target of an address mapping, either a virtual
189 * memory object or a sub map (of the kernel map).
190 */
191 typedef union vm_map_object {
192 struct vm_object *vm_object; /* a virtual memory object */
193 struct vm_map *sub_map; /* a submap: belongs to another map */
194 } vm_map_object_t;
195
196 /*
197 * Type: vm_map_entry_t [internal use only]
198 *
199 * Description:
200 * A single mapping within an address map.
201 *
202 * Implementation:
203 * Address map entries consist of start and end addresses,
204 * a VM object (or sub map) and offset into that object,
205 * and user-exported inheritance and protection information.
206 * Control information for virtual copy operations is also
207 * stored in the address map entry.
208 */
/*
 * Doubly-linked list linkage plus the start/end addresses of the range
 * it covers.  Shared by vm_map_entry and vm_map_header, so a map's
 * header can masquerade as a sentinel entry (see vm_map_to_entry()).
 */
209 struct vm_map_links {
210 struct vm_map_entry *prev; /* previous entry */
211 struct vm_map_entry *next; /* next entry */
212 vm_offset_t start; /* start address of range */
213 vm_offset_t end; /* end address of range */
214 };
215
216 struct vm_map_entry {
217 struct vm_map_links links; /* links to other entries, plus [start, end) */
218 #define vme_prev links.prev
219 #define vme_next links.next
220 #define vme_start links.start
221 #define vme_end links.end
222 union vm_map_object object; /* object or submap mapped here */
223 vm_offset_t offset; /* offset into object */
224 unsigned int
225 /* boolean_t */ is_shared:1, /* region is shared */
226 /* boolean_t */ is_sub_map:1, /* Is "object" a submap? */
227 /* boolean_t */ in_transition:1, /* Entry being changed; see vm_map_entry_wait() */
228 /* boolean_t */ needs_wakeup:1, /* Waiters on in_transition */
229 /* Only used when object is a vm_object: */
230 /* boolean_t */ needs_copy:1; /* object must be copied before write (copy-on-write) */
231 
232 /* Only in task maps: */
233 vm_prot_t protection; /* protection code */
234 vm_prot_t max_protection; /* maximum protection */
235 vm_inherit_t inheritance; /* inheritance on fork */
236 unsigned short wired_count; /* can be paged if = 0 */
237 unsigned short user_wired_count; /* wirings made via the user interface (vm_wire) */
238 struct vm_map_entry *projected_on; /* 0 for normal map entry
239 or persistent kernel map projected buffer entry;
240 -1 for non-persistent kernel map projected buffer entry;
241 pointer to corresponding kernel map entry for user map
242 projected buffer entry */
243 };
244 
245 typedef struct vm_map_entry *vm_map_entry_t;
246 
247 #define VM_MAP_ENTRY_NULL ((vm_map_entry_t) 0)
248
249 /*
250 * Type: struct vm_map_header
251 *
252 * Description:
253 * Header for a vm_map and a vm_map_copy.
254 */
/*
 * Common prefix shared by vm_map and entry-list vm_map_copy objects,
 * allowing some list-maintenance functions/macros to operate on either.
 */
255 struct vm_map_header {
256 struct vm_map_links links; /* list head: first, last, min, max */
257 int nentries; /* Number of entries on the list */
258 boolean_t entries_pageable;
259 /* are map entries themselves pageable memory? */
260 };
261
262 /*
263 * Type: vm_map_t [exported; contents invisible]
264 *
265 * Description:
266 * An address map -- a directory relating valid
267 * regions of a task's address space to the corresponding
268 * virtual memory objects.
269 *
270 * Implementation:
271 * Maps are doubly-linked lists of map entries, sorted
272 * by address. One hint is used to start
273 * searches again from the last successful search,
274 * insertion, or removal. Another hint is used to
275 * quickly find free space.
276 */
277 typedef struct vm_map {
278 lock_data_t lock; /* Lock for map data */
279 struct vm_map_header hdr; /* Map entry header */
280 #define min_offset hdr.links.start /* start of valid address range */
281 #define max_offset hdr.links.end /* end of valid address range */
282 pmap_t pmap; /* Physical map */
283 vm_size_t size; /* virtual size */
284 int ref_count; /* Reference count */
285 decl_simple_lock_data(, ref_lock) /* Lock for ref_count field */
286 vm_map_entry_t hint; /* hint for quick lookups */
287 decl_simple_lock_data(, hint_lock) /* lock for hint storage */
288 vm_map_entry_t first_free; /* First free space hint */
289 boolean_t wait_for_space; /* Should callers wait
290 for space? */
291 boolean_t wiring_required;/* All memory wired? */
292 unsigned int timestamp; /* Version number; bumped on each
293 write-lock (see vm_map_lock()) */
294 } *vm_map_t;
295 
296 #define VM_MAP_NULL ((vm_map_t) 0)
297 
/* The header's links masquerade as the sentinel entry terminating the list. */
298 #define vm_map_to_entry(map) ((struct vm_map_entry *) &(map)->hdr.links)
299 #define vm_map_first_entry(map) ((map)->hdr.links.next)
300 #define vm_map_last_entry(map) ((map)->hdr.links.prev)
300
301 /*
302 * Type: vm_map_version_t [exported; contents invisible]
303 *
304 * Description:
305 * Map versions may be used to quickly validate a previous
306 * lookup operation.
307 *
308 * Usage note:
309 * Because they are bulky objects, map versions are usually
310 * passed by reference.
311 *
312 * Implementation:
313 * Just a timestamp for the main map.
314 */
315 typedef struct vm_map_version {
316 unsigned int main_timestamp; /* snapshot of map->timestamp at lookup time */
317 } vm_map_version_t;
318
319 /*
320 * Type: vm_map_copy_t [exported; contents invisible]
321 *
322 * Description:
323 * A map copy object represents a region of virtual memory
324 * that has been copied from an address map but is still
325 * in transit.
326 *
327 * A map copy object may only be used by a single thread
328 * at a time.
329 *
330 * Implementation:
331 * There are three formats for map copy objects.
332 * The first is very similar to the main
333 * address map in structure, and as a result, some
334 * of the internal maintenance functions/macros can
335 * be used with either address maps or map copy objects.
336 *
337 * The map copy object contains a header links
338 * entry onto which the other entries that represent
339 * the region are chained.
340 *
341 * The second format is a single vm object. This is used
342 * primarily in the pageout path. The third format is a
343 * list of vm pages. An optional continuation provides
344 * a hook to be called to obtain more of the memory,
345 * or perform other operations. The continuation takes 3
346 * arguments, a saved arg buffer, a pointer to a new vm_map_copy
347 * (returned) and an abort flag (abort if TRUE).
348 */
349
/*
 * Capacity of a page-list copy object.  Temporarily larger on the
 * iPSC386/iPSC860 (NORMA_IPC) until page list continuations are
 * supported there (see the Revision 2.13 history note).
 *
 * Fix: text after #else/#endif must be a comment -- bare tokens there
 * are not valid standard C (pre-ANSI compilers tolerated them).
 */
#if iPSC386 || iPSC860
#define VM_MAP_COPY_PAGE_LIST_MAX 64
#else /* iPSC386 || iPSC860 */
#define VM_MAP_COPY_PAGE_LIST_MAX 8
#endif /* iPSC386 || iPSC860 */
355
356 typedef struct vm_map_copy {
357 int type; /* discriminates the c_u union below */
358 #define VM_MAP_COPY_ENTRY_LIST 1
359 #define VM_MAP_COPY_OBJECT 2
360 #define VM_MAP_COPY_PAGE_LIST 3
361 vm_offset_t offset; /* offset of the copied region */
362 vm_size_t size; /* size of the copied region */
363 union {
364 struct vm_map_header hdr; /* ENTRY_LIST */
365 struct { /* OBJECT */
366 vm_object_t object;
367 } c_o;
368 struct { /* PAGE_LIST */
369 vm_page_t page_list[VM_MAP_COPY_PAGE_LIST_MAX];
370 int npages;
371 kern_return_t (*cont)(); /* continuation: (cont_args, new_copy_out);
372 invoked via vm_map_copy_invoke_cont() */
373 char *cont_args; /* saved argument buffer for cont */
374 } c_p;
375 } c_u;
376 } *vm_map_copy_t;
377 
378 #define cpy_hdr c_u.hdr
379 
380 #define cpy_object c_u.c_o.object
381 
382 #define cpy_page_list c_u.c_p.page_list
383 #define cpy_npages c_u.c_p.npages
384 #define cpy_cont c_u.c_p.cont
385 #define cpy_cont_args c_u.c_p.cont_args
386 
387 #define VM_MAP_COPY_NULL ((vm_map_copy_t) 0)
387
388 /*
389 * Useful macros for entry list copy objects
390 */
391
/*
 * Entry-list analogues of vm_map_to_entry()/vm_map_first_entry()/
 * vm_map_last_entry(); valid only for VM_MAP_COPY_ENTRY_LIST copies.
 */
392 #define vm_map_copy_to_entry(copy) \
393 ((struct vm_map_entry *) &(copy)->cpy_hdr.links)
394 #define vm_map_copy_first_entry(copy) \
395 ((copy)->cpy_hdr.links.next)
396 #define vm_map_copy_last_entry(copy) \
397 ((copy)->cpy_hdr.links.prev)
398
399 /*
400 * Continuation macros for page list copy objects
401 */
402
/*
 * Discard the pages already held by old_copy, then call its continuation
 * to obtain the next piece into *new_copy; *result gets the kern_return_t.
 * The continuation is cleared so it cannot be invoked twice.
 * NOTE: old_copy is evaluated several times -- pass a simple lvalue.
 */
403 #define vm_map_copy_invoke_cont(old_copy, new_copy, result) \
404 MACRO_BEGIN \
405 vm_map_copy_page_discard(old_copy); \
406 *result = (*((old_copy)->cpy_cont))((old_copy)->cpy_cont_args, \
407 new_copy); \
408 (old_copy)->cpy_cont = (kern_return_t (*)()) 0; \
409 MACRO_END
410 
/*
 * Like vm_map_copy_invoke_cont() but does NOT discard old_copy's pages,
 * so the current copy is unaffected ("extend" semantics).
 */
411 #define vm_map_copy_invoke_extend_cont(old_copy, new_copy, result) \
412 MACRO_BEGIN \
413 *result = (*((old_copy)->cpy_cont))((old_copy)->cpy_cont_args, \
414 new_copy); \
415 (old_copy)->cpy_cont = (kern_return_t (*)()) 0; \
416 MACRO_END
417 
/*
 * Abort: discard held pages and call the continuation with a null copy
 * pointer (the abort convention); both cont and cont_args are cleared.
 */
418 #define vm_map_copy_abort_cont(old_copy) \
419 MACRO_BEGIN \
420 vm_map_copy_page_discard(old_copy); \
421 (*((old_copy)->cpy_cont))((old_copy)->cpy_cont_args, \
422 (vm_map_copy_t *) 0); \
423 (old_copy)->cpy_cont = (kern_return_t (*)()) 0; \
424 (old_copy)->cpy_cont_args = (char *) 0; \
425 MACRO_END
426 
/* TRUE while a continuation remains to be invoked (or aborted). */
427 #define vm_map_copy_has_cont(copy) \
428 (((copy)->cpy_cont) != (kern_return_t (*)()) 0)
429
430 /*
431 * Continuation structures for vm_map_copyin_page_list.
432 */
433
434 typedef struct {
435 vm_map_t map; /* source map */
436 vm_offset_t src_addr; /* remaining source region start */
437 vm_size_t src_len; /* remaining source region length */
438 vm_offset_t destroy_addr; /* region to destroy on completion --
439 NOTE(review): presumed; confirm in
440 vm_map_copyin_page_list() */
441 vm_size_t destroy_len;
442 boolean_t steal_pages; /* take pages rather than copy them */
443 } vm_map_copyin_args_data_t, *vm_map_copyin_args_t;
444 
445 #define VM_MAP_COPYIN_ARGS_NULL ((vm_map_copyin_args_t) 0)
444
445 /*
446 * Macros: vm_map_lock, etc. [internal use only]
447 * Description:
448 * Perform locking on the data portion of a map.
449 */
450
/* Initialize the map lock and reset the version timestamp. */
451 #define vm_map_lock_init(map) \
452 MACRO_BEGIN \
453 lock_init(&(map)->lock, TRUE); \
454 (map)->timestamp = 0; \
455 MACRO_END
456 
/* Exclusive (write) lock; bumps the timestamp so map versions go stale. */
457 #define vm_map_lock(map) \
458 MACRO_BEGIN \
459 lock_write(&(map)->lock); \
460 (map)->timestamp++; \
461 MACRO_END
462 
463 #define vm_map_unlock(map) lock_write_done(&(map)->lock)
464 #define vm_map_lock_read(map) lock_read(&(map)->lock)
465 #define vm_map_unlock_read(map) lock_read_done(&(map)->lock)
466 #define vm_map_lock_write_to_read(map) \
467 lock_write_to_read(&(map)->lock)
/*
 * Upgrade read lock to write lock: evaluates to 0 on success (and then
 * increments the timestamp), nonzero if the upgrade failed --
 * NOTE(review): confirm lock_read_to_write()'s return convention.
 */
468 #define vm_map_lock_read_to_write(map) \
469 (lock_read_to_write(&(map)->lock) || (((map)->timestamp++), 0))
470 #define vm_map_lock_set_recursive(map) \
471 lock_set_recursive(&(map)->lock)
472 #define vm_map_lock_clear_recursive(map) \
473 lock_clear_recursive(&(map)->lock)
474
475 /*
476 * Exported procedures that operate on vm_map_t.
477 */
478
/*
 * NOTE(review): pre-ANSI (K&R) declarations -- empty parentheses mean
 * parameter types are unchecked by the compiler; see vm/vm_map.c for
 * the actual signatures.
 */
/* Bootstrap storage for kernel map entries, consumed by vm_map_init(). */
479 extern vm_offset_t kentry_data;
480 extern vm_offset_t kentry_data_size;
481 extern int kentry_count;
482 extern void vm_map_init(); /* Initialize the module */
483 
484 extern vm_map_t vm_map_create(); /* Create an empty map */
485 extern vm_map_t vm_map_fork(); /* Create a map in the image
486 * of an existing map */
487 
488 extern void vm_map_reference(); /* Gain a reference to
489 * an existing map */
490 extern void vm_map_deallocate(); /* Lose a reference */
491 
492 extern kern_return_t vm_map_enter(); /* Enter a mapping */
493 extern kern_return_t vm_map_find_entry(); /* Enter a mapping primitive */
494 extern kern_return_t vm_map_remove(); /* Deallocate a region */
495 extern kern_return_t vm_map_protect(); /* Change protection */
496 extern kern_return_t vm_map_inherit(); /* Change inheritance */
497 
498 extern void vm_map_print(); /* Debugging: print a map */
499 
500 extern kern_return_t vm_map_lookup(); /* Look up an address */
501 extern boolean_t vm_map_verify(); /* Verify that a previous
502 * lookup is still valid */
503 /* vm_map_verify_done is now a macro -- see below */
504 extern kern_return_t vm_map_copyin(); /* Make a copy of a region */
505 extern kern_return_t vm_map_copyin_page_list();/* Make a copy of a region
506 * using a page list copy */
507 extern kern_return_t vm_map_copyout(); /* Place a copy into a map */
508 extern kern_return_t vm_map_copy_overwrite();/* Overwrite existing memory
509 * with a copy */
510 extern void vm_map_copy_discard(); /* Discard a copy without
511 * using it */
512 extern kern_return_t vm_map_copy_discard_cont();/* Page list continuation
513 * version of previous */
514 
515 extern kern_return_t vm_map_machine_attribute();
516 /* Add or remove machine-
517 dependent attributes from
518 map regions */
519
520 /*
521 * Functions implemented as macros
522 */
523 #define vm_map_min(map) ((map)->min_offset)
524 /* Lowest valid address in
525 * a map */
526 
527 #define vm_map_max(map) ((map)->max_offset)
528 /* Highest valid address */
529 
530 #define vm_map_pmap(map) ((map)->pmap)
531 /* Physical map associated
532 * with this address map */
533 
/* NOTE: the 'version' argument is unused -- kept for interface symmetry. */
534 #define vm_map_verify_done(map, version) (vm_map_unlock_read(map))
535 /* Operation that required
536 * a verified lookup is
537 * now complete */
538 /*
539 * Pageability functions. Includes macro to preserve old interface.
540 * The final argument of vm_map_pageable_common() selects user wiring
541 * (TRUE, tracked in user_wired_count) vs kernel wiring (FALSE).
542 */
541 extern kern_return_t vm_map_pageable_common();
542 
543 #define vm_map_pageable(map, s, e, access) \
544 vm_map_pageable_common(map, s, e, access, FALSE)
545 
546 #define vm_map_pageable_user(map, s, e, access) \
547 vm_map_pageable_common(map, s, e, access, TRUE)
548
549 /*
550 * Submap object. The single shared object that must back any memory
551 * destined to be placed in a submap by vm_map_submap.
552 */
553 extern vm_object_t vm_submap_object;
554
555 /*
556 * Wait and wakeup macros for in_transition map entries.
557 */
/*
 * Sleep until an in_transition entry changes.  DROPS the map lock before
 * blocking; the caller must re-take vm_map_lock() and re-lookup the entry
 * on return.  The wait event is the address of the map's header.
 */
558 #define vm_map_entry_wait(map, interruptible) \
559 MACRO_BEGIN \
560 assert_wait((event_t)&(map)->hdr, interruptible); \
561 vm_map_unlock(map); \
562 thread_block((void (*)()) 0); \
563 MACRO_END
564 
/* Wake all threads sleeping in vm_map_entry_wait() on this map. */
565 #define vm_map_entry_wakeup(map) thread_wakeup((event_t)&(map)->hdr)
566
567 #endif _VM_VM_MAP_H_
/* Cache object: c8e52a0a988f6e7e6f34b9a30b20305b */