FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_map.h
1 /*
2 * Mach Operating System
3  * Copyright (c) 1987-1993 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26 /*
27 * HISTORY
28 * $Log: vm_map.h,v $
29 * Revision 2.19 93/11/17 18:55:43 dbg
30 * Conditionalized projected buffer support under NET_ATM.
31 * [93/09/10 dbg]
32 * ANSI-fied.
33 * [93/06/16 dbg]
34 *
35 * Revision 2.18 93/08/10 15:13:47 mrt
36 * Included the projected_on field in VM map entries, to support
37 * projected buffers.
38 * [93/02/16 09:46:01 jcb]
39 *
40 * Revision 2.17 93/01/14 18:01:34 danner
41 * 64bit cleanup.
42 * [92/12/10 20:49:22 af]
43 *
44 * Revision 2.16 92/05/21 17:26:14 jfriedl
45 * Added cast to call of assert_wait in vm_map_entry_wait.
46 * [92/05/16 jfriedl]
47 *
48 * Revision 2.15 92/03/31 15:18:41 rpd
49 * Add extend_cont continuation invocation macro to invoke
50 * continuation without affecting current copy. Declare
51 * vm_map_copy_discard_cont.
52 * [92/03/20 14:15:53 dlb]
53 *
54 * Revision 2.14 92/02/23 19:51:00 elf
55 * Change unused wiring_allowed field to wiring_required
56 * in vm_map data structure.
57 * [92/02/20 15:19:12 dlb]
58 *
59 * Add is_shared bit to map entry to detect sharing.
60 * [92/02/19 14:26:45 dlb]
61 *
62 * Remove all sharing map structure elements.
63 * Make vm_map_verify_done() a macro.
64 * [92/01/07 11:14:16 dlb]
65 *
66 * Revision 2.13 91/12/10 13:27:03 jsb
67 * Simplify page list continuation abort logic. Temporarily
68 * increase size of page lists for NORMA_IPC until it supports
69 * page list continuations.
70 * [91/12/10 12:50:01 dlb]
71 *
72 * Fix type of null pointer in continuation invocation in
73 * vm_map_copy_abort_cont.
74 *
75 * Revision 2.12 91/08/28 11:18:33 jsb
76 * Supplied missing argument to thread_block in vm_map_entry_wait.
77 * [91/08/16 10:36:21 jsb]
78 *
79 * Minor cleanups.
80 * [91/08/06 17:26:24 dlb]
81 *
82 * Discard pages before invoking or aborting a continuation.
83 * [91/08/05 17:51:55 dlb]
84 *
85 * Add declarations for in transition map entries and vm_map_copy
86 * continuations.
87 * [91/07/30 14:18:14 dlb]
88 *
89 * Revision 2.11 91/07/01 08:27:43 jsb
90 * Declarations for multiple-format vm map copy support.
91 * [91/06/29 14:36:42 dlb]
92 *
93 * Revision 2.10 91/05/18 14:41:09 rpd
94 * Added kentry_data and friends, for vm_map_init.
95 * [91/03/22 rpd]
96 *
97 * Revision 2.9 91/05/14 17:50:06 mrt
98 * Correcting copyright
99 *
100 * Revision 2.8 91/03/16 15:06:07 rpd
101 * Removed vm_map_find. Added vm_map_find_entry.
102 * [91/03/03 rpd]
103 *
104 * Revision 2.7 91/02/05 17:59:07 mrt
105 * Changed to new Mach copyright
106 * [91/02/01 16:33:15 mrt]
107 *
108 * Revision 2.6 90/10/12 13:06:08 rpd
109 * Removed copy_on_write field.
110 * [90/10/08 rpd]
111 *
112 * Revision 2.5 90/06/19 23:02:32 rpd
113 * Picked up vm_submap_object.
114 * [90/06/08 rpd]
115 *
116 * Revision 2.4 90/06/02 15:11:24 rpd
117 * New vm_map_pageable, with user_wired_count.
118 * [90/03/26 23:15:58 rpd]
119 *
120 * Revision 2.3 90/02/22 20:06:19 dbg
121 * Combine fields in vm_map and vm_map_copy into a vm_map_header
122 * structure.
123 * [90/01/29 dbg]
124 *
125 * Add changes from mainline:
126 *
127 * Added documentation for exported routines.
128 * Add vm_map_t->wait_for_space field.
129 * Add vm_map_copy_t type, associated routine declarations, and
130 * documentation.
131 * Introduced vm_map_links, which contains those map entry fields
132 * used in the map structure.
133 * [89/08/31 21:13:56 rpd]
134 *
135 * Optimization from NeXT: is_a_map, is_sub_map, copy_on_write,
136 * needs_copy are now bit-fields.
137 * [89/08/19 23:44:53 rpd]
138 *
139 * Revision 2.2 90/01/22 23:09:35 af
140 * Added vm_map_machine_attribute() decl.
141 *
142 * Changes for MACH_KERNEL:
143 * . Added wiring_allowed to map.
144 * [89/04/29 dbg]
145 *
146 * Revision 2.1 89/08/03 16:45:29 rwd
147 * Created.
148 *
149 * Revision 2.9 89/04/18 21:26:14 mwyoung
150 * Reset history. All relevant material is in the documentation
151 * here, and in the implementation file ("vm/vm_map.c").
152 * [89/04/18 mwyoung]
153 *
154 */
155 /*
156 * File: vm/vm_map.h
157 * Author: Avadis Tevanian, Jr., Michael Wayne Young
158 * Date: 1985
159 *
160 * Virtual memory map module definitions.
161 *
162 * Contributors:
163 * avie, dlb, mwyoung
164 */
165
166 #ifndef _VM_VM_MAP_H_
167 #define _VM_VM_MAP_H_
168
169 #include <net_atm.h>
170
171 #include <mach/kern_return.h>
172 #include <mach/boolean.h>
173 #include <mach/machine/vm_types.h>
174 #include <mach/vm_attributes.h>
175 #include <mach/vm_inherit.h>
176 #include <mach/vm_prot.h>
177 #include <vm/pmap.h>
178 #include <vm/vm_object.h>
179 #include <vm/vm_page.h>
180 #include <kern/lock.h>
181 #include <kern/macro_help.h>
182 #include <kern/sched_prim.h>
183
184 /*
185 * Types defined:
186 *
187 * vm_map_t the high-level address map data structure.
188 * vm_map_entry_t an entry in an address map.
189 * vm_map_version_t a timestamp of a map, for use with vm_map_lookup
190 * vm_map_copy_t represents memory copied from an address map,
191 * used for inter-map copy operations
192 */
193
194 /*
195 * Type: vm_map_object_t [internal use only]
196 *
197 * Description:
198 * The target of an address mapping, either a virtual
199 * memory object or a sub map (of the kernel map).
200 */
/*
 *	The target of a single mapping: either a virtual memory object
 *	or a submap.  The is_sub_map bit in the enclosing vm_map_entry
 *	records which arm of the union is valid.
 */
typedef union vm_map_object {
	struct vm_object	*vm_object;	/* virtual memory object */
	struct vm_map		*sub_map;	/* belongs to another map */
} vm_map_object_t;
205
206 /*
207 * Type: vm_map_entry_t [internal use only]
208 *
209 * Description:
210 * A single mapping within an address map.
211 *
212 * Implementation:
213 * Address map entries consist of start and end addresses,
214 * a VM object (or sub map) and offset into that object,
215 * and user-exported inheritance and protection information.
216 * Control information for virtual copy operations is also
217 * stored in the address map entry.
218 */
/*
 *	Doubly-linked list linkage plus the address range it covers.
 *	Shared between map entries and the map/copy headers, so a
 *	header can act as the sentinel entry of the list
 *	(see vm_map_to_entry / vm_map_copy_to_entry).
 */
struct vm_map_links {
	struct vm_map_entry	*prev;		/* previous entry */
	struct vm_map_entry	*next;		/* next entry */
	vm_offset_t		start;		/* start address */
	vm_offset_t		end;		/* end address */
};
225
/*
 *	A single mapping within an address map: an address range,
 *	the object (or submap) backing it, and per-range protection,
 *	inheritance and wiring state.
 */
struct vm_map_entry {
	struct vm_map_links	links;		/* links to other entries */
#define vme_prev		links.prev	/* shorthand field aliases */
#define vme_next		links.next
#define vme_start		links.start
#define vme_end			links.end
	union vm_map_object	object;		/* object I point to */
	vm_offset_t		offset;		/* offset into object */
	unsigned int
	/* boolean_t */		is_shared:1,	/* region is shared */
	/* boolean_t */		is_sub_map:1,	/* Is "object" a submap? */
	/* boolean_t */		in_transition:1, /* Entry being changed */
	/* boolean_t */		needs_wakeup:1,	/* Waiters on in_transition */
	/* Only used when object is a vm_object: */
	/* boolean_t */		needs_copy:1;	/* does object need to be
						   copied on first write? */
	/* Only in task maps: */
	vm_prot_t		protection;	/* protection code */
	vm_prot_t		max_protection;	/* maximum protection */
	vm_inherit_t		inheritance;	/* inheritance on fork */
	unsigned short		wired_count;	/* can be paged if = 0 */
	unsigned short		user_wired_count; /* for vm_wire */
#if	NET_ATM
	struct vm_map_entry *projected_on;	/* 0 for normal map entry
	   or persistent kernel map projected buffer entry;
	   -1 for non-persistent kernel map projected buffer entry;
	   pointer to corresponding kernel map entry for user map
	   projected buffer entry */
#endif	/* NET_ATM */
};

typedef struct vm_map_entry	*vm_map_entry_t;

#define VM_MAP_ENTRY_NULL	((vm_map_entry_t) 0)
260
261 /*
262 * Type: struct vm_map_header
263 *
264 * Description:
265 * Header for a vm_map and a vm_map_copy.
266 */
/*
 *	Common header for a vm_map and an entry-list vm_map_copy:
 *	the sentinel links (first/last entry, min/max address) and
 *	bookkeeping for the entry list.
 */
struct vm_map_header {
	struct vm_map_links	links;		/* first, last, min, max */
	int			nentries;	/* Number of entries */
	boolean_t		entries_pageable;
						/* are map entries pageable? */
};
273
274 /*
275 * Type: vm_map_t [exported; contents invisible]
276 *
277 * Description:
278 * An address map -- a directory relating valid
279 * regions of a task's address space to the corresponding
280 * virtual memory objects.
281 *
282 * Implementation:
283 * Maps are doubly-linked lists of map entries, sorted
284 * by address. One hint is used to start
285 * searches again from the last successful search,
286 * insertion, or removal. Another hint is used to
287 * quickly find free space.
288 */
/*
 *	An address map: a sorted, doubly-linked list of map entries
 *	plus the physical map that realizes it.  The "hint" entry
 *	caches the last successful lookup; "first_free" caches where
 *	free space was last found.
 */
typedef struct vm_map {
	lock_data_t	lock;		/* Lock for map data */
	struct vm_map_header	hdr;	/* Map entry header */
#define min_offset		hdr.links.start	/* start of range */
#define max_offset		hdr.links.end	/* end of range */
	pmap_t		pmap;		/* Physical map */
	vm_size_t	size;		/* virtual size */
	int		ref_count;	/* Reference count */
	decl_simple_lock_data(,	ref_lock)	/* Lock for ref_count field */
	vm_map_entry_t	hint;		/* hint for quick lookups */
	decl_simple_lock_data(,	hint_lock)	/* lock for hint storage */
	vm_map_entry_t	first_free;	/* First free space hint */
	boolean_t	wait_for_space;	/* Should callers wait
					   for space? */
	boolean_t	wiring_required;/* All memory wired? */
	unsigned int	timestamp;	/* Version number, bumped on each
					   write-lock (see vm_map_lock) */
} *vm_map_t;

#define VM_MAP_NULL	((vm_map_t) 0)

/*
 *	List traversal helpers.  vm_map_to_entry yields the sentinel
 *	"entry" embedded in the header, used as the loop terminator.
 */
#define	vm_map_to_entry(map)	((struct vm_map_entry *) &(map)->hdr.links)
#define vm_map_first_entry(map)	((map)->hdr.links.next)
#define vm_map_last_entry(map)	((map)->hdr.links.prev)
312
313 /*
314 * Type: vm_map_version_t [exported; contents invisible]
315 *
316 * Description:
317 * Map versions may be used to quickly validate a previous
318 * lookup operation.
319 *
320 * Usage note:
321 * Because they are bulky objects, map versions are usually
322 * passed by reference.
323 *
324 * Implementation:
325 * Just a timestamp for the main map.
326 */
/*
 *	Snapshot of a map's timestamp, taken at lookup time and later
 *	checked by vm_map_verify to revalidate the lookup cheaply.
 */
typedef struct vm_map_version {
	unsigned int	main_timestamp;	/* copy of map->timestamp */
} vm_map_version_t;
330
331 /*
332 * Type: vm_map_copy_t [exported; contents invisible]
333 *
334 * Description:
335 * A map copy object represents a region of virtual memory
336 * that has been copied from an address map but is still
337 * in transit.
338 *
339 * A map copy object may only be used by a single thread
340 * at a time.
341 *
342 * Implementation:
343 * There are three formats for map copy objects.
344 * The first is very similar to the main
345 * address map in structure, and as a result, some
346 * of the internal maintenance functions/macros can
347 * be used with either address maps or map copy objects.
348 *
349 * The map copy object contains a header links
350 * entry onto which the other entries that represent
351 * the region are chained.
352 *
353 * The second format is a single vm object. This is used
354 * primarily in the pageout path. The third format is a
355 * list of vm pages. An optional continuation provides
356 * a hook to be called to obtain more of the memory,
357 * or perform other operations. The continuation takes 2
358 * arguments: a saved argument buffer and a pointer to a
359 * new vm_map_copy (returned).
360 */
361
/* Capacity of a page-list copy object; larger on iPSC hardware. */
#if	iPSC386 || iPSC860
#define	VM_MAP_COPY_PAGE_LIST_MAX	64
#else	/* iPSC386 || iPSC860 */
#define	VM_MAP_COPY_PAGE_LIST_MAX	8
#endif	/* iPSC386 || iPSC860 */

typedef struct vm_map_copy *vm_map_copy_t;	/* forward declaration */

/*
 *	Function to be called as continuation.
 *	The first argument is a generic pointer:
 *	it currently may be a vm_map_copy_t or
 *	a vm_map_copyin_args_t (below).  The second argument
 *	receives the new copy object produced (may be passed as
 *	null to abort -- see vm_map_copy_abort_cont).
 */
typedef kern_return_t (*vm_map_copyin_cont_t)(void *, vm_map_copy_t *);
377
/*
 *	In-transit copy of a region of virtual memory.  The "type"
 *	field selects which arm of the union is valid; the cpy_*
 *	macros below provide uniform access.
 */
struct vm_map_copy {
	int			type;	/* selects the union arm below */
#define VM_MAP_COPY_ENTRY_LIST	1
#define VM_MAP_COPY_OBJECT	2
#define VM_MAP_COPY_PAGE_LIST	3
	vm_offset_t		offset;	/* offset of region in source map */
	vm_size_t		size;	/* size of the copied region */
	union {
	    struct vm_map_header	hdr;	/* ENTRY_LIST */
	    struct {				/* OBJECT */
		vm_object_t		object;
	    } c_o;
	    struct {				/* PAGE_LIST */
		vm_page_t		page_list[VM_MAP_COPY_PAGE_LIST_MAX];
		int			npages;
		vm_map_copyin_cont_t	cont;	/* fetches more pages; 0 if none */
		char			*cont_args; /* opaque args for cont */
	    } c_p;
	} c_u;
};

/* Accessors for the union arms; valid only for the matching "type". */
#define cpy_hdr			c_u.hdr

#define cpy_object		c_u.c_o.object

#define cpy_page_list		c_u.c_p.page_list
#define cpy_npages		c_u.c_p.npages
#define cpy_cont		c_u.c_p.cont
#define cpy_cont_args		c_u.c_p.cont_args

#define VM_MAP_COPY_NULL	((vm_map_copy_t) 0)
409
410 /*
411 * Useful macros for entry list copy objects
412 */
413
/*
 *	Entry-list traversal helpers, parallel to vm_map_to_entry and
 *	friends: the header's links double as the sentinel entry.
 */
#define vm_map_copy_to_entry(copy) \
		((struct vm_map_entry *) &(copy)->cpy_hdr.links)
#define vm_map_copy_first_entry(copy) \
		((copy)->cpy_hdr.links.next)
#define vm_map_copy_last_entry(copy) \
		((copy)->cpy_hdr.links.prev)
420
421 /*
422 * Continuation macros for page list copy objects
423 */
424
/*
 *	Continuation macros for page list copy objects.
 *
 *	vm_map_copy_invoke_cont:	discard the pages already held by
 *		old_copy, then call its continuation to produce more of
 *		the region in *(new_copy); the continuation's return code
 *		lands in *(result).  The consumed continuation is cleared.
 *	vm_map_copy_invoke_extend_cont:	same, but without discarding
 *		old_copy's pages, so the current copy is unaffected.
 *	vm_map_copy_abort_cont:		discard the pages and notify the
 *		continuation that no more memory is wanted (null result
 *		pointer); both cont and cont_args are cleared.
 *	vm_map_copy_has_cont:		true if a continuation remains.
 *
 *	All macro parameters are parenthesized in the expansion, so
 *	arbitrary expressions may be passed (result and new_copy must
 *	be pointer-valued).  Note the asymmetry: only abort clears
 *	cpy_cont_args -- after invoke, the args still belong to the
 *	(now consumed) continuation.
 */

#define	vm_map_copy_invoke_cont(old_copy, new_copy, result)		\
MACRO_BEGIN								\
	vm_map_copy_page_discard(old_copy);				\
	*(result) = (*((old_copy)->cpy_cont))((old_copy)->cpy_cont_args, \
					      (new_copy));		\
	(old_copy)->cpy_cont = (vm_map_copyin_cont_t) 0;		\
MACRO_END

#define	vm_map_copy_invoke_extend_cont(old_copy, new_copy, result)	\
MACRO_BEGIN								\
	*(result) = (*((old_copy)->cpy_cont))((old_copy)->cpy_cont_args, \
					      (new_copy));		\
	(old_copy)->cpy_cont = (vm_map_copyin_cont_t) 0;		\
MACRO_END

#define	vm_map_copy_abort_cont(old_copy)				\
MACRO_BEGIN								\
	vm_map_copy_page_discard(old_copy);				\
	(*((old_copy)->cpy_cont))((old_copy)->cpy_cont_args,		\
				  (vm_map_copy_t *) 0);			\
	(old_copy)->cpy_cont = (vm_map_copyin_cont_t) 0;		\
	(old_copy)->cpy_cont_args = (char *) 0;				\
MACRO_END

#define	vm_map_copy_has_cont(copy)					\
    (((copy)->cpy_cont) != (vm_map_copyin_cont_t) 0)
451
452 /*
453 * Continuation structures for vm_map_copyin_page_list.
454 */
/*
 *	Continuation structures for vm_map_copyin_page_list:
 *	the saved arguments describing the remainder of the region
 *	still to be copied, plus an optional range to destroy.
 */
typedef struct {
	vm_map_t	map;		/* source map */
	vm_offset_t	src_addr;	/* remaining region to copy */
	vm_size_t	src_len;
	vm_offset_t	destroy_addr;	/* range to destroy in source */
	vm_size_t	destroy_len;
	boolean_t	steal_pages;	/* take pages rather than copy? */
} vm_map_copyin_args_data_t, *vm_map_copyin_args_t;

#define	VM_MAP_COPYIN_ARGS_NULL	((vm_map_copyin_args_t) 0)
465
466 /*
467 * Macros: vm_map_lock, etc. [internal use only]
468 * Description:
469 * Perform locking on the data portion of a map.
470 */
471
/* Initialize the map lock (sleepable) and reset the version number. */
#define	vm_map_lock_init(map)			\
MACRO_BEGIN					\
	lock_init(&(map)->lock, TRUE);		\
	(map)->timestamp = 0;			\
MACRO_END

/*
 *	Taking the write lock bumps the timestamp, invalidating any
 *	outstanding vm_map_version_t snapshots (see vm_map_verify).
 */
#define	vm_map_lock(map)			\
MACRO_BEGIN					\
	lock_write(&(map)->lock);		\
	(map)->timestamp++;			\
MACRO_END

#define	vm_map_unlock(map)	lock_write_done(&(map)->lock)
#define	vm_map_lock_read(map)	lock_read(&(map)->lock)
#define	vm_map_unlock_read(map)	lock_read_done(&(map)->lock)
#define vm_map_lock_write_to_read(map) \
		lock_write_to_read(&(map)->lock)
/*
 *	Upgrade read lock to write lock.  The timestamp is bumped only
 *	when lock_read_to_write returns FALSE (the upgrade held the
 *	lock throughout); the overall expression keeps that return value.
 */
#define vm_map_lock_read_to_write(map) \
		(lock_read_to_write(&(map)->lock) || (((map)->timestamp++), 0))
#define vm_map_lock_set_recursive(map) \
		lock_set_recursive(&(map)->lock)
#define vm_map_lock_clear_recursive(map) \
		lock_clear_recursive(&(map)->lock)
495
496 /*
497 * Exported procedures that operate on vm_map_t.
498 */
499
500 extern vm_offset_t kentry_data;
501 extern vm_offset_t kentry_data_size;
502 extern int kentry_count;
503 extern void vm_map_init(void); /* Initialize the module */
504
505 extern vm_map_t vm_map_create( /* Create an empty map */
506 pmap_t pmap,
507 vm_offset_t min,
508 vm_offset_t max,
509 boolean_t pageable);
510
511 extern vm_map_t vm_map_fork(vm_map_t); /* Create a map in the image
512 * of an existing map */
513
514 extern void vm_map_reference(vm_map_t);
515 /* Gain a reference to
516 * an existing map */
517 extern void vm_map_deallocate(vm_map_t);
518 /* Lose a reference */
519
520 extern kern_return_t vm_map_enter( /* Enter a mapping */
521 vm_map_t map,
522 vm_offset_t *address,
523 vm_size_t size,
524 vm_offset_t mask,
525 boolean_t anywhere,
526 vm_object_t object,
527 vm_offset_t offset,
528 boolean_t needs_copy,
529 vm_prot_t cur_protection,
530 vm_prot_t max_protection,
531 vm_inherit_t inheritance);
532
533 extern kern_return_t vm_map_find_entry( /* Find space for a mapping */
534 vm_map_t map, /* and add the entry */
535 vm_offset_t *address,
536 vm_size_t size,
537 vm_offset_t mask,
538 vm_object_t object,
539 vm_map_entry_t *o_entry);
540
541 extern kern_return_t vm_map_remove( /* Deallocate a region */
542 vm_map_t map,
543 vm_offset_t start,
544 vm_offset_t end);
545
546 extern kern_return_t vm_map_protect( /* Change protection */
547 vm_map_t map,
548 vm_offset_t start,
549 vm_offset_t end,
550 vm_prot_t new_prot,
551 boolean_t set_max);
552
553 extern kern_return_t vm_map_inherit( /* Change inheritance */
554 vm_map_t map,
555 vm_offset_t start,
556 vm_offset_t end,
557 vm_inherit_t new_inheritance);
558
559 extern void vm_map_print(vm_map_t); /* Debugging: print a map */
560
561 extern kern_return_t vm_map_lookup( /* Look up an address */
562 vm_map_t *var_map,
563 vm_offset_t addr,
564 vm_prot_t fault_type,
565 vm_map_version_t *version,
566 vm_object_t *object,
567 vm_offset_t *offset,
568 vm_prot_t *prot,
569 boolean_t *wired);
570
571 extern boolean_t vm_map_verify(vm_map_t, vm_map_version_t *);
572 /* Verify that a previous
573 * lookup is still valid */
574
575 /* vm_map_verify_done is now a macro -- see below */
576
577 extern kern_return_t vm_map_copyin( /* Make a copy of a region */
578 vm_map_t src_map,
579 vm_offset_t src_addr,
580 vm_size_t len,
581 boolean_t src_destroy,
582 vm_map_copy_t *copy_result);
583
584 extern kern_return_t vm_map_copyin_page_list(
585 /* Make a copy of a region
586 * using a page list copy */
587 vm_map_t src_map,
588 vm_offset_t src_addr,
589 vm_size_t len,
590 boolean_t src_destroy,
591 boolean_t steal_pages,
592 vm_map_copy_t *copy_result,
593 boolean_t is_cont);
594
595 extern kern_return_t vm_map_copyin_object( /* Turn an object region
596 into a copy */
597 vm_object_t object,
598 vm_offset_t offset,
599 vm_size_t size,
600 vm_map_copy_t *copy_result);
601
602 extern kern_return_t vm_map_copyout( /* Place a copy into a map */
603 vm_map_t dst_map,
604 vm_offset_t *dst_addr,
605 vm_map_copy_t copy);
606
607 extern kern_return_t vm_map_copy_overwrite( /* Overwrite existing memory
608 * with a copy */
609 vm_map_t dst_map,
610 vm_offset_t dst_addr,
611 vm_map_copy_t copy,
612 boolean_t interruptible);
613
614 extern void vm_map_copy_discard(vm_map_copy_t);
615 /* Discard a copy without
616 * using it */
617
618 extern void vm_map_copy_page_discard(vm_map_copy_t);
619 /* Discard the pages in
620 a page-list copy */
621
622 extern kern_return_t vm_map_copy_discard_cont(
623 void * args,
624 vm_map_copy_t *result); /* Page list continuation
625 * version of previous */
626
627 vm_map_copy_t vm_map_copy_copy(vm_map_copy_t);
628 /* Move the information in
629 a vm_map_copy_t to a new
630 one, leaving the old one
631 empty (for deallocation) */
632
633 extern kern_return_t vm_map_machine_attribute(
634 vm_map_t map,
635 vm_offset_t address,
636 vm_size_t size,
637 vm_machine_attribute_t attribute,
638 vm_machine_attribute_val_t *value);
639 /* Add or remove machine-
640 dependent attributes from
641 map regions */
642
/*
 *	Functions implemented as macros
 */
#define		vm_map_min(map)	((map)->min_offset)
						/* Lowest valid address in
						 * a map */

#define		vm_map_max(map)	((map)->max_offset)
						/* Highest valid address */

#define		vm_map_pmap(map)	((map)->pmap)
						/* Physical map associated
						 * with this address map */

/* The version argument is unused; completion just drops the read lock. */
#define		vm_map_verify_done(map, version)    (vm_map_unlock_read(map))
						/* Operation that required
						 * a verified lookup is
						 * now complete */
/*
 *	Pageability functions.  Includes macro to preserve old interface:
 *	the kernel-wire and user-wire (vm_wire) entry points share one
 *	implementation, distinguished by the user_wire flag.
 */
extern kern_return_t	vm_map_pageable_common(
	vm_map_t	map,
	vm_offset_t	start,
	vm_offset_t	end,
	vm_prot_t	access,
	boolean_t	user_wire);

#define vm_map_pageable(map, s, e, access) \
		vm_map_pageable_common(map, s, e, access, FALSE)

#define vm_map_pageable_user(map, s, e, access) \
		vm_map_pageable_common(map, s, e, access, TRUE)
676
677 /*
678 * Submap object. Must be used to create memory to be put
679 * in a submap by vm_map_submap.
680 */
681 extern vm_object_t vm_submap_object;
682
683 kern_return_t vm_map_submap(
684 vm_map_t map,
685 vm_offset_t start,
686 vm_offset_t end,
687 vm_map_t submap); /* mark range as handled by
688 a submap */
689
690 /*
691 * Lookup a map entry. Map must be locked.
692 *
693 * Exported only for use by vm_kern.c.
694 */
695 boolean_t vm_map_lookup_entry(
696 vm_map_t map,
697 vm_offset_t address,
698 vm_map_entry_t *entry); /* OUT */
699
700
/*
 *	Wait and wakeup macros for in_transition map entries.
 *
 *	vm_map_entry_wait must be entered with the map write-locked;
 *	it queues the wait on the map header as the event, drops the
 *	map lock, and blocks.  The caller does not hold the lock on
 *	return and must re-acquire it and re-lookup the entry.
 */
#define	vm_map_entry_wait(map, interruptible)		\
MACRO_BEGIN						\
	assert_wait((event_t)&(map)->hdr, interruptible); \
	vm_map_unlock(map);				\
	thread_block(CONTINUE_NULL);			\
MACRO_END

/* Wake all threads waiting on this map's in_transition entries. */
#define	vm_map_entry_wakeup(map)	thread_wakeup((event_t)&(map)->hdr)
712
713 #endif /* _VM_VM_MAP_H_ */
Cache object: f6f41d4aaf966b103e83e104c5351fe5
|