FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_map.h
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.h	8.9 (Berkeley) 5/17/95
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_map.h,v 1.54.2.5 2003/01/13 22:51:17 dillon Exp $
 */

/*
 * Virtual memory map module definitions.
 */

#ifndef	_VM_VM_MAP_H_
#define	_VM_VM_MAP_H_

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif
#ifdef _KERNEL
#ifndef _SYS_KERNEL_H_
#include <sys/kernel.h>		/* ticks */
#endif
#endif
#ifndef _SYS_TREE_H_
#include <sys/tree.h>
#endif
#ifndef _SYS_SYSREF_H_
#include <sys/sysref.h>
#endif
#ifndef _SYS_LOCK_H_
#include <sys/lock.h>
#endif
#ifndef _SYS_VKERNEL_H_
#include <sys/vkernel.h>
#endif
#ifndef _VM_VM_H_
#include <vm/vm.h>
#endif
#ifndef _MACHINE_PMAP_H_
#include <machine/pmap.h>
#endif
#ifndef _VM_VM_OBJECT_H_
#include <vm/vm_object.h>
#endif
#ifndef _SYS_NULL_H_
#include <sys/_null.h>
#endif

struct vm_map_rb_tree;
RB_PROTOTYPE(vm_map_rb_tree, vm_map_entry, rb_entry, rb_vm_map_compare);

/*
 * Types defined:
 *
 *	vm_map_t		the high-level address map data structure.
 *	vm_map_entry_t		an entry in an address map.
 */

typedef u_int vm_flags_t;
typedef u_int vm_eflags_t;

/*
 * Objects which live in maps may be either VM objects, or
 * another map (called a "sharing map") which denotes read-write
 * sharing with other maps.
 */
union vm_map_object {
	struct vm_object *vm_object;	/* a VM object */
	struct vm_map *sub_map;		/* belongs to another map */
};

union vm_map_aux {
	vm_offset_t avail_ssize;	/* amt can grow if this is a stack */
	vpte_t master_pde;		/* virtual page table root */
};

/*
 * Address map entries consist of start and end addresses,
 * a VM object (or sharing map) and offset into that object,
 * and user-exported inheritance and protection information.
 * Also included is control information for virtual copy operations.
 *
 * When used with MAP_STACK, avail_ssize is used to determine the
 * limits of stack growth.
 *
 * When used with VM_MAPTYPE_VPAGETABLE, avail_ssize stores the
 * page directory index.
 */
struct vm_map_entry {
	struct vm_map_entry *prev;	/* previous entry */
	struct vm_map_entry *next;	/* next entry */
	RB_ENTRY(vm_map_entry) rb_entry;
	vm_offset_t start;		/* start address */
	vm_offset_t end;		/* end address */
	union vm_map_aux aux;		/* auxiliary data */
	union vm_map_object object;	/* object I point to */
	vm_ooffset_t offset;		/* offset into object */
	vm_eflags_t eflags;		/* map entry flags */
	vm_maptype_t maptype;		/* type of VM mapping */
	vm_prot_t protection;		/* protection code */
	vm_prot_t max_protection;	/* maximum protection */
	vm_inherit_t inheritance;	/* inheritance */
	int wired_count;		/* can be paged if = 0 */
};
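
/*
 * Illustrative sketch (not part of the original header): how a consumer
 * typically interprets the object union of a vm_map_entry.  A submap
 * entry uses object.sub_map, while normal and virtual-page-table
 * mappings use object.vm_object.  The helper name is hypothetical and
 * the VM_MAPTYPE_SUBMAP constant is assumed to come from <vm/vm.h>.
 */
#if 0
static void
example_print_entry(vm_map_entry_t entry)
{
	switch (entry->maptype) {
	case VM_MAPTYPE_SUBMAP:
		kprintf("submap %p\n", entry->object.sub_map);
		break;
	case VM_MAPTYPE_NORMAL:
	case VM_MAPTYPE_VPAGETABLE:
		kprintf("object %p prot %x/%x\n", entry->object.vm_object,
			entry->protection, entry->max_protection);
		break;
	default:
		break;
	}
}
#endif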

#define MAP_ENTRY_NOSYNC		0x0001
#define MAP_ENTRY_STACK			0x0002
#define MAP_ENTRY_COW			0x0004
#define MAP_ENTRY_NEEDS_COPY		0x0008
#define MAP_ENTRY_NOFAULT		0x0010
#define MAP_ENTRY_USER_WIRED		0x0020

#define MAP_ENTRY_BEHAV_NORMAL		0x0000	/* default behavior */
#define MAP_ENTRY_BEHAV_SEQUENTIAL	0x0040	/* expect sequential access */
#define MAP_ENTRY_BEHAV_RANDOM		0x0080	/* expect random access */
#define MAP_ENTRY_BEHAV_RESERVED	0x00C0	/* future use */

#define MAP_ENTRY_BEHAV_MASK		0x00C0

#define MAP_ENTRY_IN_TRANSITION		0x0100	/* entry being changed */
#define MAP_ENTRY_NEEDS_WAKEUP		0x0200	/* waiters in transition */
#define MAP_ENTRY_NOCOREDUMP		0x0400	/* don't include in a core */
#define MAP_ENTRY_KSTACK		0x0800	/* guarded kernel stack */

/*
 * flags for vm_map_[un]clip_range()
 */
#define MAP_CLIP_NO_HOLES		0x0001

/*
 * This reserve count for vm_map_entry_reserve() should cover all nominal
 * single-insertion operations, including any necessary clipping.
 */
#define MAP_RESERVE_COUNT	4
#define MAP_RESERVE_SLOP	32

static __inline u_char
vm_map_entry_behavior(struct vm_map_entry *entry)
{
	return entry->eflags & MAP_ENTRY_BEHAV_MASK;
}

static __inline void
vm_map_entry_set_behavior(struct vm_map_entry *entry, u_char behavior)
{
	entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
		(behavior & MAP_ENTRY_BEHAV_MASK);
}
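
/*
 * Illustrative sketch (not part of the original header): the accessors
 * above keep the two behavior bits separate from the rest of the eflags.
 * A caller such as the madvise path might use them as follows; the
 * function name below is hypothetical.
 */
#if 0
static void
example_mark_sequential(vm_map_entry_t entry)
{
	if (vm_map_entry_behavior(entry) != MAP_ENTRY_BEHAV_SEQUENTIAL)
		vm_map_entry_set_behavior(entry, MAP_ENTRY_BEHAV_SEQUENTIAL);
}
#endif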

/*
 * Maps are doubly-linked lists of map entries, kept sorted by address.
 * A single hint is provided to start searches again from the last
 * successful search, insertion, or removal.
 *
 * NOTE: The lock structure cannot be the first element of vm_map
 * because this can result in a running lockup between two or more
 * system processes trying to kmem_alloc_wait(): kmem_alloc_wait()
 * and the corresponding free path tsleep/wakeup on 'map', while the
 * underlying lockmgr lock also sleeps and wakes up on 'map'.  The
 * lockup occurs when the map fills up (the 'exec' map, for example).
 *
 * NOTE: The vm_map structure can be hard-locked with the lockmgr lock
 * or soft-serialized with the token, or both.
 */
struct vm_map {
	struct vm_map_entry header;	/* List of entries */
	RB_HEAD(vm_map_rb_tree, vm_map_entry) rb_root;
	struct lock lock;		/* Lock for map data */
	int nentries;			/* Number of entries */
	vm_size_t size;			/* virtual size */
	u_char system_map;		/* Am I a system map? */
	vm_map_entry_t hint;		/* hint for quick lookups */
	unsigned int timestamp;		/* Version number */
	vm_map_entry_t first_free;	/* First free space hint */
	vm_flags_t flags;		/* flags for this vm_map */
	struct pmap *pmap;		/* Physical map */
	u_int president_cache;		/* Remember president count */
	u_int president_ticks;		/* Save ticks for cache */
	struct lwkt_token token;	/* Soft serializer */
#define	min_offset	header.start
#define max_offset	header.end
};
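
/*
 * Illustrative sketch (not part of the original header): the entry list
 * is circular and anchored at 'header', so a full scan runs from
 * header.next until it wraps back to &map->header, as
 * vmspace_president_count() below does.  The function name is
 * hypothetical and the caller is assumed to hold the map lock.
 */
#if 0
static int
example_count_wired(vm_map_t map)
{
	vm_map_entry_t entry;
	int wired = 0;

	for (entry = map->header.next; entry != &map->header;
	     entry = entry->next) {
		if (entry->wired_count)
			++wired;
	}
	return (wired);
}
#endif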

/*
 * vm_flags_t values
 */
#define MAP_WIREFUTURE		0x0001	/* wire all future pages */

/*
 * Shareable process virtual address space.
 *
 * Refd pointers from vmresident, proc
 */
struct vmspace {
	struct vm_map vm_map;	/* VM address map */
	struct pmap vm_pmap;	/* private physical map */
	int vm_flags;
	caddr_t vm_shm;		/* SYS5 shared memory private data XXX */
/* we copy from vm_startcopy to the end of the structure on fork */
#define vm_startcopy vm_rssize
	segsz_t vm_rssize;	/* current resident set size in pages */
	segsz_t vm_swrss;	/* resident set size before last swap */
	segsz_t vm_tsize;	/* text size (pages) XXX */
	segsz_t vm_dsize;	/* data size (pages) XXX */
	segsz_t vm_ssize;	/* stack size (pages) */
	caddr_t vm_taddr;	/* user virtual address of text XXX */
	caddr_t vm_daddr;	/* user virtual address of data XXX */
	caddr_t vm_maxsaddr;	/* user VA at max stack growth */
	caddr_t vm_minsaddr;	/* user VA at max stack growth */
#define vm_endcopy	vm_exitingcnt
	int	vm_exitingcnt;		/* exit/wait context reaping */
	int	vm_unused01;		/* for future fields */
	int	vm_pagesupply;
	u_int	vm_holdcount;
	void	*vm_unused02;		/* for future fields */
	struct sysref vm_sysref;	/* sysref, refcnt, etc */
};
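
/*
 * Illustrative sketch (not part of the original header): the
 * vm_startcopy/vm_endcopy markers bound the block of statistics and
 * address fields that is copied from parent to child across fork,
 * while the map, pmap and sysref are set up separately.  Whether the
 * copy really stops at vm_endcopy is an assumption here; the helper
 * name is hypothetical.
 */
#if 0
static void
example_copy_statistics(struct vmspace *dst, const struct vmspace *src)
{
	bcopy(&src->vm_startcopy, &dst->vm_startcopy,
	      (caddr_t)&dst->vm_endcopy - (caddr_t)&dst->vm_startcopy);
}
#endif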

#define VMSPACE_EXIT1		0x0001	/* partial exit */
#define VMSPACE_EXIT2		0x0002	/* full exit */

/*
 * Resident executable holding structure.  A user program can take a snapshot
 * of just its VM address space (typically done just after dynamic link
 * libraries have completed loading) and register it as a resident
 * executable associated with the program binary's vnode, which is also
 * locked into memory.  Future execs of the vnode will start with a copy
 * of the resident vmspace instead of running the binary from scratch,
 * avoiding both the kernel ELF loader *AND* all shared library mapping and
 * relocation code, and will call a different entry point (the stack pointer
 * is reset to the top of the stack) supplied when the vmspace was registered.
 */
struct vmresident {
	struct vnode	*vr_vnode;	/* associated vnode */
	TAILQ_ENTRY(vmresident) vr_link;	/* linked list of res sts */
	struct vmspace	*vr_vmspace;	/* vmspace to fork */
	intptr_t	vr_entry_addr;	/* registered entry point */
	struct sysentvec *vr_sysent;	/* system call vects */
	int		vr_id;		/* registration id */
	int		vr_refs;	/* temporary refs */
};

#ifdef _KERNEL
/*
 *	Macros:		vm_map_lock, etc.
 *	Function:
 *		Perform locking on the data portion of a map.  Note that
 *		these macros mimic procedure calls returning void.  The
 *		semicolon is supplied by the user of these macros, not
 *		by the macros themselves.  The macros can safely be used
 *		as unbraced elements in a higher level statement.
 */

#define ASSERT_VM_MAP_LOCKED(map)	KKASSERT(lockowned(&(map)->lock))

#ifdef DIAGNOSTIC
/* #define MAP_LOCK_DIAGNOSTIC 1 */
#ifdef MAP_LOCK_DIAGNOSTIC
#define	vm_map_lock(map) \
	do { \
		kprintf ("locking map LK_EXCLUSIVE: 0x%x\n", map); \
		if (lockmgr(&(map)->lock, LK_EXCLUSIVE) != 0) { \
			panic("vm_map_lock: failed to get lock"); \
		} \
		(map)->timestamp++; \
	} while(0)
#else
#define	vm_map_lock(map) \
	do { \
		if (lockmgr(&(map)->lock, LK_EXCLUSIVE) != 0) { \
			panic("vm_map_lock: failed to get lock"); \
		} \
		(map)->timestamp++; \
	} while(0)
#endif
#else
#define	vm_map_lock(map) \
	do { \
		lockmgr(&(map)->lock, LK_EXCLUSIVE); \
		(map)->timestamp++; \
	} while(0)
#endif /* DIAGNOSTIC */

#if defined(MAP_LOCK_DIAGNOSTIC)
#define	vm_map_unlock(map) \
	do { \
		kprintf ("locking map LK_RELEASE: 0x%x\n", map); \
		lockmgr(&(map)->lock, LK_RELEASE); \
	} while (0)
#define	vm_map_lock_read(map) \
	do { \
		kprintf ("locking map LK_SHARED: 0x%x\n", map); \
		lockmgr(&(map)->lock, LK_SHARED); \
	} while (0)
#define	vm_map_unlock_read(map) \
	do { \
		kprintf ("locking map LK_RELEASE: 0x%x\n", map); \
		lockmgr(&(map)->lock, LK_RELEASE); \
	} while (0)
#else
#define	vm_map_unlock(map) \
	lockmgr(&(map)->lock, LK_RELEASE)
#define	vm_map_lock_read(map) \
	lockmgr(&(map)->lock, LK_SHARED)
#define	vm_map_unlock_read(map) \
	lockmgr(&(map)->lock, LK_RELEASE)
#endif

#define vm_map_lock_read_try(map) \
	lockmgr(&(map)->lock, LK_SHARED | LK_NOWAIT)

static __inline__ int
vm_map_lock_read_to(vm_map_t map)
{
	int error;

#if defined(MAP_LOCK_DIAGNOSTIC)
	kprintf ("locking map LK_SHARED: 0x%x\n", map);
#endif
	error = lockmgr(&(map)->lock, LK_SHARED | LK_TIMELOCK);
	return error;
}

static __inline__ int
vm_map_lock_upgrade(vm_map_t map) {
	int error;
#if defined(MAP_LOCK_DIAGNOSTIC)
	kprintf("locking map LK_EXCLUPGRADE: 0x%x\n", map);
#endif
	error = lockmgr(&map->lock, LK_EXCLUPGRADE);
	if (error == 0)
		map->timestamp++;
	return error;
}

#if defined(MAP_LOCK_DIAGNOSTIC)
#define vm_map_lock_downgrade(map) \
	do { \
		kprintf ("locking map LK_DOWNGRADE: 0x%x\n", map); \
		lockmgr(&(map)->lock, LK_DOWNGRADE); \
	} while (0)
#else
#define vm_map_lock_downgrade(map) \
	lockmgr(&(map)->lock, LK_DOWNGRADE)
#endif
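
/*
 * Illustrative sketch (not part of the original header): the usual
 * pattern is to take the exclusive lock around anything that modifies
 * the entry list and the shared lock around pure lookups; every
 * vm_map_lock() bumps map->timestamp so a caller that dropped the lock
 * can detect intervening changes.  The function name is hypothetical.
 */
#if 0
static int
example_probe_address(vm_map_t map, vm_offset_t addr)
{
	vm_map_entry_t entry;
	int found;

	vm_map_lock_read(map);
	found = vm_map_lookup_entry(map, addr, &entry);
	vm_map_unlock_read(map);
	return (found);
}
#endif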

#endif /* _KERNEL */

/*
 * Functions implemented as macros
 */
#define	vm_map_min(map)		((map)->min_offset)
#define	vm_map_max(map)		((map)->max_offset)
#define	vm_map_pmap(map)	((map)->pmap)

/*
 * Must not block
 */
static __inline struct pmap *
vmspace_pmap(struct vmspace *vmspace)
{
	return &vmspace->vm_pmap;
}

/*
 * Caller must hold the vmspace->vm_map.token
 */
static __inline long
vmspace_resident_count(struct vmspace *vmspace)
{
	return pmap_resident_count(vmspace_pmap(vmspace));
}

/*
 * Calculates the proportional RSS and returns the accrued result.
 * This is a loose value for statistics/display purposes only and
 * will only be updated if we can acquire a non-blocking map lock.
 *
 * (used by userland or the kernel)
 */
static __inline u_int
vmspace_president_count(struct vmspace *vmspace)
{
	vm_map_t map = &vmspace->vm_map;
	vm_map_entry_t cur;
	vm_object_t object;
	u_int count = 0;
	u_int n;

#ifdef _KERNEL
	if (map->president_ticks == ticks / hz || vm_map_lock_read_try(map))
		return(map->president_cache);
#endif

	for (cur = map->header.next; cur != &map->header; cur = cur->next) {
		switch(cur->maptype) {
		case VM_MAPTYPE_NORMAL:
		case VM_MAPTYPE_VPAGETABLE:
			if ((object = cur->object.vm_object) == NULL)
				break;
			if (object->type != OBJT_DEFAULT &&
			    object->type != OBJT_SWAP) {
				break;
			}
			/*
			 * synchronize non-zero case, contents of field
			 * can change at any time due to pmap ops.
			 */
			if ((n = object->agg_pv_list_count) != 0) {
#ifdef _KERNEL
				cpu_ccfence();
#endif
				count += object->resident_page_count / n;
			}
			break;
		default:
			break;
		}
	}
#ifdef _KERNEL
	map->president_cache = count;
	map->president_ticks = ticks / hz;
	vm_map_unlock_read(map);
#endif

	return(count);
}

/*
 * Number of kernel maps and entries to statically allocate, required
 * during boot to bootstrap the VM system.
 */
#define MAX_KMAP	10
#define	MAX_MAPENT	2048	/* required to support up to 64 cpus */

/*
 * Copy-on-write flags for vm_map operations
 */
#define MAP_UNUSED_01		0x0001
#define MAP_COPY_ON_WRITE	0x0002
#define MAP_NOFAULT		0x0004
#define MAP_PREFAULT		0x0008
#define MAP_PREFAULT_PARTIAL	0x0010
#define MAP_DISABLE_SYNCER	0x0020
#define MAP_IS_STACK		0x0040
#define MAP_IS_KSTACK		0x0080
#define MAP_DISABLE_COREDUMP	0x0100
#define MAP_PREFAULT_MADVISE	0x0200	/* from (user) madvise request */
#define MAP_PREFAULT_RELOCK	0x0200

/*
 * vm_fault option flags
 */
#define VM_FAULT_NORMAL		0x00	/* Nothing special */
#define VM_FAULT_CHANGE_WIRING	0x01	/* Change the wiring as appropriate */
#define VM_FAULT_USER_WIRE	0x02	/* Likewise, but for user purposes */
#define VM_FAULT_BURST		0x04	/* Burst fault can be done */
#define VM_FAULT_DIRTY		0x08	/* Dirty the page */
#define VM_FAULT_UNSWAP		0x10	/* Remove backing store from the page */
#define VM_FAULT_BURST_QUICK	0x20	/* Special case shared vm_object */
#define VM_FAULT_WIRE_MASK	(VM_FAULT_CHANGE_WIRING|VM_FAULT_USER_WIRE)

#ifdef _KERNEL

extern struct sysref_class vmspace_sysref_class;

boolean_t vm_map_check_protection (vm_map_t, vm_offset_t, vm_offset_t,
		vm_prot_t, boolean_t);
struct pmap;
struct globaldata;
void vm_map_entry_allocate_object(vm_map_entry_t);
void vm_map_entry_reserve_cpu_init(struct globaldata *gd);
int vm_map_entry_reserve(int);
int vm_map_entry_kreserve(int);
void vm_map_entry_release(int);
void vm_map_entry_krelease(int);
vm_map_t vm_map_create (vm_map_t, struct pmap *, vm_offset_t, vm_offset_t);
int vm_map_delete (vm_map_t, vm_offset_t, vm_offset_t, int *);
int vm_map_find (vm_map_t, vm_object_t, vm_ooffset_t,
		 vm_offset_t *, vm_size_t, vm_size_t,
		 boolean_t, vm_maptype_t,
		 vm_prot_t, vm_prot_t,
		 int);
int vm_map_findspace (vm_map_t, vm_offset_t, vm_size_t, vm_size_t,
		      int, vm_offset_t *);
vm_offset_t vm_map_hint(struct proc *, vm_offset_t, vm_prot_t);
int vm_map_inherit (vm_map_t, vm_offset_t, vm_offset_t, vm_inherit_t);
void vm_map_init (struct vm_map *, vm_offset_t, vm_offset_t, pmap_t);
int vm_map_insert (vm_map_t, int *, vm_object_t, vm_ooffset_t,
		   vm_offset_t, vm_offset_t,
		   vm_maptype_t,
		   vm_prot_t, vm_prot_t,
		   int);
int vm_map_lookup (vm_map_t *, vm_offset_t, vm_prot_t, vm_map_entry_t *, vm_object_t *,
		   vm_pindex_t *, vm_prot_t *, boolean_t *);
void vm_map_lookup_done (vm_map_t, vm_map_entry_t, int);
boolean_t vm_map_lookup_entry (vm_map_t, vm_offset_t, vm_map_entry_t *);
int vm_map_wire (vm_map_t, vm_offset_t, vm_offset_t, int);
int vm_map_unwire (vm_map_t, vm_offset_t, vm_offset_t, boolean_t);
int vm_map_clean (vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t);
int vm_map_protect (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t);
int vm_map_remove (vm_map_t, vm_offset_t, vm_offset_t);
void vm_map_startup (void);
int vm_map_submap (vm_map_t, vm_offset_t, vm_offset_t, vm_map_t);
int vm_map_madvise (vm_map_t, vm_offset_t, vm_offset_t, int, off_t);
void vm_map_simplify_entry (vm_map_t, vm_map_entry_t, int *);
void vm_init2 (void);
int vm_uiomove (vm_map_t, vm_object_t, off_t, int, vm_offset_t, int *);
int vm_map_stack (vm_map_t, vm_offset_t, vm_size_t, int,
		  vm_prot_t, vm_prot_t, int);
int vm_map_growstack (struct proc *p, vm_offset_t addr);
int vmspace_swap_count (struct vmspace *vmspace);
int vmspace_anonymous_count (struct vmspace *vmspace);
void vm_map_set_wired_quick(vm_map_t map, vm_offset_t addr, vm_size_t size, int *);
void vm_map_transition_wait(vm_map_t map);
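
/*
 * Illustrative sketch (not part of the original header): a typical
 * single-insertion sequence reserves map entries up front (see
 * MAP_RESERVE_COUNT above), performs the insertion under the exclusive
 * map lock, and releases the unused reservation afterwards.  The
 * function name and the particular arguments are hypothetical.
 */
#if 0
static int
example_insert_anon(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	int count;
	int error;

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(map);
	error = vm_map_insert(map, &count, NULL, 0,
			      start, end,
			      VM_MAPTYPE_NORMAL,
			      VM_PROT_ALL, VM_PROT_ALL,
			      0);
	vm_map_unlock(map);
	vm_map_entry_release(count);
	return (error);
}
#endif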

#if defined(__x86_64__) && defined(_KERNEL_VIRTUAL)
int vkernel_module_memory_alloc(vm_offset_t *, size_t);
void vkernel_module_memory_free(vm_offset_t, size_t);
#endif

#endif
#endif /* _VM_VM_MAP_H_ */