The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_map.h

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*
    2  * Copyright (c) 1991, 1993
    3  *      The Regents of the University of California.  All rights reserved.
    4  *
    5  * This code is derived from software contributed to Berkeley by
    6  * The Mach Operating System project at Carnegie-Mellon University.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  * 3. All advertising materials mentioning features or use of this software
   17  *    must display the following acknowledgement:
   18  *      This product includes software developed by the University of
   19  *      California, Berkeley and its contributors.
   20  * 4. Neither the name of the University nor the names of its contributors
   21  *    may be used to endorse or promote products derived from this software
   22  *    without specific prior written permission.
   23  *
   24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   34  * SUCH DAMAGE.
   35  *
   36  *      @(#)vm_map.h    8.9 (Berkeley) 5/17/95
   37  *
   38  *
   39  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
   40  * All rights reserved.
   41  *
   42  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
   43  *
   44  * Permission to use, copy, modify and distribute this software and
   45  * its documentation is hereby granted, provided that both the copyright
   46  * notice and this permission notice appear in all copies of the
   47  * software, derivative works or modified versions, and any portions
   48  * thereof, and that both notices appear in supporting documentation.
   49  *
   50  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
   51  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
   52  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
   53  *
   54  * Carnegie Mellon requests users of this software to return to
   55  *
   56  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
   57  *  School of Computer Science
   58  *  Carnegie Mellon University
   59  *  Pittsburgh PA 15213-3890
   60  *
   61  * any improvements or extensions that they make and grant Carnegie the
   62  * rights to redistribute these changes.
   63  *
   64  * $FreeBSD$
   65  */
   66 
   67 /*
   68  *      Virtual memory map module definitions.
   69  */
   70 
   71 #ifndef _VM_MAP_
   72 #define _VM_MAP_
   73 
   74 /*
   75  *      Types defined:
   76  *
   77  *      vm_map_t                the high-level address map data structure.
   78  *      vm_map_entry_t          an entry in an address map.
   79  */
   80 
/* Flag word for a vm_map_entry; holds the MAP_ENTRY_* bits defined below. */
typedef u_int vm_eflags_t;
   82 
   83 /*
   84  *      Objects which live in maps may be either VM objects, or
   85  *      another map (called a "sharing map") which denotes read-write
   86  *      sharing with other maps.
   87  */
   88 
union vm_map_object {
        struct vm_object *vm_object;    /* a VM object backing the entry */
        struct vm_map *sub_map;         /* belongs to another map (see MAP_ENTRY_IS_SUB_MAP) */
};
   93 
   94 /*
   95  *      Address map entries consist of start and end addresses,
   96  *      a VM object (or sharing map) and offset into that object,
   97  *      and user-exported inheritance and protection information.
   98  *      Also included is control information for virtual copy operations.
   99  */
/*
 * One entry in an address map; linked into the map's sorted,
 * doubly-linked entry list via prev/next.  Field order is
 * layout-sensitive — do not reorder.
 */
struct vm_map_entry {
        struct vm_map_entry *prev;      /* previous entry in the map's list */
        struct vm_map_entry *next;      /* next entry in the map's list */
        vm_offset_t start;              /* start address */
        vm_offset_t end;                /* end address */
        vm_offset_t avail_ssize;        /* amt can grow if this is a stack */
        union vm_map_object object;     /* object I point to */
        vm_ooffset_t offset;            /* offset into object */
        vm_eflags_t eflags;             /* map entry flags (MAP_ENTRY_*) */
        /* Only in task maps: */
        vm_prot_t protection;           /* protection code */
        vm_prot_t max_protection;       /* maximum protection */
        vm_inherit_t inheritance;       /* inheritance */
        int wired_count;                /* can be paged if = 0 */
        vm_pindex_t lastr;              /* last read */
};
  116 
/*
 * vm_map_entry eflags bits.  The BEHAV_* values occupy a two-bit
 * subfield (mask MAP_ENTRY_BEHAV_MASK, 0x00C0) rather than being
 * independent flags.
 */
#define MAP_ENTRY_NOSYNC                0x0001
#define MAP_ENTRY_IS_SUB_MAP            0x0002
#define MAP_ENTRY_COW                   0x0004
#define MAP_ENTRY_NEEDS_COPY            0x0008
#define MAP_ENTRY_NOFAULT               0x0010
#define MAP_ENTRY_USER_WIRED            0x0020

#define MAP_ENTRY_BEHAV_NORMAL          0x0000  /* default behavior */
#define MAP_ENTRY_BEHAV_SEQUENTIAL      0x0040  /* expect sequential access */
#define MAP_ENTRY_BEHAV_RANDOM          0x0080  /* expect random access */
#define MAP_ENTRY_BEHAV_RESERVED        0x00C0  /* future use */

#define MAP_ENTRY_BEHAV_MASK            0x00C0

#define MAP_ENTRY_IN_TRANSITION         0x0100  /* entry being changed */
#define MAP_ENTRY_NEEDS_WAKEUP          0x0200  /* waiters in transition */
#define MAP_ENTRY_NOCOREDUMP            0x0400  /* don't include in a core */

/*
 * flags for vm_map_[un]clip_range()
 */
#define MAP_CLIP_NO_HOLES               0x0001
  139 
  140 static __inline u_char   
  141 vm_map_entry_behavior(struct vm_map_entry *entry)
  142 {                  
  143         return entry->eflags & MAP_ENTRY_BEHAV_MASK;
  144 }
  145 
  146 static __inline void
  147 vm_map_entry_set_behavior(struct vm_map_entry *entry, u_char behavior)
  148 {              
  149         entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
  150                 (behavior & MAP_ENTRY_BEHAV_MASK);
  151 }                       
  152 
  153 /*
  154  *      Maps are doubly-linked lists of map entries, kept sorted
  155  *      by address.  A single hint is provided to start
  156  *      searches again from the last successful search,
  157  *      insertion, or removal.
  158  *
  159  *      Note: the lock structure cannot be the first element of vm_map
  160  *      because this can result in a running lockup between two or more
  161  *      system processes trying to kmem_alloc_wait() due to kmem_alloc_wait()
  162  *      and free tsleep/waking up 'map' and the underlying lockmgr also
  163  *      sleeping and waking up on 'map'.  The lockup occurs when the map fills
  164  *      up.  The 'exec' map, for example.
  165  */
/*
 * The top-level address map.  Entries hang off 'header' as a sorted,
 * doubly-linked list; 'header' itself also stores the map's address
 * range via the min_offset/max_offset aliases below.  Field order is
 * layout-sensitive — see the note above about why 'lock' must not be
 * the first member.
 */
struct vm_map {
        struct vm_map_entry header;     /* List of entries; also holds map bounds */
        struct lock lock;               /* Lock for map data */
        int nentries;                   /* Number of entries */
        vm_size_t size;                 /* virtual size */
        u_char system_map;              /* Am I a system map? */
        u_char infork;                  /* Am I in fork processing? */
        vm_map_entry_t hint;            /* hint for quick lookups */
        unsigned int timestamp;         /* Version number; bumped on exclusive lock */
        vm_map_entry_t first_free;      /* First free space hint */
        struct pmap *pmap;              /* Physical map */
#define min_offset              header.start
#define max_offset              header.end
};
  180 
  181 /* 
  182  * Shareable process virtual address space.
  183  * May eventually be merged with vm_map.
  184  * Several fields are temporary (text, data stuff).
  185  */
struct vmspace {
        struct vm_map vm_map;   /* VM address map */
        struct pmap vm_pmap;    /* private physical map */
        int vm_refcnt;          /* number of references */
        caddr_t vm_shm;         /* SYS5 shared memory private data XXX */
/* we copy between vm_startcopy and vm_endcopy on fork */
#define vm_startcopy vm_rssize
        segsz_t vm_rssize;      /* current resident set size in pages */
        segsz_t vm_swrss;       /* resident set size before last swap */
        segsz_t vm_tsize;       /* text size (pages) XXX */
        segsz_t vm_dsize;       /* data size (pages) XXX */
        segsz_t vm_ssize;       /* stack size (pages) */
        caddr_t vm_taddr;       /* user virtual address of text XXX */
        caddr_t vm_daddr;       /* user virtual address of data XXX */
        caddr_t vm_maxsaddr;    /* user VA at max stack growth */
        caddr_t vm_minsaddr;    /* user VA at max stack growth */
                                /* NOTE(review): same comment as vm_maxsaddr;
                                 * presumably the opposite end of the stack
                                 * growth range — verify against users. */
#define vm_endcopy vm_exitingcnt
        int     vm_exitingcnt;  /* several processes zombied in exit1 */
};
  205 
  206 /*
  207  *      Macros:         vm_map_lock, etc.
  208  *      Function:
  209  *              Perform locking on the data portion of a map.  Note that
  210  *              these macros mimic procedure calls returning void.  The
  211  *              semicolon is supplied by the user of these macros, not
  212  *              by the macros themselves.  The macros can safely be used
  213  *              as unbraced elements in a higher level statement.
  214  */
  215 
/*
 * Drain the map lock while dropping an interlock.
 * NOTE(review): this passes &(map)->ref_lock, but struct vm_map as
 * declared above has no ref_lock member — this macro looks stale or
 * unused; verify before relying on it.
 */
#define vm_map_lock_drain_interlock(map) \
        do { \
                lockmgr(&(map)->lock, LK_DRAIN|LK_INTERLOCK, \
                        &(map)->ref_lock, curproc); \
                (map)->timestamp++; \
        } while(0)

/*
 * vm_map_lock: take the map lock exclusively and bump the version
 * timestamp.  The DIAGNOSTIC build panics on lockmgr failure; defining
 * MAP_LOCK_DIAGNOSTIC additionally logs each lock transition.
 */
#ifdef DIAGNOSTIC
/* #define MAP_LOCK_DIAGNOSTIC 1 */
#ifdef MAP_LOCK_DIAGNOSTIC
#define vm_map_lock(map) \
        do { \
                printf ("locking map LK_EXCLUSIVE: 0x%x\n", map); \
                if (lockmgr(&(map)->lock, LK_EXCLUSIVE, (void *)0, curproc) != 0) { \
                        panic("vm_map_lock: failed to get lock"); \
                } \
                (map)->timestamp++; \
        } while(0)
#else
#define vm_map_lock(map) \
        do { \
                if (lockmgr(&(map)->lock, LK_EXCLUSIVE, (void *)0, curproc) != 0) { \
                        panic("vm_map_lock: failed to get lock"); \
                } \
                (map)->timestamp++; \
        } while(0)
#endif
#else
#define vm_map_lock(map) \
        do { \
                lockmgr(&(map)->lock, LK_EXCLUSIVE, (void *)0, curproc); \
                (map)->timestamp++; \
        } while(0)
#endif /* DIAGNOSTIC */

/*
 * Release / shared-acquire variants.  Note that unlike vm_map_lock,
 * these do NOT bump the timestamp: read locks and releases do not
 * change the map version.
 */
#if defined(MAP_LOCK_DIAGNOSTIC)
#define vm_map_unlock(map) \
        do { \
                printf ("locking map LK_RELEASE: 0x%x\n", map); \
                lockmgr(&(map)->lock, LK_RELEASE, (void *)0, curproc); \
        } while (0)
#define vm_map_lock_read(map) \
        do { \
                printf ("locking map LK_SHARED: 0x%x\n", map); \
                lockmgr(&(map)->lock, LK_SHARED, (void *)0, curproc); \
        } while (0)
#define vm_map_unlock_read(map) \
        do { \
                printf ("locking map LK_RELEASE: 0x%x\n", map); \
                lockmgr(&(map)->lock, LK_RELEASE, (void *)0, curproc); \
        } while (0)
#else
#define vm_map_unlock(map) \
        lockmgr(&(map)->lock, LK_RELEASE, (void *)0, curproc)
#define vm_map_lock_read(map) \
        lockmgr(&(map)->lock, LK_SHARED, (void *)0, curproc) 
#define vm_map_unlock_read(map) \
        lockmgr(&(map)->lock, LK_RELEASE, (void *)0, curproc)
#endif
  275 
  276 static __inline__ int
  277 _vm_map_lock_upgrade(vm_map_t map, struct proc *p) {
  278         int error;
  279 #if defined(MAP_LOCK_DIAGNOSTIC)
  280         printf("locking map LK_EXCLUPGRADE: 0x%x\n", map); 
  281 #endif
  282         error = lockmgr(&map->lock, LK_EXCLUPGRADE, (void *)0, p);
  283         if (error == 0)
  284                 map->timestamp++;
  285         return error;
  286 }
  287 
/* Upgrade the current process's shared map lock to exclusive. */
#define vm_map_lock_upgrade(map) _vm_map_lock_upgrade(map, curproc)

/* Downgrade an exclusive map lock to shared. */
#if defined(MAP_LOCK_DIAGNOSTIC)
#define vm_map_lock_downgrade(map) \
        do { \
                printf ("locking map LK_DOWNGRADE: 0x%x\n", map); \
                lockmgr(&(map)->lock, LK_DOWNGRADE, (void *)0, curproc); \
        } while (0)
#else
#define vm_map_lock_downgrade(map) \
        lockmgr(&(map)->lock, LK_DOWNGRADE, (void *)0, curproc)
#endif

/*
 * Toggle LK_CANRECURSE on the map's lockmgr lock, under the lock's
 * own interlock, so the owner may (or may no longer) re-enter it.
 */
#define vm_map_set_recursive(map) \
        do { \
                simple_lock(&(map)->lock.lk_interlock); \
                (map)->lock.lk_flags |= LK_CANRECURSE; \
                simple_unlock(&(map)->lock.lk_interlock); \
        } while(0)
#define vm_map_clear_recursive(map) \
        do { \
                simple_lock(&(map)->lock.lk_interlock); \
                (map)->lock.lk_flags &= ~LK_CANRECURSE; \
                simple_unlock(&(map)->lock.lk_interlock); \
        } while(0)

/*
 *      Functions implemented as macros: accessors for the map's
 *      address bounds (stored in the header entry) and its pmap.
 */
#define         vm_map_min(map)         ((map)->min_offset)
#define         vm_map_max(map)         ((map)->max_offset)
#define         vm_map_pmap(map)        ((map)->pmap)
  320 
  321 static __inline struct pmap *
  322 vmspace_pmap(struct vmspace *vmspace)
  323 {
  324         return &vmspace->vm_pmap;
  325 }
  326 
  327 static __inline long
  328 vmspace_resident_count(struct vmspace *vmspace)
  329 {
  330         return pmap_resident_count(vmspace_pmap(vmspace));
  331 }
  332 
/* XXX: number of kernel maps and entries to statically allocate */
#define MAX_KMAP        10
#define MAX_KMAPENT     128
#define MAX_MAPENT      128

/*
 * Copy-on-write flags for vm_map operations (passed as the 'cow'
 * argument of vm_map_find/vm_map_insert below).
 */
#define MAP_UNUSED_01           0x0001
#define MAP_COPY_ON_WRITE       0x0002
#define MAP_NOFAULT             0x0004
#define MAP_PREFAULT            0x0008
#define MAP_PREFAULT_PARTIAL    0x0010
#define MAP_DISABLE_SYNCER      0x0020
#define MAP_DISABLE_COREDUMP    0x0100
#define MAP_PREFAULT_MADVISE    0x0200  /* from (user) madvise request */

/*
 * vm_fault option flags.  VM_FAULT_WIRE_MASK covers both wiring
 * variants; HOLD and DIRTY are independent bits.
 */
#define VM_FAULT_NORMAL 0               /* Nothing special */
#define VM_FAULT_CHANGE_WIRING 1        /* Change the wiring as appropriate */
#define VM_FAULT_USER_WIRE 2            /* Likewise, but for user purposes */
#define VM_FAULT_WIRE_MASK (VM_FAULT_CHANGE_WIRING|VM_FAULT_USER_WIRE)
#define VM_FAULT_HOLD 4                 /* Hold the page */
#define VM_FAULT_DIRTY 8                /* Dirty the page */
  359 
/*
 * Kernel-only function prototypes (old-style __P() K&R-compat
 * wrappers).  Implementations live in sys/vm/vm_map.c and related
 * files.
 */
#ifdef _KERNEL
boolean_t vm_map_check_protection __P((vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t));
struct pmap;
vm_map_t vm_map_create __P((struct pmap *, vm_offset_t, vm_offset_t));
int vm_map_delete __P((vm_map_t, vm_offset_t, vm_offset_t));
int vm_map_find __P((vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t *, vm_size_t, boolean_t, vm_prot_t, vm_prot_t, int));
int vm_map_findspace __P((vm_map_t, vm_offset_t, vm_size_t, vm_offset_t *));
int vm_map_inherit __P((vm_map_t, vm_offset_t, vm_offset_t, vm_inherit_t));
void vm_map_init __P((struct vm_map *, vm_offset_t, vm_offset_t));
int vm_map_insert __P((vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t, vm_offset_t, vm_prot_t, vm_prot_t, int));
int vm_map_lookup __P((vm_map_t *, vm_offset_t, vm_prot_t, vm_map_entry_t *, vm_object_t *,
    vm_pindex_t *, vm_prot_t *, boolean_t *));
void vm_map_lookup_done __P((vm_map_t, vm_map_entry_t));
boolean_t vm_map_lookup_entry __P((vm_map_t, vm_offset_t, vm_map_entry_t *));
int vm_map_pageable __P((vm_map_t, vm_offset_t, vm_offset_t, boolean_t));
int vm_map_user_pageable __P((vm_map_t, vm_offset_t, vm_offset_t, boolean_t));
int vm_map_clean __P((vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t));
int vm_map_protect __P((vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t));
int vm_map_remove __P((vm_map_t, vm_offset_t, vm_offset_t));
void vm_map_startup __P((void));
int vm_map_submap __P((vm_map_t, vm_offset_t, vm_offset_t, vm_map_t));
int vm_map_madvise __P((vm_map_t, vm_offset_t, vm_offset_t, int));
void vm_map_simplify_entry __P((vm_map_t, vm_map_entry_t));
void vm_init2 __P((void));
int vm_uiomove __P((vm_map_t, vm_object_t, off_t, int, vm_offset_t, int *));
void vm_freeze_copyopts __P((vm_object_t, vm_pindex_t, vm_pindex_t));
int vm_map_stack __P((vm_map_t, vm_offset_t, vm_size_t, vm_prot_t, vm_prot_t, int));
int vm_map_growstack __P((struct proc *p, vm_offset_t addr));
int vmspace_swap_count __P((struct vmspace *vmspace));

#endif
#endif                          /* _VM_MAP_ */

Cache object: bef73232fc7cc3f6f8a566750bb4401d


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.