FreeBSD/Linux Kernel Cross Reference
sys/uvm/uvm_amap.h


/*      $OpenBSD: uvm_amap.h,v 1.33 2021/01/19 13:21:36 mpi Exp $       */
/*      $NetBSD: uvm_amap.h,v 1.14 2001/02/18 21:19:08 chs Exp $        */

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _UVM_UVM_AMAP_H_
#define _UVM_UVM_AMAP_H_

/*
 * uvm_amap.h: general amap interface and amap implementation-specific info
 */

/*
 * an amap structure contains pointers to a set of anons that are
 * mapped together in virtual memory (an anon is a single page of
 * anonymous virtual memory -- see uvm_anon.h).  in uvm we hide the
 * details of the implementation of amaps behind a general amap
 * interface.  this allows us to change the amap implementation
 * without having to touch the rest of the code.  this file is divided
 * into two parts: the definition of the uvm amap interface and the
 * amap implementation-specific definitions.
 */

#ifdef _KERNEL

/*
 * part 1: amap interface
 */

/*
 * forward definition of vm_amap structure.  only amap
 * implementation-specific code should directly access the fields of
 * this structure.
 */

struct vm_amap;

/*
 * prototypes for the amap interface
 */

                                        /* ensure amap can store anon */
void            amap_populate(struct vm_aref *, vaddr_t);
                                        /* add an anon to an amap */
int             amap_add(struct vm_aref *, vaddr_t, struct vm_anon *,
                    boolean_t);
                                        /* allocate a new amap */
struct vm_amap  *amap_alloc(vaddr_t, int, int);
                                        /* clear amap needs-copy flag */
void            amap_copy(vm_map_t, vm_map_entry_t, int, boolean_t, vaddr_t,
                    vaddr_t);
                                        /* resolve all COW faults now */
void            amap_cow_now(vm_map_t, vm_map_entry_t);
                                        /* free amap */
void            amap_free(struct vm_amap *);
                                        /* init amap module (at boot time) */
void            amap_init(void);
                                        /* lookup an anon @ offset in amap */
struct vm_anon  *amap_lookup(struct vm_aref *, vaddr_t);
                                        /* lookup multiple anons */
void            amap_lookups(struct vm_aref *, vaddr_t, struct vm_anon **, int);
                                        /* add a reference to an amap */
void            amap_ref(struct vm_amap *, vaddr_t, vsize_t, int);
                                        /* split reference to amap into two */
void            amap_splitref(struct vm_aref *, struct vm_aref *, vaddr_t);
                                        /* remove an anon from an amap */
void            amap_unadd(struct vm_aref *, vaddr_t);
                                        /* drop reference to an amap */
void            amap_unref(struct vm_amap *, vaddr_t, vsize_t, int);
                                        /* remove all anons from amap */
void            amap_wipeout(struct vm_amap *);
boolean_t       amap_swap_off(int, int);

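/*
 * Illustrative sketch (not part of the interface): callers key an amap
 * through the vm_aref stored in a map entry, using offsets relative to
 * the start of that entry.  The fragment below is a simplified,
 * hypothetical example of how a fault path might look up an anon and
 * install a new one; the names "entry", "amap" and "va" are assumptions,
 * uvm_analloc() comes from uvm_anon.h, and locking and error handling
 * are reduced to the minimum.
 *
 *      struct vm_anon *anon;
 *      vaddr_t off = va - entry->start;        (page-aligned offset)
 *
 *      amap_lock(amap);
 *      anon = amap_lookup(&entry->aref, off);
 *      if (anon == NULL) {
 *              anon = uvm_analloc();           (may fail; check for NULL)
 *              amap_populate(&entry->aref, off);
 *              amap_add(&entry->aref, off, anon, FALSE);
 *      }
 *      amap_unlock(amap);
 */
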
/*
 * amap flag values
 */

#define AMAP_SHARED     0x1     /* amap is shared */
#define AMAP_REFALL     0x2     /* amap_ref: reference entire amap */
#define AMAP_SWAPOFF    0x4     /* amap_swap_off() is in progress */

#endif /* _KERNEL */

/**********************************************************************/

/*
 * part 2: amap implementation-specific info
 */

/*
 * we currently provide an array-based amap implementation.  in this
 * implementation we provide the option of tracking split references
 * so that we don't lose track of references during partial unmaps
 * ... this is enabled with the "UVM_AMAP_PPREF" define.
 */

#define UVM_AMAP_PPREF          /* track partial references */

/*
 * here is the definition of the vm_amap structure and helper structures for
 * this implementation.
 */

struct vm_amap_chunk {
        TAILQ_ENTRY(vm_amap_chunk) ac_list;     /* list of all chunks in amap */
        int ac_baseslot;                        /* amap slot of ac_anon[0] */
        uint16_t ac_usedmap;                    /* bitmap of slots in use */
        uint16_t ac_nslot;                      /* # of slots in this chunk */
        struct vm_anon *ac_anon[];              /* anon pointers, one per slot */
};

struct vm_amap {
        struct rwlock *am_lock; /* lock for all vm_amap flags */
        int am_ref;             /* reference count */
        int am_flags;           /* flags */
        int am_nslot;           /* # of slots currently in map */
        int am_nused;           /* # of slots currently in use */
#ifdef UVM_AMAP_PPREF
        int *am_ppref;          /* per page reference count (if !NULL) */
#endif
        LIST_ENTRY(vm_amap) am_list;    /* entry on list of all amaps */

        union {
                struct {
                        struct vm_amap_chunk **amn_buckets; /* hash buckets */
                        TAILQ_HEAD(, vm_amap_chunk) amn_chunks; /* all chunks */
                        int amn_nbuckets; /* # of buckets */
                        int amn_ncused; /* # of chunks currently in use */
                        int amn_hashshift; /* shift count to hash slot to bucket */
                } ami_normal;

                /*
                 * MUST be last element in vm_amap because it contains a
                 * variably sized array element.
                 */
                struct vm_amap_chunk ami_small;
        } am_impl;

#define am_buckets      am_impl.ami_normal.amn_buckets
#define am_chunks       am_impl.ami_normal.amn_chunks
#define am_nbuckets     am_impl.ami_normal.amn_nbuckets
#define am_ncused       am_impl.ami_normal.amn_ncused
#define am_hashshift    am_impl.ami_normal.amn_hashshift

#define am_small        am_impl.ami_small
};

/*
 * The entries in an amap are called slots. For example, an amap that
 * covers four pages is said to have four slots.
 *
 * The slots of an amap are clustered into chunks of UVM_AMAP_CHUNK
 * slots each. The data structure of a chunk is vm_amap_chunk.
 * Every chunk contains an array of pointers to vm_anon, and a bitmap
 * is used to represent which of the slots are in use.
 *
 * Small amaps of up to UVM_AMAP_CHUNK slots have the chunk directly
 * embedded in the amap structure.
 *
 * Amaps with more slots are normal amaps and organize chunks in a hash
 * table. The hash table is organized as an array of buckets.
 * All chunks of the amap are additionally stored in a linked list.
 * Chunks that belong to the same hash bucket are stored in the list
 * consecutively. When all slots in a chunk are unused, the chunk is freed.
 *
 * For large amaps, the bucket array can grow large. See the description
 * below of how large bucket arrays are avoided.
 */

/*
 * defines for handling of large sparse amaps:
 *
 * one of the problems of array-based amaps is that if you allocate a
 * large sparsely-used area of virtual memory you end up allocating
 * large arrays that, for the most part, don't get used.  this is a
 * problem for BSD in that the kernel likes to make these types of
 * allocations to "reserve" memory for possible future use.
 *
 * for example, the kernel allocates (reserves) a large chunk of user
 * VM for possible stack growth.  most of the time only a page or two
 * of this VM is actually used.  since the stack is anonymous memory
 * it makes sense for it to live in an amap, but if we allocated an
 * amap for the entire stack range we could end up wasting a large
 * amount of malloc'd KVM.
 *
 * for example, on the i386 at boot time we allocate two amaps for the stack
 * of /sbin/init:
 *  1. a 7680 slot amap at protection PROT_NONE (reserve space for stack)
 *  2. a 512 slot amap at protection PROT_READ|PROT_WRITE (top of stack)
 *
 * most of the array space allocated for these amaps is never used.
 * the amap interface provides a way for us to avoid this problem by
 * allowing amap_copy() to break larger amaps up into smaller sized
 * chunks (controlled by the "canchunk" option).   we use this feature
 * to reduce our memory usage with the BSD stack management.  if we
 * are asked to create an amap with more than UVM_AMAP_LARGE slots in it,
 * we attempt to break it up into a UVM_AMAP_CHUNK sized amap if the
 * "canchunk" flag is set.
 *
 * so, in the i386 example, the 7680 slot area is never referenced so
 * nothing gets allocated (amap_copy is never called because the protection
 * is zero).   the 512 slot area for the top of the stack is referenced.
 * the chunking code breaks it up into 16 slot chunks (hopefully a single
 * 16 slot chunk is enough to handle the whole stack).
 */

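/*
 * To put the i386 example in concrete terms (assuming 4096-byte pages):
 * the 7680 slot amap covers 7680 * 4 KB = 30 MB of reserved stack, and
 * the 512 slot amap covers 2 MB.  A flat array of one anon pointer per
 * slot for the 30 MB reservation alone would cost 7680 * sizeof(void *)
 * (about 30 KB with 4-byte pointers) of kernel memory per process, even
 * though almost none of it is ever used -- which is what the chunking
 * scheme below avoids.
 */
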
#define UVM_AMAP_LARGE  256     /* # of slots in "large" amap */
#define UVM_AMAP_CHUNK  16      /* # of slots to chunk large amaps in */

#define UVM_AMAP_SMALL(amap)            ((amap)->am_nslot <= UVM_AMAP_CHUNK)
#define UVM_AMAP_SLOTIDX(slot)          ((slot) % UVM_AMAP_CHUNK)
#define UVM_AMAP_BUCKET(amap, slot)                             \
        (((slot) / UVM_AMAP_CHUNK) >> (amap)->am_hashshift)

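/*
 * Worked example (illustrative only): with UVM_AMAP_CHUNK == 16, slot 75
 * of a normal amap lives in the chunk whose base slot is
 * AMAP_BASE_SLOT(75) = (75 / 16) * 16 = 64, at index
 * UVM_AMAP_SLOTIDX(75) = 75 % 16 = 11 within that chunk's ac_anon[]
 * array.  If the amap happened to have am_hashshift == 2, the chunk
 * would hash to bucket UVM_AMAP_BUCKET(amap, 75) = (75 / 16) >> 2 = 1.
 * An amap with am_nslot <= 16 is "small" and simply uses the embedded
 * am_small chunk instead.
 */
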
#ifdef _KERNEL

/*
 * macros
 */

/* AMAP_B2SLOT: convert byte offset to slot */
#define AMAP_B2SLOT(S,B) {                                              \
        KASSERT(((B) & (PAGE_SIZE - 1)) == 0);                          \
        (S) = (B) >> PAGE_SHIFT;                                        \
}

#define AMAP_CHUNK_FOREACH(chunk, amap)                                 \
        for (chunk = (UVM_AMAP_SMALL(amap) ?                            \
            &(amap)->am_small : TAILQ_FIRST(&(amap)->am_chunks));       \
            (chunk) != NULL; (chunk) = TAILQ_NEXT(chunk, ac_list))

#define AMAP_BASE_SLOT(slot)                                            \
        (((slot) / UVM_AMAP_CHUNK) * UVM_AMAP_CHUNK)
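
/*
 * Illustrative sketch (not part of the interface): AMAP_CHUNK_FOREACH
 * hides the difference between small amaps (single embedded chunk) and
 * normal amaps (chunks on the am_chunks list).  A simplified,
 * hypothetical walk that counts the anons in an amap could look like
 * this; the names "amap", "chunk" and "count" are assumptions:
 *
 *      struct vm_amap_chunk *chunk;
 *      int i, count = 0;
 *
 *      AMAP_CHUNK_FOREACH(chunk, amap) {
 *              for (i = 0; i < chunk->ac_nslot; i++) {
 *                      if (chunk->ac_usedmap & (1 << i))
 *                              count++;
 *              }
 *      }
 *
 * Looking up the chunk for a particular slot in a normal amap instead
 * starts at am_buckets[UVM_AMAP_BUCKET(amap, slot)] and follows ac_list
 * until the bucket number changes, comparing each chunk's ac_baseslot
 * against AMAP_BASE_SLOT(slot).
 */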

/*
 * flags macros
 */

#define amap_flags(AMAP)        ((AMAP)->am_flags)
#define amap_refs(AMAP)         ((AMAP)->am_ref)

#define amap_lock(AMAP)         rw_enter_write((AMAP)->am_lock)
#define amap_unlock(AMAP)       rw_exit_write((AMAP)->am_lock)

#endif /* _KERNEL */

#endif /* _UVM_UVM_AMAP_H_ */

This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.