FreeBSD/Linux Kernel Cross Reference
sys/vm/uma_int.h


/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/vm/uma_int.h 344042 2019-02-12 04:33:05Z pfg $
 *
 */

#include <sys/counter.h>
#include <sys/_bitset.h>
#include <sys/_domainset.h>
#include <sys/_task.h>

/*
 * This file includes definitions, structures, prototypes, and inlines that
 * should not be used outside of the actual implementation of UMA.
 */

/*
 * The brief summary:  Zones describe unique allocation types.  Zones are
 * organized into per-CPU caches which are filled by buckets.  Buckets are
 * organized according to memory domains.  Buckets are filled from kegs which
 * are also organized according to memory domains.  Kegs describe a unique
 * allocation type, backend memory provider, and layout.  Kegs are associated
 * with one or more zones and zones reference one or more kegs.  Kegs provide
 * slabs which are virtually contiguous collections of pages.  Each slab is
 * broken down into one or more items that will satisfy an individual
 * allocation.
 *
 * An allocation is satisfied in the following order:
 * 1) Per-CPU cache
 * 2) Per-domain cache of buckets
 * 3) Slab from any of N kegs
 * 4) Backend page provider
 *
 * More detail on individual objects is contained below:
 *
 * Kegs contain lists of slabs which are stored in either the full bin, empty
 * bin, or partially allocated bin, to reduce fragmentation.  They also contain
 * the user-supplied value for size, which is adjusted for alignment purposes;
 * rsize is the result of that adjustment.  The Keg also stores information for
 * managing a hash of page addresses that maps pages to uma_slab_t structures
 * for pages that don't have embedded uma_slab_t's.
 *
 * Keg slab lists are organized by memory domain to support NUMA allocation
 * policies.  By default allocations are spread across domains to reduce the
 * potential for hotspots.  Special keg creation flags may be specified to
 * prefer local allocation.  However, there is no strict enforcement, as frees
 * may happen on any CPU and these are returned to the CPU-local cache
 * regardless of the originating domain.
 *
 * The uma_slab_t may be embedded in a UMA_SLAB_SIZE chunk of memory or it may
 * be allocated off the page from a special slab zone.  The free list within a
 * slab is managed with a bitmask.  For item sizes that would yield more than
 * 10% memory waste we potentially allocate a separate uma_slab_t if this will
 * improve the number of items per slab that will fit.
 *
 * The only really gross cases, with regard to memory waste, are for those
 * items that are just over half the page size.  You can get nearly 50% waste,
 * so you fall back to the memory footprint of the power-of-two allocator.  I
 * have looked at memory allocation sizes on many of the machines available to
 * me, and there does not seem to be an abundance of allocations at this range,
 * so at this time it may not make sense to optimize for it.  This can, of
 * course, be solved with dynamic slab sizes.
 *
 * Kegs may serve multiple Zones, but by far most of the time they only serve
 * one.  When a Zone is created, a Keg is allocated and set up for it.  While
 * the backing Keg stores slabs, the Zone caches Buckets of items allocated
 * from the slabs.  Each Zone is equipped with an init/fini and ctor/dtor
 * pair, as well as with its own set of small per-CPU caches, layered above
 * the Zone's general Bucket cache.
 *
 * The PCPU caches are protected by critical sections, and may be accessed
 * safely only from their associated CPU, while the Zones backed by the same
 * Keg all share a common Keg lock (to coalesce contention on the backing
 * slabs).  The backing Keg typically only serves one Zone, but in the case of
 * multiple Zones, one of the Zones is considered the Master Zone, and all
 * Zone-related stats from the Keg are accounted to the Master Zone.  For an
 * example of a Multi-Zone setup, refer to the Mbuf allocation code.
 */
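
/*
 * A condensed sketch of the four-level allocation cascade described above.
 * The real implementation is uma_zalloc_arg() and its helpers in uma_core.c;
 * the helper names used here are invented for illustration only, which is
 * why the block is not compiled.
 */
#if 0	/* illustrative sketch */
void *
uma_zalloc_sketch(uma_zone_t zone, int flags)
{
        void *item;

        /* 1) Per-CPU cache: critical section only, no lock. */
        if ((item = pcpu_cache_alloc(zone)) != NULL)
                return (item);
        /* 2) Per-domain bucket cache: swap in a full bucket, zone-locked. */
        if ((item = domain_bucket_alloc(zone)) != NULL)
                return (item);
        /* 3) Keg slabs: carve an item from a partial or free slab, keg-locked. */
        if ((item = keg_slab_alloc(zone, flags)) != NULL)
                return (item);
        /* 4) Backend page provider: uk_allocf supplies fresh pages. */
        return (keg_backend_alloc(zone, flags));
}
#endif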

/*
 *      This is the representation of a normal (non-OFFPAGE) slab:
 *
 *      i == item
 *      s == slab pointer
 *
 *      <----------------  Page (UMA_SLAB_SIZE) ------------------>
 *      ___________________________________________________________
 *     | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _   ___________ |
 *     ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |slab header||
 *     ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |___________||
 *     |___________________________________________________________|
 *
 *
 *      This is an OFFPAGE slab.  These can be larger than UMA_SLAB_SIZE.
 *
 *      ___________________________________________________________
 *     | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _   |
 *     ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i|  |
 *     ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_|  |
 *     |___________________________________________________________|
 *       ___________    ^
 *      |slab header|   |
 *      |___________|---*
 *
 */

#ifndef VM_UMA_INT_H
#define VM_UMA_INT_H

#define UMA_SLAB_SIZE   PAGE_SIZE       /* How big are our slabs? */
#define UMA_SLAB_MASK   (PAGE_SIZE - 1) /* Mask to get back to the page */
#define UMA_SLAB_SHIFT  PAGE_SHIFT      /* Number of bits in PAGE_MASK */

/* Max waste percentage before going to off-page slab management */
#define UMA_MAX_WASTE   10

/*
 * Actual size of a uma_slab when it is placed at the end of a page
 * with a pointer-sized alignment requirement.
 */
#define SIZEOF_UMA_SLAB ((sizeof(struct uma_slab) & UMA_ALIGN_PTR) ?      \
                            (sizeof(struct uma_slab) & ~UMA_ALIGN_PTR) +  \
                            (UMA_ALIGN_PTR + 1) : sizeof(struct uma_slab))

/*
 * Size of the memory available for actual items in a non-offpage,
 * single-page slab.
 */
#define UMA_SLAB_SPACE  (PAGE_SIZE - SIZEOF_UMA_SLAB)
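
/*
 * A sketch of the resulting off-page decision, simplified from the keg
 * sizing logic in uma_core.c (the function name here is illustrative).
 * For example, with 4 KB pages an item size just over half a page leaves
 * almost half the page unused with an embedded header, which is the gross
 * case the overview above calls out; moving the header off the page only
 * helps when it buys at least one more item per slab.
 */
#if 0	/* illustrative sketch */
static bool
keg_wants_offpage(u_int rsize)
{
        u_int ipers, wasted;

        ipers = UMA_SLAB_SPACE / rsize;         /* items with embedded header */
        wasted = UMA_SLAB_SPACE - (ipers * rsize);
        return (wasted >= UMA_SLAB_SIZE / UMA_MAX_WASTE &&
            PAGE_SIZE / rsize > ipers);
}
#endif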

/*
 * I doubt there will be many cases where this is exceeded. This is the initial
 * size of the hash table for uma_slabs that are managed off page. This hash
 * does expand by powers of two.  Currently it doesn't get smaller.
 */
#define UMA_HASH_SIZE_INIT      32

/*
 * I should investigate other hashing algorithms.  This should yield a low
 * number of collisions if the pages are relatively contiguous.
 */

#define UMA_HASH(h, s) ((((uintptr_t)s) >> UMA_SLAB_SHIFT) & (h)->uh_hashmask)

#define UMA_HASH_INSERT(h, s, mem)                                      \
                SLIST_INSERT_HEAD(&(h)->uh_slab_hash[UMA_HASH((h),      \
                    (mem))], (s), us_hlink)
#define UMA_HASH_REMOVE(h, s, mem)                                      \
                SLIST_REMOVE(&(h)->uh_slab_hash[UMA_HASH((h),           \
                    (mem))], (s), uma_slab, us_hlink)
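
/*
 * How the pieces fit together for OFFPAGE kegs, as a sketch (hash_sfind()
 * is defined at the bottom of this file; the surrounding code is elided):
 */
#if 0	/* illustrative sketch */
        /* A new off-page slab backing the pages that start at "mem": */
        UMA_HASH_INSERT(&keg->uk_hash, slab, mem);

        /* On free, mask the item back to its base page and look it up: */
        mem = (uint8_t *)((uintptr_t)item & ~UMA_SLAB_MASK);
        slab = hash_sfind(&keg->uk_hash, mem);

        /* When the slab is released back to the slab zone: */
        UMA_HASH_REMOVE(&keg->uk_hash, slab, mem);
#endif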

/* Hash table for freed address -> slab translation */

SLIST_HEAD(slabhead, uma_slab);

struct uma_hash {
        struct slabhead *uh_slab_hash;  /* Hash table for slabs */
        u_int           uh_hashsize;    /* Current size of the hash table */
        u_int           uh_hashmask;    /* Mask used during hashing */
};

/*
 * Align a field or structure to a cache line.
 */
#if defined(__amd64__) || defined(__powerpc64__)
#define UMA_ALIGN       __aligned(128)
#else
#define UMA_ALIGN
#endif

/*
 * Structures for per-CPU queues.
 */

struct uma_bucket {
        LIST_ENTRY(uma_bucket)  ub_link;        /* Link into the zone */
        int16_t ub_cnt;                         /* Count of items in bucket. */
        int16_t ub_entries;                     /* Max items. */
        void    *ub_bucket[];                   /* Actual allocation storage */
};

typedef struct uma_bucket * uma_bucket_t;

struct uma_cache {
        uma_bucket_t    uc_freebucket;  /* Bucket we're freeing to */
        uma_bucket_t    uc_allocbucket; /* Bucket to allocate from */
        uint64_t        uc_allocs;      /* Count of allocations */
        uint64_t        uc_frees;       /* Count of frees */
} UMA_ALIGN;

typedef struct uma_cache * uma_cache_t;
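
/*
 * A sketch of the per-CPU fast path these structures serve: a critical
 * section pins the thread to its CPU, uc_allocbucket is popped, and no
 * lock is taken.  struct uma_zone is declared further down, so the sketch
 * is not compiled here; compare uma_zalloc_arg() in uma_core.c.
 */
#if 0	/* illustrative sketch */
        critical_enter();
        cache = &zone->uz_cpu[curcpu];
        bucket = cache->uc_allocbucket;
        if (bucket != NULL && bucket->ub_cnt > 0) {
                bucket->ub_cnt--;
                item = bucket->ub_bucket[bucket->ub_cnt];
                bucket->ub_bucket[bucket->ub_cnt] = NULL;
                cache->uc_allocs++;
                critical_exit();
                return (item);
        }
        critical_exit();
        /* Otherwise fall back to the per-domain bucket cache. */
#endif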

/*
 * Per-domain memory list.  Embedded in the kegs.
 */
struct uma_domain {
        LIST_HEAD(,uma_slab)    ud_part_slab;   /* partially allocated slabs */
        LIST_HEAD(,uma_slab)    ud_free_slab;   /* empty slab list */
        LIST_HEAD(,uma_slab)    ud_full_slab;   /* full slabs */
};

typedef struct uma_domain * uma_domain_t;

/*
 * Keg management structure
 *
 * TODO: Optimize for cache line size
 *
 */
struct uma_keg {
        struct mtx      uk_lock;        /* Lock for the keg must be first.
                                         * See shared uz_keg/uz_lockptr
                                         * member of struct uma_zone. */
        struct uma_hash uk_hash;
        LIST_HEAD(,uma_zone)    uk_zones;       /* Keg's zones */

        struct domainset_ref uk_dr;     /* Domain selection policy. */
        uint32_t        uk_align;       /* Alignment mask */
        uint32_t        uk_pages;       /* Total page count */
        uint32_t        uk_free;        /* Count of items free in slabs */
        uint32_t        uk_reserve;     /* Number of reserved items. */
        uint32_t        uk_size;        /* Requested size of each item */
        uint32_t        uk_rsize;       /* Real size of each item */

        uma_init        uk_init;        /* Keg's init routine */
        uma_fini        uk_fini;        /* Keg's fini routine */
        uma_alloc       uk_allocf;      /* Allocation function */
        uma_free        uk_freef;       /* Free routine */

        u_long          uk_offset;      /* Next free offset from base KVA */
        vm_offset_t     uk_kva;         /* Zone base KVA */
        uma_zone_t      uk_slabzone;    /* Slab zone backing us, if OFFPAGE */

        uint32_t        uk_pgoff;       /* Offset to uma_slab struct */
        uint16_t        uk_ppera;       /* Pages per allocation from backend */
        uint16_t        uk_ipers;       /* Items per slab */
        uint32_t        uk_flags;       /* Internal flags */

        /* Least used fields go to the last cache line. */
        const char      *uk_name;               /* Name of creating zone. */
        LIST_ENTRY(uma_keg)     uk_link;        /* List of all kegs */

        /* Must be last, variable sized. */
        struct uma_domain       uk_domain[];    /* Keg's slab lists. */
};
typedef struct uma_keg  * uma_keg_t;

/*
 * Free bits per slab.
 */
#define SLAB_SETSIZE    (PAGE_SIZE / UMA_SMALLEST_UNIT)
BITSET_DEFINE(slabbits, SLAB_SETSIZE);

/*
 * The slab structure manages a single contiguous allocation from backing
 * store and subdivides it into individually allocatable items.
 */
struct uma_slab {
        uma_keg_t       us_keg;                 /* Keg we live in */
        union {
                LIST_ENTRY(uma_slab)    _us_link;       /* slabs in zone */
                unsigned long   _us_size;       /* Size of allocation */
        } us_type;
        SLIST_ENTRY(uma_slab)   us_hlink;       /* Link for hash table */
        uint8_t         *us_data;               /* First item */
        struct slabbits us_free;                /* Free bitmask. */
#ifdef INVARIANTS
        struct slabbits us_debugfree;           /* Debug bitmask. */
#endif
        uint16_t        us_freecount;           /* How many are free? */
        uint8_t         us_flags;               /* Page flags; see uma.h */
        uint8_t         us_domain;              /* Backing NUMA domain. */
};

#define us_link us_type._us_link
#define us_size us_type._us_size

#if MAXMEMDOM >= 255
#error "Slab domain type insufficient"
#endif

typedef struct uma_slab * uma_slab_t;
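
/*
 * A sketch of how the free bitmask carves an item out of a slab; compare
 * slab_alloc_item() in uma_core.c.  The BIT_FFS()/BIT_CLR() manipulation
 * macros live in <sys/bitset.h> (only the type-defining <sys/_bitset.h> is
 * included here), so the block is not compiled in this header.
 */
#if 0	/* illustrative sketch */
static void *
slab_alloc_item_sketch(uma_keg_t keg, uma_slab_t slab)
{
        int freei;

        /* BIT_FFS() is one-based; zero would mean the slab is full. */
        freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1;
        BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free);
        slab->us_freecount--;
        return (slab->us_data + (freei * keg->uk_rsize));
}
#endif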

struct uma_zone_domain {
        LIST_HEAD(,uma_bucket)  uzd_buckets;    /* full buckets */
        long            uzd_nitems;     /* total item count */
        long            uzd_imax;       /* maximum item count this period */
        long            uzd_imin;       /* minimum item count this period */
        long            uzd_wss;        /* working set size estimate */
};

typedef struct uma_zone_domain * uma_zone_domain_t;
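
/*
 * A sketch of how the working-set estimate is maintained: once per period
 * the spread between the observed maximum and minimum item counts is folded
 * into uzd_wss with a decaying average, and the bounds are reset to the
 * current count.  The exact smoothing is a detail of uma_core.c; the
 * weights below are illustrative.
 */
#if 0	/* illustrative sketch */
static void
zone_domain_update_wss_sketch(uma_zone_domain_t zdom)
{
        long wss;

        wss = zdom->uzd_imax - zdom->uzd_imin;
        zdom->uzd_imax = zdom->uzd_imin = zdom->uzd_nitems;
        zdom->uzd_wss = (4 * wss + zdom->uzd_wss) / 5;
}
#endif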

/*
 * Zone management structure
 *
 * TODO: Optimize for cache line size
 *
 */
struct uma_zone {
        /* Offset 0, used in alloc/free fast/medium fast path and const. */
        union {
                uma_keg_t       uz_keg;         /* This zone's keg */
                struct mtx      *uz_lockptr;    /* To keg or to self */
        };
        struct uma_zone_domain  *uz_domain;     /* per-domain buckets */
        uint32_t        uz_flags;       /* Flags inherited from kegs */
        uint32_t        uz_size;        /* Size inherited from kegs */
        uma_ctor        uz_ctor;        /* Constructor for each allocation */
        uma_dtor        uz_dtor;        /* Destructor */
        uint64_t        uz_items;       /* Total items count */
        uint64_t        uz_max_items;   /* Maximum number of items to alloc */
        uint32_t        uz_sleepers;    /* Number of sleepers on memory */
        uint16_t        uz_count;       /* Number of items in a full bucket */
        uint16_t        uz_count_max;   /* Maximum number of items there */

        /* Offset 64, used in bucket replenish. */
        uma_import      uz_import;      /* Import new memory to cache. */
        uma_release     uz_release;     /* Release memory from cache. */
        void            *uz_arg;        /* Import/release argument. */
        uma_init        uz_init;        /* Initializer for each item */
        uma_fini        uz_fini;        /* Finalizer for each item. */
        void            *uz_spare;
        uint64_t        uz_bkt_count;   /* Items in bucket cache */
        uint64_t        uz_bkt_max;     /* Maximum bucket cache size */

        /* Offset 128, rare. */
        /*
         * The lock is placed here to avoid adjacent line prefetcher
         * in fast paths and to take up space near infrequently accessed
         * members to reduce alignment overhead.
         */
        struct mtx      uz_lock;        /* Lock for the zone */
        LIST_ENTRY(uma_zone) uz_link;   /* List of all zones in keg */
        const char      *uz_name;       /* Text name of the zone */
        /* The next two fields are used to print rate-limited warnings. */
        const char      *uz_warning;    /* Warning to print on failure */
        struct timeval  uz_ratecheck;   /* Warnings rate-limiting */
        struct task     uz_maxaction;   /* Task to run when at limit */
        uint16_t        uz_count_min;   /* Minimal number of items in bucket */

        /* Offset 256, stats. */
        counter_u64_t   uz_allocs;      /* Total number of allocations */
        counter_u64_t   uz_frees;       /* Total number of frees */
        counter_u64_t   uz_fails;       /* Total number of alloc failures */
        uint64_t        uz_sleeps;      /* Total number of alloc sleeps */

        /*
         * This HAS to be the last item because we adjust the zone size
         * based on NCPU and then allocate the space for the zones.
         */
        struct uma_cache        uz_cpu[]; /* Per-CPU caches */

        /* uz_domain follows here. */
};

/*
 * These flags must not overlap with the UMA_ZONE flags specified in uma.h.
 */
#define UMA_ZFLAG_CACHE         0x04000000      /* uma_zcache_create()d it */
#define UMA_ZFLAG_DRAINING      0x08000000      /* Running zone_drain. */
#define UMA_ZFLAG_BUCKET        0x10000000      /* Bucket zone. */
#define UMA_ZFLAG_INTERNAL      0x20000000      /* No offpage, no PCPU. */
#define UMA_ZFLAG_CACHEONLY     0x80000000      /* Don't ask VM for buckets. */

#define UMA_ZFLAG_INHERIT                                               \
    (UMA_ZFLAG_INTERNAL | UMA_ZFLAG_CACHEONLY | UMA_ZFLAG_BUCKET)

#undef UMA_ALIGN

#ifdef _KERNEL
/* Internal prototypes */
static __inline uma_slab_t hash_sfind(struct uma_hash *hash, uint8_t *data);
void *uma_large_malloc(vm_size_t size, int wait);
void *uma_large_malloc_domain(vm_size_t size, int domain, int wait);
void uma_large_free(uma_slab_t slab);

/* Lock Macros */

#define KEG_LOCK_INIT(k, lc)                                    \
        do {                                                    \
                if ((lc))                                       \
                        mtx_init(&(k)->uk_lock, (k)->uk_name,   \
                            (k)->uk_name, MTX_DEF | MTX_DUPOK); \
                else                                            \
                        mtx_init(&(k)->uk_lock, (k)->uk_name,   \
                            "UMA zone", MTX_DEF | MTX_DUPOK);   \
        } while (0)

#define KEG_LOCK_FINI(k)        mtx_destroy(&(k)->uk_lock)
#define KEG_LOCK(k)     mtx_lock(&(k)->uk_lock)
#define KEG_UNLOCK(k)   mtx_unlock(&(k)->uk_lock)
#define KEG_LOCK_ASSERT(k)      mtx_assert(&(k)->uk_lock, MA_OWNED)

#define KEG_GET(zone, keg) do {                                 \
        (keg) = (zone)->uz_keg;                                 \
        KASSERT((void *)(keg) != (void *)&(zone)->uz_lock,      \
            ("%s: Invalid zone %p type", __func__, (zone)));    \
        } while (0)

#define ZONE_LOCK_INIT(z, lc)                                   \
        do {                                                    \
                if ((lc))                                       \
                        mtx_init(&(z)->uz_lock, (z)->uz_name,   \
                            (z)->uz_name, MTX_DEF | MTX_DUPOK); \
                else                                            \
                        mtx_init(&(z)->uz_lock, (z)->uz_name,   \
                            "UMA zone", MTX_DEF | MTX_DUPOK);   \
        } while (0)

#define ZONE_LOCK(z)    mtx_lock((z)->uz_lockptr)
#define ZONE_TRYLOCK(z) mtx_trylock((z)->uz_lockptr)
#define ZONE_UNLOCK(z)  mtx_unlock((z)->uz_lockptr)
#define ZONE_LOCK_FINI(z)       mtx_destroy(&(z)->uz_lock)
#define ZONE_LOCK_ASSERT(z)     mtx_assert((z)->uz_lockptr, MA_OWNED)
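
/*
 * Typical usage of the macros above, as a sketch: resolve the backing keg
 * with KEG_GET() (which asserts the zone is keg-backed), then take the
 * shared keg lock before touching the slab lists.
 */
#if 0	/* illustrative sketch */
        uma_keg_t keg;

        KEG_GET(zone, keg);
        KEG_LOCK(keg);
        /* ... manipulate keg->uk_domain[] slab lists, uk_free, etc. ... */
        KEG_UNLOCK(keg);
#endif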

/*
 * Find a slab within a hash table.  This is used for OFFPAGE zones to look
 * up the slab structure.
 *
 * Arguments:
 *      hash  The hash table to search.
 *      data  The base page of the item.
 *
 * Returns:
 *      A pointer to a slab if successful, else NULL.
 */
static __inline uma_slab_t
hash_sfind(struct uma_hash *hash, uint8_t *data)
{
        uma_slab_t slab;
        u_int hval;

        hval = UMA_HASH(hash, data);

        SLIST_FOREACH(slab, &hash->uh_slab_hash[hval], us_hlink) {
                if ((uint8_t *)slab->us_data == data)
                        return (slab);
        }
        return (NULL);
}

static __inline uma_slab_t
vtoslab(vm_offset_t va)
{
        vm_page_t p;

        p = PHYS_TO_VM_PAGE(pmap_kextract(va));
        return ((uma_slab_t)p->plinks.s.pv);
}

static __inline void
vsetslab(vm_offset_t va, uma_slab_t slab)
{
        vm_page_t p;

        p = PHYS_TO_VM_PAGE(pmap_kextract(va));
        p->plinks.s.pv = slab;
}

/*
 * The following two functions may be defined by architecture-specific code
 * if they can provide more efficient allocation functions.  This is useful
 * for using direct-mapped addresses.
 */
void *uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain,
    uint8_t *pflag, int wait);
void uma_small_free(void *mem, vm_size_t size, uint8_t flags);
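
/*
 * The shape such an override takes on a direct-mapped architecture, as a
 * sketch loosely modeled on the amd64 implementation: a single page is
 * allocated from the requested domain and its direct-map address is
 * returned, so no kernel virtual address space needs to be reserved.
 * Details (dump registration, zeroing) are elided.
 */
#if 0	/* illustrative sketch */
void *
uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
    int wait)
{
        vm_page_t m;

        *pflag = UMA_SLAB_PRIV;
        m = vm_page_alloc_domain(NULL, 0, domain,
            malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
        if (m == NULL)
                return (NULL);
        return ((void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
}
#endif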

/* Set a global soft limit on UMA managed memory. */
void uma_set_limit(unsigned long limit);
#endif /* _KERNEL */

#endif /* VM_UMA_INT_H */
