FreeBSD/Linux Kernel Cross Reference
sys/vm/uma_core.c


    1 /*-
    2  * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
    3  * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
    4  * Copyright (c) 2004-2006 Robert N. M. Watson
    5  * All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice unmodified, this list of conditions, and the following
   12  *    disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   27  */
   28 
   29 /*
    30  * uma_core.c  Implementation of the Universal Memory Allocator
   31  *
   32  * This allocator is intended to replace the multitude of similar object caches
   33  * in the standard FreeBSD kernel.  The intent is to be flexible as well as
   34  * efficient.  A primary design goal is to return unused memory to the rest of
   35  * the system.  This will make the system as a whole more flexible due to the
   36  * ability to move memory to subsystems which most need it instead of leaving
   37  * pools of reserved memory unused.
   38  *
   39  * The basic ideas stem from similar slab/zone based allocators whose algorithms
   40  * are well known.
   41  *
   42  */
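
       /*
        * For reference, a typical consumer of this allocator creates a zone
        * once and then allocates and frees items through it.  A minimal
        * sketch (the "foo" names below are purely illustrative, and the
        * ctor/dtor/init/fini callbacks may all be NULL):
        *
        *     static uma_zone_t foo_zone;
        *
        *     foo_zone = uma_zcreate("foo", sizeof(struct foo),
        *         NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
        *     p = uma_zalloc(foo_zone, M_WAITOK);
        *     ...
        *     uma_zfree(foo_zone, p);
        *     uma_zdestroy(foo_zone);
        */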
   43 
   44 /*
   45  * TODO:
   46  *      - Improve memory usage for large allocations
   47  *      - Investigate cache size adjustments
   48  */
   49 
   50 #include <sys/cdefs.h>
   51 __FBSDID("$FreeBSD$");
   52 
   53 /* I should really use ktr.. */
   54 /*
   55 #define UMA_DEBUG 1
   56 #define UMA_DEBUG_ALLOC 1
   57 #define UMA_DEBUG_ALLOC_1 1
   58 */
   59 
   60 #include "opt_ddb.h"
   61 #include "opt_param.h"
   62 #include "opt_vm.h"
   63 
   64 #include <sys/param.h>
   65 #include <sys/systm.h>
   66 #include <sys/bitset.h>
   67 #include <sys/eventhandler.h>
   68 #include <sys/kernel.h>
   69 #include <sys/types.h>
   70 #include <sys/queue.h>
   71 #include <sys/malloc.h>
   72 #include <sys/ktr.h>
   73 #include <sys/lock.h>
   74 #include <sys/sysctl.h>
   75 #include <sys/mutex.h>
   76 #include <sys/proc.h>
   77 #include <sys/random.h>
   78 #include <sys/rwlock.h>
   79 #include <sys/sbuf.h>
   80 #include <sys/sched.h>
   81 #include <sys/smp.h>
   82 #include <sys/taskqueue.h>
   83 #include <sys/vmmeter.h>
   84 
   85 #include <vm/vm.h>
   86 #include <vm/vm_object.h>
   87 #include <vm/vm_page.h>
   88 #include <vm/vm_pageout.h>
   89 #include <vm/vm_param.h>
   90 #include <vm/vm_map.h>
   91 #include <vm/vm_kern.h>
   92 #include <vm/vm_extern.h>
   93 #include <vm/uma.h>
   94 #include <vm/uma_int.h>
   95 #include <vm/uma_dbg.h>
   96 
   97 #include <ddb/ddb.h>
   98 
   99 #ifdef DEBUG_MEMGUARD
  100 #include <vm/memguard.h>
  101 #endif
  102 
  103 /*
  104  * This is the zone and keg from which all zones are spawned.  The idea is that
  105  * even the zone & keg heads are allocated from the allocator, so we use the
  106  * bss section to bootstrap us.
  107  */
  108 static struct uma_keg masterkeg;
  109 static struct uma_zone masterzone_k;
  110 static struct uma_zone masterzone_z;
  111 static uma_zone_t kegs = &masterzone_k;
  112 static uma_zone_t zones = &masterzone_z;
  113 
   114 /* This is the zone from which all uma_slab_t's are allocated. */
  115 static uma_zone_t slabzone;
  116 
  117 /*
  118  * The initial hash tables come out of this zone so they can be allocated
  119  * prior to malloc coming up.
  120  */
  121 static uma_zone_t hashzone;
  122 
  123 /* The boot-time adjusted value for cache line alignment. */
  124 int uma_align_cache = 64 - 1;
  125 
  126 static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
  127 
  128 /*
  129  * Are we allowed to allocate buckets?
  130  */
  131 static int bucketdisable = 1;
  132 
  133 /* Linked list of all kegs in the system */
  134 static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);
  135 
  136 /* Linked list of all cache-only zones in the system */
  137 static LIST_HEAD(,uma_zone) uma_cachezones =
  138     LIST_HEAD_INITIALIZER(uma_cachezones);
  139 
  140 /* This RW lock protects the keg list */
  141 static struct rwlock_padalign __exclusive_cache_line uma_rwlock;
  142 
  143 /* Linked list of boot time pages */
  144 static LIST_HEAD(,uma_slab) uma_boot_pages =
  145     LIST_HEAD_INITIALIZER(uma_boot_pages);
  146 
  147 /* This mutex protects the boot time pages list */
  148 static struct mtx_padalign uma_boot_pages_mtx;
  149 
  150 static struct sx uma_drain_lock;
  151 
  152 /* Is the VM done starting up? */
  153 static int booted = 0;
  154 #define UMA_STARTUP     1
  155 #define UMA_STARTUP2    2
  156 #define UMA_SHUTDOWN    3
  157 
  158 /*
  159  * This is the handle used to schedule events that need to happen
  160  * outside of the allocation fast path.
  161  */
  162 static struct callout uma_callout;
  163 #define UMA_TIMEOUT     20              /* Seconds for callout interval. */
  164 
  165 /*
  166  * This structure is passed as the zone ctor arg so that I don't have to create
  167  * a special allocation function just for zones.
  168  */
  169 struct uma_zctor_args {
  170         const char *name;
  171         size_t size;
  172         uma_ctor ctor;
  173         uma_dtor dtor;
  174         uma_init uminit;
  175         uma_fini fini;
  176         uma_import import;
  177         uma_release release;
  178         void *arg;
  179         uma_keg_t keg;
  180         int align;
  181         uint32_t flags;
  182 };
  183 
  184 struct uma_kctor_args {
  185         uma_zone_t zone;
  186         size_t size;
  187         uma_init uminit;
  188         uma_fini fini;
  189         int align;
  190         uint32_t flags;
  191 };
  192 
  193 struct uma_bucket_zone {
  194         uma_zone_t      ubz_zone;
  195         char            *ubz_name;
  196         int             ubz_entries;    /* Number of items it can hold. */
  197         int             ubz_maxsize;    /* Maximum allocation size per-item. */
  198 };
  199 
  200 /*
  201  * Compute the actual number of bucket entries to pack them in power
  202  * of two sizes for more efficient space utilization.
  203  */
  204 #define BUCKET_SIZE(n)                                          \
  205     (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *))
  206 
  207 #define BUCKET_MAX      BUCKET_SIZE(256)
  208 
  209 struct uma_bucket_zone bucket_zones[] = {
  210         { NULL, "4 Bucket", BUCKET_SIZE(4), 4096 },
  211         { NULL, "6 Bucket", BUCKET_SIZE(6), 3072 },
  212         { NULL, "8 Bucket", BUCKET_SIZE(8), 2048 },
  213         { NULL, "12 Bucket", BUCKET_SIZE(12), 1536 },
  214         { NULL, "16 Bucket", BUCKET_SIZE(16), 1024 },
  215         { NULL, "32 Bucket", BUCKET_SIZE(32), 512 },
  216         { NULL, "64 Bucket", BUCKET_SIZE(64), 256 },
  217         { NULL, "128 Bucket", BUCKET_SIZE(128), 128 },
  218         { NULL, "256 Bucket", BUCKET_SIZE(256), 64 },
  219         { NULL, NULL, 0}
  220 };
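
       /*
        * A rough illustration of the sizing above: each "N Bucket" zone is
        * named for its total footprint in pointer-sized words, and
        * BUCKET_SIZE(N) is how many item pointers remain once the bucket
        * header is subtracted.  Assuming an LP64 platform where
        * sizeof(void *) == 8 and sizeof(struct uma_bucket) rounds up to 24
        * bytes, for example:
        *
        *     BUCKET_SIZE(16) == (16 * 8 - 24) / 8 == 13 entries
        *
        * in a 128-byte bucket allocation.
        */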
  221 
  222 /*
  223  * Flags and enumerations to be passed to internal functions.
  224  */
  225 enum zfreeskip { SKIP_NONE = 0, SKIP_DTOR, SKIP_FINI };
  226 
  227 /* Prototypes.. */
  228 
  229 static void *noobj_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
  230 static void *page_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
  231 static void *startup_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
  232 static void page_free(void *, vm_size_t, uint8_t);
  233 static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int);
  234 static void cache_drain(uma_zone_t);
  235 static void bucket_drain(uma_zone_t, uma_bucket_t);
  236 static void bucket_cache_drain(uma_zone_t zone);
  237 static int keg_ctor(void *, int, void *, int);
  238 static void keg_dtor(void *, int, void *);
  239 static int zone_ctor(void *, int, void *, int);
  240 static void zone_dtor(void *, int, void *);
  241 static int zero_init(void *, int, int);
  242 static void keg_small_init(uma_keg_t keg);
  243 static void keg_large_init(uma_keg_t keg);
  244 static void zone_foreach(void (*zfunc)(uma_zone_t));
  245 static void zone_timeout(uma_zone_t zone);
  246 static int hash_alloc(struct uma_hash *, u_int);
  247 static int hash_expand(struct uma_hash *, struct uma_hash *);
  248 static void hash_free(struct uma_hash *hash);
  249 static void uma_timeout(void *);
  250 static void uma_startup3(void);
  251 static void uma_shutdown(void);
  252 static void *zone_alloc_item(uma_zone_t, void *, int);
  253 static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
  254 static void bucket_enable(void);
  255 static void bucket_init(void);
  256 static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
  257 static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
  258 static void bucket_zone_drain(void);
  259 static uma_bucket_t zone_alloc_bucket(uma_zone_t zone, void *, int flags);
  260 static uma_slab_t zone_fetch_slab(uma_zone_t zone, uma_keg_t last, int flags);
  261 static uma_slab_t zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int flags);
  262 static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
  263 static void slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item);
  264 static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
  265     uma_fini fini, int align, uint32_t flags);
  266 static int zone_import(uma_zone_t zone, void **bucket, int max, int flags);
  267 static void zone_release(uma_zone_t zone, void **bucket, int cnt);
  268 static void uma_zero_item(void *item, uma_zone_t zone);
  269 
  270 void uma_print_zone(uma_zone_t);
  271 void uma_print_stats(void);
  272 static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
  273 static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
  274 
  275 #ifdef INVARIANTS
  276 static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
  277 static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item);
  278 #endif
  279 
  280 SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
  281 
  282 SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT,
  283     0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");
  284 
  285 SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
  286     0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");
  287 
  288 static int zone_warnings = 1;
  289 SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RWTUN, &zone_warnings, 0,
   290     "Warn when a UMA zone becomes full");
  291 
  292 /*
   293  * This routine checks whether it is safe to enable buckets.
  294  */
  295 static void
  296 bucket_enable(void)
  297 {
  298         bucketdisable = vm_page_count_min();
  299 }
  300 
  301 /*
  302  * Initialize bucket_zones, the array of zones of buckets of various sizes.
  303  *
  304  * For each zone, calculate the memory required for each bucket, consisting
  305  * of the header and an array of pointers.
  306  */
  307 static void
  308 bucket_init(void)
  309 {
  310         struct uma_bucket_zone *ubz;
  311         int size;
  312 
  313         for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) {
  314                 size = roundup(sizeof(struct uma_bucket), sizeof(void *));
  315                 size += sizeof(void *) * ubz->ubz_entries;
  316                 ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
  317                     NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
  318                     UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET);
  319         }
  320 }
  321 
  322 /*
  323  * Given a desired number of entries for a bucket, return the zone from which
  324  * to allocate the bucket.
  325  */
  326 static struct uma_bucket_zone *
  327 bucket_zone_lookup(int entries)
  328 {
  329         struct uma_bucket_zone *ubz;
  330 
  331         for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
  332                 if (ubz->ubz_entries >= entries)
  333                         return (ubz);
  334         ubz--;
  335         return (ubz);
  336 }
  337 
  338 static int
  339 bucket_select(int size)
  340 {
  341         struct uma_bucket_zone *ubz;
  342 
  343         ubz = &bucket_zones[0];
  344         if (size > ubz->ubz_maxsize)
  345                 return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1);
  346 
  347         for (; ubz->ubz_entries != 0; ubz++)
  348                 if (ubz->ubz_maxsize < size)
  349                         break;
  350         ubz--;
  351         return (ubz->ubz_entries);
  352 }
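
       /*
        * For example, with the table above an item size of 1024 bytes walks
        * past every row whose ubz_maxsize is still >= 1024, stops at the
        * "32 Bucket" row (ubz_maxsize 512) and steps back one entry, so
        * bucket_select(1024) returns BUCKET_SIZE(16) entries.
        * bucket_zone_lookup() goes the other direction: given a desired
        * entry count it returns the first bucket zone large enough to hold
        * it (or the largest one if none is).
        */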
  353 
  354 static uma_bucket_t
  355 bucket_alloc(uma_zone_t zone, void *udata, int flags)
  356 {
  357         struct uma_bucket_zone *ubz;
  358         uma_bucket_t bucket;
  359 
  360         /*
  361          * This is to stop us from allocating per cpu buckets while we're
  362          * running out of vm.boot_pages.  Otherwise, we would exhaust the
  363          * boot pages.  This also prevents us from allocating buckets in
  364          * low memory situations.
  365          */
  366         if (bucketdisable)
  367                 return (NULL);
  368         /*
  369          * To limit bucket recursion we store the original zone flags
  370          * in a cookie passed via zalloc_arg/zfree_arg.  This allows the
  371          * NOVM flag to persist even through deep recursions.  We also
  372          * store ZFLAG_BUCKET once we have recursed attempting to allocate
  373          * a bucket for a bucket zone so we do not allow infinite bucket
  374          * recursion.  This cookie will even persist to frees of unused
  375          * buckets via the allocation path or bucket allocations in the
  376          * free path.
  377          */
  378         if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
  379                 udata = (void *)(uintptr_t)zone->uz_flags;
  380         else {
  381                 if ((uintptr_t)udata & UMA_ZFLAG_BUCKET)
  382                         return (NULL);
  383                 udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET);
  384         }
  385         if ((uintptr_t)udata & UMA_ZFLAG_CACHEONLY)
  386                 flags |= M_NOVM;
  387         ubz = bucket_zone_lookup(zone->uz_count);
  388         if (ubz->ubz_zone == zone && (ubz + 1)->ubz_entries != 0)
  389                 ubz++;
  390         bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags);
  391         if (bucket) {
  392 #ifdef INVARIANTS
  393                 bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
  394 #endif
  395                 bucket->ub_cnt = 0;
  396                 bucket->ub_entries = ubz->ubz_entries;
  397         }
  398 
  399         return (bucket);
  400 }
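
       /*
        * Concretely: an ordinary zone stores its uz_flags as the cookie on
        * the first descent, so flags such as NOVM/CACHEONLY keep limiting
        * the nested allocation when the bucket zone in turn needs memory,
        * while a cookie that already carries UMA_ZFLAG_BUCKET makes any
        * further nested bucket allocation bail out with NULL instead of
        * recursing again.
        */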
  401 
  402 static void
  403 bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata)
  404 {
  405         struct uma_bucket_zone *ubz;
  406 
  407         KASSERT(bucket->ub_cnt == 0,
  408             ("bucket_free: Freeing a non free bucket."));
  409         if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
  410                 udata = (void *)(uintptr_t)zone->uz_flags;
  411         ubz = bucket_zone_lookup(bucket->ub_entries);
  412         uma_zfree_arg(ubz->ubz_zone, bucket, udata);
  413 }
  414 
  415 static void
  416 bucket_zone_drain(void)
  417 {
  418         struct uma_bucket_zone *ubz;
  419 
  420         for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
  421                 zone_drain(ubz->ubz_zone);
  422 }
  423 
  424 static void
  425 zone_log_warning(uma_zone_t zone)
  426 {
  427         static const struct timeval warninterval = { 300, 0 };
  428 
  429         if (!zone_warnings || zone->uz_warning == NULL)
  430                 return;
  431 
  432         if (ratecheck(&zone->uz_ratecheck, &warninterval))
  433                 printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning);
  434 }
  435 
  436 static inline void
  437 zone_maxaction(uma_zone_t zone)
  438 {
  439 
  440         if (zone->uz_maxaction.ta_func != NULL)
  441                 taskqueue_enqueue(taskqueue_thread, &zone->uz_maxaction);
  442 }
  443 
  444 static void
  445 zone_foreach_keg(uma_zone_t zone, void (*kegfn)(uma_keg_t))
  446 {
  447         uma_klink_t klink;
  448 
  449         LIST_FOREACH(klink, &zone->uz_kegs, kl_link)
  450                 kegfn(klink->kl_keg);
  451 }
  452 
  453 /*
   454  * Routine called by the callout to fire off some time-interval-based
   455  * calculations.  (stats, hash size, etc.)
  456  *
  457  * Arguments:
  458  *      arg   Unused
  459  *
  460  * Returns:
  461  *      Nothing
  462  */
  463 static void
  464 uma_timeout(void *unused)
  465 {
  466         bucket_enable();
  467         zone_foreach(zone_timeout);
  468 
  469         /* Reschedule this event */
  470         callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
  471 }
  472 
  473 /*
   474  * Routine to perform timeout driven calculations.  This expands the
   475  * keg hashes when they have grown too full.
  476  *
  477  *  Returns nothing.
  478  */
  479 static void
  480 keg_timeout(uma_keg_t keg)
  481 {
  482         u_int slabs;
  483 
  484         KEG_LOCK(keg);
  485         /*
  486          * Expand the keg hash table.
  487          *
  488          * This is done if the number of slabs is larger than the hash size.
   489  * What I'm trying to do here is eliminate collisions entirely.  This
  490          * may be a little aggressive.  Should I allow for two collisions max?
  491          */
  492         if (keg->uk_flags & UMA_ZONE_HASH &&
  493             (slabs = keg->uk_pages / keg->uk_ppera) >
  494              keg->uk_hash.uh_hashsize) {
  495                 struct uma_hash newhash;
  496                 struct uma_hash oldhash;
  497                 int ret;
  498 
  499                 /*
  500                  * This is so involved because allocating and freeing
  501                  * while the keg lock is held will lead to deadlock.
  502                  * I have to do everything in stages and check for
  503                  * races.
  504                  */
  505                 KEG_UNLOCK(keg);
  506                 ret = hash_alloc(&newhash, 1 << fls(slabs));
  507                 KEG_LOCK(keg);
  508                 if (ret) {
  509                         if (hash_expand(&keg->uk_hash, &newhash)) {
  510                                 oldhash = keg->uk_hash;
  511                                 keg->uk_hash = newhash;
  512                         } else
  513                                 oldhash = newhash;
  514 
  515                         KEG_UNLOCK(keg);
  516                         hash_free(&oldhash);
  517                         return;
  518                 }
  519         }
  520         KEG_UNLOCK(keg);
  521 }
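
       /*
        * Sizing example: a keg with one page per slab that has grown to
        * 5000 slabs while its hash table is still smaller than that will
        * ask hash_alloc() below for 1 << fls(5000) == 8192 entries, i.e.
        * the smallest power of two strictly greater than the slab count.
        */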
  522 
  523 static void
  524 zone_timeout(uma_zone_t zone)
  525 {
  526 
  527         zone_foreach_keg(zone, &keg_timeout);
  528 }
  529 
  530 /*
  531  * Allocate and zero fill the next sized hash table from the appropriate
  532  * backing store.
  533  *
  534  * Arguments:
   535  *      hash  A new hash structure; size is the requested power-of-2 size
  536  *
  537  * Returns:
  538  *      1 on success and 0 on failure.
  539  */
  540 static int
  541 hash_alloc(struct uma_hash *hash, u_int size)
  542 {
  543         size_t alloc;
  544 
  545         KASSERT(powerof2(size), ("hash size must be power of 2"));
  546         if (size > UMA_HASH_SIZE_INIT)  {
  547                 hash->uh_hashsize = size;
  548                 alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
  549                 hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
  550                     M_UMAHASH, M_NOWAIT);
  551         } else {
  552                 alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
  553                 hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
  554                     M_WAITOK);
  555                 hash->uh_hashsize = UMA_HASH_SIZE_INIT;
  556         }
  557         if (hash->uh_slab_hash) {
  558                 bzero(hash->uh_slab_hash, alloc);
  559                 hash->uh_hashmask = hash->uh_hashsize - 1;
  560                 return (1);
  561         }
  562 
  563         return (0);
  564 }
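
       /*
        * Note that the smallest table (UMA_HASH_SIZE_INIT entries) is carved
        * out of hashzone via zone_alloc_item() precisely so that HASH kegs
        * can be set up before malloc(9) is available; larger tables come
        * from malloc(M_UMAHASH) and are released with free() in hash_free()
        * below.
        */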
  565 
  566 /*
  567  * Expands the hash table for HASH zones.  This is done from zone_timeout
  568  * to reduce collisions.  This must not be done in the regular allocation
   569  * path; otherwise, we can recurse on the VM while allocating pages.
  570  *
  571  * Arguments:
  572  *      oldhash  The hash you want to expand
  573  *      newhash  The hash structure for the new table
  574  *
  575  * Returns:
   576  *      1 if the table was expanded, 0 otherwise
  577  *
  578  * Discussion:
  579  */
  580 static int
  581 hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
  582 {
  583         uma_slab_t slab;
  584         u_int hval;
  585         u_int idx;
  586 
  587         if (!newhash->uh_slab_hash)
  588                 return (0);
  589 
  590         if (oldhash->uh_hashsize >= newhash->uh_hashsize)
  591                 return (0);
  592 
  593         /*
  594          * I need to investigate hash algorithms for resizing without a
  595          * full rehash.
  596          */
  597 
  598         for (idx = 0; idx < oldhash->uh_hashsize; idx++)
  599                 while (!SLIST_EMPTY(&oldhash->uh_slab_hash[idx])) {
  600                         slab = SLIST_FIRST(&oldhash->uh_slab_hash[idx]);
  601                         SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[idx], us_hlink);
  602                         hval = UMA_HASH(newhash, slab->us_data);
  603                         SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
  604                             slab, us_hlink);
  605                 }
  606 
  607         return (1);
  608 }
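
       /*
        * The bucket index used above comes from UMA_HASH() in uma_int.h,
        * which (roughly) shifts the slab's us_data address right by the
        * slab-size shift and masks it with uh_hashmask, so rehashing into
        * the larger table only needs the data pointer and the new mask.
        */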
  609 
  610 /*
  611  * Free the hash bucket to the appropriate backing store.
  612  *
  613  * Arguments:
   614  *      hash  The hash structure whose bucket array we're freeing; its
   615  *            size determines which backing store it is returned to
  616  *
  617  * Returns:
  618  *      Nothing
  619  */
  620 static void
  621 hash_free(struct uma_hash *hash)
  622 {
  623         if (hash->uh_slab_hash == NULL)
  624                 return;
  625         if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
  626                 zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE);
  627         else
  628                 free(hash->uh_slab_hash, M_UMAHASH);
  629 }
  630 
  631 /*
  632  * Frees all outstanding items in a bucket
  633  *
  634  * Arguments:
  635  *      zone   The zone to free to, must be unlocked.
  636  *      bucket The free/alloc bucket with items, cpu queue must be locked.
  637  *
  638  * Returns:
  639  *      Nothing
  640  */
  641 
  642 static void
  643 bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
  644 {
  645         int i;
  646 
  647         if (bucket == NULL)
  648                 return;
  649 
  650         if (zone->uz_fini)
  651                 for (i = 0; i < bucket->ub_cnt; i++) 
  652                         zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
  653         zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
  654         bucket->ub_cnt = 0;
  655 }
  656 
  657 /*
  658  * Drains the per cpu caches for a zone.
  659  *
   660  * NOTE: This may only be called while the zone is being torn down, and not
   661  * during normal operation.  This is necessary so that we do not have
  662  * to migrate CPUs to drain the per-CPU caches.
  663  *
  664  * Arguments:
  665  *      zone     The zone to drain, must be unlocked.
  666  *
  667  * Returns:
  668  *      Nothing
  669  */
  670 static void
  671 cache_drain(uma_zone_t zone)
  672 {
  673         uma_cache_t cache;
  674         int cpu;
  675 
  676         /*
  677          * XXX: It is safe to not lock the per-CPU caches, because we're
  678          * tearing down the zone anyway.  I.e., there will be no further use
  679          * of the caches at this point.
  680          *
   681  * XXX: It would be good to be able to assert that the zone is being
  682          * torn down to prevent improper use of cache_drain().
  683          *
  684          * XXX: We lock the zone before passing into bucket_cache_drain() as
  685          * it is used elsewhere.  Should the tear-down path be made special
  686          * there in some form?
  687          */
  688         CPU_FOREACH(cpu) {
  689                 cache = &zone->uz_cpu[cpu];
  690                 bucket_drain(zone, cache->uc_allocbucket);
  691                 bucket_drain(zone, cache->uc_freebucket);
  692                 if (cache->uc_allocbucket != NULL)
  693                         bucket_free(zone, cache->uc_allocbucket, NULL);
  694                 if (cache->uc_freebucket != NULL)
  695                         bucket_free(zone, cache->uc_freebucket, NULL);
  696                 cache->uc_allocbucket = cache->uc_freebucket = NULL;
  697         }
  698         ZONE_LOCK(zone);
  699         bucket_cache_drain(zone);
  700         ZONE_UNLOCK(zone);
  701 }
  702 
  703 static void
  704 cache_shrink(uma_zone_t zone)
  705 {
  706 
  707         if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
  708                 return;
  709 
  710         ZONE_LOCK(zone);
  711         zone->uz_count = (zone->uz_count_min + zone->uz_count) / 2;
  712         ZONE_UNLOCK(zone);
  713 }
  714 
  715 static void
  716 cache_drain_safe_cpu(uma_zone_t zone)
  717 {
  718         uma_cache_t cache;
  719         uma_bucket_t b1, b2;
  720 
  721         if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
  722                 return;
  723 
  724         b1 = b2 = NULL;
  725         ZONE_LOCK(zone);
  726         critical_enter();
  727         cache = &zone->uz_cpu[curcpu];
  728         if (cache->uc_allocbucket) {
  729                 if (cache->uc_allocbucket->ub_cnt != 0)
  730                         LIST_INSERT_HEAD(&zone->uz_buckets,
  731                             cache->uc_allocbucket, ub_link);
  732                 else
  733                         b1 = cache->uc_allocbucket;
  734                 cache->uc_allocbucket = NULL;
  735         }
  736         if (cache->uc_freebucket) {
  737                 if (cache->uc_freebucket->ub_cnt != 0)
  738                         LIST_INSERT_HEAD(&zone->uz_buckets,
  739                             cache->uc_freebucket, ub_link);
  740                 else
  741                         b2 = cache->uc_freebucket;
  742                 cache->uc_freebucket = NULL;
  743         }
  744         critical_exit();
  745         ZONE_UNLOCK(zone);
  746         if (b1)
  747                 bucket_free(zone, b1, NULL);
  748         if (b2)
  749                 bucket_free(zone, b2, NULL);
  750 }
  751 
  752 /*
   753  * Safely drain the per-CPU caches of a zone (or of all zones) into the
   754  * zone bucket cache.  This is an expensive call because it needs to bind
   755  * to each CPU in turn and enter a critical section on each of them in
   756  * order to safely access their cache buckets.
   757  * The zone lock must not be held when calling this function.
  758  */
  759 static void
  760 cache_drain_safe(uma_zone_t zone)
  761 {
  762         int cpu;
  763 
  764         /*
   765          * Polite bucket size shrinking was not enough, shrink aggressively.
  766          */
  767         if (zone)
  768                 cache_shrink(zone);
  769         else
  770                 zone_foreach(cache_shrink);
  771 
  772         CPU_FOREACH(cpu) {
  773                 thread_lock(curthread);
  774                 sched_bind(curthread, cpu);
  775                 thread_unlock(curthread);
  776 
  777                 if (zone)
  778                         cache_drain_safe_cpu(zone);
  779                 else
  780                         zone_foreach(cache_drain_safe_cpu);
  781         }
  782         thread_lock(curthread);
  783         sched_unbind(curthread);
  784         thread_unlock(curthread);
  785 }
  786 
  787 /*
  788  * Drain the cached buckets from a zone.  Expects a locked zone on entry.
  789  */
  790 static void
  791 bucket_cache_drain(uma_zone_t zone)
  792 {
  793         uma_bucket_t bucket;
  794 
  795         /*
   796          * Drain the bucket queues and free the buckets; we just keep two per
   797          * CPU (alloc/free).
  798          */
  799         while ((bucket = LIST_FIRST(&zone->uz_buckets)) != NULL) {
  800                 LIST_REMOVE(bucket, ub_link);
  801                 ZONE_UNLOCK(zone);
  802                 bucket_drain(zone, bucket);
  803                 bucket_free(zone, bucket, NULL);
  804                 ZONE_LOCK(zone);
  805         }
  806 
  807         /*
   808          * Shrink further bucket sizes.  The price of a single zone lock
   809          * collision is probably lower than the price of a global cache drain.
  810          */
  811         if (zone->uz_count > zone->uz_count_min)
  812                 zone->uz_count--;
  813 }
  814 
  815 static void
  816 keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
  817 {
  818         uint8_t *mem;
  819         int i;
  820         uint8_t flags;
  821 
  822         mem = slab->us_data;
  823         flags = slab->us_flags;
  824         i = start;
  825         if (keg->uk_fini != NULL) {
  826                 for (i--; i > -1; i--)
  827                         keg->uk_fini(slab->us_data + (keg->uk_rsize * i),
  828                             keg->uk_size);
  829         }
  830         if (keg->uk_flags & UMA_ZONE_OFFPAGE)
  831                 zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
  832 #ifdef UMA_DEBUG
  833         printf("%s: Returning %d bytes.\n", keg->uk_name,
  834             PAGE_SIZE * keg->uk_ppera);
  835 #endif
  836         keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags);
  837 }
  838 
  839 /*
  840  * Frees pages from a keg back to the system.  This is done on demand from
  841  * the pageout daemon.
  842  *
  843  * Returns nothing.
  844  */
  845 static void
  846 keg_drain(uma_keg_t keg)
  847 {
  848         struct slabhead freeslabs = { 0 };
  849         uma_slab_t slab, tmp;
  850 
  851         /*
  852          * We don't want to take pages from statically allocated kegs at this
   853          * time.
  854          */
  855         if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
  856                 return;
  857 
  858 #ifdef UMA_DEBUG
  859         printf("%s free items: %u\n", keg->uk_name, keg->uk_free);
  860 #endif
  861         KEG_LOCK(keg);
  862         if (keg->uk_free == 0)
  863                 goto finished;
  864 
  865         LIST_FOREACH_SAFE(slab, &keg->uk_free_slab, us_link, tmp) {
  866                 /* We have nowhere to free these to. */
  867                 if (slab->us_flags & UMA_SLAB_BOOT)
  868                         continue;
  869 
  870                 LIST_REMOVE(slab, us_link);
  871                 keg->uk_pages -= keg->uk_ppera;
  872                 keg->uk_free -= keg->uk_ipers;
  873 
  874                 if (keg->uk_flags & UMA_ZONE_HASH)
  875                         UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data);
  876 
  877                 SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
  878         }
  879 finished:
  880         KEG_UNLOCK(keg);
  881 
  882         while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
  883                 SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
  884                 keg_free_slab(keg, slab, keg->uk_ipers);
  885         }
  886 }
  887 
  888 static void
  889 zone_drain_wait(uma_zone_t zone, int waitok)
  890 {
  891 
  892         /*
  893          * Set draining to interlock with zone_dtor() so we can release our
  894          * locks as we go.  Only dtor() should do a WAITOK call since it
  895          * is the only call that knows the structure will still be available
  896          * when it wakes up.
  897          */
  898         ZONE_LOCK(zone);
  899         while (zone->uz_flags & UMA_ZFLAG_DRAINING) {
  900                 if (waitok == M_NOWAIT)
  901                         goto out;
  902                 msleep(zone, zone->uz_lockptr, PVM, "zonedrain", 1);
  903         }
  904         zone->uz_flags |= UMA_ZFLAG_DRAINING;
  905         bucket_cache_drain(zone);
  906         ZONE_UNLOCK(zone);
  907         /*
  908          * The DRAINING flag protects us from being freed while
  909          * we're running.  Normally the uma_rwlock would protect us but we
  910          * must be able to release and acquire the right lock for each keg.
  911          */
  912         zone_foreach_keg(zone, &keg_drain);
  913         ZONE_LOCK(zone);
  914         zone->uz_flags &= ~UMA_ZFLAG_DRAINING;
  915         wakeup(zone);
  916 out:
  917         ZONE_UNLOCK(zone);
  918 }
  919 
  920 void
  921 zone_drain(uma_zone_t zone)
  922 {
  923 
  924         zone_drain_wait(zone, M_NOWAIT);
  925 }
  926 
  927 /*
  928  * Allocate a new slab for a keg.  This does not insert the slab onto a list.
  929  *
  930  * Arguments:
  931  *      wait  Shall we wait?
  932  *
  933  * Returns:
  934  *      The slab that was allocated or NULL if there is no memory and the
  935  *      caller specified M_NOWAIT.
  936  */
  937 static uma_slab_t
  938 keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int wait)
  939 {
  940         uma_alloc allocf;
  941         uma_slab_t slab;
  942         uint8_t *mem;
  943         uint8_t flags;
  944         int i;
  945 
  946         mtx_assert(&keg->uk_lock, MA_OWNED);
  947         slab = NULL;
  948         mem = NULL;
  949 
  950 #ifdef UMA_DEBUG
  951         printf("alloc_slab:  Allocating a new slab for %s\n", keg->uk_name);
  952 #endif
  953         allocf = keg->uk_allocf;
  954         KEG_UNLOCK(keg);
  955 
  956         if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
  957                 slab = zone_alloc_item(keg->uk_slabzone, NULL, wait);
  958                 if (slab == NULL)
  959                         goto out;
  960         }
  961 
  962         /*
  963          * This reproduces the old vm_zone behavior of zero filling pages the
  964          * first time they are added to a zone.
  965          *
  966          * Malloced items are zeroed in uma_zalloc.
  967          */
  968 
  969         if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
  970                 wait |= M_ZERO;
  971         else
  972                 wait &= ~M_ZERO;
  973 
  974         if (keg->uk_flags & UMA_ZONE_NODUMP)
  975                 wait |= M_NODUMP;
  976 
  977         /* zone is passed for legacy reasons. */
  978         mem = allocf(zone, keg->uk_ppera * PAGE_SIZE, &flags, wait);
  979         if (mem == NULL) {
  980                 if (keg->uk_flags & UMA_ZONE_OFFPAGE)
  981                         zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
  982                 slab = NULL;
  983                 goto out;
  984         }
  985 
  986         /* Point the slab into the allocated memory */
  987         if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
  988                 slab = (uma_slab_t )(mem + keg->uk_pgoff);
  989 
  990         if (keg->uk_flags & UMA_ZONE_VTOSLAB)
  991                 for (i = 0; i < keg->uk_ppera; i++)
  992                         vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);
  993 
  994         slab->us_keg = keg;
  995         slab->us_data = mem;
  996         slab->us_freecount = keg->uk_ipers;
  997         slab->us_flags = flags;
  998         BIT_FILL(SLAB_SETSIZE, &slab->us_free);
  999 #ifdef INVARIANTS
 1000         BIT_ZERO(SLAB_SETSIZE, &slab->us_debugfree);
 1001 #endif
 1002 
 1003         if (keg->uk_init != NULL) {
 1004                 for (i = 0; i < keg->uk_ipers; i++)
 1005                         if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
 1006                             keg->uk_size, wait) != 0)
 1007                                 break;
 1008                 if (i != keg->uk_ipers) {
 1009                         keg_free_slab(keg, slab, i);
 1010                         slab = NULL;
 1011                         goto out;
 1012                 }
 1013         }
 1014 out:
 1015         KEG_LOCK(keg);
 1016 
 1017         if (slab != NULL) {
 1018                 if (keg->uk_flags & UMA_ZONE_HASH)
 1019                         UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
 1020 
 1021                 keg->uk_pages += keg->uk_ppera;
 1022                 keg->uk_free += keg->uk_ipers;
 1023         }
 1024 
 1025         return (slab);
 1026 }
 1027 
 1028 /*
 1029  * This function is intended to be used early on in place of page_alloc() so
 1030  * that we may use the boot time page cache to satisfy allocations before
 1031  * the VM is ready.
 1032  */
 1033 static void *
 1034 startup_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag, int wait)
 1035 {
 1036         uma_keg_t keg;
 1037         uma_slab_t tmps;
 1038         int pages, check_pages;
 1039 
 1040         keg = zone_first_keg(zone);
 1041         pages = howmany(bytes, PAGE_SIZE);
 1042         check_pages = pages - 1;
 1043         KASSERT(pages > 0, ("startup_alloc can't reserve 0 pages\n"));
 1044 
 1045         /*
 1046          * Check our small startup cache to see if it has pages remaining.
 1047          */
 1048         mtx_lock(&uma_boot_pages_mtx);
 1049 
 1050         /* First check if we have enough room. */
 1051         tmps = LIST_FIRST(&uma_boot_pages);
 1052         while (tmps != NULL && check_pages-- > 0)
 1053                 tmps = LIST_NEXT(tmps, us_link);
 1054         if (tmps != NULL) {
 1055                 /*
 1056                  * It's ok to lose tmps references.  The last one will
 1057                  * have tmps->us_data pointing to the start address of
 1058                  * "pages" contiguous pages of memory.
 1059                  */
 1060                 while (pages-- > 0) {
 1061                         tmps = LIST_FIRST(&uma_boot_pages);
 1062                         LIST_REMOVE(tmps, us_link);
 1063                 }
 1064                 mtx_unlock(&uma_boot_pages_mtx);
 1065                 *pflag = tmps->us_flags;
 1066                 return (tmps->us_data);
 1067         }
 1068         mtx_unlock(&uma_boot_pages_mtx);
 1069         if (booted < UMA_STARTUP2)
 1070                 panic("UMA: Increase vm.boot_pages");
 1071         /*
  1072          * Now that we've booted, reset these users to their real allocator.
 1073          */
 1074 #ifdef UMA_MD_SMALL_ALLOC
 1075         keg->uk_allocf = (keg->uk_ppera > 1) ? page_alloc : uma_small_alloc;
 1076 #else
 1077         keg->uk_allocf = page_alloc;
 1078 #endif
 1079         return keg->uk_allocf(zone, bytes, pflag, wait);
 1080 }
 1081 
 1082 /*
 1083  * Allocates a number of pages from the system
 1084  *
 1085  * Arguments:
 1086  *      bytes  The number of bytes requested
 1087  *      wait  Shall we wait?
 1088  *
 1089  * Returns:
  1090  *      A pointer to the allocated memory or possibly
 1091  *      NULL if M_NOWAIT is set.
 1092  */
 1093 static void *
 1094 page_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag, int wait)
 1095 {
 1096         void *p;        /* Returned page */
 1097 
 1098         *pflag = UMA_SLAB_KMEM;
 1099         p = (void *) kmem_malloc(kmem_arena, bytes, wait);
 1100 
 1101         return (p);
 1102 }
 1103 
 1104 /*
  1105  * Allocates a number of wired pages, not belonging to any VM object
 1106  *
 1107  * Arguments:
 1108  *      bytes  The number of bytes requested
 1109  *      wait   Shall we wait?
 1110  *
 1111  * Returns:
  1112  *      A pointer to the allocated memory or possibly
 1113  *      NULL if M_NOWAIT is set.
 1114  */
 1115 static void *
 1116 noobj_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *flags, int wait)
 1117 {
 1118         TAILQ_HEAD(, vm_page) alloctail;
 1119         u_long npages;
 1120         vm_offset_t retkva, zkva;
 1121         vm_page_t p, p_next;
 1122         uma_keg_t keg;
 1123 
 1124         TAILQ_INIT(&alloctail);
 1125         keg = zone_first_keg(zone);
 1126 
 1127         npages = howmany(bytes, PAGE_SIZE);
 1128         while (npages > 0) {
 1129                 p = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT |
 1130                     VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
 1131                     ((wait & M_WAITOK) != 0 ? VM_ALLOC_WAITOK :
 1132                     VM_ALLOC_NOWAIT));
 1133                 if (p != NULL) {
 1134                         /*
 1135                          * Since the page does not belong to an object, its
 1136                          * listq is unused.
 1137                          */
 1138                         TAILQ_INSERT_TAIL(&alloctail, p, listq);
 1139                         npages--;
 1140                         continue;
 1141                 }
 1142                 /*
 1143                  * Page allocation failed, free intermediate pages and
 1144                  * exit.
 1145                  */
 1146                 TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
 1147                         vm_page_unwire(p, PQ_NONE);
 1148                         vm_page_free(p); 
 1149                 }
 1150                 return (NULL);
 1151         }
 1152         *flags = UMA_SLAB_PRIV;
 1153         zkva = keg->uk_kva +
 1154             atomic_fetchadd_long(&keg->uk_offset, round_page(bytes));
 1155         retkva = zkva;
 1156         TAILQ_FOREACH(p, &alloctail, listq) {
 1157                 pmap_qenter(zkva, &p, 1);
 1158                 zkva += PAGE_SIZE;
 1159         }
 1160 
 1161         return ((void *)retkva);
 1162 }
 1163 
 1164 /*
 1165  * Frees a number of pages to the system
 1166  *
 1167  * Arguments:
 1168  *      mem   A pointer to the memory to be freed
 1169  *      size  The size of the memory being freed
 1170  *      flags The original p->us_flags field
 1171  *
 1172  * Returns:
 1173  *      Nothing
 1174  */
 1175 static void
 1176 page_free(void *mem, vm_size_t size, uint8_t flags)
 1177 {
 1178         struct vmem *vmem;
 1179 
 1180         if (flags & UMA_SLAB_KMEM)
 1181                 vmem = kmem_arena;
 1182         else if (flags & UMA_SLAB_KERNEL)
 1183                 vmem = kernel_arena;
 1184         else
 1185                 panic("UMA: page_free used with invalid flags %d", flags);
 1186 
 1187         kmem_free(vmem, (vm_offset_t)mem, size);
 1188 }
 1189 
 1190 /*
 1191  * Zero fill initializer
 1192  *
 1193  * Arguments/Returns follow uma_init specifications
 1194  */
 1195 static int
 1196 zero_init(void *mem, int size, int flags)
 1197 {
 1198         bzero(mem, size);
 1199         return (0);
 1200 }
 1201 
 1202 /*
 1203  * Finish creating a small uma keg.  This calculates ipers, and the keg size.
 1204  *
 1205  * Arguments
  1206  *      keg  The keg we should initialize
 1207  *
 1208  * Returns
 1209  *      Nothing
 1210  */
 1211 static void
 1212 keg_small_init(uma_keg_t keg)
 1213 {
 1214         u_int rsize;
 1215         u_int memused;
 1216         u_int wastedspace;
 1217         u_int shsize;
 1218         u_int slabsize;
 1219 
 1220         if (keg->uk_flags & UMA_ZONE_PCPU) {
 1221                 u_int ncpus = (mp_maxid + 1) ? (mp_maxid + 1) : MAXCPU;
 1222 
 1223                 slabsize = sizeof(struct pcpu);
 1224                 keg->uk_ppera = howmany(ncpus * sizeof(struct pcpu),
 1225                     PAGE_SIZE);
 1226         } else {
 1227                 slabsize = UMA_SLAB_SIZE;
 1228                 keg->uk_ppera = 1;
 1229         }
 1230 
 1231         /*
 1232          * Calculate the size of each allocation (rsize) according to
  1233          * alignment.  If the requested size is smaller than the smallest
  1234          * unit the per-slab allocation bitset can track, we round it up.
 1235          */
 1236         rsize = keg->uk_size;
 1237         if (rsize < slabsize / SLAB_SETSIZE)
 1238                 rsize = slabsize / SLAB_SETSIZE;
 1239         if (rsize & keg->uk_align)
 1240                 rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);
 1241         keg->uk_rsize = rsize;
 1242 
 1243         KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 ||
 1244             keg->uk_rsize < sizeof(struct pcpu),
 1245             ("%s: size %u too large", __func__, keg->uk_rsize));
 1246 
 1247         if (keg->uk_flags & UMA_ZONE_OFFPAGE)
 1248                 shsize = 0;
 1249         else 
 1250                 shsize = sizeof(struct uma_slab);
 1251 
 1252         if (rsize <= slabsize - shsize)
 1253                 keg->uk_ipers = (slabsize - shsize) / rsize;
 1254         else {
  1255                 /* Handle the special case where we have one item per slab,
  1256                  * so the alignment requirement can be relaxed. */
 1257                 KASSERT(keg->uk_size <= slabsize - shsize,
 1258                     ("%s: size %u greater than slab", __func__, keg->uk_size));
 1259                 keg->uk_ipers = 1;
 1260         }
 1261         KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
 1262             ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
 1263 
 1264         memused = keg->uk_ipers * rsize + shsize;
 1265         wastedspace = slabsize - memused;
 1266 
 1267         /*
 1268          * We can't do OFFPAGE if we're internal or if we've been
  1269          * asked not to go to the VM for buckets.  If we do OFFPAGE we
  1270          * may end up going to the VM for slab headers, which we must not
  1271          * do if we're UMA_ZFLAG_CACHEONLY as a result
  1272          * of UMA_ZONE_VM, which clearly forbids it.
 1273          */
 1274         if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
 1275             (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
 1276                 return;
 1277 
 1278         /*
 1279          * See if using an OFFPAGE slab will limit our waste.  Only do
 1280          * this if it permits more items per-slab.
 1281          *
 1282          * XXX We could try growing slabsize to limit max waste as well.
 1283          * Historically this was not done because the VM could not
 1284          * efficiently handle contiguous allocations.
 1285          */
 1286         if ((wastedspace >= slabsize / UMA_MAX_WASTE) &&
 1287             (keg->uk_ipers < (slabsize / keg->uk_rsize))) {
 1288                 keg->uk_ipers = slabsize / keg->uk_rsize;
 1289                 KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
 1290                     ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
 1291 #ifdef UMA_DEBUG
 1292                 printf("UMA decided we need offpage slab headers for "
 1293                     "keg: %s, calculated wastedspace = %d, "
 1294                     "maximum wasted space allowed = %d, "
 1295                     "calculated ipers = %d, "
 1296                     "new wasted space = %d\n", keg->uk_name, wastedspace,
 1297                     slabsize / UMA_MAX_WASTE, keg->uk_ipers,
 1298                     slabsize - keg->uk_ipers * keg->uk_rsize);
 1299 #endif
 1300                 keg->uk_flags |= UMA_ZONE_OFFPAGE;
 1301         }
 1302 
 1303         if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
 1304             (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
 1305                 keg->uk_flags |= UMA_ZONE_HASH;
 1306 }
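
       /*
        * Worked example of the sizing above, assuming UMA_SLAB_SIZE is a
        * single 4K page and SLAB_SETSIZE is 256 bits: a 100-byte item with
        * 8-byte alignment (uk_align == 7) is rounded up to
        * rsize = (100 & ~7) + 8 == 104, and an in-page slab then holds
        * ipers = (4096 - shsize) / 104 items, where shsize is the size of
        * the embedded struct uma_slab (or 0 for OFFPAGE kegs).
        */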
 1307 
 1308 /*
  1309  * Finish creating a large (> UMA_SLAB_SIZE) uma keg.  Just give in and do
 1310  * OFFPAGE for now.  When I can allow for more dynamic slab sizes this will be
 1311  * more complicated.
 1312  *
 1313  * Arguments
 1314  *      keg  The keg we should initialize
 1315  *
 1316  * Returns
 1317  *      Nothing
 1318  */
 1319 static void
 1320 keg_large_init(uma_keg_t keg)
 1321 {
 1322         u_int shsize;
 1323 
 1324         KASSERT(keg != NULL, ("Keg is null in keg_large_init"));
 1325         KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0,
 1326             ("keg_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY keg"));
 1327         KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
 1328             ("%s: Cannot large-init a UMA_ZONE_PCPU keg", __func__));
 1329 
 1330         keg->uk_ppera = howmany(keg->uk_size, PAGE_SIZE);
 1331         keg->uk_ipers = 1;
 1332         keg->uk_rsize = keg->uk_size;
 1333 
 1334         /* Check whether we have enough space to not do OFFPAGE. */
 1335         if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0) {
 1336                 shsize = sizeof(struct uma_slab);
 1337                 if (shsize & UMA_ALIGN_PTR)
 1338                         shsize = (shsize & ~UMA_ALIGN_PTR) +
 1339                             (UMA_ALIGN_PTR + 1);
 1340 
 1341                 if (PAGE_SIZE * keg->uk_ppera - keg->uk_rsize < shsize) {
 1342                         /*
 1343                          * We can't do OFFPAGE if we're internal, in which case
 1344                          * we need an extra page per allocation to contain the
 1345                          * slab header.
 1346                          */
 1347                         if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) == 0)
 1348                                 keg->uk_flags |= UMA_ZONE_OFFPAGE;
 1349                         else
 1350                                 keg->uk_ppera++;
 1351                 }
 1352         }
 1353 
 1354         if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
 1355             (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
 1356                 keg->uk_flags |= UMA_ZONE_HASH;
 1357 }
 1358 
 1359 static void
 1360 keg_cachespread_init(uma_keg_t keg)
 1361 {
 1362         int alignsize;
 1363         int trailer;
 1364         int pages;
 1365         int rsize;
 1366 
 1367         KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
 1368             ("%s: Cannot cachespread-init a UMA_ZONE_PCPU keg", __func__));
 1369 
 1370         alignsize = keg->uk_align + 1;
 1371         rsize = keg->uk_size;
 1372         /*
 1373          * We want one item to start on every align boundary in a page.  To
 1374          * do this we will span pages.  We will also extend the item by the
 1375          * size of align if it is an even multiple of align.  Otherwise, it
 1376          * would fall on the same boundary every time.
 1377          */
 1378         if (rsize & keg->uk_align)
 1379                 rsize = (rsize & ~keg->uk_align) + alignsize;
 1380         if ((rsize & alignsize) == 0)
 1381                 rsize += alignsize;
 1382         trailer = rsize - keg->uk_size;
 1383         pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE;
 1384         pages = MIN(pages, (128 * 1024) / PAGE_SIZE);
 1385         keg->uk_rsize = rsize;
 1386         keg->uk_ppera = pages;
 1387         keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
 1388         keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
 1389         KASSERT(keg->uk_ipers <= SLAB_SETSIZE,
 1390             ("%s: keg->uk_ipers too high(%d) increase max_ipers", __func__,
 1391             keg->uk_ipers));
 1392 }
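
       /*
        * Example of the spread math, assuming 4K pages: a 128-byte item
        * aligned to 64-byte cache lines (uk_align == 63) gets padded to
        * rsize = 192 so consecutive items start on different line offsets,
        * which gives pages = (192 * 64) / 4096 == 3 and
        * ipers = (3 * 4096 + 64) / 192 == 64 items per three-page slab.
        */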
 1393 
 1394 /*
 1395  * Keg header ctor.  This initializes all fields, locks, etc.  And inserts
 1396  * the keg onto the global keg list.
 1397  *
 1398  * Arguments/Returns follow uma_ctor specifications
 1399  *      udata  Actually uma_kctor_args
 1400  */
 1401 static int
 1402 keg_ctor(void *mem, int size, void *udata, int flags)
 1403 {
 1404         struct uma_kctor_args *arg = udata;
 1405         uma_keg_t keg = mem;
 1406         uma_zone_t zone;
 1407 
 1408         bzero(keg, size);
 1409         keg->uk_size = arg->size;
 1410         keg->uk_init = arg->uminit;
 1411         keg->uk_fini = arg->fini;
 1412         keg->uk_align = arg->align;
 1413         keg->uk_free = 0;
 1414         keg->uk_reserve = 0;
 1415         keg->uk_pages = 0;
 1416         keg->uk_flags = arg->flags;
 1417         keg->uk_allocf = page_alloc;
 1418         keg->uk_freef = page_free;
 1419         keg->uk_slabzone = NULL;
 1420 
 1421         /*
 1422          * The master zone is passed to us at keg-creation time.
 1423          */
 1424         zone = arg->zone;
 1425         keg->uk_name = zone->uz_name;
 1426 
 1427         if (arg->flags & UMA_ZONE_VM)
 1428                 keg->uk_flags |= UMA_ZFLAG_CACHEONLY;
 1429 
 1430         if (arg->flags & UMA_ZONE_ZINIT)
 1431                 keg->uk_init = zero_init;
 1432 
 1433         if (arg->flags & UMA_ZONE_MALLOC)
 1434                 keg->uk_flags |= UMA_ZONE_VTOSLAB;
 1435 
 1436         if (arg->flags & UMA_ZONE_PCPU)
 1437 #ifdef SMP
 1438                 keg->uk_flags |= UMA_ZONE_OFFPAGE;
 1439 #else
 1440                 keg->uk_flags &= ~UMA_ZONE_PCPU;
 1441 #endif
 1442 
 1443         if (keg->uk_flags & UMA_ZONE_CACHESPREAD) {
 1444                 keg_cachespread_init(keg);
 1445         } else {
 1446                 if (keg->uk_size > (UMA_SLAB_SIZE - sizeof(struct uma_slab)))
 1447                         keg_large_init(keg);
 1448                 else
 1449                         keg_small_init(keg);
 1450         }
 1451 
 1452         if (keg->uk_flags & UMA_ZONE_OFFPAGE)
 1453                 keg->uk_slabzone = slabzone;
 1454 
 1455         /*
 1456          * If we haven't booted yet we need allocations to go through the
 1457          * startup cache until the vm is ready.
 1458          */
 1459         if (keg->uk_ppera == 1) {
 1460 #ifdef UMA_MD_SMALL_ALLOC
 1461                 keg->uk_allocf = uma_small_alloc;
 1462                 keg->uk_freef = uma_small_free;
 1463 
 1464                 if (booted < UMA_STARTUP)
 1465                         keg->uk_allocf = startup_alloc;
 1466 #else
 1467                 if (booted < UMA_STARTUP2)
 1468                         keg->uk_allocf = startup_alloc;
 1469 #endif
 1470         } else if (booted < UMA_STARTUP2 &&
 1471             (keg->uk_flags & UMA_ZFLAG_INTERNAL))
 1472                 keg->uk_allocf = startup_alloc;
 1473 
 1474         /*
 1475          * Initialize keg's lock
 1476          */
 1477         KEG_LOCK_INIT(keg, (arg->flags & UMA_ZONE_MTXCLASS));
 1478 
 1479         /*
 1480          * If we're putting the slab header in the actual page we need to
 1481          * figure out where in each page it goes.  This calculates a right
 1482          * justified offset into the memory on an ALIGN_PTR boundary.
 1483          */
 1484         if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
 1485                 u_int totsize;
 1486 
 1487                 /* Size of the slab struct and free list */
 1488                 totsize = sizeof(struct uma_slab);
 1489 
 1490                 if (totsize & UMA_ALIGN_PTR)
 1491                         totsize = (totsize & ~UMA_ALIGN_PTR) +
 1492                             (UMA_ALIGN_PTR + 1);
 1493                 keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - totsize;
 1494 
 1495                 /*
 1496                  * The only way the following is possible is if our
 1497                  * UMA_ALIGN_PTR adjustment has pushed us past
 1498                  * UMA_SLAB_SIZE.  It isn't clear whether this is
 1499                  * mathematically possible in all cases, so we check
 1500                  * here anyway.
 1501                  */
 1502                 totsize = keg->uk_pgoff + sizeof(struct uma_slab);
 1503                 if (totsize > PAGE_SIZE * keg->uk_ppera) {
 1504                         printf("zone %s ipers %d rsize %d size %d\n",
 1505                             zone->uz_name, keg->uk_ipers, keg->uk_rsize,
 1506                             keg->uk_size);
 1507                         panic("UMA slab won't fit.");
 1508                 }
 1509         }
 1510 
 1511         if (keg->uk_flags & UMA_ZONE_HASH)
 1512                 hash_alloc(&keg->uk_hash, 0);
 1513 
 1514 #ifdef UMA_DEBUG
 1515         printf("UMA: %s(%p) size %d(%d) flags %#x ipers %d ppera %d out %d free %d\n",
 1516             zone->uz_name, zone, keg->uk_size, keg->uk_rsize, keg->uk_flags,
 1517             keg->uk_ipers, keg->uk_ppera,
 1518             (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free,
 1519             keg->uk_free);
 1520 #endif
 1521 
 1522         LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
 1523 
 1524         rw_wlock(&uma_rwlock);
 1525         LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
 1526         rw_wunlock(&uma_rwlock);
 1527         return (0);
 1528 }
 1529 
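/*
 * A worked example of the right-justified offset computed above, using
 * hypothetical numbers (PAGE_SIZE 4096, uk_ppera 1, sizeof(struct uma_slab)
 * 90, UMA_ALIGN_PTR 7; the real values depend on the architecture and
 * kernel options):
 *
 *	totsize  = 90;                          90 & 7 != 0, so round up
 *	totsize  = (90 & ~7) + (7 + 1) = 96;    next pointer-aligned size
 *	uk_pgoff = 4096 * 1 - 96 = 4000;        the header occupies the last
 *	                                        96 bytes of the page
 *	4000 + 90 = 4090 <= 4096, so the fit check in keg_ctor() does not panic.
 */
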
 1530 /*
 1531  * Zone header ctor.  This initializes all fields, locks, etc.
 1532  *
 1533  * Arguments/Returns follow uma_ctor specifications
 1534  *      udata  Actually uma_zctor_args
 1535  */
 1536 static int
 1537 zone_ctor(void *mem, int size, void *udata, int flags)
 1538 {
 1539         struct uma_zctor_args *arg = udata;
 1540         uma_zone_t zone = mem;
 1541         uma_zone_t z;
 1542         uma_keg_t keg;
 1543 
 1544         bzero(zone, size);
 1545         zone->uz_name = arg->name;
 1546         zone->uz_ctor = arg->ctor;
 1547         zone->uz_dtor = arg->dtor;
 1548         zone->uz_slab = zone_fetch_slab;
 1549         zone->uz_init = NULL;
 1550         zone->uz_fini = NULL;
 1551         zone->uz_allocs = 0;
 1552         zone->uz_frees = 0;
 1553         zone->uz_fails = 0;
 1554         zone->uz_sleeps = 0;
 1555         zone->uz_count = 0;
 1556         zone->uz_count_min = 0;
 1557         zone->uz_flags = 0;
 1558         zone->uz_warning = NULL;
 1559         timevalclear(&zone->uz_ratecheck);
 1560         keg = arg->keg;
 1561 
 1562         ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS));
 1563 
 1564         /*
 1565          * This is a pure cache zone, no kegs.
 1566          */
 1567         if (arg->import) {
 1568                 if (arg->flags & UMA_ZONE_VM)
 1569                         arg->flags |= UMA_ZFLAG_CACHEONLY;
 1570                 zone->uz_flags = arg->flags;
 1571                 zone->uz_size = arg->size;
 1572                 zone->uz_import = arg->import;
 1573                 zone->uz_release = arg->release;
 1574                 zone->uz_arg = arg->arg;
 1575                 zone->uz_lockptr = &zone->uz_lock;
 1576                 rw_wlock(&uma_rwlock);
 1577                 LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link);
 1578                 rw_wunlock(&uma_rwlock);
 1579                 goto out;
 1580         }
 1581 
 1582         /*
 1583          * Use the regular zone/keg/slab allocator.
 1584          */
 1585         zone->uz_import = (uma_import)zone_import;
 1586         zone->uz_release = (uma_release)zone_release;
 1587         zone->uz_arg = zone; 
 1588 
 1589         if (arg->flags & UMA_ZONE_SECONDARY) {
 1590                 KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
 1591                 zone->uz_init = arg->uminit;
 1592                 zone->uz_fini = arg->fini;
 1593                 zone->uz_lockptr = &keg->uk_lock;
 1594                 zone->uz_flags |= UMA_ZONE_SECONDARY;
 1595                 rw_wlock(&uma_rwlock);
 1596                 ZONE_LOCK(zone);
 1597                 LIST_FOREACH(z, &keg->uk_zones, uz_link) {
 1598                         if (LIST_NEXT(z, uz_link) == NULL) {
 1599                                 LIST_INSERT_AFTER(z, zone, uz_link);
 1600                                 break;
 1601                         }
 1602                 }
 1603                 ZONE_UNLOCK(zone);
 1604                 rw_wunlock(&uma_rwlock);
 1605         } else if (keg == NULL) {
 1606                 if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
 1607                     arg->align, arg->flags)) == NULL)
 1608                         return (ENOMEM);
 1609         } else {
 1610                 struct uma_kctor_args karg;
 1611                 int error;
 1612 
 1613                 /* We should only be here from uma_startup() */
 1614                 karg.size = arg->size;
 1615                 karg.uminit = arg->uminit;
 1616                 karg.fini = arg->fini;
 1617                 karg.align = arg->align;
 1618                 karg.flags = arg->flags;
 1619                 karg.zone = zone;
 1620                 error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
 1621                     flags);
 1622                 if (error)
 1623                         return (error);
 1624         }
 1625 
 1626         /*
 1627          * Link in the first keg.
 1628          */
 1629         zone->uz_klink.kl_keg = keg;
 1630         LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link);
 1631         zone->uz_lockptr = &keg->uk_lock;
 1632         zone->uz_size = keg->uk_size;
 1633         zone->uz_flags |= (keg->uk_flags &
 1634             (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));
 1635 
 1636         /*
 1637          * Some internal zones don't have room allocated for the per-CPU
 1638          * caches.  If we're internal, bail out here.
 1639          */
 1640         if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
 1641                 KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
 1642                     ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
 1643                 return (0);
 1644         }
 1645 
 1646 out:
 1647         KASSERT((arg->flags & (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET)) !=
 1648             (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET),
 1649             ("Invalid zone flag combination"));
 1650         if ((arg->flags & UMA_ZONE_MAXBUCKET) != 0)
 1651                 zone->uz_count = BUCKET_MAX;
 1652         else if ((arg->flags & UMA_ZONE_NOBUCKET) != 0)
 1653                 zone->uz_count = 0;
 1654         else
 1655                 zone->uz_count = bucket_select(zone->uz_size);
 1656         zone->uz_count_min = zone->uz_count;
 1657 
 1658         return (0);
 1659 }
 1660 
 1661 /*
 1662  * Keg header dtor.  This frees all data, destroys locks, frees the hash
 1663  * table and removes the keg from the global list.
 1664  *
 1665  * Arguments/Returns follow uma_dtor specifications
 1666  *      udata  unused
 1667  */
 1668 static void
 1669 keg_dtor(void *arg, int size, void *udata)
 1670 {
 1671         uma_keg_t keg;
 1672 
 1673         keg = (uma_keg_t)arg;
 1674         KEG_LOCK(keg);
 1675         if (keg->uk_free != 0) {
 1676                 printf("Freed UMA keg (%s) was not empty (%d items). "
 1677                     "Lost %d pages of memory.\n",
 1678                     keg->uk_name ? keg->uk_name : "",
 1679                     keg->uk_free, keg->uk_pages);
 1680         }
 1681         KEG_UNLOCK(keg);
 1682 
 1683         hash_free(&keg->uk_hash);
 1684 
 1685         KEG_LOCK_FINI(keg);
 1686 }
 1687 
 1688 /*
 1689  * Zone header dtor.
 1690  *
 1691  * Arguments/Returns follow uma_dtor specifications
 1692  *      udata  unused
 1693  */
 1694 static void
 1695 zone_dtor(void *arg, int size, void *udata)
 1696 {
 1697         uma_klink_t klink;
 1698         uma_zone_t zone;
 1699         uma_keg_t keg;
 1700 
 1701         zone = (uma_zone_t)arg;
 1702         keg = zone_first_keg(zone);
 1703 
 1704         if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
 1705                 cache_drain(zone);
 1706 
 1707         rw_wlock(&uma_rwlock);
 1708         LIST_REMOVE(zone, uz_link);
 1709         rw_wunlock(&uma_rwlock);
 1710         /*
 1711          * XXX there are some races here where the zone can be
 1712          * drained, the zone lock released, and the zone then
 1713          * refilled before we remove it... we don't care about
 1714          * this for now.
 1715          */
 1716         zone_drain_wait(zone, M_WAITOK);
 1717         /*
 1718          * Unlink all of our kegs.
 1719          */
 1720         while ((klink = LIST_FIRST(&zone->uz_kegs)) != NULL) {
 1721                 klink->kl_keg = NULL;
 1722                 LIST_REMOVE(klink, kl_link);
 1723                 if (klink == &zone->uz_klink)
 1724                         continue;
 1725                 free(klink, M_TEMP);
 1726         }
 1727         /*
 1728          * We only destroy kegs from non-secondary zones.
 1729          */
 1730         if (keg != NULL && (zone->uz_flags & UMA_ZONE_SECONDARY) == 0)  {
 1731                 rw_wlock(&uma_rwlock);
 1732                 LIST_REMOVE(keg, uk_link);
 1733                 rw_wunlock(&uma_rwlock);
 1734                 zone_free_item(kegs, keg, NULL, SKIP_NONE);
 1735         }
 1736         ZONE_LOCK_FINI(zone);
 1737 }
 1738 
 1739 /*
 1740  * Traverses every zone in the system and calls a callback
 1741  *
 1742  * Arguments:
 1743  *      zfunc  A pointer to a function which accepts a zone
 1744  *              as an argument.
 1745  *
 1746  * Returns:
 1747  *      Nothing
 1748  */
 1749 static void
 1750 zone_foreach(void (*zfunc)(uma_zone_t))
 1751 {
 1752         uma_keg_t keg;
 1753         uma_zone_t zone;
 1754 
 1755         rw_rlock(&uma_rwlock);
 1756         LIST_FOREACH(keg, &uma_kegs, uk_link) {
 1757                 LIST_FOREACH(zone, &keg->uk_zones, uz_link)
 1758                         zfunc(zone);
 1759         }
 1760         rw_runlock(&uma_rwlock);
 1761 }
 1762 
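/*
 * Illustrative sketch (kept under #if 0, not compiled): zone_foreach() is how
 * UMA applies a global operation to every zone, as the reclaim and timeout
 * paths elsewhere in this file do.  uma_example_sweep() is a hypothetical
 * name used only for this example.
 */
#if 0
static void
uma_example_sweep(void)
{

	/* Reclaim cached memory from every zone, then run housekeeping. */
	zone_foreach(zone_drain);
	zone_foreach(zone_timeout);
}
#endif
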
 1763 /* Public functions */
 1764 /* See uma.h */
 1765 void
 1766 uma_startup(void *bootmem, int boot_pages)
 1767 {
 1768         struct uma_zctor_args args;
 1769         uma_slab_t slab;
 1770         int i;
 1771 
 1772 #ifdef UMA_DEBUG
 1773         printf("Creating uma keg headers zone and keg.\n");
 1774 #endif
 1775         rw_init(&uma_rwlock, "UMA lock");
 1776 
 1777         /* "manually" create the initial zone */
 1778         memset(&args, 0, sizeof(args));
 1779         args.name = "UMA Kegs";
 1780         args.size = sizeof(struct uma_keg);
 1781         args.ctor = keg_ctor;
 1782         args.dtor = keg_dtor;
 1783         args.uminit = zero_init;
 1784         args.fini = NULL;
 1785         args.keg = &masterkeg;
 1786         args.align = 32 - 1;
 1787         args.flags = UMA_ZFLAG_INTERNAL;
 1788         /* The initial zone has no per-CPU queues, so it's smaller. */
 1789         zone_ctor(kegs, sizeof(struct uma_zone), &args, M_WAITOK);
 1790 
 1791 #ifdef UMA_DEBUG
 1792         printf("Filling boot free list.\n");
 1793 #endif
 1794         for (i = 0; i < boot_pages; i++) {
 1795                 slab = (uma_slab_t)((uint8_t *)bootmem + (i * UMA_SLAB_SIZE));
 1796                 slab->us_data = (uint8_t *)slab;
 1797                 slab->us_flags = UMA_SLAB_BOOT;
 1798                 LIST_INSERT_HEAD(&uma_boot_pages, slab, us_link);
 1799         }
 1800         mtx_init(&uma_boot_pages_mtx, "UMA boot pages", NULL, MTX_DEF);
 1801 
 1802 #ifdef UMA_DEBUG
 1803         printf("Creating uma zone headers zone and keg.\n");
 1804 #endif
 1805         args.name = "UMA Zones";
 1806         args.size = sizeof(struct uma_zone) +
 1807             (sizeof(struct uma_cache) * (mp_maxid + 1));
 1808         args.ctor = zone_ctor;
 1809         args.dtor = zone_dtor;
 1810         args.uminit = zero_init;
 1811         args.fini = NULL;
 1812         args.keg = NULL;
 1813         args.align = 32 - 1;
 1814         args.flags = UMA_ZFLAG_INTERNAL;
 1815         /* The initial zone has no per-CPU queues, so it's smaller. */
 1816         zone_ctor(zones, sizeof(struct uma_zone), &args, M_WAITOK);
 1817 
 1818 #ifdef UMA_DEBUG
 1819         printf("Creating slab and hash zones.\n");
 1820 #endif
 1821 
 1822         /* Now make a zone for slab headers */
 1823         slabzone = uma_zcreate("UMA Slabs",
 1824                                 sizeof(struct uma_slab),
 1825                                 NULL, NULL, NULL, NULL,
 1826                                 UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
 1827 
 1828         hashzone = uma_zcreate("UMA Hash",
 1829             sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
 1830             NULL, NULL, NULL, NULL,
 1831             UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
 1832 
 1833         bucket_init();
 1834 
 1835         booted = UMA_STARTUP;
 1836 
 1837 #ifdef UMA_DEBUG
 1838         printf("UMA startup complete.\n");
 1839 #endif
 1840 }
 1841 
 1842 /* See uma.h */
 1843 void
 1844 uma_startup2(void)
 1845 {
 1846         booted = UMA_STARTUP2;
 1847         bucket_enable();
 1848         sx_init(&uma_drain_lock, "umadrain");
 1849 #ifdef UMA_DEBUG
 1850         printf("UMA startup2 complete.\n");
 1851 #endif
 1852 }
 1853 
 1854 
 1855 static void
 1856 uma_startup3(void)
 1857 {
 1858 #ifdef UMA_DEBUG
 1859         printf("Starting callout.\n");
 1860 #endif
 1861         callout_init(&uma_callout, 1);
 1862         callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
 1863 #ifdef UMA_DEBUG
 1864         printf("UMA startup3 complete.\n");
 1865 #endif
 1866 
 1867         EVENTHANDLER_REGISTER(shutdown_post_sync, uma_shutdown, NULL,
 1868             EVENTHANDLER_PRI_FIRST);
 1869 }
 1870 
 1871 static void
 1872 uma_shutdown(void)
 1873 {
 1874 
 1875         booted = UMA_SHUTDOWN;
 1876 }
 1877 
 1878 static uma_keg_t
 1879 uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
 1880                 int align, uint32_t flags)
 1881 {
 1882         struct uma_kctor_args args;
 1883 
 1884         args.size = size;
 1885         args.uminit = uminit;
 1886         args.fini = fini;
 1887         args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
 1888         args.flags = flags;
 1889         args.zone = zone;
 1890         return (zone_alloc_item(kegs, &args, M_WAITOK));
 1891 }
 1892 
 1893 /* See uma.h */
 1894 void
 1895 uma_set_align(int align)
 1896 {
 1897 
 1898         if (align != UMA_ALIGN_CACHE)
 1899                 uma_align_cache = align;
 1900 }
 1901 
 1902 /* See uma.h */
 1903 uma_zone_t
 1904 uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
 1905                 uma_init uminit, uma_fini fini, int align, uint32_t flags)
 1906 
 1907 {
 1908         struct uma_zctor_args args;
 1909         uma_zone_t res;
 1910         bool locked;
 1911 
 1912         KASSERT(powerof2(align + 1), ("invalid zone alignment %d for \"%s\"",
 1913             align, name));
 1914 
 1915         /* This stuff is essential for the zone ctor */
 1916         memset(&args, 0, sizeof(args));
 1917         args.name = name;
 1918         args.size = size;
 1919         args.ctor = ctor;
 1920         args.dtor = dtor;
 1921         args.uminit = uminit;
 1922         args.fini = fini;
 1923 #ifdef  INVARIANTS
 1924         /*
 1925          * If a zone is being created with an empty constructor and
 1926          * destructor, pass the UMA trash constructor/destructor, which
 1927          * check for use of memory after it has been freed.
 1928          */
 1929         if ((!(flags & (UMA_ZONE_ZINIT | UMA_ZONE_NOFREE))) &&
 1930             ctor == NULL && dtor == NULL && uminit == NULL && fini == NULL) {
 1931                 args.ctor = trash_ctor;
 1932                 args.dtor = trash_dtor;
 1933                 args.uminit = trash_init;
 1934                 args.fini = trash_fini;
 1935         }
 1936 #endif
 1937         args.align = align;
 1938         args.flags = flags;
 1939         args.keg = NULL;
 1940 
 1941         if (booted < UMA_STARTUP2) {
 1942                 locked = false;
 1943         } else {
 1944                 sx_slock(&uma_drain_lock);
 1945                 locked = true;
 1946         }
 1947         res = zone_alloc_item(zones, &args, M_WAITOK);
 1948         if (locked)
 1949                 sx_sunlock(&uma_drain_lock);
 1950         return (res);
 1951 }
 1952 
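/*
 * Illustrative sketch (kept under #if 0, not compiled): typical creation of a
 * keg-backed zone.  "struct foo", foo_zone and foo_init() are hypothetical
 * names, not part of this file.
 */
#if 0
static uma_zone_t foo_zone;

static void
foo_init(void)
{

	/* Default ctor/dtor/init/fini, pointer alignment, no special flags. */
	foo_zone = uma_zcreate("foo", sizeof(struct foo),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
}
#endif
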
 1953 /* See uma.h */
 1954 uma_zone_t
 1955 uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
 1956                     uma_init zinit, uma_fini zfini, uma_zone_t master)
 1957 {
 1958         struct uma_zctor_args args;
 1959         uma_keg_t keg;
 1960         uma_zone_t res;
 1961         bool locked;
 1962 
 1963         keg = zone_first_keg(master);
 1964         memset(&args, 0, sizeof(args));
 1965         args.name = name;
 1966         args.size = keg->uk_size;
 1967         args.ctor = ctor;
 1968         args.dtor = dtor;
 1969         args.uminit = zinit;
 1970         args.fini = zfini;
 1971         args.align = keg->uk_align;
 1972         args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
 1973         args.keg = keg;
 1974 
 1975         if (booted < UMA_STARTUP2) {
 1976                 locked = false;
 1977         } else {
 1978                 sx_slock(&uma_drain_lock);
 1979                 locked = true;
 1980         }
 1981         /* XXX Attaches only one keg of potentially many. */
 1982         res = zone_alloc_item(zones, &args, M_WAITOK);
 1983         if (locked)
 1984                 sx_sunlock(&uma_drain_lock);
 1985         return (res);
 1986 }
 1987 
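/*
 * Illustrative sketch (kept under #if 0, not compiled): a secondary zone
 * layers its own ctor/dtor on top of the master zone's keg, so both zones
 * share the same slabs and item size.  foo_big_zone, foo_big_ctor,
 * foo_big_dtor and foo_zone are hypothetical names.
 */
#if 0
static uma_zone_t foo_big_zone;

static void
foo_big_init(void)
{

	foo_big_zone = uma_zsecond_create("foo big", foo_big_ctor,
	    foo_big_dtor, NULL, NULL, foo_zone);
}
#endif
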
 1988 /* See uma.h */
 1989 uma_zone_t
 1990 uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
 1991                     uma_init zinit, uma_fini zfini, uma_import zimport,
 1992                     uma_release zrelease, void *arg, int flags)
 1993 {
 1994         struct uma_zctor_args args;
 1995 
 1996         memset(&args, 0, sizeof(args));
 1997         args.name = name;
 1998         args.size = size;
 1999         args.ctor = ctor;
 2000         args.dtor = dtor;
 2001         args.uminit = zinit;
 2002         args.fini = zfini;
 2003         args.import = zimport;
 2004         args.release = zrelease;
 2005         args.arg = arg;
 2006         args.align = 0;
 2007         args.flags = flags;
 2008 
 2009         return (zone_alloc_item(zones, &args, M_WAITOK));
 2010 }
 2011 
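/*
 * Illustrative sketch (kept under #if 0, not compiled): a pure cache zone
 * supplies its own import/release functions instead of a keg.  The
 * prototypes follow the way uz_import and uz_release are invoked in this
 * file; foo_cache, foo_backend_get() and foo_backend_put() are hypothetical.
 */
#if 0
static int
foo_import(void *arg, void **store, int cnt, int flags)
{
	int i;

	/* Fill 'store' from the backing store; report how many we got. */
	for (i = 0; i < cnt; i++)
		if ((store[i] = foo_backend_get(flags)) == NULL)
			break;
	return (i);
}

static void
foo_release(void *arg, void **store, int cnt)
{
	int i;

	/* Hand the cached items back to the backing store. */
	for (i = 0; i < cnt; i++)
		foo_backend_put(store[i]);
}

static void
foo_cache_init(void)
{

	foo_cache = uma_zcache_create("foo cache", sizeof(struct foo),
	    NULL, NULL, NULL, NULL, foo_import, foo_release, NULL, 0);
}
#endif
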
 2012 static void
 2013 zone_lock_pair(uma_zone_t a, uma_zone_t b)
 2014 {
 2015         if (a < b) {
 2016                 ZONE_LOCK(a);
 2017                 mtx_lock_flags(b->uz_lockptr, MTX_DUPOK);
 2018         } else {
 2019                 ZONE_LOCK(b);
 2020                 mtx_lock_flags(a->uz_lockptr, MTX_DUPOK);
 2021         }
 2022 }
 2023 
 2024 static void
 2025 zone_unlock_pair(uma_zone_t a, uma_zone_t b)
 2026 {
 2027 
 2028         ZONE_UNLOCK(a);
 2029         ZONE_UNLOCK(b);
 2030 }
 2031 
 2032 int
 2033 uma_zsecond_add(uma_zone_t zone, uma_zone_t master)
 2034 {
 2035         uma_klink_t klink;
 2036         uma_klink_t kl;
 2037         int error;
 2038 
 2039         error = 0;
 2040         klink = malloc(sizeof(*klink), M_TEMP, M_WAITOK | M_ZERO);
 2041 
 2042         zone_lock_pair(zone, master);
 2043         /*
 2044          * zone must use vtoslab() to resolve objects and must already be
 2045          * a secondary.
 2046          */
 2047         if ((zone->uz_flags & (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY))
 2048             != (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) {
 2049                 error = EINVAL;
 2050                 goto out;
 2051         }
 2052         /*
 2053          * The new master must also use vtoslab().
 2054          */
 2055         if ((master->uz_flags & UMA_ZONE_VTOSLAB) != UMA_ZONE_VTOSLAB) {
 2056                 error = EINVAL;
 2057                 goto out;
 2058         }
 2059 
 2060         /*
 2061          * The underlying object must be the same size.  rsize
 2062          * may be different.
 2063          */
 2064         if (master->uz_size != zone->uz_size) {
 2065                 error = E2BIG;
 2066                 goto out;
 2067         }
 2068         /*
 2069          * Put it at the end of the list.
 2070          */
 2071         klink->kl_keg = zone_first_keg(master);
 2072         LIST_FOREACH(kl, &zone->uz_kegs, kl_link) {
 2073                 if (LIST_NEXT(kl, kl_link) == NULL) {
 2074                         LIST_INSERT_AFTER(kl, klink, kl_link);
 2075                         break;
 2076                 }
 2077         }
 2078         klink = NULL;
 2079         zone->uz_flags |= UMA_ZFLAG_MULTI;
 2080         zone->uz_slab = zone_fetch_slab_multi;
 2081 
 2082 out:
 2083         zone_unlock_pair(zone, master);
 2084         if (klink != NULL)
 2085                 free(klink, M_TEMP);
 2086 
 2087         return (error);
 2088 }
 2089 
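/*
 * Illustrative sketch (kept under #if 0, not compiled): attaching an
 * additional keg to an existing secondary zone.  foo_big_zone and
 * foo_attach_keg() are hypothetical names.
 */
#if 0
static int
foo_attach_keg(uma_zone_t other_master)
{

	/* Both zones must use vtoslab() and have equal item sizes. */
	return (uma_zsecond_add(foo_big_zone, other_master));
}
#endif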
 2090 
 2091 /* See uma.h */
 2092 void
 2093 uma_zdestroy(uma_zone_t zone)
 2094 {
 2095 
 2096         /*
 2097          * Large slabs are expensive to reclaim, so don't bother doing
 2098          * unnecessary work if we're shutting down.
 2099          */
 2100         if (booted == UMA_SHUTDOWN &&
 2101             zone->uz_fini == NULL &&
 2102             zone->uz_release == (uma_release)zone_release)
 2103                 return;
 2104         sx_slock(&uma_drain_lock);
 2105         zone_free_item(zones, zone, NULL, SKIP_NONE);
 2106         sx_sunlock(&uma_drain_lock);
 2107 }
 2108 
 2109 void
 2110 uma_zwait(uma_zone_t zone)
 2111 {
 2112         void *item;
 2113 
 2114         item = uma_zalloc_arg(zone, NULL, M_WAITOK);
 2115         uma_zfree(zone, item);
 2116 }
 2117 
 2118 /* See uma.h */
 2119 void *
 2120 uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
 2121 {
 2122         void *item;
 2123         uma_cache_t cache;
 2124         uma_bucket_t bucket;
 2125         int lockfail;
 2126         int cpu;
 2127 
 2128         /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
 2129         random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA);
 2130 
 2131         /* This is the fast path allocation */
 2132 #ifdef UMA_DEBUG_ALLOC_1
 2133         printf("Allocating one item from %s(%p)\n", zone->uz_name, zone);
 2134 #endif
 2135         CTR3(KTR_UMA, "uma_zalloc_arg thread %x zone %s flags %d", curthread,
 2136             zone->uz_name, flags);
 2137 
 2138         if (flags & M_WAITOK) {
 2139                 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
 2140                     "uma_zalloc_arg: zone \"%s\"", zone->uz_name);
 2141         }
 2142         KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
 2143             ("uma_zalloc_arg: called with spinlock or critical section held"));
 2144 
 2145 #ifdef DEBUG_MEMGUARD
 2146         if (memguard_cmp_zone(zone)) {
 2147                 item = memguard_alloc(zone->uz_size, flags);
 2148                 if (item != NULL) {
 2149                         if (zone->uz_init != NULL &&
 2150                             zone->uz_init(item, zone->uz_size, flags) != 0)
 2151                                 return (NULL);
 2152                         if (zone->uz_ctor != NULL &&
 2153                             zone->uz_ctor(item, zone->uz_size, udata,
 2154                             flags) != 0) {
 2155                                 zone->uz_fini(item, zone->uz_size);
 2156                                 return (NULL);
 2157                         }
 2158                         return (item);
 2159                 }
 2160                 /* This is unfortunate but should not be fatal. */
 2161         }
 2162 #endif
 2163         /*
 2164          * If possible, allocate from the per-CPU cache.  There are two
 2165          * requirements for safe access to the per-CPU cache: (1) the thread
 2166          * accessing the cache must not be preempted or yield during access,
 2167          * and (2) the thread must not migrate CPUs without switching which
 2168          * cache it accesses.  We rely on a critical section to prevent
 2169          * preemption and migration.  We release the critical section in
 2170          * order to acquire the zone mutex if we are unable to allocate from
 2171          * the current cache; when we re-acquire the critical section, we
 2172          * must detect and handle migration if it has occurred.
 2173          */
 2174         critical_enter();
 2175         cpu = curcpu;
 2176         cache = &zone->uz_cpu[cpu];
 2177 
 2178 zalloc_start:
 2179         bucket = cache->uc_allocbucket;
 2180         if (bucket != NULL && bucket->ub_cnt > 0) {
 2181                 bucket->ub_cnt--;
 2182                 item = bucket->ub_bucket[bucket->ub_cnt];
 2183 #ifdef INVARIANTS
 2184                 bucket->ub_bucket[bucket->ub_cnt] = NULL;
 2185 #endif
 2186                 KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled."));
 2187                 cache->uc_allocs++;
 2188                 critical_exit();
 2189                 if (zone->uz_ctor != NULL &&
 2190                     zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
 2191                         atomic_add_long(&zone->uz_fails, 1);
 2192                         zone_free_item(zone, item, udata, SKIP_DTOR);
 2193                         return (NULL);
 2194                 }
 2195 #ifdef INVARIANTS
 2196                 uma_dbg_alloc(zone, NULL, item);
 2197 #endif
 2198                 if (flags & M_ZERO)
 2199                         uma_zero_item(item, zone);
 2200                 return (item);
 2201         }
 2202 
 2203         /*
 2204          * We have run out of items in our alloc bucket.
 2205          * See if we can switch with our free bucket.
 2206          */
 2207         bucket = cache->uc_freebucket;
 2208         if (bucket != NULL && bucket->ub_cnt > 0) {
 2209 #ifdef UMA_DEBUG_ALLOC
 2210                 printf("uma_zalloc: Swapping empty with alloc.\n");
 2211 #endif
 2212                 cache->uc_freebucket = cache->uc_allocbucket;
 2213                 cache->uc_allocbucket = bucket;
 2214                 goto zalloc_start;
 2215         }
 2216 
 2217         /*
 2218          * Discard any empty allocation bucket while we hold no locks.
 2219          */
 2220         bucket = cache->uc_allocbucket;
 2221         cache->uc_allocbucket = NULL;
 2222         critical_exit();
 2223         if (bucket != NULL)
 2224                 bucket_free(zone, bucket, udata);
 2225 
 2226         /* Short-circuit for zones without buckets or under low memory. */
 2227         if (zone->uz_count == 0 || bucketdisable)
 2228                 goto zalloc_item;
 2229 
 2230         /*
 2231          * The attempt to retrieve the item from the per-CPU cache has failed,
 2232          * so we must go back to the zone.  This requires the zone lock, so we
 2233          * must drop the critical section, then re-acquire it when we go back
 2234          * to the cache.  Since the critical section is released, we may be
 2235          * preempted or migrate.  As such, make sure not to maintain any
 2236          * thread-local state specific to the cache from prior to releasing
 2237          * the critical section.
 2238          */
 2239         lockfail = 0;
 2240         if (ZONE_TRYLOCK(zone) == 0) {
 2241                 /* Record contention to size the buckets. */
 2242                 ZONE_LOCK(zone);
 2243                 lockfail = 1;
 2244         }
 2245         critical_enter();
 2246         cpu = curcpu;
 2247         cache = &zone->uz_cpu[cpu];
 2248 
 2249         /*
 2250          * Since we have locked the zone we may as well send back our stats.
 2251          */
 2252         atomic_add_long(&zone->uz_allocs, cache->uc_allocs);
 2253         atomic_add_long(&zone->uz_frees, cache->uc_frees);
 2254         cache->uc_allocs = 0;
 2255         cache->uc_frees = 0;
 2256 
 2257         /* See if we lost the race to fill the cache. */
 2258         if (cache->uc_allocbucket != NULL) {
 2259                 ZONE_UNLOCK(zone);
 2260                 goto zalloc_start;
 2261         }
 2262 
 2263         /*
 2264          * Check the zone's cache of buckets.
 2265          */
 2266         if ((bucket = LIST_FIRST(&zone->uz_buckets)) != NULL) {
 2267                 KASSERT(bucket->ub_cnt != 0,
 2268                     ("uma_zalloc_arg: Returning an empty bucket."));
 2269 
 2270                 LIST_REMOVE(bucket, ub_link);
 2271                 cache->uc_allocbucket = bucket;
 2272                 ZONE_UNLOCK(zone);
 2273                 goto zalloc_start;
 2274         }
 2275         /* We are no longer associated with this CPU. */
 2276         critical_exit();
 2277 
 2278         /*
 2279          * We bump the uz count when the cache size is insufficient to
 2280          * handle the working set.
 2281          */
 2282         if (lockfail && zone->uz_count < BUCKET_MAX)
 2283                 zone->uz_count++;
 2284         ZONE_UNLOCK(zone);
 2285 
 2286         /*
 2287          * Now let's just fill a bucket and put it on the free list.  If that
 2288          * works, we'll restart the allocation from the beginning and it
 2289          * will use the just-filled bucket.
 2290          */
 2291         bucket = zone_alloc_bucket(zone, udata, flags);
 2292         if (bucket != NULL) {
 2293                 ZONE_LOCK(zone);
 2294                 critical_enter();
 2295                 cpu = curcpu;
 2296                 cache = &zone->uz_cpu[cpu];
 2297                 /*
 2298                  * See if we lost the race or were migrated.  Cache the
 2299                  * initialized bucket to make this less likely or claim
 2300                  * the memory directly.
 2301                  */
 2302                 if (cache->uc_allocbucket == NULL)
 2303                         cache->uc_allocbucket = bucket;
 2304                 else
 2305                         LIST_INSERT_HEAD(&zone->uz_buckets, bucket, ub_link);
 2306                 ZONE_UNLOCK(zone);
 2307                 goto zalloc_start;
 2308         }
 2309 
 2310         /*
 2311          * We may not be able to get a bucket, so return an actual item.
 2312          */
 2313 #ifdef UMA_DEBUG
 2314         printf("uma_zalloc_arg: Bucketzone returned NULL\n");
 2315 #endif
 2316 
 2317 zalloc_item:
 2318         item = zone_alloc_item(zone, udata, flags);
 2319 
 2320         return (item);
 2321 }
 2322 
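/*
 * Illustrative sketch (kept under #if 0, not compiled): the usual calling
 * pattern for the fast path above.  foo_zone, struct foo and foo_do_work()
 * are hypothetical; uma_zalloc() and uma_zfree() are the uma.h wrappers
 * around uma_zalloc_arg() and uma_zfree_arg() with a NULL udata.
 */
#if 0
static int
foo_do_work(void)
{
	struct foo *p;

	p = uma_zalloc(foo_zone, M_NOWAIT | M_ZERO);
	if (p == NULL)
		return (ENOMEM);	/* cache and slab layers were empty */
	/* ... use *p ... */
	uma_zfree(foo_zone, p);
	return (0);
}
#endif
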
 2323 static uma_slab_t
 2324 keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int flags)
 2325 {
 2326         uma_slab_t slab;
 2327         int reserve;
 2328 
 2329         mtx_assert(&keg->uk_lock, MA_OWNED);
 2330         slab = NULL;
 2331         reserve = 0;
 2332         if ((flags & M_USE_RESERVE) == 0)
 2333                 reserve = keg->uk_reserve;
 2334 
 2335         for (;;) {
 2336                 /*
 2337                  * Find a slab with some space.  Prefer slabs that are partially
 2338                  * used over those that are totally free.  This helps to reduce
 2339                  * fragmentation.
 2340                  */
 2341                 if (keg->uk_free > reserve) {
 2342                         if (!LIST_EMPTY(&keg->uk_part_slab)) {
 2343                                 slab = LIST_FIRST(&keg->uk_part_slab);
 2344                         } else {
 2345                                 slab = LIST_FIRST(&keg->uk_free_slab);
 2346                                 LIST_REMOVE(slab, us_link);
 2347                                 LIST_INSERT_HEAD(&keg->uk_part_slab, slab,
 2348                                     us_link);
 2349                         }
 2350                         MPASS(slab->us_keg == keg);
 2351                         return (slab);
 2352                 }
 2353 
 2354                 /*
 2355                  * M_NOVM means don't ask at all!
 2356                  */
 2357                 if (flags & M_NOVM)
 2358                         break;
 2359 
 2360                 if (keg->uk_maxpages && keg->uk_pages >= keg->uk_maxpages) {
 2361                         keg->uk_flags |= UMA_ZFLAG_FULL;
 2362                         /*
 2363                          * If this is not a multi-zone, set the FULL bit.
 2364                          * Otherwise slab_multi() takes care of it.
 2365                          */
 2366                         if ((zone->uz_flags & UMA_ZFLAG_MULTI) == 0) {
 2367                                 zone->uz_flags |= UMA_ZFLAG_FULL;
 2368                                 zone_log_warning(zone);
 2369                                 zone_maxaction(zone);
 2370                         }
 2371                         if (flags & M_NOWAIT)
 2372                                 break;
 2373                         zone->uz_sleeps++;
 2374                         msleep(keg, &keg->uk_lock, PVM, "keglimit", 0);
 2375                         continue;
 2376                 }
 2377                 slab = keg_alloc_slab(keg, zone, flags);
 2378                 /*
 2379                  * If we got a slab here it's safe to mark it partially used
 2380                  * and return.  We assume that the caller is going to remove
 2381                  * at least one item.
 2382                  */
 2383                 if (slab) {
 2384                         MPASS(slab->us_keg == keg);
 2385                         LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
 2386                         return (slab);
 2387                 }
 2388                 /*
 2389                  * We might not have been able to get a slab, but another CPU
 2390                  * could have while we were unlocked.  Check again before we
 2391                  * fail.
 2392                  */
 2393                 flags |= M_NOVM;
 2394         }
 2395         return (slab);
 2396 }
 2397 
 2398 static uma_slab_t
 2399 zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int flags)
 2400 {
 2401         uma_slab_t slab;
 2402 
 2403         if (keg == NULL) {
 2404                 keg = zone_first_keg(zone);
 2405                 KEG_LOCK(keg);
 2406         }
 2407 
 2408         for (;;) {
 2409                 slab = keg_fetch_slab(keg, zone, flags);
 2410                 if (slab)
 2411                         return (slab);
 2412                 if (flags & (M_NOWAIT | M_NOVM))
 2413                         break;
 2414         }
 2415         KEG_UNLOCK(keg);
 2416         return (NULL);
 2417 }
 2418 
 2419 /*
 2420  * zone_fetch_slab_multi:  Fetches a slab from one available keg.  Returns
 2421  * with the keg locked.  If NULL is returned, no lock is held.
 2422  *
 2423  * The last pointer is used to seed the search.  It is not required.
 2424  */
 2425 static uma_slab_t
 2426 zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int rflags)
 2427 {
 2428         uma_klink_t klink;
 2429         uma_slab_t slab;
 2430         uma_keg_t keg;
 2431         int flags;
 2432         int empty;
 2433         int full;
 2434 
 2435         /*
 2436          * Don't wait on the first pass.  This will skip limit tests
 2437          * as well.  We don't want to block if we can find a provider
 2438          * without blocking.
 2439          */
 2440         flags = (rflags & ~M_WAITOK) | M_NOWAIT;
 2441         /*
 2442          * Use the last slab allocated as a hint for where to start
 2443          * the search.
 2444          */
 2445         if (last != NULL) {
 2446                 slab = keg_fetch_slab(last, zone, flags);
 2447                 if (slab)
 2448                         return (slab);
 2449                 KEG_UNLOCK(last);
 2450         }
 2451         /*
 2452          * Loop until we have a slab in case of transient failures
 2453          * while M_WAITOK is specified.  It's not certain this is
 2454          * strictly required, but it has been the behavior for a long time.
 2455          */
 2456         for (;;) {
 2457                 empty = 0;
 2458                 full = 0;
 2459                 /*
 2460                  * Search the available kegs for slabs.  Be careful to hold the
 2461                  * correct lock while calling into the keg layer.
 2462                  */
 2463                 LIST_FOREACH(klink, &zone->uz_kegs, kl_link) {
 2464                         keg = klink->kl_keg;
 2465                         KEG_LOCK(keg);
 2466                         if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) {
 2467                                 slab = keg_fetch_slab(keg, zone, flags);
 2468                                 if (slab)
 2469                                         return (slab);
 2470                         }
 2471                         if (keg->uk_flags & UMA_ZFLAG_FULL)
 2472                                 full++;
 2473                         else
 2474                                 empty++;
 2475                         KEG_UNLOCK(keg);
 2476                 }
 2477                 if (rflags & (M_NOWAIT | M_NOVM))
 2478                         break;
 2479                 flags = rflags;
 2480                 /*
 2481                  * All kegs are full.  XXX We can't atomically check all kegs
 2482                  * and sleep so just sleep for a short period and retry.
 2483                  */
 2484                 if (full && !empty) {
 2485                         ZONE_LOCK(zone);
 2486                         zone->uz_flags |= UMA_ZFLAG_FULL;
 2487                         zone->uz_sleeps++;
 2488                         zone_log_warning(zone);
 2489                         zone_maxaction(zone);
 2490                         msleep(zone, zone->uz_lockptr, PVM,
 2491                             "zonelimit", hz/100);
 2492                         zone->uz_flags &= ~UMA_ZFLAG_FULL;
 2493                         ZONE_UNLOCK(zone);
 2494                         continue;
 2495                 }
 2496         }
 2497         return (NULL);
 2498 }
 2499 
 2500 static void *
 2501 slab_alloc_item(uma_keg_t keg, uma_slab_t slab)
 2502 {
 2503         void *item;
 2504         uint8_t freei;
 2505 
 2506         MPASS(keg == slab->us_keg);
 2507         mtx_assert(&keg->uk_lock, MA_OWNED);
 2508 
 2509         freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1;
 2510         BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free);
 2511         item = slab->us_data + (keg->uk_rsize * freei);
 2512         slab->us_freecount--;
 2513         keg->uk_free--;
 2514 
 2515         /* Move this slab to the full list */
 2516         if (slab->us_freecount == 0) {
 2517                 LIST_REMOVE(slab, us_link);
 2518                 LIST_INSERT_HEAD(&keg->uk_full_slab, slab, us_link);
 2519         }
 2520 
 2521         return (item);
 2522 }
 2523 
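/*
 * A worked example of the index arithmetic above and its inverse in
 * slab_free_item(), assuming a hypothetical uk_rsize of 256 bytes:
 *
 *	freei = 3;                              bit 3 was the first free bit
 *	item  = us_data + 256 * 3;              item starts 768 bytes into
 *	                                        the slab's data area
 *	(item - us_data) / 256 = 3;             slab_free_item() recovers the
 *	                                        same index when freeing
 */
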
 2524 static int
 2525 zone_import(uma_zone_t zone, void **bucket, int max, int flags)
 2526 {
 2527         uma_slab_t slab;
 2528         uma_keg_t keg;
 2529         int i;
 2530 
 2531         slab = NULL;
 2532         keg = NULL;
 2533         /* Try to keep the buckets totally full */
 2534         for (i = 0; i < max; ) {
 2535                 if ((slab = zone->uz_slab(zone, keg, flags)) == NULL)
 2536                         break;
 2537                 keg = slab->us_keg;
 2538                 while (slab->us_freecount && i < max) { 
 2539                         bucket[i++] = slab_alloc_item(keg, slab);
 2540                         if (keg->uk_free <= keg->uk_reserve)
 2541                                 break;
 2542                 }
 2543                 /* Don't grab more than one slab at a time. */
 2544                 flags &= ~M_WAITOK;
 2545                 flags |= M_NOWAIT;
 2546         }
 2547         if (slab != NULL)
 2548                 KEG_UNLOCK(keg);
 2549 
 2550         return (i);
 2551 }
 2552 
 2553 static uma_bucket_t
 2554 zone_alloc_bucket(uma_zone_t zone, void *udata, int flags)
 2555 {
 2556         uma_bucket_t bucket;
 2557         int max;
 2558 
 2559         /* Don't wait for buckets, preserve caller's NOVM setting. */
 2560         bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM));
 2561         if (bucket == NULL)
 2562                 return (NULL);
 2563 
 2564         max = MIN(bucket->ub_entries, zone->uz_count);
 2565         bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket,
 2566             max, flags);
 2567 
 2568         /*
 2569          * Initialize the memory if necessary.
 2570          */
 2571         if (bucket->ub_cnt != 0 && zone->uz_init != NULL) {
 2572                 int i;
 2573 
 2574                 for (i = 0; i < bucket->ub_cnt; i++)
 2575                         if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
 2576                             flags) != 0)
 2577                                 break;
 2578                 /*
 2579                  * If we couldn't initialize the whole bucket, put the
 2580                  * rest back onto the freelist.
 2581                  */
 2582                 if (i != bucket->ub_cnt) {
 2583                         zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i],
 2584                             bucket->ub_cnt - i);
 2585 #ifdef INVARIANTS
 2586                         bzero(&bucket->ub_bucket[i],
 2587                             sizeof(void *) * (bucket->ub_cnt - i));
 2588 #endif
 2589                         bucket->ub_cnt = i;
 2590                 }
 2591         }
 2592 
 2593         if (bucket->ub_cnt == 0) {
 2594                 bucket_free(zone, bucket, udata);
 2595                 atomic_add_long(&zone->uz_fails, 1);
 2596                 return (NULL);
 2597         }
 2598 
 2599         return (bucket);
 2600 }
 2601 
 2602 /*
 2603  * Allocates a single item from a zone.
 2604  *
 2605  * Arguments
 2606  *      zone   The zone to alloc for.
 2607  *      udata  The data to be passed to the constructor.
 2608  *      flags  M_WAITOK, M_NOWAIT, M_ZERO.
 2609  *
 2610  * Returns
 2611  *      NULL if there is no memory and M_NOWAIT is set
 2612  *      An item if successful
 2613  */
 2614 
 2615 static void *
 2616 zone_alloc_item(uma_zone_t zone, void *udata, int flags)
 2617 {
 2618         void *item;
 2619 
 2620         item = NULL;
 2621 
 2622 #ifdef UMA_DEBUG_ALLOC
 2623         printf("INTERNAL: Allocating one item from %s(%p)\n", zone->uz_name, zone);
 2624 #endif
 2625         if (zone->uz_import(zone->uz_arg, &item, 1, flags) != 1)
 2626                 goto fail;
 2627         atomic_add_long(&zone->uz_allocs, 1);
 2628 
 2629         /*
 2630          * We have to call both the zone's init (not the keg's init)
 2631          * and the zone's ctor.  This is because the item is going from
 2632          * a keg slab directly to the user, and the user is expecting it
 2633          * to be both zone-init'd as well as zone-ctor'd.
 2634          */
 2635         if (zone->uz_init != NULL) {
 2636                 if (zone->uz_init(item, zone->uz_size, flags) != 0) {
 2637                         zone_free_item(zone, item, udata, SKIP_FINI);
 2638                         goto fail;
 2639                 }
 2640         }
 2641         if (zone->uz_ctor != NULL) {
 2642                 if (zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
 2643                         zone_free_item(zone, item, udata, SKIP_DTOR);
 2644                         goto fail;
 2645                 }
 2646         }
 2647 #ifdef INVARIANTS
 2648         uma_dbg_alloc(zone, NULL, item);
 2649 #endif
 2650         if (flags & M_ZERO)
 2651                 uma_zero_item(item, zone);
 2652 
 2653         return (item);
 2654 
 2655 fail:
 2656         atomic_add_long(&zone->uz_fails, 1);
 2657         return (NULL);
 2658 }
 2659 
 2660 /* See uma.h */
 2661 void
 2662 uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
 2663 {
 2664         uma_cache_t cache;
 2665         uma_bucket_t bucket;
 2666         int lockfail;
 2667         int cpu;
 2668 
 2669         /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
 2670         random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA);
 2671 
 2672 #ifdef UMA_DEBUG_ALLOC_1
 2673         printf("Freeing item %p to %s(%p)\n", item, zone->uz_name, zone);
 2674 #endif
 2675         CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
 2676             zone->uz_name);
 2677 
 2678         KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
 2679             ("uma_zfree_arg: called with spinlock or critical section held"));
 2680 
 2681         /* uma_zfree(..., NULL) does nothing, to match free(9). */
 2682         if (item == NULL)
 2683                 return;
 2684 #ifdef DEBUG_MEMGUARD
 2685         if (is_memguard_addr(item)) {
 2686                 if (zone->uz_dtor != NULL)
 2687                         zone->uz_dtor(item, zone->uz_size, udata);
 2688                 if (zone->uz_fini != NULL)
 2689                         zone->uz_fini(item, zone->uz_size);
 2690                 memguard_free(item);
 2691                 return;
 2692         }
 2693 #endif
 2694 #ifdef INVARIANTS
 2695         if (zone->uz_flags & UMA_ZONE_MALLOC)
 2696                 uma_dbg_free(zone, udata, item);
 2697         else
 2698                 uma_dbg_free(zone, NULL, item);
 2699 #endif
 2700         if (zone->uz_dtor != NULL)
 2701                 zone->uz_dtor(item, zone->uz_size, udata);
 2702 
 2703         /*
 2704          * The race here is acceptable.  If we miss it we'll just have to wait
 2705          * a little longer for the limits to be reset.
 2706          */
 2707         if (zone->uz_flags & UMA_ZFLAG_FULL)
 2708                 goto zfree_item;
 2709 
 2710         /*
 2711          * If possible, free to the per-CPU cache.  There are two
 2712          * requirements for safe access to the per-CPU cache: (1) the thread
 2713          * accessing the cache must not be preempted or yield during access,
 2714          * and (2) the thread must not migrate CPUs without switching which
 2715          * cache it accesses.  We rely on a critical section to prevent
 2716          * preemption and migration.  We release the critical section in
 2717          * order to acquire the zone mutex if we are unable to free to the
 2718          * current cache; when we re-acquire the critical section, we must
 2719          * detect and handle migration if it has occurred.
 2720          */
 2721 zfree_restart:
 2722         critical_enter();
 2723         cpu = curcpu;
 2724         cache = &zone->uz_cpu[cpu];
 2725 
 2726 zfree_start:
 2727         /*
 2728          * Try to free into the allocbucket first to give LIFO ordering
 2729          * for cache-hot data structures.  Spill over into the freebucket
 2730          * if necessary.  Alloc will swap them if one runs dry.
 2731          */
 2732         bucket = cache->uc_allocbucket;
 2733         if (bucket == NULL || bucket->ub_cnt >= bucket->ub_entries)
 2734                 bucket = cache->uc_freebucket;
 2735         if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
 2736                 KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
 2737                     ("uma_zfree: Freeing to non free bucket index."));
 2738                 bucket->ub_bucket[bucket->ub_cnt] = item;
 2739                 bucket->ub_cnt++;
 2740                 cache->uc_frees++;
 2741                 critical_exit();
 2742                 return;
 2743         }
 2744 
 2745         /*
 2746          * We must go back to the zone, which requires acquiring the zone lock,
 2747          * which in turn means we must release and re-acquire the critical
 2748          * section.  Since the critical section is released, we may be
 2749          * preempted or migrate.  As such, make sure not to maintain any
 2750          * thread-local state specific to the cache from prior to releasing
 2751          * the critical section.
 2752          */
 2753         critical_exit();
 2754         if (zone->uz_count == 0 || bucketdisable)
 2755                 goto zfree_item;
 2756 
 2757         lockfail = 0;
 2758         if (ZONE_TRYLOCK(zone) == 0) {
 2759                 /* Record contention to size the buckets. */
 2760                 ZONE_LOCK(zone);
 2761                 lockfail = 1;
 2762         }
 2763         critical_enter();
 2764         cpu = curcpu;
 2765         cache = &zone->uz_cpu[cpu];
 2766 
 2767         /*
 2768          * Since we have locked the zone we may as well send back our stats.
 2769          */
 2770         atomic_add_long(&zone->uz_allocs, cache->uc_allocs);
 2771         atomic_add_long(&zone->uz_frees, cache->uc_frees);
 2772         cache->uc_allocs = 0;
 2773         cache->uc_frees = 0;
 2774 
 2775         bucket = cache->uc_freebucket;
 2776         if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
 2777                 ZONE_UNLOCK(zone);
 2778                 goto zfree_start;
 2779         }
 2780         cache->uc_freebucket = NULL;
 2781         /* We are no longer associated with this CPU. */
 2782         critical_exit();
 2783 
 2784         /* Can we put this bucket on the zone's list of full buckets? */
 2785         if (bucket != NULL) {
 2786 #ifdef UMA_DEBUG_ALLOC
 2787                 printf("uma_zfree: Putting old bucket on the free list.\n");
 2788 #endif
 2789                 /* ub_cnt is the number of valid items in the bucket. */
 2790                 KASSERT(bucket->ub_cnt != 0,
 2791                     ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n"));
 2792                 LIST_INSERT_HEAD(&zone->uz_buckets, bucket, ub_link);
 2793         }
 2794 
 2795         /*
 2796          * We bump the uz count when the cache size is insufficient to
 2797          * handle the working set.
 2798          */
 2799         if (lockfail && zone->uz_count < BUCKET_MAX)
 2800                 zone->uz_count++;
 2801         ZONE_UNLOCK(zone);
 2802 
 2803 #ifdef UMA_DEBUG_ALLOC
 2804         printf("uma_zfree: Allocating new free bucket.\n");
 2805 #endif
 2806         bucket = bucket_alloc(zone, udata, M_NOWAIT);
 2807         if (bucket) {
 2808                 critical_enter();
 2809                 cpu = curcpu;
 2810                 cache = &zone->uz_cpu[cpu];
 2811                 if (cache->uc_freebucket == NULL) {
 2812                         cache->uc_freebucket = bucket;
 2813                         goto zfree_start;
 2814                 }
 2815                 /*
 2816                  * We lost the race, start over.  We have to drop our
 2817                  * critical section to free the bucket.
 2818                  */
 2819                 critical_exit();
 2820                 bucket_free(zone, bucket, udata);
 2821                 goto zfree_restart;
 2822         }
 2823 
 2824         /*
 2825          * If nothing else caught this, we'll just do an internal free.
 2826          */
 2827 zfree_item:
 2828         zone_free_item(zone, item, udata, SKIP_DTOR);
 2829 
 2830         return;
 2831 }
 2832 
 2833 static void
 2834 slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item)
 2835 {
 2836         uint8_t freei;
 2837 
 2838         mtx_assert(&keg->uk_lock, MA_OWNED);
 2839         MPASS(keg == slab->us_keg);
 2840 
 2841         /* Do we need to remove from any lists? */
 2842         if (slab->us_freecount+1 == keg->uk_ipers) {
 2843                 LIST_REMOVE(slab, us_link);
 2844                 LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
 2845         } else if (slab->us_freecount == 0) {
 2846                 LIST_REMOVE(slab, us_link);
 2847                 LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
 2848         }
 2849 
 2850         /* Slab management. */
 2851         freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
 2852         BIT_SET(SLAB_SETSIZE, freei, &slab->us_free);
 2853         slab->us_freecount++;
 2854 
 2855         /* Keg statistics. */
 2856         keg->uk_free++;
 2857 }
 2858 
 2859 static void
 2860 zone_release(uma_zone_t zone, void **bucket, int cnt)
 2861 {
 2862         void *item;
 2863         uma_slab_t slab;
 2864         uma_keg_t keg;
 2865         uint8_t *mem;
 2866         int clearfull;
 2867         int i;
 2868 
 2869         clearfull = 0;
 2870         keg = zone_first_keg(zone);
 2871         KEG_LOCK(keg);
 2872         for (i = 0; i < cnt; i++) {
 2873                 item = bucket[i];
 2874                 if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
 2875                         mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
 2876                         if (zone->uz_flags & UMA_ZONE_HASH) {
 2877                                 slab = hash_sfind(&keg->uk_hash, mem);
 2878                         } else {
 2879                                 mem += keg->uk_pgoff;
 2880                                 slab = (uma_slab_t)mem;
 2881                         }
 2882                 } else {
 2883                         slab = vtoslab((vm_offset_t)item);
 2884                         if (slab->us_keg != keg) {
 2885                                 KEG_UNLOCK(keg);
 2886                                 keg = slab->us_keg;
 2887                                 KEG_LOCK(keg);
 2888                         }
 2889                 }
 2890                 slab_free_item(keg, slab, item);
 2891                 if (keg->uk_flags & UMA_ZFLAG_FULL) {
 2892                         if (keg->uk_pages < keg->uk_maxpages) {
 2893                                 keg->uk_flags &= ~UMA_ZFLAG_FULL;
 2894                                 clearfull = 1;
 2895                         }
 2896 
 2897                         /*
 2898                          * We can handle one more allocation.  Since we're
 2899                          * clearing ZFLAG_FULL, wake up all threads blocked
 2900                          * on pages.  This should be uncommon, so keep it
 2901                          * simple for now (rather than adding a count of
 2902                          * blocked threads, etc.).
 2903                          */
 2904                         wakeup(keg);
 2905                 }
 2906         }
 2907         KEG_UNLOCK(keg);
 2908         if (clearfull) {
 2909                 ZONE_LOCK(zone);
 2910                 zone->uz_flags &= ~UMA_ZFLAG_FULL;
 2911                 wakeup(zone);
 2912                 ZONE_UNLOCK(zone);
 2913         }
 2914 
 2915 }
 2916 
 2917 /*
 2918  * Frees a single item to any zone.
 2919  *
 2920  * Arguments:
 2921  *      zone   The zone to free to
 2922  *      item   The item we're freeing
 2923  *      udata  User supplied data for the dtor
 2924  *      skip   Skip dtors and finis
 2925  */
 2926 static void
 2927 zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
 2928 {
 2929 
 2930 #ifdef INVARIANTS
 2931         if (skip == SKIP_NONE) {
 2932                 if (zone->uz_flags & UMA_ZONE_MALLOC)
 2933                         uma_dbg_free(zone, udata, item);
 2934                 else
 2935                         uma_dbg_free(zone, NULL, item);
 2936         }
 2937 #endif
 2938         if (skip < SKIP_DTOR && zone->uz_dtor)
 2939                 zone->uz_dtor(item, zone->uz_size, udata);
 2940 
 2941         if (skip < SKIP_FINI && zone->uz_fini)
 2942                 zone->uz_fini(item, zone->uz_size);
 2943 
 2944         atomic_add_long(&zone->uz_frees, 1);
 2945         zone->uz_release(zone->uz_arg, &item, 1);
 2946 }
 2947 
 2948 /* See uma.h */
 2949 int
 2950 uma_zone_set_max(uma_zone_t zone, int nitems)
 2951 {
 2952         uma_keg_t keg;
 2953 
 2954         keg = zone_first_keg(zone);
 2955         if (keg == NULL)
 2956                 return (0);
 2957         KEG_LOCK(keg);
 2958         keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera;
 2959         if (keg->uk_maxpages * keg->uk_ipers < nitems)
 2960                 keg->uk_maxpages += keg->uk_ppera;
 2961         nitems = (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers;
 2962         KEG_UNLOCK(keg);
 2963 
 2964         return (nitems);
 2965 }
 2966 
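/*
 * A worked example of the rounding above, for a hypothetical keg with
 * uk_ipers == 50 items per slab and uk_ppera == 1 page per slab:
 *
 *	uma_zone_set_max(zone, 120);
 *	uk_maxpages = (120 / 50) * 1 = 2;       only 100 items, below 120
 *	uk_maxpages += 1;                       round up to 3 pages
 *	return ((3 / 1) * 50) = 150;            the limit actually enforced
 *
 * The limit is therefore rounded up to whole slabs; callers should treat
 * the return value, not the requested count, as the effective maximum.
 */
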
 2967 /* See uma.h */
 2968 int
 2969 uma_zone_get_max(uma_zone_t zone)
 2970 {
 2971         int nitems;
 2972         uma_keg_t keg;
 2973 
 2974         keg = zone_first_keg(zone);
 2975         if (keg == NULL)
 2976                 return (0);
 2977         KEG_LOCK(keg);
 2978         nitems = (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers;
 2979         KEG_UNLOCK(keg);
 2980 
 2981         return (nitems);
 2982 }
 2983 
 2984 /* See uma.h */
 2985 void
 2986 uma_zone_set_warning(uma_zone_t zone, const char *warning)
 2987 {
 2988 
 2989         ZONE_LOCK(zone);
 2990         zone->uz_warning = warning;
 2991         ZONE_UNLOCK(zone);
 2992 }
 2993 
 2994 /* See uma.h */
 2995 void
 2996 uma_zone_set_maxaction(uma_zone_t zone, uma_maxaction_t maxaction)
 2997 {
 2998 
 2999         ZONE_LOCK(zone);
 3000         TASK_INIT(&zone->uz_maxaction, 0, (task_fn_t *)maxaction, zone);
 3001         ZONE_UNLOCK(zone);
 3002 }
 3003 
 3004 /* See uma.h */
 3005 int
 3006 uma_zone_get_cur(uma_zone_t zone)
 3007 {
 3008         int64_t nitems;
 3009         u_int i;
 3010 
 3011         ZONE_LOCK(zone);
 3012         nitems = zone->uz_allocs - zone->uz_frees;
 3013         CPU_FOREACH(i) {
 3014                 /*
 3015                  * See the comment in sysctl_vm_zone_stats() regarding the
 3016                  * safety of accessing the per-cpu caches. With the zone lock
 3017                  * held, it is safe, but can potentially result in stale data.
 3018                  */
 3019                 nitems += zone->uz_cpu[i].uc_allocs -
 3020                     zone->uz_cpu[i].uc_frees;
 3021         }
 3022         ZONE_UNLOCK(zone);
 3023 
 3024         return (nitems < 0 ? 0 : nitems);
 3025 }
 3026 
 3027 /* See uma.h */
 3028 void
 3029 uma_zone_set_init(uma_zone_t zone, uma_init uminit)
 3030 {
 3031         uma_keg_t keg;
 3032 
 3033         keg = zone_first_keg(zone);
 3034         KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type"));
 3035         KEG_LOCK(keg);
 3036         KASSERT(keg->uk_pages == 0,
 3037             ("uma_zone_set_init on non-empty keg"));
 3038         keg->uk_init = uminit;
 3039         KEG_UNLOCK(keg);
 3040 }
 3041 
 3042 /* See uma.h */
 3043 void
 3044 uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
 3045 {
 3046         uma_keg_t keg;
 3047 
 3048         keg = zone_first_keg(zone);
 3049         KASSERT(keg != NULL, ("uma_zone_set_fini: Invalid zone type"));
 3050         KEG_LOCK(keg);
 3051         KASSERT(keg->uk_pages == 0,
 3052             ("uma_zone_set_fini on non-empty keg"));
 3053         keg->uk_fini = fini;
 3054         KEG_UNLOCK(keg);
 3055 }
 3056 
 3057 /* See uma.h */
 3058 void
 3059 uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
 3060 {
 3061 
 3062         ZONE_LOCK(zone);
 3063         KASSERT(zone_first_keg(zone)->uk_pages == 0,
 3064             ("uma_zone_set_zinit on non-empty keg"));
 3065         zone->uz_init = zinit;
 3066         ZONE_UNLOCK(zone);
 3067 }
 3068 
 3069 /* See uma.h */
 3070 void
 3071 uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
 3072 {
 3073 
 3074         ZONE_LOCK(zone);
 3075         KASSERT(zone_first_keg(zone)->uk_pages == 0,
 3076             ("uma_zone_set_zfini on non-empty keg"));
 3077         zone->uz_fini = zfini;
 3078         ZONE_UNLOCK(zone);
 3079 }
 3080 
 3081 /* See uma.h */
 3082 /* XXX uk_freef is not actually used with the zone locked */
 3083 void
 3084 uma_zone_set_freef(uma_zone_t zone, uma_free freef)
 3085 {
 3086         uma_keg_t keg;
 3087 
 3088         keg = zone_first_keg(zone);
 3089         KASSERT(keg != NULL, ("uma_zone_set_freef: Invalid zone type"));
 3090         KEG_LOCK(keg);
 3091         keg->uk_freef = freef;
 3092         KEG_UNLOCK(keg);
 3093 }
 3094 
 3095 /* See uma.h */
 3096 /* XXX uk_allocf is not actually used with the zone locked */
 3097 void
 3098 uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
 3099 {
 3100         uma_keg_t keg;
 3101 
 3102         keg = zone_first_keg(zone);
 3103         KEG_LOCK(keg);
 3104         keg->uk_allocf = allocf;
 3105         KEG_UNLOCK(keg);
 3106 }
 3107 
 3108 /* See uma.h */
 3109 void
 3110 uma_zone_reserve(uma_zone_t zone, int items)
 3111 {
 3112         uma_keg_t keg;
 3113 
 3114         keg = zone_first_keg(zone);
 3115         if (keg == NULL)
 3116                 return;
 3117         KEG_LOCK(keg);
 3118         keg->uk_reserve = items;
 3119         KEG_UNLOCK(keg);
 3120 
 3121         return;
 3122 }
 3123 
 3124 /* See uma.h */
 3125 int
 3126 uma_zone_reserve_kva(uma_zone_t zone, int count)
 3127 {
 3128         uma_keg_t keg;
 3129         vm_offset_t kva;
 3130         u_int pages;
 3131 
 3132         keg = zone_first_keg(zone);
 3133         if (keg == NULL)
 3134                 return (0);
 3135         pages = count / keg->uk_ipers;
 3136 
 3137         if (pages * keg->uk_ipers < count)
 3138                 pages++;
 3139         pages *= keg->uk_ppera;
 3140 
 3141 #ifdef UMA_MD_SMALL_ALLOC
 3142         if (keg->uk_ppera > 1) {
 3143 #else
 3144         if (1) {
 3145 #endif
 3146                 kva = kva_alloc((vm_size_t)pages * PAGE_SIZE);
 3147                 if (kva == 0)
 3148                         return (0);
 3149         } else
 3150                 kva = 0;
 3151         KEG_LOCK(keg);
 3152         keg->uk_kva = kva;
 3153         keg->uk_offset = 0;
 3154         keg->uk_maxpages = pages;
 3155 #ifdef UMA_MD_SMALL_ALLOC
 3156         keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc;
 3157 #else
 3158         keg->uk_allocf = noobj_alloc;
 3159 #endif
 3160         keg->uk_flags |= UMA_ZONE_NOFREE;
 3161         KEG_UNLOCK(keg);
 3162 
 3163         return (1);
 3164 }
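
/*
 * Worked example of the sizing above (hypothetical numbers): with
 * uk_ipers = 10 and uk_ppera = 2, a request for count = 25 items gives
 * pages = 25 / 10 = 2, bumped to 3 because 2 * 10 < 25, then multiplied by
 * uk_ppera for a total of 6 pages, so 6 * PAGE_SIZE bytes of KVA are set
 * aside up front.  Because UMA_ZONE_NOFREE is also set here, pages backing
 * this zone are never returned to the system afterwards.
 */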
 3165 
 3166 /* See uma.h */
 3167 void
 3168 uma_prealloc(uma_zone_t zone, int items)
 3169 {
 3170         int slabs;
 3171         uma_slab_t slab;
 3172         uma_keg_t keg;
 3173 
 3174         keg = zone_first_keg(zone);
 3175         if (keg == NULL)
 3176                 return;
 3177         KEG_LOCK(keg);
 3178         slabs = items / keg->uk_ipers;
 3179         if (slabs * keg->uk_ipers < items)
 3180                 slabs++;
 3181         while (slabs > 0) {
 3182                 slab = keg_alloc_slab(keg, zone, M_WAITOK);
 3183                 if (slab == NULL)
 3184                         break;
 3185                 MPASS(slab->us_keg == keg);
 3186                 LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
 3187                 slabs--;
 3188         }
 3189         KEG_UNLOCK(keg);
 3190 }
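
/*
 * Illustrative sketch (not part of this file; the "lowmem_item" names are
 * hypothetical): a consumer that must be able to allocate on a low-memory
 * path typically pairs uma_zone_reserve() with uma_prealloc() so that the
 * reserved slabs actually exist, and then passes M_USE_RESERVE on the
 * critical allocation:
 *
 *      zone = uma_zcreate("lowmem_item", sizeof(struct lowmem_item),
 *          NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
 *      uma_zone_reserve(zone, 32);
 *      uma_prealloc(zone, 32);
 *      ...
 *      item = uma_zalloc(zone, M_NOWAIT | M_USE_RESERVE);
 */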
 3191 
 3192 /* See uma.h */
 3193 static void
 3194 uma_reclaim_locked(bool kmem_danger)
 3195 {
 3196 
 3197 #ifdef UMA_DEBUG
 3198         printf("UMA: vm asked us to release pages!\n");
 3199 #endif
 3200         sx_assert(&uma_drain_lock, SA_XLOCKED);
 3201         bucket_enable();
 3202         zone_foreach(zone_drain);
 3203         if (vm_page_count_min() || kmem_danger) {
 3204                 cache_drain_safe(NULL);
 3205                 zone_foreach(zone_drain);
 3206         }
 3207         /*
 3208          * Some slabs may have been freed, but this zone is visited early in
 3209          * the zone list; visit it again here to free pages that only became
 3210          * empty once the other zones were drained.  We have to do the same
 3211          * for buckets.
 3212          */
 3212         zone_drain(slabzone);
 3213         bucket_zone_drain();
 3214 }
 3215 
 3216 void
 3217 uma_reclaim(void)
 3218 {
 3219 
 3220         sx_xlock(&uma_drain_lock);
 3221         uma_reclaim_locked(false);
 3222         sx_xunlock(&uma_drain_lock);
 3223 }
 3224 
 3225 static int uma_reclaim_needed;
 3226 
 3227 void
 3228 uma_reclaim_wakeup(void)
 3229 {
 3230 
 3231         uma_reclaim_needed = 1;
 3232         wakeup(&uma_reclaim_needed);
 3233 }
 3234 
 3235 void
 3236 uma_reclaim_worker(void *arg __unused)
 3237 {
 3238 
 3239         sx_xlock(&uma_drain_lock);
 3240         for (;;) {
 3241                 sx_sleep(&uma_reclaim_needed, &uma_drain_lock, PVM,
 3242                     "umarcl", 0);
 3243                 if (uma_reclaim_needed) {
 3244                         uma_reclaim_needed = 0;
 3245                         sx_xunlock(&uma_drain_lock);
 3246                         EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_KMEM);
 3247                         sx_xlock(&uma_drain_lock);
 3248                         uma_reclaim_locked(true);
 3249                 }
 3250         }
 3251 }
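
/*
 * Illustrative sketch (hypothetical; the real hookup lives in the VM
 * pagedaemon code): uma_reclaim_worker() above is intended to run as its
 * own kernel thread, with uma_reclaim_wakeup() called from whatever context
 * detects kernel-memory pressure:
 *
 *      (void)kthread_add(uma_reclaim_worker, NULL, NULL, NULL, 0, 0,
 *          "uma");
 *      ...
 *      // kmem_under_pressure() is a hypothetical placeholder predicate.
 *      if (kmem_under_pressure())
 *              uma_reclaim_wakeup();
 */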
 3252 
 3253 /* See uma.h */
 3254 int
 3255 uma_zone_exhausted(uma_zone_t zone)
 3256 {
 3257         int full;
 3258 
 3259         ZONE_LOCK(zone);
 3260         full = (zone->uz_flags & UMA_ZFLAG_FULL);
 3261         ZONE_UNLOCK(zone);
 3262         return (full);  
 3263 }
 3264 
 3265 int
 3266 uma_zone_exhausted_nolock(uma_zone_t zone)
 3267 {
 3268         return (zone->uz_flags & UMA_ZFLAG_FULL);
 3269 }
 3270 
 3271 void *
 3272 uma_large_malloc(vm_size_t size, int wait)
 3273 {
 3274         void *mem;
 3275         uma_slab_t slab;
 3276         uint8_t flags;
 3277 
 3278         slab = zone_alloc_item(slabzone, NULL, wait);
 3279         if (slab == NULL)
 3280                 return (NULL);
 3281         mem = page_alloc(NULL, size, &flags, wait);
 3282         if (mem) {
 3283                 vsetslab((vm_offset_t)mem, slab);
 3284                 slab->us_data = mem;
 3285                 slab->us_flags = flags | UMA_SLAB_MALLOC;
 3286                 slab->us_size = size;
 3287         } else {
 3288                 zone_free_item(slabzone, slab, NULL, SKIP_NONE);
 3289         }
 3290 
 3291         return (mem);
 3292 }
 3293 
 3294 void
 3295 uma_large_free(uma_slab_t slab)
 3296 {
 3297 
 3298         page_free(slab->us_data, slab->us_size, slab->us_flags);
 3299         zone_free_item(slabzone, slab, NULL, SKIP_NONE);
 3300 }
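
/*
 * Sketch of the expected caller (the free(9) path in kern_malloc.c; shown
 * here only as an illustration, the authoritative code lives there): the
 * slab recorded by vsetslab() in uma_large_malloc() is how free() later
 * recovers both the size and the fact that the memory came from the large
 * allocation path:
 *
 *      slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK));
 *      if (slab->us_flags & UMA_SLAB_MALLOC)
 *              uma_large_free(slab);
 *      else
 *              uma_zfree_arg(LIST_FIRST(&slab->us_keg->uk_zones), addr,
 *                  slab);
 */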
 3301 
 3302 static void
 3303 uma_zero_item(void *item, uma_zone_t zone)
 3304 {
 3305         int i;
 3306 
 3307         if (zone->uz_flags & UMA_ZONE_PCPU) {
 3308                 CPU_FOREACH(i)
 3309                         bzero(zpcpu_get_cpu(item, i), zone->uz_size);
 3310         } else
 3311                 bzero(item, zone->uz_size);
 3312 }
 3313 
 3314 void
 3315 uma_print_stats(void)
 3316 {
 3317         zone_foreach(uma_print_zone);
 3318 }
 3319 
 3320 static void
 3321 slab_print(uma_slab_t slab)
 3322 {
 3323         printf("slab: keg %p, data %p, freecount %d\n",
 3324                 slab->us_keg, slab->us_data, slab->us_freecount);
 3325 }
 3326 
 3327 static void
 3328 cache_print(uma_cache_t cache)
 3329 {
 3330         printf("alloc: %p(%d), free: %p(%d)\n",
 3331                 cache->uc_allocbucket,
 3332                 cache->uc_allocbucket ? cache->uc_allocbucket->ub_cnt : 0,
 3333                 cache->uc_freebucket,
 3334                 cache->uc_freebucket ? cache->uc_freebucket->ub_cnt : 0);
 3335 }
 3336 
 3337 static void
 3338 uma_print_keg(uma_keg_t keg)
 3339 {
 3340         uma_slab_t slab;
 3341 
 3342         printf("keg: %s(%p) size %d(%d) flags %#x ipers %d ppera %d "
 3343             "out %d free %d limit %d\n",
 3344             keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags,
 3345             keg->uk_ipers, keg->uk_ppera,
 3346             (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free,
 3347             keg->uk_free, (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers);
 3348         printf("Part slabs:\n");
 3349         LIST_FOREACH(slab, &keg->uk_part_slab, us_link)
 3350                 slab_print(slab);
 3351         printf("Free slabs:\n");
 3352         LIST_FOREACH(slab, &keg->uk_free_slab, us_link)
 3353                 slab_print(slab);
 3354         printf("Full slabs:\n");
 3355         LIST_FOREACH(slab, &keg->uk_full_slab, us_link)
 3356                 slab_print(slab);
 3357 }
 3358 
 3359 void
 3360 uma_print_zone(uma_zone_t zone)
 3361 {
 3362         uma_cache_t cache;
 3363         uma_klink_t kl;
 3364         int i;
 3365 
 3366         printf("zone: %s(%p) size %d flags %#x\n",
 3367             zone->uz_name, zone, zone->uz_size, zone->uz_flags);
 3368         LIST_FOREACH(kl, &zone->uz_kegs, kl_link)
 3369                 uma_print_keg(kl->kl_keg);
 3370         CPU_FOREACH(i) {
 3371                 cache = &zone->uz_cpu[i];
 3372                 printf("CPU %d Cache:\n", i);
 3373                 cache_print(cache);
 3374         }
 3375 }
 3376 
 3377 #ifdef DDB
 3378 /*
 3379  * Generate statistics across both the zone and its per-CPU caches.  Return
 3380  * the desired statistics via whichever pointer arguments are non-NULL.
 3381  *
 3382  * Note: does not update the zone statistics, as it can't safely clear the
 3383  * per-CPU cache statistic.
 3384  *
 3385  * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't
 3386  * safe from off-CPU; we should modify the caches to track this information
 3387  * directly so that we don't have to.
 3388  */
 3389 static void
 3390 uma_zone_sumstat(uma_zone_t z, int *cachefreep, uint64_t *allocsp,
 3391     uint64_t *freesp, uint64_t *sleepsp)
 3392 {
 3393         uma_cache_t cache;
 3394         uint64_t allocs, frees, sleeps;
 3395         int cachefree, cpu;
 3396 
 3397         allocs = frees = sleeps = 0;
 3398         cachefree = 0;
 3399         CPU_FOREACH(cpu) {
 3400                 cache = &z->uz_cpu[cpu];
 3401                 if (cache->uc_allocbucket != NULL)
 3402                         cachefree += cache->uc_allocbucket->ub_cnt;
 3403                 if (cache->uc_freebucket != NULL)
 3404                         cachefree += cache->uc_freebucket->ub_cnt;
 3405                 allocs += cache->uc_allocs;
 3406                 frees += cache->uc_frees;
 3407         }
 3408         allocs += z->uz_allocs;
 3409         frees += z->uz_frees;
 3410         sleeps += z->uz_sleeps;
 3411         if (cachefreep != NULL)
 3412                 *cachefreep = cachefree;
 3413         if (allocsp != NULL)
 3414                 *allocsp = allocs;
 3415         if (freesp != NULL)
 3416                 *freesp = frees;
 3417         if (sleepsp != NULL)
 3418                 *sleepsp = sleeps;
 3419 }
 3420 #endif /* DDB */
 3421 
 3422 static int
 3423 sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS)
 3424 {
 3425         uma_keg_t kz;
 3426         uma_zone_t z;
 3427         int count;
 3428 
 3429         count = 0;
 3430         rw_rlock(&uma_rwlock);
 3431         LIST_FOREACH(kz, &uma_kegs, uk_link) {
 3432                 LIST_FOREACH(z, &kz->uk_zones, uz_link)
 3433                         count++;
 3434         }
 3435         rw_runlock(&uma_rwlock);
 3436         return (sysctl_handle_int(oidp, &count, 0, req));
 3437 }
 3438 
 3439 static int
 3440 sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
 3441 {
 3442         struct uma_stream_header ush;
 3443         struct uma_type_header uth;
 3444         struct uma_percpu_stat *ups;
 3445         uma_bucket_t bucket;
 3446         struct sbuf sbuf;
 3447         uma_cache_t cache;
 3448         uma_klink_t kl;
 3449         uma_keg_t kz;
 3450         uma_zone_t z;
 3451         uma_keg_t k;
 3452         int count, error, i;
 3453 
 3454         error = sysctl_wire_old_buffer(req, 0);
 3455         if (error != 0)
 3456                 return (error);
 3457         sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
 3458         sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
 3459         ups = malloc((mp_maxid + 1) * sizeof(*ups), M_TEMP, M_WAITOK);
 3460 
 3461         count = 0;
 3462         rw_rlock(&uma_rwlock);
 3463         LIST_FOREACH(kz, &uma_kegs, uk_link) {
 3464                 LIST_FOREACH(z, &kz->uk_zones, uz_link)
 3465                         count++;
 3466         }
 3467 
 3468         /*
 3469          * Insert stream header.
 3470          */
 3471         bzero(&ush, sizeof(ush));
 3472         ush.ush_version = UMA_STREAM_VERSION;
 3473         ush.ush_maxcpus = (mp_maxid + 1);
 3474         ush.ush_count = count;
 3475         (void)sbuf_bcat(&sbuf, &ush, sizeof(ush));
 3476 
 3477         LIST_FOREACH(kz, &uma_kegs, uk_link) {
 3478                 LIST_FOREACH(z, &kz->uk_zones, uz_link) {
 3479                         bzero(&uth, sizeof(uth));
 3480                         ZONE_LOCK(z);
 3481                         strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
 3482                         uth.uth_align = kz->uk_align;
 3483                         uth.uth_size = kz->uk_size;
 3484                         uth.uth_rsize = kz->uk_rsize;
 3485                         LIST_FOREACH(kl, &z->uz_kegs, kl_link) {
 3486                                 k = kl->kl_keg;
 3487                                 uth.uth_maxpages += k->uk_maxpages;
 3488                                 uth.uth_pages += k->uk_pages;
 3489                                 uth.uth_keg_free += k->uk_free;
 3490                                 uth.uth_limit = (k->uk_maxpages / k->uk_ppera)
 3491                                     * k->uk_ipers;
 3492                         }
 3493 
 3494                         /*
 3495                          * A zone is secondary if it is not the first entry
 3496                          * on the keg's zone list.
 3497                          */
 3498                         if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
 3499                             (LIST_FIRST(&kz->uk_zones) != z))
 3500                                 uth.uth_zone_flags = UTH_ZONE_SECONDARY;
 3501 
 3502                         LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
 3503                                 uth.uth_zone_free += bucket->ub_cnt;
 3504                         uth.uth_allocs = z->uz_allocs;
 3505                         uth.uth_frees = z->uz_frees;
 3506                         uth.uth_fails = z->uz_fails;
 3507                         uth.uth_sleeps = z->uz_sleeps;
 3508                         /*
 3509                          * While it is not normally safe to access the cache
 3510                          * bucket pointers while not on the CPU that owns the
 3511                          * cache, we only allow the pointers to be exchanged
 3512                          * without the zone lock held, not invalidated, so
 3513                          * accept the possible race associated with bucket
 3514                          * exchange during monitoring.
 3515                          */
 3516                         for (i = 0; i < mp_maxid + 1; i++) {
 3517                                 bzero(&ups[i], sizeof(*ups));
 3518                                 if (kz->uk_flags & UMA_ZFLAG_INTERNAL ||
 3519                                     CPU_ABSENT(i))
 3520                                         continue;
 3521                                 cache = &z->uz_cpu[i];
 3522                                 if (cache->uc_allocbucket != NULL)
 3523                                         ups[i].ups_cache_free +=
 3524                                             cache->uc_allocbucket->ub_cnt;
 3525                                 if (cache->uc_freebucket != NULL)
 3526                                         ups[i].ups_cache_free +=
 3527                                             cache->uc_freebucket->ub_cnt;
 3528                                 ups[i].ups_allocs = cache->uc_allocs;
 3529                                 ups[i].ups_frees = cache->uc_frees;
 3530                         }
 3531                         ZONE_UNLOCK(z);
 3532                         (void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
 3533                         for (i = 0; i < mp_maxid + 1; i++)
 3534                                 (void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i]));
 3535                 }
 3536         }
 3537         rw_runlock(&uma_rwlock);
 3538         error = sbuf_finish(&sbuf);
 3539         sbuf_delete(&sbuf);
 3540         free(ups, M_TEMP);
 3541         return (error);
 3542 }
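
/*
 * Sketch of a userland consumer of the stream produced above (illustrative
 * only; libmemstat(3) is the supported interface, and the sysctl name is
 * assumed to be "vm.zone_stats" as registered elsewhere in this file).  The
 * layout is one uma_stream_header, then for each zone one uma_type_header
 * followed by ush_maxcpus uma_percpu_stat records:
 *
 *      size_t len = 0;
 *      sysctlbyname("vm.zone_stats", NULL, &len, NULL, 0);
 *      char *buf = malloc(len);
 *      sysctlbyname("vm.zone_stats", buf, &len, NULL, 0);
 *      struct uma_stream_header *ush = (struct uma_stream_header *)buf;
 *      ...walk ush->ush_count records, each sizeof(struct uma_type_header)
 *         plus ush->ush_maxcpus * sizeof(struct uma_percpu_stat) bytes...
 */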
 3543 
 3544 int
 3545 sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS)
 3546 {
 3547         uma_zone_t zone = *(uma_zone_t *)arg1;
 3548         int error, max;
 3549 
 3550         max = uma_zone_get_max(zone);
 3551         error = sysctl_handle_int(oidp, &max, 0, req);
 3552         if (error || !req->newptr)
 3553                 return (error);
 3554 
 3555         uma_zone_set_max(zone, max);
 3556 
 3557         return (0);
 3558 }
 3559 
 3560 int
 3561 sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS)
 3562 {
 3563         uma_zone_t zone = *(uma_zone_t *)arg1;
 3564         int cur;
 3565 
 3566         cur = uma_zone_get_cur(zone);
 3567         return (sysctl_handle_int(oidp, &cur, 0, req));
 3568 }
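
/*
 * Illustrative sketch of how a subsystem wires the two handlers above to
 * sysctl OIDs (the "foo" names are hypothetical).  Note that arg1 must be
 * the address of the uma_zone_t variable, matching the *(uma_zone_t *)arg1
 * dereference in both handlers:
 *
 *      static uma_zone_t foo_zone;
 *
 *      SYSCTL_PROC(_kern, OID_AUTO, foo_max, CTLTYPE_INT | CTLFLAG_RW,
 *          &foo_zone, 0, sysctl_handle_uma_zone_max, "I",
 *          "Maximum number of foo items");
 *      SYSCTL_PROC(_kern, OID_AUTO, foo_cur, CTLTYPE_INT | CTLFLAG_RD,
 *          &foo_zone, 0, sysctl_handle_uma_zone_cur, "I",
 *          "Current number of foo items");
 */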
 3569 
 3570 #ifdef INVARIANTS
 3571 static uma_slab_t
 3572 uma_dbg_getslab(uma_zone_t zone, void *item)
 3573 {
 3574         uma_slab_t slab;
 3575         uma_keg_t keg;
 3576         uint8_t *mem;
 3577 
 3578         mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
 3579         if (zone->uz_flags & UMA_ZONE_VTOSLAB) {
 3580                 slab = vtoslab((vm_offset_t)mem);
 3581         } else {
 3582                 /*
 3583                  * It is safe to return the slab here even though the
 3584                  * zone is unlocked because the item's allocation state
 3585                  * essentially holds a reference.
 3586                  */
 3587                 ZONE_LOCK(zone);
 3588                 keg = LIST_FIRST(&zone->uz_kegs)->kl_keg;
 3589                 if (keg->uk_flags & UMA_ZONE_HASH)
 3590                         slab = hash_sfind(&keg->uk_hash, mem);
 3591                 else
 3592                         slab = (uma_slab_t)(mem + keg->uk_pgoff);
 3593                 ZONE_UNLOCK(zone);
 3594         }
 3595 
 3596         return (slab);
 3597 }
 3598 
 3599 /*
 3600  * Set up the slab's freei data such that uma_dbg_free can function.
 3601  *
 3602  */
 3603 static void
 3604 uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
 3605 {
 3606         uma_keg_t keg;
 3607         int freei;
 3608 
 3609         if (zone_first_keg(zone) == NULL)
 3610                 return;
 3611         if (slab == NULL) {
 3612                 slab = uma_dbg_getslab(zone, item);
 3613                 if (slab == NULL) 
 3614                         panic("uma: item %p did not belong to zone %s\n",
 3615                             item, zone->uz_name);
 3616         }
 3617         keg = slab->us_keg;
 3618         freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
 3619 
 3620         if (BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
 3621                 panic("Duplicate alloc of %p from zone %p(%s) slab %p(%d)\n",
 3622                     item, zone, zone->uz_name, slab, freei);
 3623         BIT_SET_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
 3624 
 3625         return;
 3626 }
 3627 
 3628 /*
 3629  * Verifies freed addresses.  Checks for alignment, valid slab membership
 3630  * and duplicate frees.
 3631  *
 3632  */
 3633 static void
 3634 uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
 3635 {
 3636         uma_keg_t keg;
 3637         int freei;
 3638 
 3639         if (zone_first_keg(zone) == NULL)
 3640                 return;
 3641         if (slab == NULL) {
 3642                 slab = uma_dbg_getslab(zone, item);
 3643                 if (slab == NULL) 
 3644                         panic("uma: Freed item %p did not belong to zone %s\n",
 3645                             item, zone->uz_name);
 3646         }
 3647         keg = slab->us_keg;
 3648         freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
 3649 
 3650         if (freei >= keg->uk_ipers)
 3651                 panic("Invalid free of %p from zone %p(%s) slab %p(%d)\n",
 3652                     item, zone, zone->uz_name, slab, freei);
 3653 
 3654         if (((freei * keg->uk_rsize) + slab->us_data) != item) 
 3655                 panic("Unaligned free of %p from zone %p(%s) slab %p(%d)\n",
 3656                     item, zone, zone->uz_name, slab, freei);
 3657 
 3658         if (!BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
 3659                 panic("Duplicate free of %p from zone %p(%s) slab %p(%d)\n",
 3660                     item, zone, zone->uz_name, slab, freei);
 3661 
 3662         BIT_CLR_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
 3663 }
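
/*
 * Worked example of the index arithmetic above (hypothetical numbers): with
 * uk_rsize = 256 and us_data = 0xfffff80012340000, freeing the item at
 * 0xfffff80012340400 gives freei = 0x400 / 256 = 4, and the alignment check
 * requires that 4 * 256 added back to us_data reproduce the item pointer
 * exactly; a pointer into the middle of an item trips that panic.
 */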
 3664 #endif /* INVARIANTS */
 3665 
 3666 #ifdef DDB
 3667 DB_SHOW_COMMAND(uma, db_show_uma)
 3668 {
 3669         uint64_t allocs, frees, sleeps;
 3670         uma_bucket_t bucket;
 3671         uma_keg_t kz;
 3672         uma_zone_t z;
 3673         int cachefree;
 3674 
 3675         db_printf("%18s %8s %8s %8s %12s %8s %8s\n", "Zone", "Size", "Used",
 3676             "Free", "Requests", "Sleeps", "Bucket");
 3677         LIST_FOREACH(kz, &uma_kegs, uk_link) {
 3678                 LIST_FOREACH(z, &kz->uk_zones, uz_link) {
 3679                         if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
 3680                                 allocs = z->uz_allocs;
 3681                                 frees = z->uz_frees;
 3682                                 sleeps = z->uz_sleeps;
 3683                                 cachefree = 0;
 3684                         } else
 3685                                 uma_zone_sumstat(z, &cachefree, &allocs,
 3686                                     &frees, &sleeps);
 3687                         if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
 3688                             (LIST_FIRST(&kz->uk_zones) != z)))
 3689                                 cachefree += kz->uk_free;
 3690                         LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
 3691                                 cachefree += bucket->ub_cnt;
 3692                         db_printf("%18s %8ju %8jd %8d %12ju %8ju %8u\n",
 3693                             z->uz_name, (uintmax_t)kz->uk_size,
 3694                             (intmax_t)(allocs - frees), cachefree,
 3695                             (uintmax_t)allocs, sleeps, z->uz_count);
 3696                         if (db_pager_quit)
 3697                                 return;
 3698                 }
 3699         }
 3700 }
 3701 
 3702 DB_SHOW_COMMAND(umacache, db_show_umacache)
 3703 {
 3704         uint64_t allocs, frees;
 3705         uma_bucket_t bucket;
 3706         uma_zone_t z;
 3707         int cachefree;
 3708 
 3709         db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
 3710             "Requests", "Bucket");
 3711         LIST_FOREACH(z, &uma_cachezones, uz_link) {
 3712                 uma_zone_sumstat(z, &cachefree, &allocs, &frees, NULL);
 3713                 LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
 3714                         cachefree += bucket->ub_cnt;
 3715                 db_printf("%18s %8ju %8jd %8d %12ju %8u\n",
 3716                     z->uz_name, (uintmax_t)z->uz_size,
 3717                     (intmax_t)(allocs - frees), cachefree,
 3718                     (uintmax_t)allocs, z->uz_count);
 3719                 if (db_pager_quit)
 3720                         return;
 3721         }
 3722 }
 3723 #endif  /* DDB */
