
FreeBSD/Linux Kernel Cross Reference
sys/vm/uma_core.c


    1 /*-
    2  * Copyright (c) 2002-2005, 2009 Jeffrey Roberson <jeff@FreeBSD.org>
    3  * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
    4  * Copyright (c) 2004-2006 Robert N. M. Watson
    5  * All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice unmodified, this list of conditions, and the following
   12  *    disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   27  */
   28 
   29 /*
   30  * uma_core.c  Implementation of the Universal Memory allocator
   31  *
   32  * This allocator is intended to replace the multitude of similar object caches
   33  * in the standard FreeBSD kernel.  The intent is to be flexible as well as
   34  * efficient.  A primary design goal is to return unused memory to the rest of
   35  * the system.  This will make the system as a whole more flexible due to the
   36  * ability to move memory to subsystems which most need it instead of leaving
   37  * pools of reserved memory unused.
   38  *
   39  * The basic ideas stem from similar slab/zone based allocators whose algorithms
   40  * are well known.
   41  *
   42  */
   43 
   44 /*
   45  * TODO:
   46  *      - Improve memory usage for large allocations
   47  *      - Investigate cache size adjustments
   48  */
   49 
   50 #include <sys/cdefs.h>
   51 __FBSDID("$FreeBSD: releng/9.0/sys/vm/uma_core.c 222184 2011-05-22 17:46:16Z alc $");
   52 
   53 /* I should really use ktr.. */
   54 /*
   55 #define UMA_DEBUG 1
   56 #define UMA_DEBUG_ALLOC 1
   57 #define UMA_DEBUG_ALLOC_1 1
   58 */
   59 
   60 #include "opt_ddb.h"
   61 #include "opt_param.h"
   62 
   63 #include <sys/param.h>
   64 #include <sys/systm.h>
   65 #include <sys/kernel.h>
   66 #include <sys/types.h>
   67 #include <sys/queue.h>
   68 #include <sys/malloc.h>
   69 #include <sys/ktr.h>
   70 #include <sys/lock.h>
   71 #include <sys/sysctl.h>
   72 #include <sys/mutex.h>
   73 #include <sys/proc.h>
   74 #include <sys/sbuf.h>
   75 #include <sys/smp.h>
   76 #include <sys/vmmeter.h>
   77 
   78 #include <vm/vm.h>
   79 #include <vm/vm_object.h>
   80 #include <vm/vm_page.h>
   81 #include <vm/vm_param.h>
   82 #include <vm/vm_map.h>
   83 #include <vm/vm_kern.h>
   84 #include <vm/vm_extern.h>
   85 #include <vm/uma.h>
   86 #include <vm/uma_int.h>
   87 #include <vm/uma_dbg.h>
   88 
   89 #include <ddb/ddb.h>
   90 
   91 /*
   92  * This is the zone and keg from which all zones are spawned.  The idea is that
   93  * even the zone & keg heads are allocated from the allocator, so we use the
   94  * bss section to bootstrap us.
   95  */
   96 static struct uma_keg masterkeg;
   97 static struct uma_zone masterzone_k;
   98 static struct uma_zone masterzone_z;
   99 static uma_zone_t kegs = &masterzone_k;
  100 static uma_zone_t zones = &masterzone_z;
  101 
  102 /* This is the zone from which all of uma_slab_t's are allocated. */
  103 static uma_zone_t slabzone;
  104 static uma_zone_t slabrefzone;  /* With refcounters (for UMA_ZONE_REFCNT) */
  105 
  106 /*
  107  * The initial hash tables come out of this zone so they can be allocated
  108  * prior to malloc coming up.
  109  */
  110 static uma_zone_t hashzone;
  111 
  112 /* The boot-time adjusted value for cache line alignment. */
  113 int uma_align_cache = 64 - 1;
  114 
  115 static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
  116 
  117 /*
  118  * Are we allowed to allocate buckets?
  119  */
  120 static int bucketdisable = 1;
  121 
  122 /* Linked list of all kegs in the system */
  123 static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);
  124 
  125 /* This mutex protects the keg list */
  126 static struct mtx uma_mtx;
  127 
  128 /* Linked list of boot time pages */
  129 static LIST_HEAD(,uma_slab) uma_boot_pages =
  130     LIST_HEAD_INITIALIZER(uma_boot_pages);
  131 
  132 /* This mutex protects the boot time pages list */
  133 static struct mtx uma_boot_pages_mtx;
  134 
  135 /* Is the VM done starting up? */
  136 static int booted = 0;
  137 #define UMA_STARTUP     1
  138 #define UMA_STARTUP2    2
  139 
  140 /* Maximum number of allowed items-per-slab if the slab header is OFFPAGE */
  141 static u_int uma_max_ipers;
  142 static u_int uma_max_ipers_ref;
  143 
  144 /*
  145  * This is the handle used to schedule events that need to happen
  146  * outside of the allocation fast path.
  147  */
  148 static struct callout uma_callout;
  149 #define UMA_TIMEOUT     20              /* Seconds for callout interval. */
  150 
  151 /*
  152  * This structure is passed as the zone ctor arg so that I don't have to create
  153  * a special allocation function just for zones.
  154  */
  155 struct uma_zctor_args {
  156         char *name;
  157         size_t size;
  158         uma_ctor ctor;
  159         uma_dtor dtor;
  160         uma_init uminit;
  161         uma_fini fini;
  162         uma_keg_t keg;
  163         int align;
  164         u_int32_t flags;
  165 };
  166 
  167 struct uma_kctor_args {
  168         uma_zone_t zone;
  169         size_t size;
  170         uma_init uminit;
  171         uma_fini fini;
  172         int align;
  173         u_int32_t flags;
  174 };
  175 
  176 struct uma_bucket_zone {
  177         uma_zone_t      ubz_zone;
  178         char            *ubz_name;
  179         int             ubz_entries;
  180 };
  181 
  182 #define BUCKET_MAX      128
  183 
  184 struct uma_bucket_zone bucket_zones[] = {
  185         { NULL, "16 Bucket", 16 },
  186         { NULL, "32 Bucket", 32 },
  187         { NULL, "64 Bucket", 64 },
  188         { NULL, "128 Bucket", 128 },
  189         { NULL, NULL, 0}
  190 };
  191 
  192 #define BUCKET_SHIFT    4
  193 #define BUCKET_ZONES    ((BUCKET_MAX >> BUCKET_SHIFT) + 1)
  194 
  195 /*
  196  * bucket_size[] maps requested bucket sizes to zones that allocate a bucket
  197  * of approximately the right size.
  198  */
  199 static uint8_t bucket_size[BUCKET_ZONES];
  200 
  201 /*
  202  * Flags and enumerations to be passed to internal functions.
  203  */
  204 enum zfreeskip { SKIP_NONE, SKIP_DTOR, SKIP_FINI };
  205 
  206 #define ZFREE_STATFAIL  0x00000001      /* Update zone failure statistic. */
  207 #define ZFREE_STATFREE  0x00000002      /* Update zone free statistic. */
  208 
  209 /* Prototypes.. */
  210 
  211 static void *obj_alloc(uma_zone_t, int, u_int8_t *, int);
  212 static void *page_alloc(uma_zone_t, int, u_int8_t *, int);
  213 static void *startup_alloc(uma_zone_t, int, u_int8_t *, int);
  214 static void page_free(void *, int, u_int8_t);
  215 static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int);
  216 static void cache_drain(uma_zone_t);
  217 static void bucket_drain(uma_zone_t, uma_bucket_t);
  218 static void bucket_cache_drain(uma_zone_t zone);
  219 static int keg_ctor(void *, int, void *, int);
  220 static void keg_dtor(void *, int, void *);
  221 static int zone_ctor(void *, int, void *, int);
  222 static void zone_dtor(void *, int, void *);
  223 static int zero_init(void *, int, int);
  224 static void keg_small_init(uma_keg_t keg);
  225 static void keg_large_init(uma_keg_t keg);
  226 static void zone_foreach(void (*zfunc)(uma_zone_t));
  227 static void zone_timeout(uma_zone_t zone);
  228 static int hash_alloc(struct uma_hash *);
  229 static int hash_expand(struct uma_hash *, struct uma_hash *);
  230 static void hash_free(struct uma_hash *hash);
  231 static void uma_timeout(void *);
  232 static void uma_startup3(void);
  233 static void *zone_alloc_item(uma_zone_t, void *, int);
  234 static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip,
  235     int);
  236 static void bucket_enable(void);
  237 static void bucket_init(void);
  238 static uma_bucket_t bucket_alloc(int, int);
  239 static void bucket_free(uma_bucket_t);
  240 static void bucket_zone_drain(void);
  241 static int zone_alloc_bucket(uma_zone_t zone, int flags);
  242 static uma_slab_t zone_fetch_slab(uma_zone_t zone, uma_keg_t last, int flags);
  243 static uma_slab_t zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int flags);
  244 static void *slab_alloc_item(uma_zone_t zone, uma_slab_t slab);
  245 static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
  246     uma_fini fini, int align, u_int32_t flags);
  247 static inline void zone_relock(uma_zone_t zone, uma_keg_t keg);
  248 static inline void keg_relock(uma_keg_t keg, uma_zone_t zone);
  249 
  250 void uma_print_zone(uma_zone_t);
  251 void uma_print_stats(void);
  252 static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
  253 static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
  254 
  255 SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
  256 
  257 SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT,
  258     0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");
  259 
  260 SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
  261     0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");
  262 
  263 /*
  264  * This routine checks whether it is safe to enable buckets.
  265  */
  266 
  267 static void
  268 bucket_enable(void)
  269 {
  270         if (cnt.v_free_count < cnt.v_free_min)
  271                 bucketdisable = 1;
  272         else
  273                 bucketdisable = 0;
  274 }
  275 
  276 /*
  277  * Initialize bucket_zones, the array of zones of buckets of various sizes.
  278  *
  279  * For each zone, calculate the memory required for each bucket, consisting
  280  * of the header and an array of pointers.  Initialize bucket_size[] to map
  281  * the range of requested bucket sizes to the appropriate zone.
  282  */
  283 static void
  284 bucket_init(void)
  285 {
  286         struct uma_bucket_zone *ubz;
  287         int i;
  288         int j;
  289 
  290         for (i = 0, j = 0; bucket_zones[j].ubz_entries != 0; j++) {
  291                 int size;
  292 
  293                 ubz = &bucket_zones[j];
  294                 size = roundup(sizeof(struct uma_bucket), sizeof(void *));
  295                 size += sizeof(void *) * ubz->ubz_entries;
  296                 ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
  297                     NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
  298                     UMA_ZFLAG_INTERNAL | UMA_ZFLAG_BUCKET);
  299                 for (; i <= ubz->ubz_entries; i += (1 << BUCKET_SHIFT))
  300                         bucket_size[i >> BUCKET_SHIFT] = j;
  301         }
  302 }
  303 
  304 /*
  305  * Given a desired number of entries for a bucket, return the zone from which
  306  * to allocate the bucket.
  307  */
  308 static struct uma_bucket_zone *
  309 bucket_zone_lookup(int entries)
  310 {
  311         int idx;
  312 
  313         idx = howmany(entries, 1 << BUCKET_SHIFT);
  314         return (&bucket_zones[bucket_size[idx]]);
  315 }
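
/*
 * A minimal userland sketch of how bucket_init() fills bucket_size[] and how
 * bucket_zone_lookup() then maps a requested entry count to one of the zones
 * above.  The ex_* names and the standalone wrapper are illustrative only;
 * the constants mirror BUCKET_SHIFT, BUCKET_MAX and bucket_zones[] from this
 * file.
 */
#include <stdio.h>

#define EX_BUCKET_SHIFT 4
#define EX_BUCKET_MAX   128
#define EX_BUCKET_ZONES ((EX_BUCKET_MAX >> EX_BUCKET_SHIFT) + 1)

static int ex_entries[] = { 16, 32, 64, 128, 0 };
static unsigned char ex_bucket_size[EX_BUCKET_ZONES];

int
main(void)
{
        int i, j, want, idx;

        /* The fill loop from bucket_init(). */
        for (i = 0, j = 0; ex_entries[j] != 0; j++)
                for (; i <= ex_entries[j]; i += (1 << EX_BUCKET_SHIFT))
                        ex_bucket_size[i >> EX_BUCKET_SHIFT] = j;

        /* bucket_zone_lookup(): howmany(want, 16) indexes the table. */
        for (want = 1; want <= EX_BUCKET_MAX; want += 31) {
                idx = (want + (1 << EX_BUCKET_SHIFT) - 1) >> EX_BUCKET_SHIFT;
                printf("%3d entries -> \"%d Bucket\" zone\n",
                    want, ex_entries[ex_bucket_size[idx]]);
        }
        return (0);
}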
  316 
  317 static uma_bucket_t
  318 bucket_alloc(int entries, int bflags)
  319 {
  320         struct uma_bucket_zone *ubz;
  321         uma_bucket_t bucket;
  322 
  323         /*
  324          * This is to stop us from allocating per cpu buckets while we're
  325          * running out of vm.boot_pages.  Otherwise, we would exhaust the
  326          * boot pages.  This also prevents us from allocating buckets in
  327          * low memory situations.
  328          */
  329         if (bucketdisable)
  330                 return (NULL);
  331 
  332         ubz = bucket_zone_lookup(entries);
  333         bucket = zone_alloc_item(ubz->ubz_zone, NULL, bflags);
  334         if (bucket) {
  335 #ifdef INVARIANTS
  336                 bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
  337 #endif
  338                 bucket->ub_cnt = 0;
  339                 bucket->ub_entries = ubz->ubz_entries;
  340         }
  341 
  342         return (bucket);
  343 }
  344 
  345 static void
  346 bucket_free(uma_bucket_t bucket)
  347 {
  348         struct uma_bucket_zone *ubz;
  349 
  350         ubz = bucket_zone_lookup(bucket->ub_entries);
  351         zone_free_item(ubz->ubz_zone, bucket, NULL, SKIP_NONE,
  352             ZFREE_STATFREE);
  353 }
  354 
  355 static void
  356 bucket_zone_drain(void)
  357 {
  358         struct uma_bucket_zone *ubz;
  359 
  360         for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
  361                 zone_drain(ubz->ubz_zone);
  362 }
  363 
  364 static inline uma_keg_t
  365 zone_first_keg(uma_zone_t zone)
  366 {
  367 
  368         return (LIST_FIRST(&zone->uz_kegs)->kl_keg);
  369 }
  370 
  371 static void
  372 zone_foreach_keg(uma_zone_t zone, void (*kegfn)(uma_keg_t))
  373 {
  374         uma_klink_t klink;
  375 
  376         LIST_FOREACH(klink, &zone->uz_kegs, kl_link)
  377                 kegfn(klink->kl_keg);
  378 }
  379 
  380 /*
  381  * Routine called from the callout to fire off time-interval-based
  382  * calculations (stats, hash size, etc.).
  383  *
  384  * Arguments:
  385  *      arg   Unused
  386  *
  387  * Returns:
  388  *      Nothing
  389  */
  390 static void
  391 uma_timeout(void *unused)
  392 {
  393         bucket_enable();
  394         zone_foreach(zone_timeout);
  395 
  396         /* Reschedule this event */
  397         callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
  398 }
  399 
  400 /*
  401  * Routine to perform timeout driven calculations.  This expands the
  402  * hashes and does per cpu statistics aggregation.
  403  *
  404  *  Returns nothing.
  405  */
  406 static void
  407 keg_timeout(uma_keg_t keg)
  408 {
  409 
  410         KEG_LOCK(keg);
  411         /*
  412          * Expand the keg hash table.
  413          *
  414          * This is done if the number of slabs is larger than the hash size.
  415  * What I'm trying to do here is eliminate collisions entirely.  This
  416          * may be a little aggressive.  Should I allow for two collisions max?
  417          */
  418         if (keg->uk_flags & UMA_ZONE_HASH &&
  419             keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) {
  420                 struct uma_hash newhash;
  421                 struct uma_hash oldhash;
  422                 int ret;
  423 
  424                 /*
  425                  * This is so involved because allocating and freeing
  426                  * while the keg lock is held will lead to deadlock.
  427                  * I have to do everything in stages and check for
  428                  * races.
  429                  */
  430                 newhash = keg->uk_hash;
  431                 KEG_UNLOCK(keg);
  432                 ret = hash_alloc(&newhash);
  433                 KEG_LOCK(keg);
  434                 if (ret) {
  435                         if (hash_expand(&keg->uk_hash, &newhash)) {
  436                                 oldhash = keg->uk_hash;
  437                                 keg->uk_hash = newhash;
  438                         } else
  439                                 oldhash = newhash;
  440 
  441                         KEG_UNLOCK(keg);
  442                         hash_free(&oldhash);
  443                         KEG_LOCK(keg);
  444                 }
  445         }
  446         KEG_UNLOCK(keg);
  447 }
  448 
  449 static void
  450 zone_timeout(uma_zone_t zone)
  451 {
  452 
  453         zone_foreach_keg(zone, &keg_timeout);
  454 }
  455 
  456 /*
  457  * Allocate and zero-fill the next-sized hash table from the appropriate
  458  * backing store.
  459  *
  460  * Arguments:
  461  *      hash  A new hash structure with the old hash size in uh_hashsize
  462  *
  463  * Returns:
  464  *      1 on success and 0 on failure.
  465  */
  466 static int
  467 hash_alloc(struct uma_hash *hash)
  468 {
  469         int oldsize;
  470         int alloc;
  471 
  472         oldsize = hash->uh_hashsize;
  473 
  474         /* We're just going to go to a power of two greater */
  475         if (oldsize)  {
  476                 hash->uh_hashsize = oldsize * 2;
  477                 alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
  478                 hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
  479                     M_UMAHASH, M_NOWAIT);
  480         } else {
  481                 alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
  482                 hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
  483                     M_WAITOK);
  484                 hash->uh_hashsize = UMA_HASH_SIZE_INIT;
  485         }
  486         if (hash->uh_slab_hash) {
  487                 bzero(hash->uh_slab_hash, alloc);
  488                 hash->uh_hashmask = hash->uh_hashsize - 1;
  489                 return (1);
  490         }
  491 
  492         return (0);
  493 }
  494 
  495 /*
  496  * Expands the hash table for HASH zones.  This is done from zone_timeout
  497  * to reduce collisions.  This must not be done in the regular allocation
  498  * path, otherwise, we can recurse on the vm while allocating pages.
  499  *
  500  * Arguments:
  501  *      oldhash  The hash you want to expand
  502  *      newhash  The hash structure for the new table
  503  *
  504  * Returns:
  505  *      1 on success and 0 if the new table was missing or not
  506  *      larger than the old one.
  507  *
  508  */
  509 static int
  510 hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
  511 {
  512         uma_slab_t slab;
  513         int hval;
  514         int i;
  515 
  516         if (!newhash->uh_slab_hash)
  517                 return (0);
  518 
  519         if (oldhash->uh_hashsize >= newhash->uh_hashsize)
  520                 return (0);
  521 
  522         /*
  523          * I need to investigate hash algorithms for resizing without a
  524          * full rehash.
  525          */
  526 
  527         for (i = 0; i < oldhash->uh_hashsize; i++)
  528                 while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) {
  529                         slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]);
  530                         SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink);
  531                         hval = UMA_HASH(newhash, slab->us_data);
  532                         SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
  533                             slab, us_hlink);
  534                 }
  535 
  536         return (1);
  537 }
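
/*
 * A minimal userland sketch of the doubling rehash above: hash_expand()
 * relies on power-of-two table sizes, so moving an entry is just re-masking
 * its key with the larger mask.  The ex_* names are hypothetical and stand
 * in for the slab hash and the UMA_HASH() macro.
 */
#include <stdio.h>
#include <stdlib.h>

struct ex_node {
        unsigned long   key;
        struct ex_node *next;
};

struct ex_hash {
        struct ex_node **tab;
        unsigned long    mask;          /* table size - 1 (power of two) */
};

static void
ex_expand(struct ex_hash *old, struct ex_hash *new)
{
        struct ex_node *n;
        unsigned long i, hval;

        for (i = 0; i <= old->mask; i++)
                while ((n = old->tab[i]) != NULL) {
                        old->tab[i] = n->next;          /* unlink from old */
                        hval = n->key & new->mask;      /* re-mask the key */
                        n->next = new->tab[hval];       /* relink into new */
                        new->tab[hval] = n;
                }
}

int
main(void)
{
        struct ex_hash old = { calloc(4, sizeof(struct ex_node *)), 3 };
        struct ex_hash new = { calloc(8, sizeof(struct ex_node *)), 7 };
        unsigned long k;

        for (k = 0; k < 8; k++) {       /* insert keys 0..7, two per chain */
                struct ex_node *n = malloc(sizeof(*n));
                n->key = k;
                n->next = old.tab[k & old.mask];
                old.tab[k & old.mask] = n;
        }
        ex_expand(&old, &new);
        for (k = 0; k <= new.mask; k++) /* now one key per chain */
                printf("bucket %lu: %s\n", k,
                    new.tab[k] ? "occupied" : "empty");
        return (0);
}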
  538 
  539 /*
  540  * Free the hash bucket to the appropriate backing store.
  541  *
  542  * Arguments:
  543  *      hash  The uma_hash structure whose slab table is being
  544  *            freed back to the appropriate backing store
  545  *
  546  * Returns:
  547  *      Nothing
  548  */
  549 static void
  550 hash_free(struct uma_hash *hash)
  551 {
  552         if (hash->uh_slab_hash == NULL)
  553                 return;
  554         if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
  555                 zone_free_item(hashzone,
  556                     hash->uh_slab_hash, NULL, SKIP_NONE, ZFREE_STATFREE);
  557         else
  558                 free(hash->uh_slab_hash, M_UMAHASH);
  559 }
  560 
  561 /*
  562  * Frees all outstanding items in a bucket
  563  *
  564  * Arguments:
  565  *      zone   The zone to free to, must be unlocked.
  566  *      bucket The free/alloc bucket with items; the cpu queue must be locked.
  567  *
  568  * Returns:
  569  *      Nothing
  570  */
  571 
  572 static void
  573 bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
  574 {
  575         void *item;
  576 
  577         if (bucket == NULL)
  578                 return;
  579 
  580         while (bucket->ub_cnt > 0)  {
  581                 bucket->ub_cnt--;
  582                 item = bucket->ub_bucket[bucket->ub_cnt];
  583 #ifdef INVARIANTS
  584                 bucket->ub_bucket[bucket->ub_cnt] = NULL;
  585                 KASSERT(item != NULL,
  586                     ("bucket_drain: botched ptr, item is NULL"));
  587 #endif
  588                 zone_free_item(zone, item, NULL, SKIP_DTOR, 0);
  589         }
  590 }
  591 
  592 /*
  593  * Drains the per cpu caches for a zone.
  594  *
  595  * NOTE: This may only be called while the zone is being torn down, and not
  596  * during normal operation.  This is necessary in order that we do not have
  597  * to migrate CPUs to drain the per-CPU caches.
  598  *
  599  * Arguments:
  600  *      zone     The zone to drain, must be unlocked.
  601  *
  602  * Returns:
  603  *      Nothing
  604  */
  605 static void
  606 cache_drain(uma_zone_t zone)
  607 {
  608         uma_cache_t cache;
  609         int cpu;
  610 
  611         /*
  612          * XXX: It is safe to not lock the per-CPU caches, because we're
  613          * tearing down the zone anyway.  I.e., there will be no further use
  614          * of the caches at this point.
  615          *
  616  * XXX: It would be good to be able to assert that the zone is being
  617          * torn down to prevent improper use of cache_drain().
  618          *
  619          * XXX: We lock the zone before passing into bucket_cache_drain() as
  620          * it is used elsewhere.  Should the tear-down path be made special
  621          * there in some form?
  622          */
  623         CPU_FOREACH(cpu) {
  624                 cache = &zone->uz_cpu[cpu];
  625                 bucket_drain(zone, cache->uc_allocbucket);
  626                 bucket_drain(zone, cache->uc_freebucket);
  627                 if (cache->uc_allocbucket != NULL)
  628                         bucket_free(cache->uc_allocbucket);
  629                 if (cache->uc_freebucket != NULL)
  630                         bucket_free(cache->uc_freebucket);
  631                 cache->uc_allocbucket = cache->uc_freebucket = NULL;
  632         }
  633         ZONE_LOCK(zone);
  634         bucket_cache_drain(zone);
  635         ZONE_UNLOCK(zone);
  636 }
  637 
  638 /*
  639  * Drain the cached buckets from a zone.  Expects a locked zone on entry.
  640  */
  641 static void
  642 bucket_cache_drain(uma_zone_t zone)
  643 {
  644         uma_bucket_t bucket;
  645 
  646         /*
  647          * Drain the bucket queues and free the buckets; we keep just two per
  648          * CPU (alloc/free).
  649          */
  650         while ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) {
  651                 LIST_REMOVE(bucket, ub_link);
  652                 ZONE_UNLOCK(zone);
  653                 bucket_drain(zone, bucket);
  654                 bucket_free(bucket);
  655                 ZONE_LOCK(zone);
  656         }
  657 
  658         /* Now we do the free queue.. */
  659         while ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
  660                 LIST_REMOVE(bucket, ub_link);
  661                 bucket_free(bucket);
  662         }
  663 }
  664 
  665 /*
  666  * Frees pages from a keg back to the system.  This is done on demand from
  667  * the pageout daemon.
  668  *
  669  * Returns nothing.
  670  */
  671 static void
  672 keg_drain(uma_keg_t keg)
  673 {
  674         struct slabhead freeslabs = { 0 };
  675         uma_slab_t slab;
  676         uma_slab_t n;
  677         u_int8_t flags;
  678         u_int8_t *mem;
  679         int i;
  680 
  681         /*
  682          * We don't want to take pages from statically allocated kegs at this
  683          * time.
  684          */
  685         if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
  686                 return;
  687 
  688 #ifdef UMA_DEBUG
  689         printf("%s free items: %u\n", keg->uk_name, keg->uk_free);
  690 #endif
  691         KEG_LOCK(keg);
  692         if (keg->uk_free == 0)
  693                 goto finished;
  694 
  695         slab = LIST_FIRST(&keg->uk_free_slab);
  696         while (slab) {
  697                 n = LIST_NEXT(slab, us_link);
  698 
  699                 /* We have nowhere to free these to. */
  700                 if (slab->us_flags & UMA_SLAB_BOOT) {
  701                         slab = n;
  702                         continue;
  703                 }
  704 
  705                 LIST_REMOVE(slab, us_link);
  706                 keg->uk_pages -= keg->uk_ppera;
  707                 keg->uk_free -= keg->uk_ipers;
  708 
  709                 if (keg->uk_flags & UMA_ZONE_HASH)
  710                         UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data);
  711 
  712                 SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
  713 
  714                 slab = n;
  715         }
  716 finished:
  717         KEG_UNLOCK(keg);
  718 
  719         while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
  720                 SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
  721                 if (keg->uk_fini)
  722                         for (i = 0; i < keg->uk_ipers; i++)
  723                                 keg->uk_fini(
  724                                     slab->us_data + (keg->uk_rsize * i),
  725                                     keg->uk_size);
  726                 flags = slab->us_flags;
  727                 mem = slab->us_data;
  728 
  729                 if (keg->uk_flags & UMA_ZONE_VTOSLAB) {
  730                         vm_object_t obj;
  731 
  732                         if (flags & UMA_SLAB_KMEM)
  733                                 obj = kmem_object;
  734                         else if (flags & UMA_SLAB_KERNEL)
  735                                 obj = kernel_object;
  736                         else
  737                                 obj = NULL;
  738                         for (i = 0; i < keg->uk_ppera; i++)
  739                                 vsetobj((vm_offset_t)mem + (i * PAGE_SIZE),
  740                                     obj);
  741                 }
  742                 if (keg->uk_flags & UMA_ZONE_OFFPAGE)
  743                         zone_free_item(keg->uk_slabzone, slab, NULL,
  744                             SKIP_NONE, ZFREE_STATFREE);
  745 #ifdef UMA_DEBUG
  746                 printf("%s: Returning %d bytes.\n",
  747                     keg->uk_name, UMA_SLAB_SIZE * keg->uk_ppera);
  748 #endif
  749                 keg->uk_freef(mem, UMA_SLAB_SIZE * keg->uk_ppera, flags);
  750         }
  751 }
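
/*
 * A minimal userland sketch of keg_drain()'s two-phase pattern: unlink
 * victims onto a private list while the lock is held, then run the
 * (possibly sleeping) free routine only after dropping it.  The ex_*
 * names are hypothetical; pthread stands in for the keg mutex.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct ex_slab {
        struct ex_slab *next;
};

static pthread_mutex_t ex_lock = PTHREAD_MUTEX_INITIALIZER;
static struct ex_slab *ex_free_slabs;

static void
ex_drain(void)
{
        struct ex_slab *victims = NULL, *s;

        pthread_mutex_lock(&ex_lock);
        while ((s = ex_free_slabs) != NULL) {   /* phase 1: unlink, locked */
                ex_free_slabs = s->next;
                s->next = victims;
                victims = s;
        }
        pthread_mutex_unlock(&ex_lock);

        while ((s = victims) != NULL) {         /* phase 2: free, unlocked */
                victims = s->next;
                free(s);
        }
}

int
main(void)
{
        int i;

        for (i = 0; i < 4; i++) {               /* cache four fake slabs */
                struct ex_slab *s = malloc(sizeof(*s));
                s->next = ex_free_slabs;
                ex_free_slabs = s;
        }
        ex_drain();
        printf("drained all cached slabs\n");
        return (0);
}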
  752 
  753 static void
  754 zone_drain_wait(uma_zone_t zone, int waitok)
  755 {
  756 
  757         /*
  758          * Set draining to interlock with zone_dtor() so we can release our
  759          * locks as we go.  Only dtor() should do a WAITOK call since it
  760          * is the only call that knows the structure will still be available
  761          * when it wakes up.
  762          */
  763         ZONE_LOCK(zone);
  764         while (zone->uz_flags & UMA_ZFLAG_DRAINING) {
  765                 if (waitok == M_NOWAIT)
  766                         goto out;
  767                 mtx_unlock(&uma_mtx);
  768                 msleep(zone, zone->uz_lock, PVM, "zonedrain", 1);
  769                 mtx_lock(&uma_mtx);
  770         }
  771         zone->uz_flags |= UMA_ZFLAG_DRAINING;
  772         bucket_cache_drain(zone);
  773         ZONE_UNLOCK(zone);
  774         /*
  775          * The DRAINING flag protects us from being freed while
  776          * we're running.  Normally the uma_mtx would protect us but we
  777          * must be able to release and acquire the right lock for each keg.
  778          */
  779         zone_foreach_keg(zone, &keg_drain);
  780         ZONE_LOCK(zone);
  781         zone->uz_flags &= ~UMA_ZFLAG_DRAINING;
  782         wakeup(zone);
  783 out:
  784         ZONE_UNLOCK(zone);
  785 }
  786 
  787 void
  788 zone_drain(uma_zone_t zone)
  789 {
  790 
  791         zone_drain_wait(zone, M_NOWAIT);
  792 }
  793 
  794 /*
  795  * Allocate a new slab for a keg.  This does not insert the slab onto a list.
  796  *
  797  * Arguments:
  798  *      wait  Shall we wait?
  799  *
  800  * Returns:
  801  *      The slab that was allocated or NULL if there is no memory and the
  802  *      caller specified M_NOWAIT.
  803  */
  804 static uma_slab_t
  805 keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int wait)
  806 {
  807         uma_slabrefcnt_t slabref;
  808         uma_alloc allocf;
  809         uma_slab_t slab;
  810         u_int8_t *mem;
  811         u_int8_t flags;
  812         int i;
  813 
  814         mtx_assert(&keg->uk_lock, MA_OWNED);
  815         slab = NULL;
  816 
  817 #ifdef UMA_DEBUG
  818         printf("slab_zalloc:  Allocating a new slab for %s\n", keg->uk_name);
  819 #endif
  820         allocf = keg->uk_allocf;
  821         KEG_UNLOCK(keg);
  822 
  823         if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
  824                 slab = zone_alloc_item(keg->uk_slabzone, NULL, wait);
  825                 if (slab == NULL) {
  826                         KEG_LOCK(keg);
  827                         return NULL;
  828                 }
  829         }
  830 
  831         /*
  832          * This reproduces the old vm_zone behavior of zero filling pages the
  833          * first time they are added to a zone.
  834          *
  835          * Malloced items are zeroed in uma_zalloc.
  836          */
  837 
  838         if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
  839                 wait |= M_ZERO;
  840         else
  841                 wait &= ~M_ZERO;
  842 
  843         /* zone is passed for legacy reasons. */
  844         mem = allocf(zone, keg->uk_ppera * UMA_SLAB_SIZE, &flags, wait);
  845         if (mem == NULL) {
  846                 if (keg->uk_flags & UMA_ZONE_OFFPAGE)
  847                         zone_free_item(keg->uk_slabzone, slab, NULL,
  848                             SKIP_NONE, ZFREE_STATFREE);
  849                 KEG_LOCK(keg);
  850                 return (NULL);
  851         }
  852 
  853         /* Point the slab into the allocated memory */
  854         if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
  855                 slab = (uma_slab_t )(mem + keg->uk_pgoff);
  856 
  857         if (keg->uk_flags & UMA_ZONE_VTOSLAB)
  858                 for (i = 0; i < keg->uk_ppera; i++)
  859                         vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);
  860 
  861         slab->us_keg = keg;
  862         slab->us_data = mem;
  863         slab->us_freecount = keg->uk_ipers;
  864         slab->us_firstfree = 0;
  865         slab->us_flags = flags;
  866 
  867         if (keg->uk_flags & UMA_ZONE_REFCNT) {
  868                 slabref = (uma_slabrefcnt_t)slab;
  869                 for (i = 0; i < keg->uk_ipers; i++) {
  870                         slabref->us_freelist[i].us_refcnt = 0;
  871                         slabref->us_freelist[i].us_item = i+1;
  872                 }
  873         } else {
  874                 for (i = 0; i < keg->uk_ipers; i++)
  875                         slab->us_freelist[i].us_item = i+1;
  876         }
  877 
  878         if (keg->uk_init != NULL) {
  879                 for (i = 0; i < keg->uk_ipers; i++)
  880                         if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
  881                             keg->uk_size, wait) != 0)
  882                                 break;
  883                 if (i != keg->uk_ipers) {
  884                         if (keg->uk_fini != NULL) {
  885                                 for (i--; i > -1; i--)
  886                                         keg->uk_fini(slab->us_data +
  887                                             (keg->uk_rsize * i),
  888                                             keg->uk_size);
  889                         }
  890                         if (keg->uk_flags & UMA_ZONE_VTOSLAB) {
  891                                 vm_object_t obj;
  892 
  893                                 if (flags & UMA_SLAB_KMEM)
  894                                         obj = kmem_object;
  895                                 else if (flags & UMA_SLAB_KERNEL)
  896                                         obj = kernel_object;
  897                                 else
  898                                         obj = NULL;
  899                                 for (i = 0; i < keg->uk_ppera; i++)
  900                                         vsetobj((vm_offset_t)mem +
  901                                             (i * PAGE_SIZE), obj);
  902                         }
  903                         if (keg->uk_flags & UMA_ZONE_OFFPAGE)
  904                                 zone_free_item(keg->uk_slabzone, slab,
  905                                     NULL, SKIP_NONE, ZFREE_STATFREE);
  906                         keg->uk_freef(mem, UMA_SLAB_SIZE * keg->uk_ppera,
  907                             flags);
  908                         KEG_LOCK(keg);
  909                         return (NULL);
  910                 }
  911         }
  912         KEG_LOCK(keg);
  913 
  914         if (keg->uk_flags & UMA_ZONE_HASH)
  915                 UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
  916 
  917         keg->uk_pages += keg->uk_ppera;
  918         keg->uk_free += keg->uk_ipers;
  919 
  920         return (slab);
  921 }
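
/*
 * A minimal standalone sketch of the us_freelist setup above: it builds an
 * index-linked free list where entry i holds i + 1 and us_firstfree starts
 * at 0, so item indices pop off in order the way slab_alloc_item() consumes
 * them.  The ex_* names and EX_IPERS are assumptions for illustration.
 */
#include <stdio.h>

#define EX_IPERS 4      /* assumed items per slab */

int
main(void)
{
        unsigned char ex_freelist[EX_IPERS];
        unsigned char ex_firstfree = 0;
        unsigned char freei;
        int i;

        for (i = 0; i < EX_IPERS; i++)  /* as keg_alloc_slab() does */
                ex_freelist[i] = i + 1;

        for (i = 0; i < EX_IPERS; i++) {
                freei = ex_firstfree;               /* take the head index */
                ex_firstfree = ex_freelist[freei];  /* follow its link */
                printf("allocated item index %u\n", freei);
                /* the item address would be us_data + uk_rsize * freei */
        }
        return (0);
}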
  922 
  923 /*
  924  * This function is intended to be used early on in place of page_alloc() so
  925  * that we may use the boot time page cache to satisfy allocations before
  926  * the VM is ready.
  927  */
  928 static void *
  929 startup_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
  930 {
  931         uma_keg_t keg;
  932         uma_slab_t tmps;
  933         int pages, check_pages;
  934 
  935         keg = zone_first_keg(zone);
  936         pages = howmany(bytes, PAGE_SIZE);
  937         check_pages = pages - 1;
  938         KASSERT(pages > 0, ("startup_alloc can't reserve 0 pages\n"));
  939 
  940         /*
  941          * Check our small startup cache to see if it has pages remaining.
  942          */
  943         mtx_lock(&uma_boot_pages_mtx);
  944 
  945         /* First check if we have enough room. */
  946         tmps = LIST_FIRST(&uma_boot_pages);
  947         while (tmps != NULL && check_pages-- > 0)
  948                 tmps = LIST_NEXT(tmps, us_link);
  949         if (tmps != NULL) {
  950                 /*
  951                  * It's ok to lose tmps references.  The last one will
  952                  * have tmps->us_data pointing to the start address of
  953                  * "pages" contiguous pages of memory.
  954                  */
  955                 while (pages-- > 0) {
  956                         tmps = LIST_FIRST(&uma_boot_pages);
  957                         LIST_REMOVE(tmps, us_link);
  958                 }
  959                 mtx_unlock(&uma_boot_pages_mtx);
  960                 *pflag = tmps->us_flags;
  961                 return (tmps->us_data);
  962         }
  963         mtx_unlock(&uma_boot_pages_mtx);
  964         if (booted < UMA_STARTUP2)
  965                 panic("UMA: Increase vm.boot_pages");
  966         /*
  967          * Now that we've booted, reset these users to their real allocator.
  968          */
  969 #ifdef UMA_MD_SMALL_ALLOC
  970         keg->uk_allocf = (keg->uk_ppera > 1) ? page_alloc : uma_small_alloc;
  971 #else
  972         keg->uk_allocf = page_alloc;
  973 #endif
  974         return keg->uk_allocf(zone, bytes, pflag, wait);
  975 }
  976 
  977 /*
  978  * Allocates a number of pages from the system
  979  *
  980  * Arguments:
  981  *      bytes  The number of bytes requested
  982  *      wait  Shall we wait?
  983  *
  984  * Returns:
  985  *      A pointer to the allocated memory or possibly
  986  *      NULL if M_NOWAIT is set.
  987  */
  988 static void *
  989 page_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
  990 {
  991         void *p;        /* Returned page */
  992 
  993         *pflag = UMA_SLAB_KMEM;
  994         p = (void *) kmem_malloc(kmem_map, bytes, wait);
  995 
  996         return (p);
  997 }
  998 
  999 /*
 1000  * Allocates a number of pages from within an object
 1001  *
 1002  * Arguments:
 1003  *      bytes  The number of bytes requested
 1004  *      wait   Shall we wait?
 1005  *
 1006  * Returns:
 1007  *      A pointer to the allocated memory or possibly
 1008  *      NULL if M_NOWAIT is set.
 1009  */
 1010 static void *
 1011 obj_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
 1012 {
 1013         vm_object_t object;
 1014         vm_offset_t retkva, zkva;
 1015         vm_page_t p;
 1016         int pages, startpages;
 1017         uma_keg_t keg;
 1018 
 1019         keg = zone_first_keg(zone);
 1020         object = keg->uk_obj;
 1021         retkva = 0;
 1022 
 1023         /*
 1024          * This looks a little weird since we're getting one page at a time.
 1025          */
 1026         VM_OBJECT_LOCK(object);
 1027         p = TAILQ_LAST(&object->memq, pglist);
 1028         pages = p != NULL ? p->pindex + 1 : 0;
 1029         startpages = pages;
 1030         zkva = keg->uk_kva + pages * PAGE_SIZE;
 1031         for (; bytes > 0; bytes -= PAGE_SIZE) {
 1032                 p = vm_page_alloc(object, pages,
 1033                     VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED);
 1034                 if (p == NULL) {
 1035                         if (pages != startpages)
 1036                                 pmap_qremove(retkva, pages - startpages);
 1037                         while (pages != startpages) {
 1038                                 pages--;
 1039                                 p = TAILQ_LAST(&object->memq, pglist);
 1040                                 vm_page_unwire(p, 0);
 1041                                 vm_page_free(p);
 1042                         }
 1043                         retkva = 0;
 1044                         goto done;
 1045                 }
 1046                 pmap_qenter(zkva, &p, 1);
 1047                 if (retkva == 0)
 1048                         retkva = zkva;
 1049                 zkva += PAGE_SIZE;
 1050                 pages += 1;
 1051         }
 1052 done:
 1053         VM_OBJECT_UNLOCK(object);
 1054         *flags = UMA_SLAB_PRIV;
 1055 
 1056         return ((void *)retkva);
 1057 }
 1058 
 1059 /*
 1060  * Frees a number of pages to the system
 1061  *
 1062  * Arguments:
 1063  *      mem   A pointer to the memory to be freed
 1064  *      size  The size of the memory being freed
 1065  *      flags The original p->us_flags field
 1066  *
 1067  * Returns:
 1068  *      Nothing
 1069  */
 1070 static void
 1071 page_free(void *mem, int size, u_int8_t flags)
 1072 {
 1073         vm_map_t map;
 1074 
 1075         if (flags & UMA_SLAB_KMEM)
 1076                 map = kmem_map;
 1077         else if (flags & UMA_SLAB_KERNEL)
 1078                 map = kernel_map;
 1079         else
 1080                 panic("UMA: page_free used with invalid flags %d", flags);
 1081 
 1082         kmem_free(map, (vm_offset_t)mem, size);
 1083 }
 1084 
 1085 /*
 1086  * Zero fill initializer
 1087  *
 1088  * Arguments/Returns follow uma_init specifications
 1089  */
 1090 static int
 1091 zero_init(void *mem, int size, int flags)
 1092 {
 1093         bzero(mem, size);
 1094         return (0);
 1095 }
 1096 
 1097 /*
 1098  * Finish creating a small uma keg.  This calculates ipers and the keg size.
 1099  *
 1100  * Arguments
 1101  *      keg  The keg we should initialize
 1102  *
 1103  * Returns
 1104  *      Nothing
 1105  */
 1106 static void
 1107 keg_small_init(uma_keg_t keg)
 1108 {
 1109         u_int rsize;
 1110         u_int memused;
 1111         u_int wastedspace;
 1112         u_int shsize;
 1113 
 1114         KASSERT(keg != NULL, ("Keg is null in keg_small_init"));
 1115         rsize = keg->uk_size;
 1116 
 1117         if (rsize < UMA_SMALLEST_UNIT)
 1118                 rsize = UMA_SMALLEST_UNIT;
 1119         if (rsize & keg->uk_align)
 1120                 rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);
 1121 
 1122         keg->uk_rsize = rsize;
 1123         keg->uk_ppera = 1;
 1124 
 1125         if (keg->uk_flags & UMA_ZONE_REFCNT) {
 1126                 rsize += UMA_FRITMREF_SZ;       /* linkage & refcnt */
 1127                 shsize = sizeof(struct uma_slab_refcnt);
 1128         } else {
 1129                 rsize += UMA_FRITM_SZ;  /* Account for linkage */
 1130                 shsize = sizeof(struct uma_slab);
 1131         }
 1132 
 1133         keg->uk_ipers = (UMA_SLAB_SIZE - shsize) / rsize;
 1134         KASSERT(keg->uk_ipers != 0, ("keg_small_init: ipers is 0"));
 1135         memused = keg->uk_ipers * rsize + shsize;
 1136         wastedspace = UMA_SLAB_SIZE - memused;
 1137 
 1138         /*
 1139          * We can't do OFFPAGE if we're internal or if we've been
 1140          * asked not to go to the VM for buckets.  Doing so could
 1141          * send us to the VM (kmem_map) for slabs, which must not
 1142          * happen for a UMA_ZFLAG_CACHEONLY keg (that flag is set
 1143          * as a result of UMA_ZONE_VM, which forbids it).
 1144          */
 1145         if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
 1146             (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
 1147                 return;
 1148 
 1149         if ((wastedspace >= UMA_MAX_WASTE) &&
 1150             (keg->uk_ipers < (UMA_SLAB_SIZE / keg->uk_rsize))) {
 1151                 keg->uk_ipers = UMA_SLAB_SIZE / keg->uk_rsize;
 1152                 KASSERT(keg->uk_ipers <= 255,
 1153                     ("keg_small_init: keg->uk_ipers too high!"));
 1154 #ifdef UMA_DEBUG
 1155                 printf("UMA decided we need offpage slab headers for "
 1156                     "keg: %s, calculated wastedspace = %d, "
 1157                     "maximum wasted space allowed = %d, "
 1158                     "calculated ipers = %d, "
 1159                     "new wasted space = %d\n", keg->uk_name, wastedspace,
 1160                     UMA_MAX_WASTE, keg->uk_ipers,
 1161                     UMA_SLAB_SIZE - keg->uk_ipers * keg->uk_rsize);
 1162 #endif
 1163                 keg->uk_flags |= UMA_ZONE_OFFPAGE;
 1164                 if ((keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
 1165                         keg->uk_flags |= UMA_ZONE_HASH;
 1166         }
 1167 }
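
/*
 * A standalone sketch of the ipers/wasted-space arithmetic above.  The EX_*
 * constants are assumptions standing in for UMA_SLAB_SIZE, the slab header
 * size, UMA_FRITM_SZ and UMA_MAX_WASTE from uma_int.h; only the shape of
 * the computation matches keg_small_init().
 */
#include <stdio.h>

#define EX_SLAB_SIZE 4096U      /* assumed UMA_SLAB_SIZE */
#define EX_SHSIZE      32U      /* assumed slab header size */
#define EX_FRITM_SZ     1U      /* assumed per-item linkage size */
#define EX_MAX_WASTE  256U      /* assumed UMA_MAX_WASTE */
#define EX_ALIGN        7U      /* pointer-alignment mask, 64-bit */

int
main(void)
{
        unsigned size = 100, rsize, ipers, memused, wastedspace;

        rsize = size;
        if (rsize & EX_ALIGN)           /* round up to the alignment */
                rsize = (rsize & ~EX_ALIGN) + (EX_ALIGN + 1);
        rsize += EX_FRITM_SZ;           /* account for linkage */
        ipers = (EX_SLAB_SIZE - EX_SHSIZE) / rsize;
        memused = ipers * rsize + EX_SHSIZE;
        wastedspace = EX_SLAB_SIZE - memused;

        printf("%u-byte items: %u per slab, %u bytes wasted\n",
            size, ipers, wastedspace);
        if (wastedspace >= EX_MAX_WASTE)
                printf("-> would go OFFPAGE for %u items per slab\n",
                    EX_SLAB_SIZE / (rsize - EX_FRITM_SZ));
        return (0);
}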
 1168 
 1169 /*
 1170  * Finish creating a large (> UMA_SLAB_SIZE) uma keg.  Just give in and do
 1171  * OFFPAGE for now.  When I can allow for more dynamic slab sizes this will be
 1172  * more complicated.
 1173  *
 1174  * Arguments
 1175  *      keg  The keg we should initialize
 1176  *
 1177  * Returns
 1178  *      Nothing
 1179  */
 1180 static void
 1181 keg_large_init(uma_keg_t keg)
 1182 {
 1183         int pages;
 1184 
 1185         KASSERT(keg != NULL, ("Keg is null in keg_large_init"));
 1186         KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0,
 1187             ("keg_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY keg"));
 1188 
 1189         pages = keg->uk_size / UMA_SLAB_SIZE;
 1190 
 1191         /* Account for remainder */
 1192         if ((pages * UMA_SLAB_SIZE) < keg->uk_size)
 1193                 pages++;
 1194 
 1195         keg->uk_ppera = pages;
 1196         keg->uk_ipers = 1;
 1197         keg->uk_rsize = keg->uk_size;
 1198 
 1199         /* We can't do OFFPAGE if we're internal; bail out here. */
 1200         if (keg->uk_flags & UMA_ZFLAG_INTERNAL)
 1201                 return;
 1202 
 1203         keg->uk_flags |= UMA_ZONE_OFFPAGE;
 1204         if ((keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
 1205                 keg->uk_flags |= UMA_ZONE_HASH;
 1206 }
 1207 
 1208 static void
 1209 keg_cachespread_init(uma_keg_t keg)
 1210 {
 1211         int alignsize;
 1212         int trailer;
 1213         int pages;
 1214         int rsize;
 1215 
 1216         alignsize = keg->uk_align + 1;
 1217         rsize = keg->uk_size;
 1218         /*
 1219          * We want one item to start on every align boundary in a page.  To
 1220          * do this we will span pages.  We will also extend the item by the
 1221          * size of align if it is an even multiple of align; otherwise every
 1222          * item would fall on the same in-page boundary.
 1223          */
 1224         if (rsize & keg->uk_align)
 1225                 rsize = (rsize & ~keg->uk_align) + alignsize;
 1226         if ((rsize & alignsize) == 0)
 1227                 rsize += alignsize;
 1228         trailer = rsize - keg->uk_size;
 1229         pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE;
 1230         pages = MIN(pages, (128 * 1024) / PAGE_SIZE);
 1231         keg->uk_rsize = rsize;
 1232         keg->uk_ppera = pages;
 1233         keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
 1234         keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
 1235         KASSERT(keg->uk_ipers <= uma_max_ipers,
 1236             ("keg_small_init: keg->uk_ipers too high(%d) increase max_ipers",
 1237             keg->uk_ipers));
 1238 }
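
/*
 * A standalone sketch of the cache-spread sizing above, worked for one
 * case under assumed sizes (64-byte cache line, 4 KB page).  A 256-byte
 * item is an even multiple of the alignment, so it is padded to 320 bytes;
 * since gcd(5, 64) == 1, successive items then begin at all 64 distinct
 * line offsets within a page instead of repeating the same ones.
 */
#include <stdio.h>

int
main(void)
{
        unsigned align = 64, pgsz = 4096;  /* assumed line/page sizes */
        unsigned amask = align - 1;
        unsigned size = 256;               /* example item size */
        unsigned rsize, trailer, pages, ipers;

        rsize = size;
        if (rsize & amask)                 /* round up to the alignment */
                rsize = (rsize & ~amask) + align;
        if ((rsize & align) == 0)          /* even multiple: stagger it */
                rsize += align;
        trailer = rsize - size;
        pages = (rsize * (pgsz / align)) / pgsz;
        ipers = ((pages * pgsz) + trailer) / rsize;

        printf("size %u -> rsize %u, %u pages, %u items per slab\n",
            size, rsize, pages, ipers);
        return (0);
}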
 1239 
 1240 /*
 1241  * Keg header ctor.  This initializes all fields, locks, etc.  And inserts
 1242  * the keg onto the global keg list.
 1243  *
 1244  * Arguments/Returns follow uma_ctor specifications
 1245  *      udata  Actually uma_kctor_args
 1246  */
 1247 static int
 1248 keg_ctor(void *mem, int size, void *udata, int flags)
 1249 {
 1250         struct uma_kctor_args *arg = udata;
 1251         uma_keg_t keg = mem;
 1252         uma_zone_t zone;
 1253 
 1254         bzero(keg, size);
 1255         keg->uk_size = arg->size;
 1256         keg->uk_init = arg->uminit;
 1257         keg->uk_fini = arg->fini;
 1258         keg->uk_align = arg->align;
 1259         keg->uk_free = 0;
 1260         keg->uk_pages = 0;
 1261         keg->uk_flags = arg->flags;
 1262         keg->uk_allocf = page_alloc;
 1263         keg->uk_freef = page_free;
 1264         keg->uk_recurse = 0;
 1265         keg->uk_slabzone = NULL;
 1266 
 1267         /*
 1268          * The master zone is passed to us at keg-creation time.
 1269          */
 1270         zone = arg->zone;
 1271         keg->uk_name = zone->uz_name;
 1272 
 1273         if (arg->flags & UMA_ZONE_VM)
 1274                 keg->uk_flags |= UMA_ZFLAG_CACHEONLY;
 1275 
 1276         if (arg->flags & UMA_ZONE_ZINIT)
 1277                 keg->uk_init = zero_init;
 1278 
 1279         if (arg->flags & UMA_ZONE_REFCNT || arg->flags & UMA_ZONE_MALLOC)
 1280                 keg->uk_flags |= UMA_ZONE_VTOSLAB;
 1281 
 1282         /*
 1283          * The +UMA_FRITM_SZ added to uk_size is to account for the
 1284          * linkage that is added to the size in keg_small_init().  If
 1285          * we don't account for this here then we may end up in
 1286          * keg_small_init() with a calculated 'ipers' of 0.
 1287          */
 1288         if (keg->uk_flags & UMA_ZONE_REFCNT) {
 1289                 if (keg->uk_flags & UMA_ZONE_CACHESPREAD)
 1290                         keg_cachespread_init(keg);
 1291                 else if ((keg->uk_size+UMA_FRITMREF_SZ) >
 1292                     (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt)))
 1293                         keg_large_init(keg);
 1294                 else
 1295                         keg_small_init(keg);
 1296         } else {
 1297                 if (keg->uk_flags & UMA_ZONE_CACHESPREAD)
 1298                         keg_cachespread_init(keg);
 1299                 else if ((keg->uk_size+UMA_FRITM_SZ) >
 1300                     (UMA_SLAB_SIZE - sizeof(struct uma_slab)))
 1301                         keg_large_init(keg);
 1302                 else
 1303                         keg_small_init(keg);
 1304         }
 1305 
 1306         if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
 1307                 if (keg->uk_flags & UMA_ZONE_REFCNT)
 1308                         keg->uk_slabzone = slabrefzone;
 1309                 else
 1310                         keg->uk_slabzone = slabzone;
 1311         }
 1312 
 1313         /*
 1314          * If we haven't booted yet we need allocations to go through the
 1315          * startup cache until the vm is ready.
 1316          */
 1317         if (keg->uk_ppera == 1) {
 1318 #ifdef UMA_MD_SMALL_ALLOC
 1319                 keg->uk_allocf = uma_small_alloc;
 1320                 keg->uk_freef = uma_small_free;
 1321 
 1322                 if (booted < UMA_STARTUP)
 1323                         keg->uk_allocf = startup_alloc;
 1324 #else
 1325                 if (booted < UMA_STARTUP2)
 1326                         keg->uk_allocf = startup_alloc;
 1327 #endif
 1328         } else if (booted < UMA_STARTUP2 &&
 1329             (keg->uk_flags & UMA_ZFLAG_INTERNAL))
 1330                 keg->uk_allocf = startup_alloc;
 1331 
 1332         /*
 1333          * Initialize keg's lock (shared among zones).
 1334          */
 1335         if (arg->flags & UMA_ZONE_MTXCLASS)
 1336                 KEG_LOCK_INIT(keg, 1);
 1337         else
 1338                 KEG_LOCK_INIT(keg, 0);
 1339 
 1340         /*
 1341          * If we're putting the slab header in the actual page we need to
 1342          * figure out where in each page it goes.  This calculates a right
 1343          * justified offset into the memory on an ALIGN_PTR boundary.
 1344          */
 1345         if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
 1346                 u_int totsize;
 1347 
 1348                 /* Size of the slab struct and free list */
 1349                 if (keg->uk_flags & UMA_ZONE_REFCNT)
 1350                         totsize = sizeof(struct uma_slab_refcnt) +
 1351                             keg->uk_ipers * UMA_FRITMREF_SZ;
 1352                 else
 1353                         totsize = sizeof(struct uma_slab) +
 1354                             keg->uk_ipers * UMA_FRITM_SZ;
 1355 
 1356                 if (totsize & UMA_ALIGN_PTR)
 1357                         totsize = (totsize & ~UMA_ALIGN_PTR) +
 1358                             (UMA_ALIGN_PTR + 1);
 1359                 keg->uk_pgoff = (UMA_SLAB_SIZE * keg->uk_ppera) - totsize;
 1360 
 1361                 if (keg->uk_flags & UMA_ZONE_REFCNT)
 1362                         totsize = keg->uk_pgoff + sizeof(struct uma_slab_refcnt)
 1363                             + keg->uk_ipers * UMA_FRITMREF_SZ;
 1364                 else
 1365                         totsize = keg->uk_pgoff + sizeof(struct uma_slab)
 1366                             + keg->uk_ipers * UMA_FRITM_SZ;
 1367 
 1368                 /*
 1369                  * The only way the following is possible is if, with our
 1370                  * UMA_ALIGN_PTR adjustments, we are now bigger than
 1371                  * UMA_SLAB_SIZE.  I haven't checked whether this is
 1372                  * mathematically possible for all cases, so we make
 1373                  * sure here anyway.
 1374                  */
 1375                 if (totsize > UMA_SLAB_SIZE * keg->uk_ppera) {
 1376                         printf("zone %s ipers %d rsize %d size %d\n",
 1377                             zone->uz_name, keg->uk_ipers, keg->uk_rsize,
 1378                             keg->uk_size);
 1379                         panic("UMA slab won't fit.");
 1380                 }
 1381         }
 1382 
 1383         if (keg->uk_flags & UMA_ZONE_HASH)
 1384                 hash_alloc(&keg->uk_hash);
 1385 
 1386 #ifdef UMA_DEBUG
 1387         printf("UMA: %s(%p) size %d(%d) flags %d ipers %d ppera %d out %d free %d\n",
 1388             zone->uz_name, zone, keg->uk_size, keg->uk_rsize, keg->uk_flags,
 1389             keg->uk_ipers, keg->uk_ppera,
 1390             (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free);
 1391 #endif
 1392 
 1393         LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
 1394 
 1395         mtx_lock(&uma_mtx);
 1396         LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
 1397         mtx_unlock(&uma_mtx);
 1398         return (0);
 1399 }
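
/*
 * A standalone sketch of the "right justified" uk_pgoff computation above:
 * the header-plus-free-list size is rounded up to a pointer boundary and
 * the header is placed that many bytes before the end of the slab.  All
 * sizes here are assumptions (EX_* names hypothetical, uk_ppera == 1).
 */
#include <stdio.h>

#define EX_SLAB_SIZE 4096U      /* assumed UMA_SLAB_SIZE */
#define EX_ALIGN_PTR    7U      /* assumed UMA_ALIGN_PTR mask, 64-bit */

int
main(void)
{
        unsigned ipers = 38, fritm = 1, shdr = 32;  /* assumed sizes */
        unsigned totsize, pgoff;

        totsize = shdr + ipers * fritm;     /* header + free list */
        if (totsize & EX_ALIGN_PTR)         /* round to pointer size */
                totsize = (totsize & ~EX_ALIGN_PTR) + (EX_ALIGN_PTR + 1);
        pgoff = EX_SLAB_SIZE - totsize;     /* header offset in slab */

        printf("slab header lives %u bytes into the slab "
            "(%u-byte header area)\n", pgoff, totsize);
        return (0);
}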
 1400 
 1401 /*
 1402  * Zone header ctor.  This initializes all fields, locks, etc.
 1403  *
 1404  * Arguments/Returns follow uma_ctor specifications
 1405  *      udata  Actually uma_zctor_args
 1406  */
 1407 static int
 1408 zone_ctor(void *mem, int size, void *udata, int flags)
 1409 {
 1410         struct uma_zctor_args *arg = udata;
 1411         uma_zone_t zone = mem;
 1412         uma_zone_t z;
 1413         uma_keg_t keg;
 1414 
 1415         bzero(zone, size);
 1416         zone->uz_name = arg->name;
 1417         zone->uz_ctor = arg->ctor;
 1418         zone->uz_dtor = arg->dtor;
 1419         zone->uz_slab = zone_fetch_slab;
 1420         zone->uz_init = NULL;
 1421         zone->uz_fini = NULL;
 1422         zone->uz_allocs = 0;
 1423         zone->uz_frees = 0;
 1424         zone->uz_fails = 0;
 1425         zone->uz_sleeps = 0;
 1426         zone->uz_fills = zone->uz_count = 0;
 1427         zone->uz_flags = 0;
 1428         keg = arg->keg;
 1429 
 1430         if (arg->flags & UMA_ZONE_SECONDARY) {
 1431                 KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
 1432                 zone->uz_init = arg->uminit;
 1433                 zone->uz_fini = arg->fini;
 1434                 zone->uz_lock = &keg->uk_lock;
 1435                 zone->uz_flags |= UMA_ZONE_SECONDARY;
 1436                 mtx_lock(&uma_mtx);
 1437                 ZONE_LOCK(zone);
 1438                 LIST_FOREACH(z, &keg->uk_zones, uz_link) {
 1439                         if (LIST_NEXT(z, uz_link) == NULL) {
 1440                                 LIST_INSERT_AFTER(z, zone, uz_link);
 1441                                 break;
 1442                         }
 1443                 }
 1444                 ZONE_UNLOCK(zone);
 1445                 mtx_unlock(&uma_mtx);
 1446         } else if (keg == NULL) {
 1447                 if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
 1448                     arg->align, arg->flags)) == NULL)
 1449                         return (ENOMEM);
 1450         } else {
 1451                 struct uma_kctor_args karg;
 1452                 int error;
 1453 
 1454                 /* We should only be here from uma_startup() */
 1455                 karg.size = arg->size;
 1456                 karg.uminit = arg->uminit;
 1457                 karg.fini = arg->fini;
 1458                 karg.align = arg->align;
 1459                 karg.flags = arg->flags;
 1460                 karg.zone = zone;
 1461                 error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
 1462                     flags);
 1463                 if (error)
 1464                         return (error);
 1465         }
 1466         /*
 1467          * Link in the first keg.
 1468          */
 1469         zone->uz_klink.kl_keg = keg;
 1470         LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link);
 1471         zone->uz_lock = &keg->uk_lock;
 1472         zone->uz_size = keg->uk_size;
 1473         zone->uz_flags |= (keg->uk_flags &
 1474             (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));
 1475 
 1476         /*
 1477          * Some internal zones don't have room allocated for the per-CPU
 1478          * caches.  If we're internal, bail out here.
 1479          */
 1480         if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
 1481                 KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
 1482                     ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
 1483                 return (0);
 1484         }
 1485 
 1486         if (keg->uk_flags & UMA_ZONE_MAXBUCKET)
 1487                 zone->uz_count = BUCKET_MAX;
 1488         else if (keg->uk_ipers <= BUCKET_MAX)
 1489                 zone->uz_count = keg->uk_ipers;
 1490         else
 1491                 zone->uz_count = BUCKET_MAX;
 1492         return (0);
 1493 }
 1494 
 1495 /*
 1496  * Keg header dtor.  This frees all data, destroys locks, frees the hash
 1497  * table and removes the keg from the global list.
 1498  *
 1499  * Arguments/Returns follow uma_dtor specifications
 1500  *      udata  unused
 1501  */
 1502 static void
 1503 keg_dtor(void *arg, int size, void *udata)
 1504 {
 1505         uma_keg_t keg;
 1506 
 1507         keg = (uma_keg_t)arg;
 1508         KEG_LOCK(keg);
 1509         if (keg->uk_free != 0) {
 1510                 printf("Freed UMA keg was not empty (%d items). "
 1511                     " Lost %d pages of memory.\n",
 1512                     keg->uk_free, keg->uk_pages);
 1513         }
 1514         KEG_UNLOCK(keg);
 1515 
 1516         hash_free(&keg->uk_hash);
 1517 
 1518         KEG_LOCK_FINI(keg);
 1519 }
 1520 
 1521 /*
 1522  * Zone header dtor.
 1523  *
 1524  * Arguments/Returns follow uma_dtor specifications
 1525  *      udata  unused
 1526  */
 1527 static void
 1528 zone_dtor(void *arg, int size, void *udata)
 1529 {
 1530         uma_klink_t klink;
 1531         uma_zone_t zone;
 1532         uma_keg_t keg;
 1533 
 1534         zone = (uma_zone_t)arg;
 1535         keg = zone_first_keg(zone);
 1536 
 1537         if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
 1538                 cache_drain(zone);
 1539 
 1540         mtx_lock(&uma_mtx);
 1541         LIST_REMOVE(zone, uz_link);
 1542         mtx_unlock(&uma_mtx);
 1543         /*
 1544          * XXX there are races here where the zone can be
 1545          * drained but the zone lock released and the zone
 1546          * refilled before we remove it... we don't care
 1547          * for now.
 1548          */
 1549         zone_drain_wait(zone, M_WAITOK);
 1550         /*
 1551          * Unlink all of our kegs.
 1552          */
 1553         while ((klink = LIST_FIRST(&zone->uz_kegs)) != NULL) {
 1554                 klink->kl_keg = NULL;
 1555                 LIST_REMOVE(klink, kl_link);
 1556                 if (klink == &zone->uz_klink)
 1557                         continue;
 1558                 free(klink, M_TEMP);
 1559         }
 1560         /*
 1561          * We only destroy kegs from non-secondary zones.
 1562          */
 1563         if ((zone->uz_flags & UMA_ZONE_SECONDARY) == 0)  {
 1564                 mtx_lock(&uma_mtx);
 1565                 LIST_REMOVE(keg, uk_link);
 1566                 mtx_unlock(&uma_mtx);
 1567                 zone_free_item(kegs, keg, NULL, SKIP_NONE,
 1568                     ZFREE_STATFREE);
 1569         }
 1570 }
 1571 
 1572 /*
 1573  * Traverses every zone in the system and calls a callback
 1574  *
 1575  * Arguments:
 1576  *      zfunc  A pointer to a function which accepts a zone
 1577  *              as an argument.
 1578  *
 1579  * Returns:
 1580  *      Nothing
 1581  */
 1582 static void
 1583 zone_foreach(void (*zfunc)(uma_zone_t))
 1584 {
 1585         uma_keg_t keg;
 1586         uma_zone_t zone;
 1587 
 1588         mtx_lock(&uma_mtx);
 1589         LIST_FOREACH(keg, &uma_kegs, uk_link) {
 1590                 LIST_FOREACH(zone, &keg->uk_zones, uz_link)
 1591                         zfunc(zone);
 1592         }
 1593         mtx_unlock(&uma_mtx);
 1594 }
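      /*
       * Usage sketch: uma_reclaim() below drives its drain pass through
       * this helper; any void (*)(uma_zone_t) callback will do, e.g.:
       *
       *	zone_foreach(zone_drain);
       */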
 1595 
 1596 /* Public functions */
 1597 /* See uma.h */
 1598 void
 1599 uma_startup(void *bootmem, int boot_pages)
 1600 {
 1601         struct uma_zctor_args args;
 1602         uma_slab_t slab;
 1603         u_int slabsize;
 1604         u_int objsize, totsize, wsize;
 1605         int i;
 1606 
 1607 #ifdef UMA_DEBUG
 1608         printf("Creating uma keg headers zone and keg.\n");
 1609 #endif
 1610         mtx_init(&uma_mtx, "UMA lock", NULL, MTX_DEF);
 1611 
 1612         /*
 1613          * Figure out the maximum number of items-per-slab we'll have if
 1614          * we're using the OFFPAGE slab header to track free items, given
 1615          * all possible object sizes and the maximum desired wastage
 1616          * (UMA_MAX_WASTE).
 1617          *
 1618          * We iterate until we find an object size for
 1619          * which the calculated wastage in keg_small_init() will be
 1620          * enough to warrant OFFPAGE.  Since wasted space versus objsize
 1621          * is an overall increasing see-saw function, we find the smallest
 1622          * objsize such that the wastage is always acceptable for objects
 1623          * with that objsize or smaller.  Since a smaller objsize always
 1624          * generates a larger possible uma_max_ipers, we use this computed
 1625          * objsize to calculate the largest ipers possible.  Since the
 1626          * ipers calculated for OFFPAGE slab headers is always larger than
 1627          * the ipers initially calculated in keg_small_init(), we use
 1628          * the former's equation (UMA_SLAB_SIZE / keg->uk_rsize) to
 1629          * obtain the maximum ipers possible for offpage slab headers.
 1630          *
 1631          * It should be noted that ipers versus objsize is an inversely
 1632          * proportional function which drops off rather quickly, so as
 1633          * long as our UMA_MAX_WASTE is such that the objsize we calculate
 1634          * falls into the portion of the inverse relation AFTER the steep
 1635          * falloff, then uma_max_ipers shouldn't be too high (~10 on i386).
 1636          *
 1637          * Note that we have 8 bits (1 byte) to use as a freelist index
 1638          * inside the actual slab header itself and this is enough to
 1639          * accommodate us.  In the worst case, a UMA_SMALLEST_UNIT sized
 1640          * object with offpage slab header would have ipers =
 1641          * UMA_SLAB_SIZE / UMA_SMALLEST_UNIT (currently = 256), which is
 1642          * 1 greater than what our byte-integer freelist index can
 1643          * accommodate, but we know that this situation never occurs as
 1644          * for UMA_SMALLEST_UNIT-sized objects, we will never calculate
 1645          * that we need to go to offpage slab headers.  Or, if we do,
 1646          * then we trap that condition below and panic in the INVARIANTS case.
 1647          */
 1648         wsize = UMA_SLAB_SIZE - sizeof(struct uma_slab) - UMA_MAX_WASTE;
 1649         totsize = wsize;
 1650         objsize = UMA_SMALLEST_UNIT;
 1651         while (totsize >= wsize) {
 1652                 totsize = (UMA_SLAB_SIZE - sizeof(struct uma_slab)) /
 1653                     (objsize + UMA_FRITM_SZ);
 1654                 totsize *= (UMA_FRITM_SZ + objsize);
 1655                 objsize++;
 1656         }
 1657         if (objsize > UMA_SMALLEST_UNIT)
 1658                 objsize--;
 1659         uma_max_ipers = MAX(UMA_SLAB_SIZE / objsize, 64);
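              /*
               * Worked illustration with assumed numbers (a 4096-byte slab,
               * a 64-byte struct uma_slab, UMA_MAX_WASTE of 409): wsize =
               * 4096 - 64 - 409 = 3623.  The loop exits at the first objsize
               * whose packed total, ((4032 / (objsize + UMA_FRITM_SZ)) *
               * (objsize + UMA_FRITM_SZ)), drops below 3623, i.e. the first
               * size wasting more than 409 bytes; the decrement then backs
               * off to the last size still within the waste budget.
               */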
 1660 
 1661         wsize = UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt) - UMA_MAX_WASTE;
 1662         totsize = wsize;
 1663         objsize = UMA_SMALLEST_UNIT;
 1664         while (totsize >= wsize) {
 1665                 totsize = (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt)) /
 1666                     (objsize + UMA_FRITMREF_SZ);
 1667                 totsize *= (UMA_FRITMREF_SZ + objsize);
 1668                 objsize++;
 1669         }
 1670         if (objsize > UMA_SMALLEST_UNIT)
 1671                 objsize--;
 1672         uma_max_ipers_ref = MAX(UMA_SLAB_SIZE / objsize, 64);
 1673 
 1674         KASSERT((uma_max_ipers_ref <= 255) && (uma_max_ipers <= 255),
 1675             ("uma_startup: calculated uma_max_ipers values too large!"));
 1676 
 1677 #ifdef UMA_DEBUG
 1678         printf("Calculated uma_max_ipers (for OFFPAGE) is %d\n", uma_max_ipers);
 1679         printf("Calculated uma_max_ipers_slab (for OFFPAGE) is %d\n",
 1680             uma_max_ipers_ref);
 1681 #endif
 1682 
 1683         /* "manually" create the initial zone */
 1684         args.name = "UMA Kegs";
 1685         args.size = sizeof(struct uma_keg);
 1686         args.ctor = keg_ctor;
 1687         args.dtor = keg_dtor;
 1688         args.uminit = zero_init;
 1689         args.fini = NULL;
 1690         args.keg = &masterkeg;
 1691         args.align = 32 - 1;
 1692         args.flags = UMA_ZFLAG_INTERNAL;
 1693         /* The initial zone has no per-CPU queues, so it's smaller */
 1694         zone_ctor(kegs, sizeof(struct uma_zone), &args, M_WAITOK);
 1695 
 1696 #ifdef UMA_DEBUG
 1697         printf("Filling boot free list.\n");
 1698 #endif
 1699         for (i = 0; i < boot_pages; i++) {
 1700                 slab = (uma_slab_t)((u_int8_t *)bootmem + (i * UMA_SLAB_SIZE));
 1701                 slab->us_data = (u_int8_t *)slab;
 1702                 slab->us_flags = UMA_SLAB_BOOT;
 1703                 LIST_INSERT_HEAD(&uma_boot_pages, slab, us_link);
 1704         }
 1705         mtx_init(&uma_boot_pages_mtx, "UMA boot pages", NULL, MTX_DEF);
 1706 
 1707 #ifdef UMA_DEBUG
 1708         printf("Creating uma zone headers zone and keg.\n");
 1709 #endif
 1710         args.name = "UMA Zones";
 1711         args.size = sizeof(struct uma_zone) +
 1712             (sizeof(struct uma_cache) * (mp_maxid + 1));
 1713         args.ctor = zone_ctor;
 1714         args.dtor = zone_dtor;
 1715         args.uminit = zero_init;
 1716         args.fini = NULL;
 1717         args.keg = NULL;
 1718         args.align = 32 - 1;
 1719         args.flags = UMA_ZFLAG_INTERNAL;
 1720         /* The initial zone has no per-CPU queues, so it's smaller */
 1721         zone_ctor(zones, sizeof(struct uma_zone), &args, M_WAITOK);
 1722 
 1723 #ifdef UMA_DEBUG
 1724         printf("Initializing pcpu cache locks.\n");
 1725 #endif
 1726 #ifdef UMA_DEBUG
 1727         printf("Creating slab and hash zones.\n");
 1728 #endif
 1729 
 1730         /*
 1731          * This is the max number of free list items we'll have with
 1732          * offpage slabs.
 1733          */
 1734         slabsize = uma_max_ipers * UMA_FRITM_SZ;
 1735         slabsize += sizeof(struct uma_slab);
 1736 
 1737         /* Now make a zone for slab headers */
 1738         slabzone = uma_zcreate("UMA Slabs",
 1739                                 slabsize,
 1740                                 NULL, NULL, NULL, NULL,
 1741                                 UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
 1742 
 1743         /*
 1744          * We also create a zone for the bigger slabs with reference
 1745          * counts in them, to accommodate UMA_ZONE_REFCNT zones.
 1746          */
 1747         slabsize = uma_max_ipers_ref * UMA_FRITMREF_SZ;
 1748         slabsize += sizeof(struct uma_slab_refcnt);
 1749         slabrefzone = uma_zcreate("UMA RCntSlabs",
 1750                                   slabsize,
 1751                                   NULL, NULL, NULL, NULL,
 1752                                   UMA_ALIGN_PTR,
 1753                                   UMA_ZFLAG_INTERNAL);
 1754 
 1755         hashzone = uma_zcreate("UMA Hash",
 1756             sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
 1757             NULL, NULL, NULL, NULL,
 1758             UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
 1759 
 1760         bucket_init();
 1761 
 1762         booted = UMA_STARTUP;
 1763 
 1764 #ifdef UMA_DEBUG
 1765         printf("UMA startup complete.\n");
 1766 #endif
 1767 }
 1768 
 1769 /* See uma.h */
 1770 void
 1771 uma_startup2(void)
 1772 {
 1773         booted = UMA_STARTUP2;
 1774         bucket_enable();
 1775 #ifdef UMA_DEBUG
 1776         printf("UMA startup2 complete.\n");
 1777 #endif
 1778 }
 1779 
 1780 /*
 1781  * Initialize our callout handle.
 1782  */
 1785 static void
 1786 uma_startup3(void)
 1787 {
 1788 #ifdef UMA_DEBUG
 1789         printf("Starting callout.\n");
 1790 #endif
 1791         callout_init(&uma_callout, CALLOUT_MPSAFE);
 1792         callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
 1793 #ifdef UMA_DEBUG
 1794         printf("UMA startup3 complete.\n");
 1795 #endif
 1796 }
 1797 
 1798 static uma_keg_t
 1799 uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
 1800                 int align, u_int32_t flags)
 1801 {
 1802         struct uma_kctor_args args;
 1803 
 1804         args.size = size;
 1805         args.uminit = uminit;
 1806         args.fini = fini;
 1807         args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
 1808         args.flags = flags;
 1809         args.zone = zone;
 1810         return (zone_alloc_item(kegs, &args, M_WAITOK));
 1811 }
 1812 
 1813 /* See uma.h */
 1814 void
 1815 uma_set_align(int align)
 1816 {
 1817 
 1818         if (align != UMA_ALIGN_CACHE)
 1819                 uma_align_cache = align;
 1820 }
 1821 
 1822 /* See uma.h */
 1823 uma_zone_t
 1824 uma_zcreate(char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
 1825                 uma_init uminit, uma_fini fini, int align, u_int32_t flags)
 1826 {
 1828         struct uma_zctor_args args;
 1829 
 1830         /* This stuff is essential for the zone ctor */
 1831         args.name = name;
 1832         args.size = size;
 1833         args.ctor = ctor;
 1834         args.dtor = dtor;
 1835         args.uminit = uminit;
 1836         args.fini = fini;
 1837         args.align = align;
 1838         args.flags = flags;
 1839         args.keg = NULL;
 1840 
 1841         return (zone_alloc_item(zones, &args, M_WAITOK));
 1842 }
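      /*
       * Illustrative sketch, not part of this file: a typical consumer
       * creates a zone once at initialization and then allocates and frees
       * through it.  "struct foo" and foo_zone are hypothetical names.
       *
       *	static uma_zone_t foo_zone;
       *
       *	foo_zone = uma_zcreate("foo", sizeof(struct foo),
       *	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
       *	struct foo *fp = uma_zalloc(foo_zone, M_WAITOK | M_ZERO);
       *	...
       *	uma_zfree(foo_zone, fp);
       *	uma_zdestroy(foo_zone);
       */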
 1843 
 1844 /* See uma.h */
 1845 uma_zone_t
 1846 uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
 1847                     uma_init zinit, uma_fini zfini, uma_zone_t master)
 1848 {
 1849         struct uma_zctor_args args;
 1850         uma_keg_t keg;
 1851 
 1852         keg = zone_first_keg(master);
 1853         args.name = name;
 1854         args.size = keg->uk_size;
 1855         args.ctor = ctor;
 1856         args.dtor = dtor;
 1857         args.uminit = zinit;
 1858         args.fini = zfini;
 1859         args.align = keg->uk_align;
 1860         args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
 1861         args.keg = keg;
 1862 
 1863         /* XXX Attaches only one keg of potentially many. */
 1864         return (zone_alloc_item(zones, &args, M_WAITOK));
 1865 }
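      /*
       * Sketch: a secondary zone layers its own ctor/dtor over the master's
       * keg so that both zones carve items from the same backing slabs; the
       * mbuf packet zone is built this way on top of the mbuf zone.  Names
       * here are hypothetical:
       *
       *	view_zone = uma_zsecond_create("foo view", view_ctor, view_dtor,
       *	    NULL, NULL, foo_zone);
       */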
 1866 
 1867 static void
 1868 zone_lock_pair(uma_zone_t a, uma_zone_t b)
 1869 {
 1870         if (a < b) {
 1871                 ZONE_LOCK(a);
 1872                 mtx_lock_flags(b->uz_lock, MTX_DUPOK);
 1873         } else {
 1874                 ZONE_LOCK(b);
 1875                 mtx_lock_flags(a->uz_lock, MTX_DUPOK);
 1876         }
 1877 }
 1878 
 1879 static void
 1880 zone_unlock_pair(uma_zone_t a, uma_zone_t b)
 1881 {
 1882 
 1883         ZONE_UNLOCK(a);
 1884         ZONE_UNLOCK(b);
 1885 }
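      /*
       * Note on the pairwise locking above: acquiring the lower-addressed
       * lock first imposes a global order, so two threads calling
       * zone_lock_pair(a, b) and zone_lock_pair(b, a) concurrently both
       * take min(a, b) first and cannot deadlock.  MTX_DUPOK is needed
       * because the two zone locks may belong to the same WITNESS class.
       */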
 1886 
 1887 int
 1888 uma_zsecond_add(uma_zone_t zone, uma_zone_t master)
 1889 {
 1890         uma_klink_t klink;
 1891         uma_klink_t kl;
 1892         int error;
 1893 
 1894         error = 0;
 1895         klink = malloc(sizeof(*klink), M_TEMP, M_WAITOK | M_ZERO);
 1896 
 1897         zone_lock_pair(zone, master);
 1898         /*
 1899          * zone must use vtoslab() to resolve objects and must already be
 1900          * a secondary.
 1901          */
 1902         if ((zone->uz_flags & (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY))
 1903             != (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) {
 1904                 error = EINVAL;
 1905                 goto out;
 1906         }
 1907         /*
 1908          * The new master must also use vtoslab().
 1909          */
 1910         if ((zone->uz_flags & UMA_ZONE_VTOSLAB) != UMA_ZONE_VTOSLAB) {
 1911                 error = EINVAL;
 1912                 goto out;
 1913         }
 1914         /*
 1915          * Both must either be refcnt, or not be refcnt.
 1916          */
 1917         if ((zone->uz_flags & UMA_ZONE_REFCNT) !=
 1918             (master->uz_flags & UMA_ZONE_REFCNT)) {
 1919                 error = EINVAL;
 1920                 goto out;
 1921         }
 1922         /*
 1923          * The underlying object must be the same size.  rsize
 1924          * may be different.
 1925          */
 1926         if (master->uz_size != zone->uz_size) {
 1927                 error = E2BIG;
 1928                 goto out;
 1929         }
 1930         /*
 1931          * Put it at the end of the list.
 1932          */
 1933         klink->kl_keg = zone_first_keg(master);
 1934         LIST_FOREACH(kl, &zone->uz_kegs, kl_link) {
 1935                 if (LIST_NEXT(kl, kl_link) == NULL) {
 1936                         LIST_INSERT_AFTER(kl, klink, kl_link);
 1937                         break;
 1938                 }
 1939         }
 1940         klink = NULL;
 1941         zone->uz_flags |= UMA_ZFLAG_MULTI;
 1942         zone->uz_slab = zone_fetch_slab_multi;
 1943 
 1944 out:
 1945         zone_unlock_pair(zone, master);
 1946         if (klink != NULL)
 1947                 free(klink, M_TEMP);
 1948 
 1949         return (error);
 1950 }
 1951 
 1953 /* See uma.h */
 1954 void
 1955 uma_zdestroy(uma_zone_t zone)
 1956 {
 1957 
 1958         zone_free_item(zones, zone, NULL, SKIP_NONE, ZFREE_STATFREE);
 1959 }
 1960 
 1961 /* See uma.h */
 1962 void *
 1963 uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
 1964 {
 1965         void *item;
 1966         uma_cache_t cache;
 1967         uma_bucket_t bucket;
 1968         int cpu;
 1969 
 1970         /* This is the fast path allocation */
 1971 #ifdef UMA_DEBUG_ALLOC_1
 1972         printf("Allocating one item from %s(%p)\n", zone->uz_name, zone);
 1973 #endif
 1974         CTR3(KTR_UMA, "uma_zalloc_arg thread %x zone %s flags %d", curthread,
 1975             zone->uz_name, flags);
 1976 
 1977         if (flags & M_WAITOK) {
 1978                 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
 1979                     "uma_zalloc_arg: zone \"%s\"", zone->uz_name);
 1980         }
 1981 
 1982         /*
 1983          * If possible, allocate from the per-CPU cache.  There are two
 1984          * requirements for safe access to the per-CPU cache: (1) the thread
 1985          * accessing the cache must not be preempted or yield during access,
 1986          * and (2) the thread must not migrate CPUs without switching which
 1987          * cache it accesses.  We rely on a critical section to prevent
 1988          * preemption and migration.  We release the critical section in
 1989          * order to acquire the zone mutex if we are unable to allocate from
 1990          * the current cache; when we re-acquire the critical section, we
 1991          * must detect and handle migration if it has occurred.
 1992          */
 1993 zalloc_restart:
 1994         critical_enter();
 1995         cpu = curcpu;
 1996         cache = &zone->uz_cpu[cpu];
 1997 
 1998 zalloc_start:
 1999         bucket = cache->uc_allocbucket;
 2000 
 2001         if (bucket) {
 2002                 if (bucket->ub_cnt > 0) {
 2003                         bucket->ub_cnt--;
 2004                         item = bucket->ub_bucket[bucket->ub_cnt];
 2005 #ifdef INVARIANTS
 2006                         bucket->ub_bucket[bucket->ub_cnt] = NULL;
 2007 #endif
 2008                         KASSERT(item != NULL,
 2009                             ("uma_zalloc: Bucket pointer mangled."));
 2010                         cache->uc_allocs++;
 2011                         critical_exit();
 2012 #ifdef INVARIANTS
 2013                         ZONE_LOCK(zone);
 2014                         uma_dbg_alloc(zone, NULL, item);
 2015                         ZONE_UNLOCK(zone);
 2016 #endif
 2017                         if (zone->uz_ctor != NULL) {
 2018                                 if (zone->uz_ctor(item, zone->uz_size,
 2019                                     udata, flags) != 0) {
 2020                                         zone_free_item(zone, item, udata,
 2021                                             SKIP_DTOR, ZFREE_STATFAIL |
 2022                                             ZFREE_STATFREE);
 2023                                         return (NULL);
 2024                                 }
 2025                         }
 2026                         if (flags & M_ZERO)
 2027                                 bzero(item, zone->uz_size);
 2028                         return (item);
 2029                 } else if (cache->uc_freebucket) {
 2030                         /*
 2031                          * We have run out of items in our allocbucket.
 2032                          * See if we can switch with our free bucket.
 2033                          */
 2034                         if (cache->uc_freebucket->ub_cnt > 0) {
 2035 #ifdef UMA_DEBUG_ALLOC
 2036                                 printf("uma_zalloc: Swapping empty with"
 2037                                     " alloc.\n");
 2038 #endif
 2039                                 bucket = cache->uc_freebucket;
 2040                                 cache->uc_freebucket = cache->uc_allocbucket;
 2041                                 cache->uc_allocbucket = bucket;
 2042 
 2043                                 goto zalloc_start;
 2044                         }
 2045                 }
 2046         }
 2047         /*
 2048          * The attempt to retrieve the item from the per-CPU cache has failed, so
 2049          * we must go back to the zone.  This requires the zone lock, so we
 2050          * must drop the critical section, then re-acquire it when we go back
 2051          * to the cache.  Since the critical section is released, we may be
 2052          * preempted or migrate.  As such, make sure not to maintain any
 2053          * thread-local state specific to the cache from prior to releasing
 2054          * the critical section.
 2055          */
 2056         critical_exit();
 2057         ZONE_LOCK(zone);
 2058         critical_enter();
 2059         cpu = curcpu;
 2060         cache = &zone->uz_cpu[cpu];
 2061         bucket = cache->uc_allocbucket;
 2062         if (bucket != NULL) {
 2063                 if (bucket->ub_cnt > 0) {
 2064                         ZONE_UNLOCK(zone);
 2065                         goto zalloc_start;
 2066                 }
 2067                 bucket = cache->uc_freebucket;
 2068                 if (bucket != NULL && bucket->ub_cnt > 0) {
 2069                         ZONE_UNLOCK(zone);
 2070                         goto zalloc_start;
 2071                 }
 2072         }
 2073 
 2074         /* Since we have locked the zone we may as well send back our stats */
 2075         zone->uz_allocs += cache->uc_allocs;
 2076         cache->uc_allocs = 0;
 2077         zone->uz_frees += cache->uc_frees;
 2078         cache->uc_frees = 0;
 2079 
 2080         /* Our old one is now a free bucket */
 2081         if (cache->uc_allocbucket) {
 2082                 KASSERT(cache->uc_allocbucket->ub_cnt == 0,
 2083                     ("uma_zalloc_arg: Freeing a non free bucket."));
 2084                 LIST_INSERT_HEAD(&zone->uz_free_bucket,
 2085                     cache->uc_allocbucket, ub_link);
 2086                 cache->uc_allocbucket = NULL;
 2087         }
 2088 
 2089         /* Check the free list for a new alloc bucket */
 2090         if ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) {
 2091                 KASSERT(bucket->ub_cnt != 0,
 2092                     ("uma_zalloc_arg: Returning an empty bucket."));
 2093 
 2094                 LIST_REMOVE(bucket, ub_link);
 2095                 cache->uc_allocbucket = bucket;
 2096                 ZONE_UNLOCK(zone);
 2097                 goto zalloc_start;
 2098         }
 2099         /* We are no longer associated with this CPU. */
 2100         critical_exit();
 2101 
 2102         /* Bump up our uz_count so we get here less often */
 2103         if (zone->uz_count < BUCKET_MAX)
 2104                 zone->uz_count++;
 2105 
 2106         /*
 2107          * Now let's just fill a bucket and put it on the free list.  If that
 2108          * works we'll restart the allocation from the beginning.
 2109          */
 2110         if (zone_alloc_bucket(zone, flags)) {
 2111                 ZONE_UNLOCK(zone);
 2112                 goto zalloc_restart;
 2113         }
 2114         ZONE_UNLOCK(zone);
 2115         /*
 2116          * We may not be able to get a bucket so return an actual item.
 2117          */
 2118 #ifdef UMA_DEBUG
 2119         printf("uma_zalloc_arg: Bucketzone returned NULL\n");
 2120 #endif
 2121 
 2122         item = zone_alloc_item(zone, udata, flags);
 2123         return (item);
 2124 }
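      /*
       * Caller-side sketch (hypothetical names): with M_NOWAIT every path
       * above may return NULL, so callers must check; with M_WAITOK the
       * slab layer sleeps rather than fail, though a ctor/init failure
       * above can still return NULL.
       *
       *	fp = uma_zalloc(foo_zone, M_NOWAIT);
       *	if (fp == NULL)
       *		return (ENOBUFS);
       */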
 2125 
 2126 static uma_slab_t
 2127 keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int flags)
 2128 {
 2129         uma_slab_t slab;
 2130 
 2131         mtx_assert(&keg->uk_lock, MA_OWNED);
 2132         slab = NULL;
 2133 
 2134         for (;;) {
 2135                 /*
 2136                  * Find a slab with some space.  Prefer slabs that are partially
 2137                  * used over those that are totally full.  This helps to reduce
 2138                  * fragmentation.
 2139                  */
 2140                 if (keg->uk_free != 0) {
 2141                         if (!LIST_EMPTY(&keg->uk_part_slab)) {
 2142                                 slab = LIST_FIRST(&keg->uk_part_slab);
 2143                         } else {
 2144                                 slab = LIST_FIRST(&keg->uk_free_slab);
 2145                                 LIST_REMOVE(slab, us_link);
 2146                                 LIST_INSERT_HEAD(&keg->uk_part_slab, slab,
 2147                                     us_link);
 2148                         }
 2149                         MPASS(slab->us_keg == keg);
 2150                         return (slab);
 2151                 }
 2152 
 2153                 /*
 2154                  * M_NOVM means don't ask at all!
 2155                  */
 2156                 if (flags & M_NOVM)
 2157                         break;
 2158 
 2159                 if (keg->uk_maxpages && keg->uk_pages >= keg->uk_maxpages) {
 2160                         keg->uk_flags |= UMA_ZFLAG_FULL;
 2161                         /*
 2162                          * If this is not a multi-zone, set the FULL bit.
 2163                          * Otherwise slab_multi() takes care of it.
 2164                          */
 2165                         if ((zone->uz_flags & UMA_ZFLAG_MULTI) == 0)
 2166                                 zone->uz_flags |= UMA_ZFLAG_FULL;
 2167                         if (flags & M_NOWAIT)
 2168                                 break;
 2169                         msleep(keg, &keg->uk_lock, PVM, "keglimit", 0);
 2170                         continue;
 2171                 }
 2172                 keg->uk_recurse++;
 2173                 slab = keg_alloc_slab(keg, zone, flags);
 2174                 keg->uk_recurse--;
 2175                 /*
 2176                  * If we got a slab here it's safe to mark it partially used
 2177                  * and return.  We assume that the caller is going to remove
 2178                  * at least one item.
 2179                  */
 2180                 if (slab) {
 2181                         MPASS(slab->us_keg == keg);
 2182                         LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
 2183                         return (slab);
 2184                 }
 2185                 /*
 2186                  * We might not have been able to get a slab but another cpu
 2187                  * could have while we were unlocked.  Check again before we
 2188                  * fail.
 2189                  */
 2190                 flags |= M_NOVM;
 2191         }
 2192         return (slab);
 2193 }
 2194 
 2195 static inline void
 2196 zone_relock(uma_zone_t zone, uma_keg_t keg)
 2197 {
 2198         if (zone->uz_lock != &keg->uk_lock) {
 2199                 KEG_UNLOCK(keg);
 2200                 ZONE_LOCK(zone);
 2201         }
 2202 }
 2203 
 2204 static inline void
 2205 keg_relock(uma_keg_t keg, uma_zone_t zone)
 2206 {
 2207         if (zone->uz_lock != &keg->uk_lock) {
 2208                 ZONE_UNLOCK(zone);
 2209                 KEG_LOCK(keg);
 2210         }
 2211 }
 2212 
 2213 static uma_slab_t
 2214 zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int flags)
 2215 {
 2216         uma_slab_t slab;
 2217 
 2218         if (keg == NULL)
 2219                 keg = zone_first_keg(zone);
 2220         /*
 2221          * This is to prevent us from recursively trying to allocate
 2222          * buckets.  The problem is that if an allocation forces us to
 2223          * grab a new bucket we will call page_alloc, which will go off
 2224          * and cause the vm to allocate vm_map_entries.  If we need new
 2225          * buckets there too we will recurse in kmem_alloc and bad
 2226          * things happen.  So instead we return a NULL bucket, and make
 2227          * the code that allocates buckets smart enough to deal with it.
 2228          */
 2229         if (keg->uk_flags & UMA_ZFLAG_BUCKET && keg->uk_recurse != 0)
 2230                 return (NULL);
 2231 
 2232         for (;;) {
 2233                 slab = keg_fetch_slab(keg, zone, flags);
 2234                 if (slab)
 2235                         return (slab);
 2236                 if (flags & (M_NOWAIT | M_NOVM))
 2237                         break;
 2238         }
 2239         return (NULL);
 2240 }
 2241 
 2242 /*
 2243  * zone_fetch_slab_multi:  Fetches a slab from one available keg.  Returns
 2244  * with the keg locked.  The caller must call zone_relock() afterwards if the
 2245  * zone lock is required.  If NULL is returned, the zone lock is held instead.
 2246  *
 2247  * The last pointer is used to seed the search.  It is not required.
 2248  */
 2249 static uma_slab_t
 2250 zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int rflags)
 2251 {
 2252         uma_klink_t klink;
 2253         uma_slab_t slab;
 2254         uma_keg_t keg;
 2255         int flags;
 2256         int empty;
 2257         int full;
 2258 
 2259         /*
 2260          * Don't wait on the first pass.  This will skip limit tests
 2261          * as well.  We don't want to block if we can find a provider
 2262          * without blocking.
 2263          */
 2264         flags = (rflags & ~M_WAITOK) | M_NOWAIT;
 2265         /*
 2266          * Use the last slab allocated as a hint for where to start
 2267          * the search.
 2268          */
 2269         if (last) {
 2270                 slab = keg_fetch_slab(last, zone, flags);
 2271                 if (slab)
 2272                         return (slab);
 2273                 zone_relock(zone, last);
 2274                 last = NULL;
 2275         }
 2276         /*
 2277          * Loop until we have a slab in case of transient failures
 2278          * while M_WAITOK is specified.  I'm not sure this is 100%
 2279          * required but we've done it for so long now.
 2280          */
 2281         for (;;) {
 2282                 empty = 0;
 2283                 full = 0;
 2284                 /*
 2285                  * Search the available kegs for slabs.  Be careful to hold the
 2286                  * correct lock while calling into the keg layer.
 2287                  */
 2288                 LIST_FOREACH(klink, &zone->uz_kegs, kl_link) {
 2289                         keg = klink->kl_keg;
 2290                         keg_relock(keg, zone);
 2291                         if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) {
 2292                                 slab = keg_fetch_slab(keg, zone, flags);
 2293                                 if (slab)
 2294                                         return (slab);
 2295                         }
 2296                         if (keg->uk_flags & UMA_ZFLAG_FULL)
 2297                                 full++;
 2298                         else
 2299                                 empty++;
 2300                         zone_relock(zone, keg);
 2301                 }
 2302                 if (rflags & (M_NOWAIT | M_NOVM))
 2303                         break;
 2304                 flags = rflags;
 2305                 /*
 2306                  * All kegs are full.  XXX We can't atomically check all kegs
 2307                  * and sleep so just sleep for a short period and retry.
 2308                  */
 2309                 if (full && !empty) {
 2310                         zone->uz_flags |= UMA_ZFLAG_FULL;
 2311                         zone->uz_sleeps++;
 2312                         msleep(zone, zone->uz_lock, PVM, "zonelimit", hz/100);
 2313                         zone->uz_flags &= ~UMA_ZFLAG_FULL;
 2314                         continue;
 2315                 }
 2316         }
 2317         return (NULL);
 2318 }
 2319 
 2320 static void *
 2321 slab_alloc_item(uma_zone_t zone, uma_slab_t slab)
 2322 {
 2323         uma_keg_t keg;
 2324         uma_slabrefcnt_t slabref;
 2325         void *item;
 2326         u_int8_t freei;
 2327 
 2328         keg = slab->us_keg;
 2329         mtx_assert(&keg->uk_lock, MA_OWNED);
 2330 
 2331         freei = slab->us_firstfree;
 2332         if (keg->uk_flags & UMA_ZONE_REFCNT) {
 2333                 slabref = (uma_slabrefcnt_t)slab;
 2334                 slab->us_firstfree = slabref->us_freelist[freei].us_item;
 2335         } else {
 2336                 slab->us_firstfree = slab->us_freelist[freei].us_item;
 2337         }
 2338         item = slab->us_data + (keg->uk_rsize * freei);
 2339 
 2340         slab->us_freecount--;
 2341         keg->uk_free--;
 2342 #ifdef INVARIANTS
 2343         uma_dbg_alloc(zone, slab, item);
 2344 #endif
 2345         /* Move this slab to the full list */
 2346         if (slab->us_freecount == 0) {
 2347                 LIST_REMOVE(slab, us_link);
 2348                 LIST_INSERT_HEAD(&keg->uk_full_slab, slab, us_link);
 2349         }
 2350 
 2351         return (item);
 2352 }
 2353 
 2354 static int
 2355 zone_alloc_bucket(uma_zone_t zone, int flags)
 2356 {
 2357         uma_bucket_t bucket;
 2358         uma_slab_t slab;
 2359         uma_keg_t keg;
 2360         int16_t saved;
 2361         int max, origflags = flags;
 2362 
 2363         /*
 2364          * Try this zone's free list first so we don't allocate extra buckets.
 2365          */
 2366         if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
 2367                 KASSERT(bucket->ub_cnt == 0,
 2368                     ("zone_alloc_bucket: Bucket on free list is not empty."));
 2369                 LIST_REMOVE(bucket, ub_link);
 2370         } else {
 2371                 int bflags;
 2372 
 2373                 bflags = (flags & ~M_ZERO);
 2374                 if (zone->uz_flags & UMA_ZFLAG_CACHEONLY)
 2375                         bflags |= M_NOVM;
 2376 
 2377                 ZONE_UNLOCK(zone);
 2378                 bucket = bucket_alloc(zone->uz_count, bflags);
 2379                 ZONE_LOCK(zone);
 2380         }
 2381 
 2382         if (bucket == NULL) {
 2383                 return (0);
 2384         }
 2385 
 2386 #ifdef SMP
 2387         /*
 2388          * This code is here to limit the number of simultaneous bucket fills
 2389          * for any given zone to the number of per-CPU caches in this zone.  This
 2390          * is done so that we don't allocate more memory than we really need.
 2391          */
 2392         if (zone->uz_fills >= mp_ncpus)
 2393                 goto done;
 2394 
 2395 #endif
 2396         zone->uz_fills++;
 2397 
 2398         max = MIN(bucket->ub_entries, zone->uz_count);
 2399         /* Try to keep the buckets totally full */
 2400         saved = bucket->ub_cnt;
 2401         slab = NULL;
 2402         keg = NULL;
 2403         while (bucket->ub_cnt < max &&
 2404             (slab = zone->uz_slab(zone, keg, flags)) != NULL) {
 2405                 keg = slab->us_keg;
 2406                 while (slab->us_freecount && bucket->ub_cnt < max) {
 2407                         bucket->ub_bucket[bucket->ub_cnt++] =
 2408                             slab_alloc_item(zone, slab);
 2409                 }
 2410 
 2411                 /* Don't block on the next fill */
 2412                 flags |= M_NOWAIT;
 2413         }
 2414         if (slab)
 2415                 zone_relock(zone, keg);
 2416 
 2417         /*
 2418          * We unlock here because we need to call the zone's init.
 2419          * It should be safe to unlock because the slab dealt with
 2420          * above is already on the appropriate list within the keg
 2421          * and the bucket we filled is not yet on any list, so we
 2422          * own it.
 2423          */
 2424         if (zone->uz_init != NULL) {
 2425                 int i;
 2426 
 2427                 ZONE_UNLOCK(zone);
 2428                 for (i = saved; i < bucket->ub_cnt; i++)
 2429                         if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
 2430                             origflags) != 0)
 2431                                 break;
 2432                 /*
 2433                  * If we couldn't initialize the whole bucket, put the
 2434                  * rest back onto the freelist.
 2435                  */
 2436                 if (i != bucket->ub_cnt) {
 2437                         int j;
 2438 
 2439                         for (j = i; j < bucket->ub_cnt; j++) {
 2440                                 zone_free_item(zone, bucket->ub_bucket[j],
 2441                                     NULL, SKIP_FINI, 0);
 2442 #ifdef INVARIANTS
 2443                                 bucket->ub_bucket[j] = NULL;
 2444 #endif
 2445                         }
 2446                         bucket->ub_cnt = i;
 2447                 }
 2448                 ZONE_LOCK(zone);
 2449         }
 2450 
 2451         zone->uz_fills--;
 2452         if (bucket->ub_cnt != 0) {
 2453                 LIST_INSERT_HEAD(&zone->uz_full_bucket,
 2454                     bucket, ub_link);
 2455                 return (1);
 2456         }
 2457 #ifdef SMP
 2458 done:
 2459 #endif
 2460         bucket_free(bucket);
 2461 
 2462         return (0);
 2463 }
 2464 /*
 2465  * Allocates an item for an internal zone
 2466  *
 2467  * Arguments
 2468  *      zone   The zone to alloc for.
 2469  *      udata  The data to be passed to the constructor.
 2470  *      flags  M_WAITOK, M_NOWAIT, M_ZERO.
 2471  *
 2472  * Returns
 2473  *      NULL if there is no memory and M_NOWAIT is set
 2474  *      An item if successful
 2475  */
 2476 
 2477 static void *
 2478 zone_alloc_item(uma_zone_t zone, void *udata, int flags)
 2479 {
 2480         uma_slab_t slab;
 2481         void *item;
 2482 
 2483         item = NULL;
 2484 
 2485 #ifdef UMA_DEBUG_ALLOC
 2486         printf("INTERNAL: Allocating one item from %s(%p)\n", zone->uz_name, zone);
 2487 #endif
 2488         ZONE_LOCK(zone);
 2489 
 2490         slab = zone->uz_slab(zone, NULL, flags);
 2491         if (slab == NULL) {
 2492                 zone->uz_fails++;
 2493                 ZONE_UNLOCK(zone);
 2494                 return (NULL);
 2495         }
 2496 
 2497         item = slab_alloc_item(zone, slab);
 2498 
 2499         zone_relock(zone, slab->us_keg);
 2500         zone->uz_allocs++;
 2501         ZONE_UNLOCK(zone);
 2502 
 2503         /*
 2504          * We have to call both the zone's init (not the keg's init)
 2505          * and the zone's ctor.  This is because the item is going from
 2506          * a keg slab directly to the user, and the user is expecting it
 2507          * to be both zone-init'd as well as zone-ctor'd.
 2508          */
 2509         if (zone->uz_init != NULL) {
 2510                 if (zone->uz_init(item, zone->uz_size, flags) != 0) {
 2511                         zone_free_item(zone, item, udata, SKIP_FINI,
 2512                             ZFREE_STATFAIL | ZFREE_STATFREE);
 2513                         return (NULL);
 2514                 }
 2515         }
 2516         if (zone->uz_ctor != NULL) {
 2517                 if (zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
 2518                         zone_free_item(zone, item, udata, SKIP_DTOR,
 2519                             ZFREE_STATFAIL | ZFREE_STATFREE);
 2520                         return (NULL);
 2521                 }
 2522         }
 2523         if (flags & M_ZERO)
 2524                 bzero(item, zone->uz_size);
 2525 
 2526         return (item);
 2527 }
 2528 
 2529 /* See uma.h */
 2530 void
 2531 uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
 2532 {
 2533         uma_cache_t cache;
 2534         uma_bucket_t bucket;
 2535         int bflags;
 2536         int cpu;
 2537 
 2538 #ifdef UMA_DEBUG_ALLOC_1
 2539         printf("Freeing item %p to %s(%p)\n", item, zone->uz_name, zone);
 2540 #endif
 2541         CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
 2542             zone->uz_name);
 2543 
 2544         /* uma_zfree(..., NULL) does nothing, to match free(9). */
 2545         if (item == NULL)
 2546                 return;
 2547 
 2548         if (zone->uz_dtor)
 2549                 zone->uz_dtor(item, zone->uz_size, udata);
 2550 
 2551 #ifdef INVARIANTS
 2552         ZONE_LOCK(zone);
 2553         if (zone->uz_flags & UMA_ZONE_MALLOC)
 2554                 uma_dbg_free(zone, udata, item);
 2555         else
 2556                 uma_dbg_free(zone, NULL, item);
 2557         ZONE_UNLOCK(zone);
 2558 #endif
 2559         /*
 2560          * The race here is acceptable.  If we miss it we'll just have to wait
 2561          * a little longer for the limits to be reset.
 2562          */
 2563         if (zone->uz_flags & UMA_ZFLAG_FULL)
 2564                 goto zfree_internal;
 2565 
 2566         /*
 2567          * If possible, free to the per-CPU cache.  There are two
 2568          * requirements for safe access to the per-CPU cache: (1) the thread
 2569          * accessing the cache must not be preempted or yield during access,
 2570          * and (2) the thread must not migrate CPUs without switching which
 2571          * cache it accesses.  We rely on a critical section to prevent
 2572          * preemption and migration.  We release the critical section in
 2573          * order to acquire the zone mutex if we are unable to free to the
 2574          * current cache; when we re-acquire the critical section, we must
 2575          * detect and handle migration if it has occurred.
 2576          */
 2577 zfree_restart:
 2578         critical_enter();
 2579         cpu = curcpu;
 2580         cache = &zone->uz_cpu[cpu];
 2581 
 2582 zfree_start:
 2583         bucket = cache->uc_freebucket;
 2584 
 2585         if (bucket) {
 2586                 /*
 2587                  * Do we have room in our bucket? It is OK for this uz count
 2588                  * check to be slightly out of sync.
 2589                  */
 2590 
 2591                 if (bucket->ub_cnt < bucket->ub_entries) {
 2592                         KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
 2593                             ("uma_zfree: Freeing to non free bucket index."));
 2594                         bucket->ub_bucket[bucket->ub_cnt] = item;
 2595                         bucket->ub_cnt++;
 2596                         cache->uc_frees++;
 2597                         critical_exit();
 2598                         return;
 2599                 } else if (cache->uc_allocbucket) {
 2600 #ifdef UMA_DEBUG_ALLOC
 2601                         printf("uma_zfree: Swapping buckets.\n");
 2602 #endif
 2603                         /*
 2604                          * We have run out of space in our freebucket.
 2605                          * See if we can switch with our alloc bucket.
 2606                          */
 2607                         if (cache->uc_allocbucket->ub_cnt <
 2608                             cache->uc_freebucket->ub_cnt) {
 2609                                 bucket = cache->uc_freebucket;
 2610                                 cache->uc_freebucket = cache->uc_allocbucket;
 2611                                 cache->uc_allocbucket = bucket;
 2612                                 goto zfree_start;
 2613                         }
 2614                 }
 2615         }
 2616         /*
 2617          * We can get here for two reasons:
 2618          *
 2619          * 1) The buckets are NULL
 2620          * 2) The alloc and free buckets are both somewhat full.
 2621          *
 2622          * We must go back to the zone, which requires acquiring the zone lock,
 2623          * which in turn means we must release and re-acquire the critical
 2624          * section.  Since the critical section is released, we may be
 2625          * preempted or migrate.  As such, make sure not to maintain any
 2626          * thread-local state specific to the cache from prior to releasing
 2627          * the critical section.
 2628          */
 2629         critical_exit();
 2630         ZONE_LOCK(zone);
 2631         critical_enter();
 2632         cpu = curcpu;
 2633         cache = &zone->uz_cpu[cpu];
 2634         if (cache->uc_freebucket != NULL) {
 2635                 if (cache->uc_freebucket->ub_cnt <
 2636                     cache->uc_freebucket->ub_entries) {
 2637                         ZONE_UNLOCK(zone);
 2638                         goto zfree_start;
 2639                 }
 2640                 if (cache->uc_allocbucket != NULL &&
 2641                     (cache->uc_allocbucket->ub_cnt <
 2642                     cache->uc_freebucket->ub_cnt)) {
 2643                         ZONE_UNLOCK(zone);
 2644                         goto zfree_start;
 2645                 }
 2646         }
 2647 
 2648         /* Since we have locked the zone we may as well send back our stats */
 2649         zone->uz_allocs += cache->uc_allocs;
 2650         cache->uc_allocs = 0;
 2651         zone->uz_frees += cache->uc_frees;
 2652         cache->uc_frees = 0;
 2653 
 2654         bucket = cache->uc_freebucket;
 2655         cache->uc_freebucket = NULL;
 2656 
 2657         /* Can we throw this on the zone full list? */
 2658         if (bucket != NULL) {
 2659 #ifdef UMA_DEBUG_ALLOC
 2660                 printf("uma_zfree: Putting old bucket on the free list.\n");
 2661 #endif
 2662                 /* ub_cnt is pointing to the last free item */
 2663                 KASSERT(bucket->ub_cnt != 0,
 2664                     ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n"));
 2665                 LIST_INSERT_HEAD(&zone->uz_full_bucket,
 2666                     bucket, ub_link);
 2667         }
 2668         if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
 2669                 LIST_REMOVE(bucket, ub_link);
 2670                 ZONE_UNLOCK(zone);
 2671                 cache->uc_freebucket = bucket;
 2672                 goto zfree_start;
 2673         }
 2674         /* We are no longer associated with this CPU. */
 2675         critical_exit();
 2676 
 2677         /* And the zone. */
 2678         ZONE_UNLOCK(zone);
 2679 
 2680 #ifdef UMA_DEBUG_ALLOC
 2681         printf("uma_zfree: Allocating new free bucket.\n");
 2682 #endif
 2683         bflags = M_NOWAIT;
 2684 
 2685         if (zone->uz_flags & UMA_ZFLAG_CACHEONLY)
 2686                 bflags |= M_NOVM;
 2687         bucket = bucket_alloc(zone->uz_count, bflags);
 2688         if (bucket) {
 2689                 ZONE_LOCK(zone);
 2690                 LIST_INSERT_HEAD(&zone->uz_free_bucket,
 2691                     bucket, ub_link);
 2692                 ZONE_UNLOCK(zone);
 2693                 goto zfree_restart;
 2694         }
 2695 
 2696         /*
 2697          * If nothing else caught this, we'll just do an internal free.
 2698          */
 2699 zfree_internal:
 2700         zone_free_item(zone, item, udata, SKIP_DTOR, ZFREE_STATFREE);
 2701 
 2702         return;
 2703 }
 2704 
 2705 /*
 2706  * Frees an item to an INTERNAL zone or allocates a free bucket
 2707  *
 2708  * Arguments:
 2709  *      zone   The zone to free to
 2710  *      item   The item we're freeing
 2711  *      udata  User supplied data for the dtor
 2712  *      skip   Skip dtors and finis
 2713  */
 2714 static void
 2715 zone_free_item(uma_zone_t zone, void *item, void *udata,
 2716     enum zfreeskip skip, int flags)
 2717 {
 2718         uma_slab_t slab;
 2719         uma_slabrefcnt_t slabref;
 2720         uma_keg_t keg;
 2721         u_int8_t *mem;
 2722         u_int8_t freei;
 2723         int clearfull;
 2724 
 2725         if (skip < SKIP_DTOR && zone->uz_dtor)
 2726                 zone->uz_dtor(item, zone->uz_size, udata);
 2727 
 2728         if (skip < SKIP_FINI && zone->uz_fini)
 2729                 zone->uz_fini(item, zone->uz_size);
 2730 
 2731         ZONE_LOCK(zone);
 2732 
 2733         if (flags & ZFREE_STATFAIL)
 2734                 zone->uz_fails++;
 2735         if (flags & ZFREE_STATFREE)
 2736                 zone->uz_frees++;
 2737 
 2738         if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
 2739                 mem = (u_int8_t *)((unsigned long)item & (~UMA_SLAB_MASK));
 2740                 keg = zone_first_keg(zone); /* Must only be one. */
 2741                 if (zone->uz_flags & UMA_ZONE_HASH) {
 2742                         slab = hash_sfind(&keg->uk_hash, mem);
 2743                 } else {
 2744                         mem += keg->uk_pgoff;
 2745                         slab = (uma_slab_t)mem;
 2746                 }
 2747         } else {
 2748                 /* This prevents redundant lookups via free(). */
 2749                 if ((zone->uz_flags & UMA_ZONE_MALLOC) && udata != NULL)
 2750                         slab = (uma_slab_t)udata;
 2751                 else
 2752                         slab = vtoslab((vm_offset_t)item);
 2753                 keg = slab->us_keg;
 2754                 keg_relock(keg, zone);
 2755         }
 2756         MPASS(keg == slab->us_keg);
 2757 
 2758         /* Do we need to remove from any lists? */
 2759         if (slab->us_freecount+1 == keg->uk_ipers) {
 2760                 LIST_REMOVE(slab, us_link);
 2761                 LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
 2762         } else if (slab->us_freecount == 0) {
 2763                 LIST_REMOVE(slab, us_link);
 2764                 LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
 2765         }
 2766 
 2767         /* Slab management stuff */
 2768         freei = ((unsigned long)item - (unsigned long)slab->us_data)
 2769                 / keg->uk_rsize;
 2770 
 2771 #ifdef INVARIANTS
 2772         if (!skip)
 2773                 uma_dbg_free(zone, slab, item);
 2774 #endif
 2775 
 2776         if (keg->uk_flags & UMA_ZONE_REFCNT) {
 2777                 slabref = (uma_slabrefcnt_t)slab;
 2778                 slabref->us_freelist[freei].us_item = slab->us_firstfree;
 2779         } else {
 2780                 slab->us_freelist[freei].us_item = slab->us_firstfree;
 2781         }
 2782         slab->us_firstfree = freei;
 2783         slab->us_freecount++;
 2784 
 2785         /* Zone statistics */
 2786         keg->uk_free++;
 2787 
 2788         clearfull = 0;
 2789         if (keg->uk_flags & UMA_ZFLAG_FULL) {
 2790                 if (keg->uk_pages < keg->uk_maxpages) {
 2791                         keg->uk_flags &= ~UMA_ZFLAG_FULL;
 2792                         clearfull = 1;
 2793                 }
 2794 
 2795                 /*
 2796                  * We can handle one more allocation.  Since we're clearing
 2797                  * ZFLAG_FULL, wake up all procs blocked on pages.  This
 2798                  * should be uncommon, so keeping this simple for now
 2799                  * (rather than adding a count of blocked threads etc.).
 2800                  */
 2801                 wakeup(keg);
 2802         }
 2803         if (clearfull) {
 2804                 zone_relock(zone, keg);
 2805                 zone->uz_flags &= ~UMA_ZFLAG_FULL;
 2806                 wakeup(zone);
 2807                 ZONE_UNLOCK(zone);
 2808         } else
 2809                 KEG_UNLOCK(keg);
 2810 }
 2811 
 2812 /* See uma.h */
 2813 int
 2814 uma_zone_set_max(uma_zone_t zone, int nitems)
 2815 {
 2816         uma_keg_t keg;
 2817 
 2818         ZONE_LOCK(zone);
 2819         keg = zone_first_keg(zone);
 2820         keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera;
 2821         if (keg->uk_maxpages * keg->uk_ipers < nitems)
 2822                 keg->uk_maxpages += keg->uk_ppera;
 2823         nitems = keg->uk_maxpages * keg->uk_ipers;
 2824         ZONE_UNLOCK(zone);
 2825 
 2826         return (nitems);
 2827 }
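      /*
       * Worked example of the rounding above: with uk_ipers == 10 and
       * uk_ppera == 1, uma_zone_set_max(zone, 25) first computes 2 pages,
       * sees 2 * 10 < 25, bumps the cap to 3 pages, and returns the
       * effective limit of 30 items.
       */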
 2828 
 2829 /* See uma.h */
 2830 int
 2831 uma_zone_get_max(uma_zone_t zone)
 2832 {
 2833         int nitems;
 2834         uma_keg_t keg;
 2835 
 2836         ZONE_LOCK(zone);
 2837         keg = zone_first_keg(zone);
 2838         nitems = keg->uk_maxpages * keg->uk_ipers;
 2839         ZONE_UNLOCK(zone);
 2840 
 2841         return (nitems);
 2842 }
 2843 
 2844 /* See uma.h */
 2845 int
 2846 uma_zone_get_cur(uma_zone_t zone)
 2847 {
 2848         int64_t nitems;
 2849         u_int i;
 2850 
 2851         ZONE_LOCK(zone);
 2852         nitems = zone->uz_allocs - zone->uz_frees;
 2853         CPU_FOREACH(i) {
 2854                 /*
 2855                  * See the comment in sysctl_vm_zone_stats() regarding the
 2856                  * safety of accessing the per-cpu caches. With the zone lock
 2857                  * held, it is safe, but can potentially result in stale data.
 2858                  */
 2859                 nitems += zone->uz_cpu[i].uc_allocs -
 2860                     zone->uz_cpu[i].uc_frees;
 2861         }
 2862         ZONE_UNLOCK(zone);
 2863 
 2864         return (nitems < 0 ? 0 : nitems);
 2865 }
 2866 
 2867 /* See uma.h */
 2868 void
 2869 uma_zone_set_init(uma_zone_t zone, uma_init uminit)
 2870 {
 2871         uma_keg_t keg;
 2872 
 2873         ZONE_LOCK(zone);
 2874         keg = zone_first_keg(zone);
 2875         KASSERT(keg->uk_pages == 0,
 2876             ("uma_zone_set_init on non-empty keg"));
 2877         keg->uk_init = uminit;
 2878         ZONE_UNLOCK(zone);
 2879 }
 2880 
 2881 /* See uma.h */
 2882 void
 2883 uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
 2884 {
 2885         uma_keg_t keg;
 2886 
 2887         ZONE_LOCK(zone);
 2888         keg = zone_first_keg(zone);
 2889         KASSERT(keg->uk_pages == 0,
 2890             ("uma_zone_set_fini on non-empty keg"));
 2891         keg->uk_fini = fini;
 2892         ZONE_UNLOCK(zone);
 2893 }
 2894 
 2895 /* See uma.h */
 2896 void
 2897 uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
 2898 {
 2899         ZONE_LOCK(zone);
 2900         KASSERT(zone_first_keg(zone)->uk_pages == 0,
 2901             ("uma_zone_set_zinit on non-empty keg"));
 2902         zone->uz_init = zinit;
 2903         ZONE_UNLOCK(zone);
 2904 }
 2905 
 2906 /* See uma.h */
 2907 void
 2908 uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
 2909 {
 2910         ZONE_LOCK(zone);
 2911         KASSERT(zone_first_keg(zone)->uk_pages == 0,
 2912             ("uma_zone_set_zfini on non-empty keg"));
 2913         zone->uz_fini = zfini;
 2914         ZONE_UNLOCK(zone);
 2915 }
 2916 
 2917 /* See uma.h */
 2918 /* XXX uk_freef is not actually used with the zone locked */
 2919 void
 2920 uma_zone_set_freef(uma_zone_t zone, uma_free freef)
 2921 {
 2922 
 2923         ZONE_LOCK(zone);
 2924         zone_first_keg(zone)->uk_freef = freef;
 2925         ZONE_UNLOCK(zone);
 2926 }
 2927 
 2928 /* See uma.h */
 2929 /* XXX uk_allocf is not actually used with the zone locked */
 2930 void
 2931 uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
 2932 {
 2933         uma_keg_t keg;
 2934 
 2935         ZONE_LOCK(zone);
 2936         keg = zone_first_keg(zone);
 2937         keg->uk_flags |= UMA_ZFLAG_PRIVALLOC;
 2938         keg->uk_allocf = allocf;
 2939         ZONE_UNLOCK(zone);
 2940 }
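      /*
       * Sketch of a custom back-end allocator, assuming the uma_alloc
       * typedef from uma.h and mirroring this file's default page_alloc();
       * foo_page_alloc is a hypothetical name:
       *
       *	static void *
       *	foo_page_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag,
       *	    int wait)
       *	{
       *		*pflag = UMA_SLAB_KMEM;
       *		return ((void *)kmem_malloc(kmem_map, bytes, wait));
       *	}
       *
       *	uma_zone_set_allocf(foo_zone, foo_page_alloc);
       */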
 2941 
 2942 /* See uma.h */
 2943 int
 2944 uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int count)
 2945 {
 2946         uma_keg_t keg;
 2947         vm_offset_t kva;
 2948         int pages;
 2949 
 2950         keg = zone_first_keg(zone);
  2951         /* Round the page count up so "count" items always fit. */
  2952         pages = count / keg->uk_ipers;
  2953         if (pages * keg->uk_ipers < count)
  2954                 pages++;
 2955 
 2956         kva = kmem_alloc_nofault(kernel_map, pages * UMA_SLAB_SIZE);
 2957 
 2958         if (kva == 0)
 2959                 return (0);
 2960         if (obj == NULL)
 2961                 obj = vm_object_allocate(OBJT_PHYS, pages);
 2962         else {
 2963                 VM_OBJECT_LOCK_INIT(obj, "uma object");
 2964                 _vm_object_allocate(OBJT_PHYS, pages, obj);
 2965         }
 2966         ZONE_LOCK(zone);
 2967         keg->uk_kva = kva;
 2968         keg->uk_obj = obj;
 2969         keg->uk_maxpages = pages;
 2970         keg->uk_allocf = obj_alloc;
 2971         keg->uk_flags |= UMA_ZONE_NOFREE | UMA_ZFLAG_PRIVALLOC;
 2972         ZONE_UNLOCK(zone);
 2973         return (1);
 2974 }
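
/*
 * Usage sketch (hypothetical zone): back a zone with a private VM object
 * so that at most "count" items can ever be allocated from a pre-sized
 * kernel mapping.  The return value is 1 on success and 0 on failure:
 *
 *	if (uma_zone_set_obj(foo_zone, NULL, maxitems) == 0)
 *		printf("foo: unable to reserve backing store\n");
 */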
 2975 
 2976 /* See uma.h */
 2977 void
 2978 uma_prealloc(uma_zone_t zone, int items)
 2979 {
 2980         int slabs;
 2981         uma_slab_t slab;
 2982         uma_keg_t keg;
 2983 
 2984         keg = zone_first_keg(zone);
 2985         ZONE_LOCK(zone);
 2986         slabs = items / keg->uk_ipers;
 2987         if (slabs * keg->uk_ipers < items)
 2988                 slabs++;
 2989         while (slabs > 0) {
 2990                 slab = keg_alloc_slab(keg, zone, M_WAITOK);
 2991                 if (slab == NULL)
 2992                         break;
 2993                 MPASS(slab->us_keg == keg);
 2994                 LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
 2995                 slabs--;
 2996         }
 2997         ZONE_UNLOCK(zone);
 2998 }
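
/*
 * Usage sketch (hypothetical): populate a zone's free-slab list at boot
 * so early consumers are not blocked waiting on the VM:
 *
 *	uma_prealloc(foo_zone, 128);
 */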
 2999 
 3000 /* See uma.h */
 3001 u_int32_t *
 3002 uma_find_refcnt(uma_zone_t zone, void *item)
 3003 {
 3004         uma_slabrefcnt_t slabref;
 3005         uma_keg_t keg;
 3006         u_int32_t *refcnt;
 3007         int idx;
 3008 
 3009         slabref = (uma_slabrefcnt_t)vtoslab((vm_offset_t)item &
 3010             (~UMA_SLAB_MASK));
  3011         KASSERT(slabref != NULL && (slabref->us_keg->uk_flags & UMA_ZONE_REFCNT),
  3012             ("uma_find_refcnt(): zone possibly not UMA_ZONE_REFCNT"));
  3013         keg = slabref->us_keg;
 3014         idx = ((unsigned long)item - (unsigned long)slabref->us_data)
 3015             / keg->uk_rsize;
 3016         refcnt = &slabref->us_freelist[idx].us_refcnt;
  3017         return (refcnt);
 3018 }
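
/*
 * Usage sketch (hypothetical): for a zone created with UMA_ZONE_REFCNT,
 * each item has a reference counter stored in its slab; a consumer
 * initializes the counter after allocation and shares the pointer:
 *
 *	item = uma_zalloc(refzone, M_WAITOK);
 *	refcnt = uma_find_refcnt(refzone, item);
 *	*refcnt = 1;
 */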
 3019 
 3020 /* See uma.h */
 3021 void
 3022 uma_reclaim(void)
 3023 {
 3024 #ifdef UMA_DEBUG
 3025         printf("UMA: vm asked us to release pages!\n");
 3026 #endif
 3027         bucket_enable();
 3028         zone_foreach(zone_drain);
 3029         /*
  3030          * Some slabs may have been freed, but this zone was visited early in
  3031          * the drain pass, so drain it again to free pages that became empty
  3032          * only after the other zones were drained.  Do the same for buckets.
 3033          */
 3034         zone_drain(slabzone);
 3035         zone_drain(slabrefzone);
 3036         bucket_zone_drain();
 3037 }
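
/*
 * Note: uma_reclaim() is invoked from the VM system (the page daemon)
 * when the machine runs short of free pages, which is what the debug
 * printf above refers to.
 */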
 3038 
 3039 /* See uma.h */
 3040 int
 3041 uma_zone_exhausted(uma_zone_t zone)
 3042 {
 3043         int full;
 3044 
 3045         ZONE_LOCK(zone);
 3046         full = (zone->uz_flags & UMA_ZFLAG_FULL);
 3047         ZONE_UNLOCK(zone);
  3048         return (full);
 3049 }
 3050 
 3051 int
 3052 uma_zone_exhausted_nolock(uma_zone_t zone)
 3053 {
 3054         return (zone->uz_flags & UMA_ZFLAG_FULL);
 3055 }
 3056 
 3057 void *
 3058 uma_large_malloc(int size, int wait)
 3059 {
 3060         void *mem;
 3061         uma_slab_t slab;
 3062         u_int8_t flags;
 3063 
 3064         slab = zone_alloc_item(slabzone, NULL, wait);
 3065         if (slab == NULL)
 3066                 return (NULL);
 3067         mem = page_alloc(NULL, size, &flags, wait);
 3068         if (mem) {
 3069                 vsetslab((vm_offset_t)mem, slab);
 3070                 slab->us_data = mem;
 3071                 slab->us_flags = flags | UMA_SLAB_MALLOC;
 3072                 slab->us_size = size;
 3073         } else {
 3074                 zone_free_item(slabzone, slab, NULL, SKIP_NONE,
 3075                     ZFREE_STATFAIL | ZFREE_STATFREE);
 3076         }
 3077 
 3078         return (mem);
 3079 }
 3080 
 3081 void
 3082 uma_large_free(uma_slab_t slab)
 3083 {
 3084         vsetobj((vm_offset_t)slab->us_data, kmem_object);
 3085         page_free(slab->us_data, slab->us_size, slab->us_flags);
 3086         zone_free_item(slabzone, slab, NULL, SKIP_NONE, ZFREE_STATFREE);
 3087 }
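
/*
 * Note: uma_large_malloc() and uma_large_free() serve malloc(9) requests
 * that are too large for the fixed-size malloc zones; each allocation is
 * tracked through a slab header obtained from slabzone rather than
 * through a zone of its own.
 */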
 3088 
 3089 void
 3090 uma_print_stats(void)
 3091 {
 3092         zone_foreach(uma_print_zone);
 3093 }
 3094 
 3095 static void
 3096 slab_print(uma_slab_t slab)
 3097 {
 3098         printf("slab: keg %p, data %p, freecount %d, firstfree %d\n",
 3099                 slab->us_keg, slab->us_data, slab->us_freecount,
 3100                 slab->us_firstfree);
 3101 }
 3102 
 3103 static void
 3104 cache_print(uma_cache_t cache)
 3105 {
 3106         printf("alloc: %p(%d), free: %p(%d)\n",
 3107                 cache->uc_allocbucket,
  3108                 cache->uc_allocbucket ? cache->uc_allocbucket->ub_cnt : 0,
  3109                 cache->uc_freebucket,
  3110                 cache->uc_freebucket ? cache->uc_freebucket->ub_cnt : 0);
 3111 }
 3112 
 3113 static void
 3114 uma_print_keg(uma_keg_t keg)
 3115 {
 3116         uma_slab_t slab;
 3117 
 3118         printf("keg: %s(%p) size %d(%d) flags %d ipers %d ppera %d "
 3119             "out %d free %d limit %d\n",
 3120             keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags,
 3121             keg->uk_ipers, keg->uk_ppera,
 3122             (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free,
 3123             (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers);
 3124         printf("Part slabs:\n");
 3125         LIST_FOREACH(slab, &keg->uk_part_slab, us_link)
 3126                 slab_print(slab);
 3127         printf("Free slabs:\n");
 3128         LIST_FOREACH(slab, &keg->uk_free_slab, us_link)
 3129                 slab_print(slab);
 3130         printf("Full slabs:\n");
 3131         LIST_FOREACH(slab, &keg->uk_full_slab, us_link)
 3132                 slab_print(slab);
 3133 }
 3134 
 3135 void
 3136 uma_print_zone(uma_zone_t zone)
 3137 {
 3138         uma_cache_t cache;
 3139         uma_klink_t kl;
 3140         int i;
 3141 
 3142         printf("zone: %s(%p) size %d flags %d\n",
 3143             zone->uz_name, zone, zone->uz_size, zone->uz_flags);
 3144         LIST_FOREACH(kl, &zone->uz_kegs, kl_link)
 3145                 uma_print_keg(kl->kl_keg);
 3146         CPU_FOREACH(i) {
 3147                 cache = &zone->uz_cpu[i];
 3148                 printf("CPU %d Cache:\n", i);
 3149                 cache_print(cache);
 3150         }
 3151 }
 3152 
 3153 #ifdef DDB
 3154 /*
  3155  * Generate statistics across both the zone and its per-cpu caches.  Return
  3156  * each statistic through its pointer argument if that pointer is non-NULL.
 3157  *
 3158  * Note: does not update the zone statistics, as it can't safely clear the
 3159  * per-CPU cache statistic.
 3160  *
 3161  * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't
 3162  * safe from off-CPU; we should modify the caches to track this information
 3163  * directly so that we don't have to.
 3164  */
 3165 static void
 3166 uma_zone_sumstat(uma_zone_t z, int *cachefreep, u_int64_t *allocsp,
 3167     u_int64_t *freesp, u_int64_t *sleepsp)
 3168 {
 3169         uma_cache_t cache;
 3170         u_int64_t allocs, frees, sleeps;
 3171         int cachefree, cpu;
 3172 
 3173         allocs = frees = sleeps = 0;
 3174         cachefree = 0;
 3175         CPU_FOREACH(cpu) {
 3176                 cache = &z->uz_cpu[cpu];
 3177                 if (cache->uc_allocbucket != NULL)
 3178                         cachefree += cache->uc_allocbucket->ub_cnt;
 3179                 if (cache->uc_freebucket != NULL)
 3180                         cachefree += cache->uc_freebucket->ub_cnt;
 3181                 allocs += cache->uc_allocs;
 3182                 frees += cache->uc_frees;
 3183         }
 3184         allocs += z->uz_allocs;
 3185         frees += z->uz_frees;
 3186         sleeps += z->uz_sleeps;
 3187         if (cachefreep != NULL)
 3188                 *cachefreep = cachefree;
 3189         if (allocsp != NULL)
 3190                 *allocsp = allocs;
 3191         if (freesp != NULL)
 3192                 *freesp = frees;
 3193         if (sleepsp != NULL)
 3194                 *sleepsp = sleeps;
 3195 }
 3196 #endif /* DDB */
 3197 
 3198 static int
 3199 sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS)
 3200 {
 3201         uma_keg_t kz;
 3202         uma_zone_t z;
 3203         int count;
 3204 
 3205         count = 0;
 3206         mtx_lock(&uma_mtx);
 3207         LIST_FOREACH(kz, &uma_kegs, uk_link) {
 3208                 LIST_FOREACH(z, &kz->uk_zones, uz_link)
 3209                         count++;
 3210         }
 3211         mtx_unlock(&uma_mtx);
 3212         return (sysctl_handle_int(oidp, &count, 0, req));
 3213 }
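
/*
 * Note: this handler backs the read-only "vm.zone_count" sysctl, which
 * reports the number of UMA zones currently defined.
 */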
 3214 
 3215 static int
 3216 sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
 3217 {
 3218         struct uma_stream_header ush;
 3219         struct uma_type_header uth;
 3220         struct uma_percpu_stat ups;
 3221         uma_bucket_t bucket;
 3222         struct sbuf sbuf;
 3223         uma_cache_t cache;
 3224         uma_klink_t kl;
 3225         uma_keg_t kz;
 3226         uma_zone_t z;
 3227         uma_keg_t k;
 3228         int count, error, i;
 3229 
 3230         error = sysctl_wire_old_buffer(req, 0);
 3231         if (error != 0)
 3232                 return (error);
 3233         sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
 3234 
 3235         count = 0;
 3236         mtx_lock(&uma_mtx);
 3237         LIST_FOREACH(kz, &uma_kegs, uk_link) {
 3238                 LIST_FOREACH(z, &kz->uk_zones, uz_link)
 3239                         count++;
 3240         }
 3241 
 3242         /*
 3243          * Insert stream header.
 3244          */
 3245         bzero(&ush, sizeof(ush));
 3246         ush.ush_version = UMA_STREAM_VERSION;
 3247         ush.ush_maxcpus = (mp_maxid + 1);
 3248         ush.ush_count = count;
 3249         (void)sbuf_bcat(&sbuf, &ush, sizeof(ush));
 3250 
 3251         LIST_FOREACH(kz, &uma_kegs, uk_link) {
 3252                 LIST_FOREACH(z, &kz->uk_zones, uz_link) {
 3253                         bzero(&uth, sizeof(uth));
 3254                         ZONE_LOCK(z);
 3255                         strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
 3256                         uth.uth_align = kz->uk_align;
 3257                         uth.uth_size = kz->uk_size;
 3258                         uth.uth_rsize = kz->uk_rsize;
 3259                         LIST_FOREACH(kl, &z->uz_kegs, kl_link) {
 3260                                 k = kl->kl_keg;
 3261                                 uth.uth_maxpages += k->uk_maxpages;
 3262                                 uth.uth_pages += k->uk_pages;
 3263                                 uth.uth_keg_free += k->uk_free;
 3264                                 uth.uth_limit = (k->uk_maxpages / k->uk_ppera)
 3265                                     * k->uk_ipers;
 3266                         }
 3267 
 3268                         /*
  3269                          * A zone is secondary if it is not the first entry
 3270                          * on the keg's zone list.
 3271                          */
 3272                         if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
 3273                             (LIST_FIRST(&kz->uk_zones) != z))
 3274                                 uth.uth_zone_flags = UTH_ZONE_SECONDARY;
 3275 
 3276                         LIST_FOREACH(bucket, &z->uz_full_bucket, ub_link)
 3277                                 uth.uth_zone_free += bucket->ub_cnt;
 3278                         uth.uth_allocs = z->uz_allocs;
 3279                         uth.uth_frees = z->uz_frees;
 3280                         uth.uth_fails = z->uz_fails;
 3281                         uth.uth_sleeps = z->uz_sleeps;
 3282                         (void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
 3283                         /*
 3284                          * While it is not normally safe to access the cache
 3285                          * bucket pointers while not on the CPU that owns the
 3286                          * cache, we only allow the pointers to be exchanged
 3287                          * without the zone lock held, not invalidated, so
 3288                          * accept the possible race associated with bucket
 3289                          * exchange during monitoring.
 3290                          */
 3291                         for (i = 0; i < (mp_maxid + 1); i++) {
 3292                                 bzero(&ups, sizeof(ups));
 3293                                 if (kz->uk_flags & UMA_ZFLAG_INTERNAL)
 3294                                         goto skip;
 3295                                 if (CPU_ABSENT(i))
 3296                                         goto skip;
 3297                                 cache = &z->uz_cpu[i];
 3298                                 if (cache->uc_allocbucket != NULL)
 3299                                         ups.ups_cache_free +=
 3300                                             cache->uc_allocbucket->ub_cnt;
 3301                                 if (cache->uc_freebucket != NULL)
 3302                                         ups.ups_cache_free +=
 3303                                             cache->uc_freebucket->ub_cnt;
 3304                                 ups.ups_allocs = cache->uc_allocs;
 3305                                 ups.ups_frees = cache->uc_frees;
 3306 skip:
 3307                                 (void)sbuf_bcat(&sbuf, &ups, sizeof(ups));
 3308                         }
 3309                         ZONE_UNLOCK(z);
 3310                 }
 3311         }
 3312         mtx_unlock(&uma_mtx);
 3313         error = sbuf_finish(&sbuf);
 3314         sbuf_delete(&sbuf);
 3315         return (error);
 3316 }
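
/*
 * Note: the binary stream produced above ("vm.zone_stats") is consumed by
 * libmemstat(3), which in turn backs "vmstat -z".  A minimal userland
 * reader sketch (error handling omitted; hypothetical variable names):
 *
 *	struct memory_type_list *list = memstat_mtl_alloc();
 *	if (memstat_sysctl_uma(list, 0) == 0)
 *		... iterate with memstat_mtl_first()/memstat_mtl_next() ...
 *	memstat_mtl_free(list);
 */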
 3317 
 3318 #ifdef DDB
 3319 DB_SHOW_COMMAND(uma, db_show_uma)
 3320 {
 3321         u_int64_t allocs, frees, sleeps;
 3322         uma_bucket_t bucket;
 3323         uma_keg_t kz;
 3324         uma_zone_t z;
 3325         int cachefree;
 3326 
 3327         db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
 3328             "Requests", "Sleeps");
 3329         LIST_FOREACH(kz, &uma_kegs, uk_link) {
 3330                 LIST_FOREACH(z, &kz->uk_zones, uz_link) {
 3331                         if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
 3332                                 allocs = z->uz_allocs;
 3333                                 frees = z->uz_frees;
 3334                                 sleeps = z->uz_sleeps;
 3335                                 cachefree = 0;
 3336                         } else
 3337                                 uma_zone_sumstat(z, &cachefree, &allocs,
 3338                                     &frees, &sleeps);
 3339                         if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
 3340                             (LIST_FIRST(&kz->uk_zones) != z)))
 3341                                 cachefree += kz->uk_free;
 3342                         LIST_FOREACH(bucket, &z->uz_full_bucket, ub_link)
 3343                                 cachefree += bucket->ub_cnt;
 3344                         db_printf("%18s %8ju %8jd %8d %12ju %8ju\n", z->uz_name,
 3345                             (uintmax_t)kz->uk_size,
 3346                             (intmax_t)(allocs - frees), cachefree,
 3347                             (uintmax_t)allocs, sleeps);
 3348                 }
 3349         }
 3350 }
 3351 #endif
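
/*
 * Usage note: the DDB command above is run from the kernel debugger
 * prompt as "show uma" and prints one summary line per zone.
 */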
