FreeBSD/Linux Kernel Cross Reference
sys/vm/uma_core.c


    1 /*-
    2  * Copyright (c) 2002-2005, 2009 Jeffrey Roberson <jeff@FreeBSD.org>
    3  * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
    4  * Copyright (c) 2004-2006 Robert N. M. Watson
    5  * All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice unmodified, this list of conditions, and the following
   12  *    disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   27  */
   28 
   29 /*
   30  * uma_core.c  Implementation of the Universal Memory allocator
   31  *
   32  * This allocator is intended to replace the multitude of similar object caches
   33  * in the standard FreeBSD kernel.  The intent is to be flexible as well as
    34  * efficient.  A primary design goal is to return unused memory to the rest of
   35  * the system.  This will make the system as a whole more flexible due to the
   36  * ability to move memory to subsystems which most need it instead of leaving
   37  * pools of reserved memory unused.
   38  *
   39  * The basic ideas stem from similar slab/zone based allocators whose algorithms
   40  * are well known.
   41  *
   42  */
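
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): the consumer-facing API this file implements is used roughly as
 * below.  "struct foo" and foo_zone are hypothetical names.
 */
#if 0
static uma_zone_t foo_zone;

static void
foo_example(void)
{
        struct foo *f;

        foo_zone = uma_zcreate("foo", sizeof(struct foo),
            NULL, NULL, NULL, NULL,     /* no ctor/dtor/uminit/fini */
            UMA_ALIGN_PTR, 0);
        f = uma_zalloc(foo_zone, M_WAITOK);     /* may sleep for memory */
        uma_zfree(foo_zone, f);
        uma_zdestroy(foo_zone);
}
#endif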
   43 
   44 /*
   45  * TODO:
   46  *      - Improve memory usage for large allocations
   47  *      - Investigate cache size adjustments
   48  */
   49 
   50 #include <sys/cdefs.h>
   51 __FBSDID("$FreeBSD: releng/8.0/sys/vm/uma_core.c 194429 2009-06-18 07:27:11Z alc $");
   52 
   53 /* I should really use ktr.. */
   54 /*
   55 #define UMA_DEBUG 1
   56 #define UMA_DEBUG_ALLOC 1
   57 #define UMA_DEBUG_ALLOC_1 1
   58 */
   59 
   60 #include "opt_ddb.h"
   61 #include "opt_param.h"
   62 
   63 #include <sys/param.h>
   64 #include <sys/systm.h>
   65 #include <sys/kernel.h>
   66 #include <sys/types.h>
   67 #include <sys/queue.h>
   68 #include <sys/malloc.h>
   69 #include <sys/ktr.h>
   70 #include <sys/lock.h>
   71 #include <sys/sysctl.h>
   72 #include <sys/mutex.h>
   73 #include <sys/proc.h>
   74 #include <sys/sbuf.h>
   75 #include <sys/smp.h>
   76 #include <sys/vmmeter.h>
   77 
   78 #include <vm/vm.h>
   79 #include <vm/vm_object.h>
   80 #include <vm/vm_page.h>
   81 #include <vm/vm_param.h>
   82 #include <vm/vm_map.h>
   83 #include <vm/vm_kern.h>
   84 #include <vm/vm_extern.h>
   85 #include <vm/uma.h>
   86 #include <vm/uma_int.h>
   87 #include <vm/uma_dbg.h>
   88 
   89 #include <machine/vmparam.h>
   90 
   91 #include <ddb/ddb.h>
   92 
   93 /*
   94  * This is the zone and keg from which all zones are spawned.  The idea is that
   95  * even the zone & keg heads are allocated from the allocator, so we use the
   96  * bss section to bootstrap us.
   97  */
   98 static struct uma_keg masterkeg;
   99 static struct uma_zone masterzone_k;
  100 static struct uma_zone masterzone_z;
  101 static uma_zone_t kegs = &masterzone_k;
  102 static uma_zone_t zones = &masterzone_z;
  103 
   104 /* This is the zone from which all uma_slab_t's are allocated. */
  105 static uma_zone_t slabzone;
  106 static uma_zone_t slabrefzone;  /* With refcounters (for UMA_ZONE_REFCNT) */
  107 
  108 /*
  109  * The initial hash tables come out of this zone so they can be allocated
  110  * prior to malloc coming up.
  111  */
  112 static uma_zone_t hashzone;
  113 
  114 /* The boot-time adjusted value for cache line alignment. */
  115 static int uma_align_cache = 64 - 1;
  116 
  117 static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
  118 
  119 /*
  120  * Are we allowed to allocate buckets?
  121  */
  122 static int bucketdisable = 1;
  123 
  124 /* Linked list of all kegs in the system */
  125 static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(&uma_kegs);
  126 
  127 /* This mutex protects the keg list */
  128 static struct mtx uma_mtx;
  129 
  130 /* Linked list of boot time pages */
  131 static LIST_HEAD(,uma_slab) uma_boot_pages =
  132     LIST_HEAD_INITIALIZER(&uma_boot_pages);
  133 
  134 /* This mutex protects the boot time pages list */
  135 static struct mtx uma_boot_pages_mtx;
  136 
  137 /* Is the VM done starting up? */
  138 static int booted = 0;
  139 
  140 /* Maximum number of allowed items-per-slab if the slab header is OFFPAGE */
  141 static u_int uma_max_ipers;
  142 static u_int uma_max_ipers_ref;
  143 
  144 /*
  145  * This is the handle used to schedule events that need to happen
  146  * outside of the allocation fast path.
  147  */
  148 static struct callout uma_callout;
  149 #define UMA_TIMEOUT     20              /* Seconds for callout interval. */
  150 
  151 /*
  152  * This structure is passed as the zone ctor arg so that I don't have to create
  153  * a special allocation function just for zones.
  154  */
  155 struct uma_zctor_args {
  156         char *name;
  157         size_t size;
  158         uma_ctor ctor;
  159         uma_dtor dtor;
  160         uma_init uminit;
  161         uma_fini fini;
  162         uma_keg_t keg;
  163         int align;
  164         u_int32_t flags;
  165 };
  166 
  167 struct uma_kctor_args {
  168         uma_zone_t zone;
  169         size_t size;
  170         uma_init uminit;
  171         uma_fini fini;
  172         int align;
  173         u_int32_t flags;
  174 };
  175 
  176 struct uma_bucket_zone {
  177         uma_zone_t      ubz_zone;
  178         char            *ubz_name;
  179         int             ubz_entries;
  180 };
  181 
  182 #define BUCKET_MAX      128
  183 
  184 struct uma_bucket_zone bucket_zones[] = {
  185         { NULL, "16 Bucket", 16 },
  186         { NULL, "32 Bucket", 32 },
  187         { NULL, "64 Bucket", 64 },
  188         { NULL, "128 Bucket", 128 },
  189         { NULL, NULL, 0}
  190 };
  191 
  192 #define BUCKET_SHIFT    4
  193 #define BUCKET_ZONES    ((BUCKET_MAX >> BUCKET_SHIFT) + 1)
  194 
  195 /*
  196  * bucket_size[] maps requested bucket sizes to zones that allocate a bucket
  197  * of approximately the right size.
  198  */
  199 static uint8_t bucket_size[BUCKET_ZONES];
  200 
  201 /*
  202  * Flags and enumerations to be passed to internal functions.
  203  */
  204 enum zfreeskip { SKIP_NONE, SKIP_DTOR, SKIP_FINI };
  205 
  206 #define ZFREE_STATFAIL  0x00000001      /* Update zone failure statistic. */
  207 #define ZFREE_STATFREE  0x00000002      /* Update zone free statistic. */
  208 
   209 /* Prototypes. */
  210 
  211 static void *obj_alloc(uma_zone_t, int, u_int8_t *, int);
  212 static void *page_alloc(uma_zone_t, int, u_int8_t *, int);
  213 static void *startup_alloc(uma_zone_t, int, u_int8_t *, int);
  214 static void page_free(void *, int, u_int8_t);
  215 static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int);
  216 static void cache_drain(uma_zone_t);
  217 static void bucket_drain(uma_zone_t, uma_bucket_t);
  218 static void bucket_cache_drain(uma_zone_t zone);
  219 static int keg_ctor(void *, int, void *, int);
  220 static void keg_dtor(void *, int, void *);
  221 static int zone_ctor(void *, int, void *, int);
  222 static void zone_dtor(void *, int, void *);
  223 static int zero_init(void *, int, int);
  224 static void keg_small_init(uma_keg_t keg);
  225 static void keg_large_init(uma_keg_t keg);
  226 static void zone_foreach(void (*zfunc)(uma_zone_t));
  227 static void zone_timeout(uma_zone_t zone);
  228 static int hash_alloc(struct uma_hash *);
  229 static int hash_expand(struct uma_hash *, struct uma_hash *);
  230 static void hash_free(struct uma_hash *hash);
  231 static void uma_timeout(void *);
  232 static void uma_startup3(void);
  233 static void *zone_alloc_item(uma_zone_t, void *, int);
  234 static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip,
  235     int);
  236 static void bucket_enable(void);
  237 static void bucket_init(void);
  238 static uma_bucket_t bucket_alloc(int, int);
  239 static void bucket_free(uma_bucket_t);
  240 static void bucket_zone_drain(void);
  241 static int zone_alloc_bucket(uma_zone_t zone, int flags);
  242 static uma_slab_t zone_fetch_slab(uma_zone_t zone, uma_keg_t last, int flags);
  243 static uma_slab_t zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int flags);
  244 static void *slab_alloc_item(uma_zone_t zone, uma_slab_t slab);
  245 static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
  246     uma_fini fini, int align, u_int32_t flags);
  247 static inline void zone_relock(uma_zone_t zone, uma_keg_t keg);
  248 static inline void keg_relock(uma_keg_t keg, uma_zone_t zone);
  249 
  250 void uma_print_zone(uma_zone_t);
  251 void uma_print_stats(void);
  252 static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
  253 static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
  254 
  255 SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
  256 
  257 SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT,
  258     0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");
  259 
  260 SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
  261     0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");
  262 
  263 /*
   264  * This routine checks whether it's safe to enable buckets.
  265  */
  266 
  267 static void
  268 bucket_enable(void)
  269 {
  270         if (cnt.v_free_count < cnt.v_free_min)
  271                 bucketdisable = 1;
  272         else
  273                 bucketdisable = 0;
  274 }
  275 
  276 /*
  277  * Initialize bucket_zones, the array of zones of buckets of various sizes.
  278  *
  279  * For each zone, calculate the memory required for each bucket, consisting
   280  * of the header and an array of pointers.  Initialize bucket_size[] so that
   281  * requests in the appropriate size range are mapped to the zone.
  282  */
  283 static void
  284 bucket_init(void)
  285 {
  286         struct uma_bucket_zone *ubz;
  287         int i;
  288         int j;
  289 
  290         for (i = 0, j = 0; bucket_zones[j].ubz_entries != 0; j++) {
  291                 int size;
  292 
  293                 ubz = &bucket_zones[j];
  294                 size = roundup(sizeof(struct uma_bucket), sizeof(void *));
  295                 size += sizeof(void *) * ubz->ubz_entries;
  296                 ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
  297                     NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
  298                     UMA_ZFLAG_INTERNAL | UMA_ZFLAG_BUCKET);
  299                 for (; i <= ubz->ubz_entries; i += (1 << BUCKET_SHIFT))
  300                         bucket_size[i >> BUCKET_SHIFT] = j;
  301         }
  302 }
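
/*
 * Worked example (editor's addition): with BUCKET_SHIFT == 4 the loop above
 * fills bucket_size[] as follows:
 *
 *      index:  0  1  2  3  4  5  6  7  8
 *      zone:   0  0  1  2  2  3  3  3  3
 *
 * so requests for up to 16 entries map to "16 Bucket", 17-32 to "32 Bucket",
 * 33-64 to "64 Bucket" and 65-128 to "128 Bucket".
 */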
  303 
  304 /*
  305  * Given a desired number of entries for a bucket, return the zone from which
  306  * to allocate the bucket.
  307  */
  308 static struct uma_bucket_zone *
  309 bucket_zone_lookup(int entries)
  310 {
  311         int idx;
  312 
  313         idx = howmany(entries, 1 << BUCKET_SHIFT);
  314         return (&bucket_zones[bucket_size[idx]]);
  315 }
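
/*
 * Example (editor's addition): bucket_zone_lookup(24) computes
 * idx = howmany(24, 16) == 2, and bucket_size[2] == 1, so the bucket comes
 * from the "32 Bucket" zone, the smallest zone whose buckets can hold at
 * least 24 items.
 */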
  316 
  317 static uma_bucket_t
  318 bucket_alloc(int entries, int bflags)
  319 {
  320         struct uma_bucket_zone *ubz;
  321         uma_bucket_t bucket;
  322 
  323         /*
  324          * This is to stop us from allocating per cpu buckets while we're
  325          * running out of vm.boot_pages.  Otherwise, we would exhaust the
  326          * boot pages.  This also prevents us from allocating buckets in
  327          * low memory situations.
  328          */
  329         if (bucketdisable)
  330                 return (NULL);
  331 
  332         ubz = bucket_zone_lookup(entries);
  333         bucket = zone_alloc_item(ubz->ubz_zone, NULL, bflags);
  334         if (bucket) {
  335 #ifdef INVARIANTS
  336                 bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
  337 #endif
  338                 bucket->ub_cnt = 0;
  339                 bucket->ub_entries = ubz->ubz_entries;
  340         }
  341 
  342         return (bucket);
  343 }
  344 
  345 static void
  346 bucket_free(uma_bucket_t bucket)
  347 {
  348         struct uma_bucket_zone *ubz;
  349 
  350         ubz = bucket_zone_lookup(bucket->ub_entries);
  351         zone_free_item(ubz->ubz_zone, bucket, NULL, SKIP_NONE,
  352             ZFREE_STATFREE);
  353 }
  354 
  355 static void
  356 bucket_zone_drain(void)
  357 {
  358         struct uma_bucket_zone *ubz;
  359 
  360         for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
  361                 zone_drain(ubz->ubz_zone);
  362 }
  363 
  364 static inline uma_keg_t
  365 zone_first_keg(uma_zone_t zone)
  366 {
  367 
  368         return (LIST_FIRST(&zone->uz_kegs)->kl_keg);
  369 }
  370 
  371 static void
  372 zone_foreach_keg(uma_zone_t zone, void (*kegfn)(uma_keg_t))
  373 {
  374         uma_klink_t klink;
  375 
  376         LIST_FOREACH(klink, &zone->uz_kegs, kl_link)
  377                 kegfn(klink->kl_keg);
  378 }
  379 
  380 /*
   381  * Routine called by the callout subsystem to fire off time-interval-based
   382  * calculations (stats, hash size, etc.).
   383  *
   384  * Arguments:
   385  *      unused  Unused
  386  *
  387  * Returns:
  388  *      Nothing
  389  */
  390 static void
  391 uma_timeout(void *unused)
  392 {
  393         bucket_enable();
  394         zone_foreach(zone_timeout);
  395 
  396         /* Reschedule this event */
  397         callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
  398 }
  399 
  400 /*
   401  * Routine to perform timeout driven calculations.  This expands the
   402  * keg hashes as needed.
  403  *
  404  *  Returns nothing.
  405  */
  406 static void
  407 keg_timeout(uma_keg_t keg)
  408 {
  409 
  410         KEG_LOCK(keg);
  411         /*
  412          * Expand the keg hash table.
  413          *
  414          * This is done if the number of slabs is larger than the hash size.
   415  * What I'm trying to do here is eliminate collisions entirely.  This
   416  * may be a little aggressive.  Should I allow for two collisions max?
  417          */
  418         if (keg->uk_flags & UMA_ZONE_HASH &&
  419             keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) {
  420                 struct uma_hash newhash;
  421                 struct uma_hash oldhash;
  422                 int ret;
  423 
  424                 /*
  425                  * This is so involved because allocating and freeing
  426                  * while the keg lock is held will lead to deadlock.
  427                  * I have to do everything in stages and check for
  428                  * races.
  429                  */
  430                 newhash = keg->uk_hash;
  431                 KEG_UNLOCK(keg);
  432                 ret = hash_alloc(&newhash);
  433                 KEG_LOCK(keg);
  434                 if (ret) {
  435                         if (hash_expand(&keg->uk_hash, &newhash)) {
  436                                 oldhash = keg->uk_hash;
  437                                 keg->uk_hash = newhash;
  438                         } else
  439                                 oldhash = newhash;
  440 
  441                         KEG_UNLOCK(keg);
  442                         hash_free(&oldhash);
  443                         KEG_LOCK(keg);
  444                 }
  445         }
  446         KEG_UNLOCK(keg);
  447 }
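
/*
 * Worked example (editor's addition), assuming UMA_HASH_SIZE_INIT == 32:
 * a hashed keg with uk_ppera == 1 triggers an expansion once it holds 32
 * slabs, since uk_pages / uk_ppera == 32 >= uh_hashsize; hash_alloc()
 * then doubles the table to 64 entries, keeping the expected chain
 * length at roughly one slab per bucket.
 */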
  448 
  449 static void
  450 zone_timeout(uma_zone_t zone)
  451 {
  452 
  453         zone_foreach_keg(zone, &keg_timeout);
  454 }
  455 
  456 /*
  457  * Allocate and zero fill the next sized hash table from the appropriate
  458  * backing store.
  459  *
  460  * Arguments:
  461  *      hash  A new hash structure with the old hash size in uh_hashsize
  462  *
  463  * Returns:
   464  *      1 on success and 0 on failure.
  465  */
  466 static int
  467 hash_alloc(struct uma_hash *hash)
  468 {
  469         int oldsize;
  470         int alloc;
  471 
  472         oldsize = hash->uh_hashsize;
  473 
  474         /* We're just going to go to a power of two greater */
  475         if (oldsize)  {
  476                 hash->uh_hashsize = oldsize * 2;
  477                 alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
  478                 hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
  479                     M_UMAHASH, M_NOWAIT);
  480         } else {
  481                 alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
  482                 hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
  483                     M_WAITOK);
  484                 hash->uh_hashsize = UMA_HASH_SIZE_INIT;
  485         }
  486         if (hash->uh_slab_hash) {
  487                 bzero(hash->uh_slab_hash, alloc);
  488                 hash->uh_hashmask = hash->uh_hashsize - 1;
  489                 return (1);
  490         }
  491 
  492         return (0);
  493 }
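
/*
 * Example (editor's addition): growing a 64-entry table sets
 * uh_hashsize = 128 and tries to malloc 128 * sizeof(struct slabhead)
 * bytes with M_NOWAIT; if that fails, hash_alloc() returns 0 and the
 * caller simply keeps using the old table until the next timeout.
 */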
  494 
  495 /*
  496  * Expands the hash table for HASH zones.  This is done from zone_timeout
  497  * to reduce collisions.  This must not be done in the regular allocation
  498  * path, otherwise, we can recurse on the vm while allocating pages.
  499  *
  500  * Arguments:
  501  *      oldhash  The hash you want to expand
  502  *      newhash  The hash structure for the new table
  503  *
  504  * Returns:
   505  *      1 if the entries were moved to the new table, 0 if the new
   506  *      table is missing or is not larger than the old one; in that
   507  *      case the caller keeps the old hash.
   508  */
  509 static int
  510 hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
  511 {
  512         uma_slab_t slab;
  513         int hval;
  514         int i;
  515 
  516         if (!newhash->uh_slab_hash)
  517                 return (0);
  518 
  519         if (oldhash->uh_hashsize >= newhash->uh_hashsize)
  520                 return (0);
  521 
  522         /*
  523          * I need to investigate hash algorithms for resizing without a
  524          * full rehash.
  525          */
  526 
  527         for (i = 0; i < oldhash->uh_hashsize; i++)
  528                 while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) {
  529                         slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]);
  530                         SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink);
  531                         hval = UMA_HASH(newhash, slab->us_data);
  532                         SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
  533                             slab, us_hlink);
  534                 }
  535 
  536         return (1);
  537 }
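
/*
 * Example (editor's addition): doubling from 64 to 128 buckets changes
 * uh_hashmask from 0x3f to 0x7f, so a slab that hashed to bucket h in
 * the old table may now belong in bucket h or h + 64.  Every slab must
 * therefore be pulled off its old chain and re-inserted under the new
 * mask, which is the full rehash performed above.
 */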
  538 
  539 /*
   540  * Free the hash table to the appropriate backing store.
   541  *
   542  * Arguments:
   543  *      hash  The hash structure whose table we're freeing; its size
   544  *            determines which backing store it came from.
  545  *
  546  * Returns:
  547  *      Nothing
  548  */
  549 static void
  550 hash_free(struct uma_hash *hash)
  551 {
  552         if (hash->uh_slab_hash == NULL)
  553                 return;
  554         if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
  555                 zone_free_item(hashzone,
  556                     hash->uh_slab_hash, NULL, SKIP_NONE, ZFREE_STATFREE);
  557         else
  558                 free(hash->uh_slab_hash, M_UMAHASH);
  559 }
  560 
  561 /*
  562  * Frees all outstanding items in a bucket
  563  *
  564  * Arguments:
  565  *      zone   The zone to free to, must be unlocked.
  566  *      bucket The free/alloc bucket with items, cpu queue must be locked.
  567  *
  568  * Returns:
  569  *      Nothing
  570  */
  571 
  572 static void
  573 bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
  574 {
  575         void *item;
  576 
  577         if (bucket == NULL)
  578                 return;
  579 
  580         while (bucket->ub_cnt > 0)  {
  581                 bucket->ub_cnt--;
  582                 item = bucket->ub_bucket[bucket->ub_cnt];
  583 #ifdef INVARIANTS
  584                 bucket->ub_bucket[bucket->ub_cnt] = NULL;
  585                 KASSERT(item != NULL,
  586                     ("bucket_drain: botched ptr, item is NULL"));
  587 #endif
  588                 zone_free_item(zone, item, NULL, SKIP_DTOR, 0);
  589         }
  590 }
  591 
  592 /*
  593  * Drains the per cpu caches for a zone.
  594  *
   595  * NOTE: This may only be called while the zone is being torn down, and not
  596  * during normal operation.  This is necessary in order that we do not have
  597  * to migrate CPUs to drain the per-CPU caches.
  598  *
  599  * Arguments:
  600  *      zone     The zone to drain, must be unlocked.
  601  *
  602  * Returns:
  603  *      Nothing
  604  */
  605 static void
  606 cache_drain(uma_zone_t zone)
  607 {
  608         uma_cache_t cache;
  609         int cpu;
  610 
  611         /*
  612          * XXX: It is safe to not lock the per-CPU caches, because we're
  613          * tearing down the zone anyway.  I.e., there will be no further use
  614          * of the caches at this point.
  615          *
   616  * XXX: It would be good to be able to assert that the zone is being
  617          * torn down to prevent improper use of cache_drain().
  618          *
  619          * XXX: We lock the zone before passing into bucket_cache_drain() as
  620          * it is used elsewhere.  Should the tear-down path be made special
  621          * there in some form?
  622          */
  623         for (cpu = 0; cpu <= mp_maxid; cpu++) {
  624                 if (CPU_ABSENT(cpu))
  625                         continue;
  626                 cache = &zone->uz_cpu[cpu];
  627                 bucket_drain(zone, cache->uc_allocbucket);
  628                 bucket_drain(zone, cache->uc_freebucket);
  629                 if (cache->uc_allocbucket != NULL)
  630                         bucket_free(cache->uc_allocbucket);
  631                 if (cache->uc_freebucket != NULL)
  632                         bucket_free(cache->uc_freebucket);
  633                 cache->uc_allocbucket = cache->uc_freebucket = NULL;
  634         }
  635         ZONE_LOCK(zone);
  636         bucket_cache_drain(zone);
  637         ZONE_UNLOCK(zone);
  638 }
  639 
  640 /*
  641  * Drain the cached buckets from a zone.  Expects a locked zone on entry.
  642  */
  643 static void
  644 bucket_cache_drain(uma_zone_t zone)
  645 {
  646         uma_bucket_t bucket;
  647 
  648         /*
   649  * Drain the bucket queues and free the buckets; we keep only two per
   650  * cpu (alloc/free).
  651          */
  652         while ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) {
  653                 LIST_REMOVE(bucket, ub_link);
  654                 ZONE_UNLOCK(zone);
  655                 bucket_drain(zone, bucket);
  656                 bucket_free(bucket);
  657                 ZONE_LOCK(zone);
  658         }
  659 
  660         /* Now we do the free queue.. */
  661         while ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
  662                 LIST_REMOVE(bucket, ub_link);
  663                 bucket_free(bucket);
  664         }
  665 }
  666 
  667 /*
  668  * Frees pages from a keg back to the system.  This is done on demand from
  669  * the pageout daemon.
  670  *
  671  * Returns nothing.
  672  */
  673 static void
  674 keg_drain(uma_keg_t keg)
  675 {
  676         struct slabhead freeslabs = { 0 };
  677         uma_slab_t slab;
  678         uma_slab_t n;
  679         u_int8_t flags;
  680         u_int8_t *mem;
  681         int i;
  682 
  683         /*
  684          * We don't want to take pages from statically allocated kegs at this
   685          * time.
  686          */
  687         if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
  688                 return;
  689 
  690 #ifdef UMA_DEBUG
  691         printf("%s free items: %u\n", keg->uk_name, keg->uk_free);
  692 #endif
  693         KEG_LOCK(keg);
  694         if (keg->uk_free == 0)
  695                 goto finished;
  696 
  697         slab = LIST_FIRST(&keg->uk_free_slab);
  698         while (slab) {
  699                 n = LIST_NEXT(slab, us_link);
  700 
   701                 /* We have nowhere to free these to. */
  702                 if (slab->us_flags & UMA_SLAB_BOOT) {
  703                         slab = n;
  704                         continue;
  705                 }
  706 
  707                 LIST_REMOVE(slab, us_link);
  708                 keg->uk_pages -= keg->uk_ppera;
  709                 keg->uk_free -= keg->uk_ipers;
  710 
  711                 if (keg->uk_flags & UMA_ZONE_HASH)
  712                         UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data);
  713 
  714                 SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
  715 
  716                 slab = n;
  717         }
  718 finished:
  719         KEG_UNLOCK(keg);
  720 
  721         while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
  722                 SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
  723                 if (keg->uk_fini)
  724                         for (i = 0; i < keg->uk_ipers; i++)
  725                                 keg->uk_fini(
  726                                     slab->us_data + (keg->uk_rsize * i),
  727                                     keg->uk_size);
  728                 flags = slab->us_flags;
  729                 mem = slab->us_data;
  730 
  731                 if (keg->uk_flags & UMA_ZONE_VTOSLAB) {
  732                         vm_object_t obj;
  733 
  734                         if (flags & UMA_SLAB_KMEM)
  735                                 obj = kmem_object;
  736                         else if (flags & UMA_SLAB_KERNEL)
  737                                 obj = kernel_object;
  738                         else
  739                                 obj = NULL;
  740                         for (i = 0; i < keg->uk_ppera; i++)
  741                                 vsetobj((vm_offset_t)mem + (i * PAGE_SIZE),
  742                                     obj);
  743                 }
  744                 if (keg->uk_flags & UMA_ZONE_OFFPAGE)
  745                         zone_free_item(keg->uk_slabzone, slab, NULL,
  746                             SKIP_NONE, ZFREE_STATFREE);
  747 #ifdef UMA_DEBUG
  748                 printf("%s: Returning %d bytes.\n",
  749                     keg->uk_name, UMA_SLAB_SIZE * keg->uk_ppera);
  750 #endif
  751                 keg->uk_freef(mem, UMA_SLAB_SIZE * keg->uk_ppera, flags);
  752         }
  753 }
  754 
  755 static void
  756 zone_drain_wait(uma_zone_t zone, int waitok)
  757 {
  758 
  759         /*
  760          * Set draining to interlock with zone_dtor() so we can release our
  761          * locks as we go.  Only dtor() should do a WAITOK call since it
  762          * is the only call that knows the structure will still be available
  763          * when it wakes up.
  764          */
  765         ZONE_LOCK(zone);
  766         while (zone->uz_flags & UMA_ZFLAG_DRAINING) {
  767                 if (waitok == M_NOWAIT)
  768                         goto out;
  769                 mtx_unlock(&uma_mtx);
  770                 msleep(zone, zone->uz_lock, PVM, "zonedrain", 1);
  771                 mtx_lock(&uma_mtx);
  772         }
  773         zone->uz_flags |= UMA_ZFLAG_DRAINING;
  774         bucket_cache_drain(zone);
  775         ZONE_UNLOCK(zone);
  776         /*
  777          * The DRAINING flag protects us from being freed while
  778          * we're running.  Normally the uma_mtx would protect us but we
  779          * must be able to release and acquire the right lock for each keg.
  780          */
  781         zone_foreach_keg(zone, &keg_drain);
  782         ZONE_LOCK(zone);
  783         zone->uz_flags &= ~UMA_ZFLAG_DRAINING;
  784         wakeup(zone);
  785 out:
  786         ZONE_UNLOCK(zone);
  787 }
  788 
  789 void
  790 zone_drain(uma_zone_t zone)
  791 {
  792 
  793         zone_drain_wait(zone, M_NOWAIT);
  794 }
  795 
  796 /*
  797  * Allocate a new slab for a keg.  This does not insert the slab onto a list.
  798  *
  799  * Arguments:
  800  *      wait  Shall we wait?
  801  *
  802  * Returns:
  803  *      The slab that was allocated or NULL if there is no memory and the
  804  *      caller specified M_NOWAIT.
  805  */
  806 static uma_slab_t
  807 keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int wait)
  808 {
  809         uma_slabrefcnt_t slabref;
  810         uma_alloc allocf;
  811         uma_slab_t slab;
  812         u_int8_t *mem;
  813         u_int8_t flags;
  814         int i;
  815 
  816         mtx_assert(&keg->uk_lock, MA_OWNED);
  817         slab = NULL;
  818 
  819 #ifdef UMA_DEBUG
  820         printf("slab_zalloc:  Allocating a new slab for %s\n", keg->uk_name);
  821 #endif
  822         allocf = keg->uk_allocf;
  823         KEG_UNLOCK(keg);
  824 
  825         if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
  826                 slab = zone_alloc_item(keg->uk_slabzone, NULL, wait);
  827                 if (slab == NULL) {
  828                         KEG_LOCK(keg);
   829                         return (NULL);
  830                 }
  831         }
  832 
  833         /*
  834          * This reproduces the old vm_zone behavior of zero filling pages the
  835          * first time they are added to a zone.
  836          *
  837          * Malloced items are zeroed in uma_zalloc.
  838          */
  839 
  840         if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
  841                 wait |= M_ZERO;
  842         else
  843                 wait &= ~M_ZERO;
  844 
  845         /* zone is passed for legacy reasons. */
  846         mem = allocf(zone, keg->uk_ppera * UMA_SLAB_SIZE, &flags, wait);
  847         if (mem == NULL) {
  848                 if (keg->uk_flags & UMA_ZONE_OFFPAGE)
  849                         zone_free_item(keg->uk_slabzone, slab, NULL,
  850                             SKIP_NONE, ZFREE_STATFREE);
  851                 KEG_LOCK(keg);
  852                 return (NULL);
  853         }
  854 
  855         /* Point the slab into the allocated memory */
  856         if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
   857                 slab = (uma_slab_t)(mem + keg->uk_pgoff);
  858 
  859         if (keg->uk_flags & UMA_ZONE_VTOSLAB)
  860                 for (i = 0; i < keg->uk_ppera; i++)
  861                         vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);
  862 
  863         slab->us_keg = keg;
  864         slab->us_data = mem;
  865         slab->us_freecount = keg->uk_ipers;
  866         slab->us_firstfree = 0;
  867         slab->us_flags = flags;
  868 
  869         if (keg->uk_flags & UMA_ZONE_REFCNT) {
  870                 slabref = (uma_slabrefcnt_t)slab;
  871                 for (i = 0; i < keg->uk_ipers; i++) {
  872                         slabref->us_freelist[i].us_refcnt = 0;
  873                         slabref->us_freelist[i].us_item = i+1;
  874                 }
  875         } else {
  876                 for (i = 0; i < keg->uk_ipers; i++)
  877                         slab->us_freelist[i].us_item = i+1;
  878         }
  879 
  880         if (keg->uk_init != NULL) {
  881                 for (i = 0; i < keg->uk_ipers; i++)
  882                         if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
  883                             keg->uk_size, wait) != 0)
  884                                 break;
  885                 if (i != keg->uk_ipers) {
  886                         if (keg->uk_fini != NULL) {
  887                                 for (i--; i > -1; i--)
  888                                         keg->uk_fini(slab->us_data +
  889                                             (keg->uk_rsize * i),
  890                                             keg->uk_size);
  891                         }
  892                         if (keg->uk_flags & UMA_ZONE_VTOSLAB) {
  893                                 vm_object_t obj;
  894 
  895                                 if (flags & UMA_SLAB_KMEM)
  896                                         obj = kmem_object;
  897                                 else if (flags & UMA_SLAB_KERNEL)
  898                                         obj = kernel_object;
  899                                 else
  900                                         obj = NULL;
  901                                 for (i = 0; i < keg->uk_ppera; i++)
  902                                         vsetobj((vm_offset_t)mem +
  903                                             (i * PAGE_SIZE), obj);
  904                         }
  905                         if (keg->uk_flags & UMA_ZONE_OFFPAGE)
  906                                 zone_free_item(keg->uk_slabzone, slab,
  907                                     NULL, SKIP_NONE, ZFREE_STATFREE);
  908                         keg->uk_freef(mem, UMA_SLAB_SIZE * keg->uk_ppera,
  909                             flags);
  910                         KEG_LOCK(keg);
  911                         return (NULL);
  912                 }
  913         }
  914         KEG_LOCK(keg);
  915 
  916         if (keg->uk_flags & UMA_ZONE_HASH)
  917                 UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
  918 
  919         keg->uk_pages += keg->uk_ppera;
  920         keg->uk_free += keg->uk_ipers;
  921 
  922         return (slab);
  923 }
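
/*
 * Example (editor's addition): the us_freelist initialization above
 * threads an implicit linked list through the item indices.  With
 * uk_ipers == 4 it stores us_item values {1, 2, 3, 4} and sets
 * us_firstfree = 0, so slab_alloc_item() pops index 0 and then follows
 * us_item to 1, 2 and 3; us_freecount hits zero before the final link
 * is ever followed.
 */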
  924 
  925 /*
  926  * This function is intended to be used early on in place of page_alloc() so
  927  * that we may use the boot time page cache to satisfy allocations before
  928  * the VM is ready.
  929  */
  930 static void *
  931 startup_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
  932 {
  933         uma_keg_t keg;
  934         uma_slab_t tmps;
  935 
  936         keg = zone_first_keg(zone);
  937 
  938         /*
  939          * Check our small startup cache to see if it has pages remaining.
  940          */
  941         mtx_lock(&uma_boot_pages_mtx);
  942         if ((tmps = LIST_FIRST(&uma_boot_pages)) != NULL) {
  943                 LIST_REMOVE(tmps, us_link);
  944                 mtx_unlock(&uma_boot_pages_mtx);
  945                 *pflag = tmps->us_flags;
  946                 return (tmps->us_data);
  947         }
  948         mtx_unlock(&uma_boot_pages_mtx);
  949         if (booted == 0)
  950                 panic("UMA: Increase vm.boot_pages");
  951         /*
   952          * Now that we've booted, reset these users to their real allocator.
  953          */
  954 #ifdef UMA_MD_SMALL_ALLOC
  955         keg->uk_allocf = uma_small_alloc;
  956 #else
  957         keg->uk_allocf = page_alloc;
  958 #endif
   959         return (keg->uk_allocf(zone, bytes, pflag, wait));
  960 }
  961 
  962 /*
  963  * Allocates a number of pages from the system
  964  *
  965  * Arguments:
  966  *      bytes  The number of bytes requested
  967  *      wait  Shall we wait?
  968  *
  969  * Returns:
   970  *      A pointer to the allocated memory or possibly
  971  *      NULL if M_NOWAIT is set.
  972  */
  973 static void *
  974 page_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
  975 {
  976         void *p;        /* Returned page */
  977 
  978         *pflag = UMA_SLAB_KMEM;
  979         p = (void *) kmem_malloc(kmem_map, bytes, wait);
  980 
  981         return (p);
  982 }
  983 
  984 /*
  985  * Allocates a number of pages from within an object
  986  *
  987  * Arguments:
  988  *      bytes  The number of bytes requested
  989  *      wait   Shall we wait?
  990  *
  991  * Returns:
   992  *      A pointer to the allocated memory or possibly
  993  *      NULL if M_NOWAIT is set.
  994  */
  995 static void *
  996 obj_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
  997 {
  998         vm_object_t object;
  999         vm_offset_t retkva, zkva;
 1000         vm_page_t p;
 1001         int pages, startpages;
 1002         uma_keg_t keg;
 1003 
 1004         keg = zone_first_keg(zone);
 1005         object = keg->uk_obj;
 1006         retkva = 0;
 1007 
 1008         /*
 1009          * This looks a little weird since we're getting one page at a time.
 1010          */
 1011         VM_OBJECT_LOCK(object);
 1012         p = TAILQ_LAST(&object->memq, pglist);
 1013         pages = p != NULL ? p->pindex + 1 : 0;
 1014         startpages = pages;
 1015         zkva = keg->uk_kva + pages * PAGE_SIZE;
 1016         for (; bytes > 0; bytes -= PAGE_SIZE) {
 1017                 p = vm_page_alloc(object, pages,
 1018                     VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED);
 1019                 if (p == NULL) {
 1020                         if (pages != startpages)
 1021                                 pmap_qremove(retkva, pages - startpages);
 1022                         while (pages != startpages) {
 1023                                 pages--;
 1024                                 p = TAILQ_LAST(&object->memq, pglist);
 1025                                 vm_page_lock_queues();
 1026                                 vm_page_unwire(p, 0);
 1027                                 vm_page_free(p);
 1028                                 vm_page_unlock_queues();
 1029                         }
 1030                         retkva = 0;
 1031                         goto done;
 1032                 }
 1033                 pmap_qenter(zkva, &p, 1);
 1034                 if (retkva == 0)
 1035                         retkva = zkva;
 1036                 zkva += PAGE_SIZE;
 1037                 pages += 1;
 1038         }
 1039 done:
 1040         VM_OBJECT_UNLOCK(object);
 1041         *flags = UMA_SLAB_PRIV;
 1042 
 1043         return ((void *)retkva);
 1044 }
 1045 
 1046 /*
 1047  * Frees a number of pages to the system
 1048  *
 1049  * Arguments:
 1050  *      mem   A pointer to the memory to be freed
 1051  *      size  The size of the memory being freed
 1052  *      flags The original p->us_flags field
 1053  *
 1054  * Returns:
 1055  *      Nothing
 1056  */
 1057 static void
 1058 page_free(void *mem, int size, u_int8_t flags)
 1059 {
 1060         vm_map_t map;
 1061 
 1062         if (flags & UMA_SLAB_KMEM)
 1063                 map = kmem_map;
 1064         else if (flags & UMA_SLAB_KERNEL)
 1065                 map = kernel_map;
 1066         else
 1067                 panic("UMA: page_free used with invalid flags %d", flags);
 1068 
 1069         kmem_free(map, (vm_offset_t)mem, size);
 1070 }
 1071 
 1072 /*
 1073  * Zero fill initializer
 1074  *
 1075  * Arguments/Returns follow uma_init specifications
 1076  */
 1077 static int
 1078 zero_init(void *mem, int size, int flags)
 1079 {
 1080         bzero(mem, size);
 1081         return (0);
 1082 }
 1083 
 1084 /*
  1085  * Finish creating a small uma keg.  This calculates ipers and the keg size.
  1086  *
  1087  * Arguments
  1088  *      keg  The keg we should initialize
 1089  *
 1090  * Returns
 1091  *      Nothing
 1092  */
 1093 static void
 1094 keg_small_init(uma_keg_t keg)
 1095 {
 1096         u_int rsize;
 1097         u_int memused;
 1098         u_int wastedspace;
 1099         u_int shsize;
 1100 
 1101         KASSERT(keg != NULL, ("Keg is null in keg_small_init"));
 1102         rsize = keg->uk_size;
 1103 
 1104         if (rsize < UMA_SMALLEST_UNIT)
 1105                 rsize = UMA_SMALLEST_UNIT;
 1106         if (rsize & keg->uk_align)
 1107                 rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);
 1108 
 1109         keg->uk_rsize = rsize;
 1110         keg->uk_ppera = 1;
 1111 
 1112         if (keg->uk_flags & UMA_ZONE_REFCNT) {
 1113                 rsize += UMA_FRITMREF_SZ;       /* linkage & refcnt */
 1114                 shsize = sizeof(struct uma_slab_refcnt);
 1115         } else {
 1116                 rsize += UMA_FRITM_SZ;  /* Account for linkage */
 1117                 shsize = sizeof(struct uma_slab);
 1118         }
 1119 
 1120         keg->uk_ipers = (UMA_SLAB_SIZE - shsize) / rsize;
 1121         KASSERT(keg->uk_ipers != 0, ("keg_small_init: ipers is 0"));
 1122         memused = keg->uk_ipers * rsize + shsize;
 1123         wastedspace = UMA_SLAB_SIZE - memused;
 1124 
 1125         /*
  1126          * We can't do OFFPAGE if we're internal or if we've been
  1127          * asked not to go to the VM for buckets.  If we did, we could
  1128          * end up going to the VM (kmem_map) for slabs, which we must
  1129          * not do if we're UMA_ZFLAG_CACHEONLY as a result of
  1130          * UMA_ZONE_VM, which clearly forbids it.
 1131          */
 1132         if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
 1133             (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
 1134                 return;
 1135 
 1136         if ((wastedspace >= UMA_MAX_WASTE) &&
 1137             (keg->uk_ipers < (UMA_SLAB_SIZE / keg->uk_rsize))) {
 1138                 keg->uk_ipers = UMA_SLAB_SIZE / keg->uk_rsize;
 1139                 KASSERT(keg->uk_ipers <= 255,
 1140                     ("keg_small_init: keg->uk_ipers too high!"));
 1141 #ifdef UMA_DEBUG
 1142                 printf("UMA decided we need offpage slab headers for "
 1143                     "keg: %s, calculated wastedspace = %d, "
 1144                     "maximum wasted space allowed = %d, "
 1145                     "calculated ipers = %d, "
 1146                     "new wasted space = %d\n", keg->uk_name, wastedspace,
 1147                     UMA_MAX_WASTE, keg->uk_ipers,
 1148                     UMA_SLAB_SIZE - keg->uk_ipers * keg->uk_rsize);
 1149 #endif
 1150                 keg->uk_flags |= UMA_ZONE_OFFPAGE;
 1151                 if ((keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
 1152                         keg->uk_flags |= UMA_ZONE_HASH;
 1153         }
 1154 }
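
/*
 * Worked example (editor's addition), with hypothetical numbers: assume
 * UMA_SLAB_SIZE == 4096, uk_rsize == 128 after alignment, a 4-byte
 * free-list linkage (so rsize == 132) and shsize == 64.  Then
 * uk_ipers = (4096 - 64) / 132 == 30 and wastedspace = 4096 -
 * (30 * 132 + 64) == 72.  An OFFPAGE header would fit 4096 / 128 == 32
 * items, so the keg switches to OFFPAGE only if 72 >= UMA_MAX_WASTE.
 */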
 1155 
 1156 /*
  1157  * Finish creating a large (> UMA_SLAB_SIZE) uma keg.  Just give in and do
 1158  * OFFPAGE for now.  When I can allow for more dynamic slab sizes this will be
 1159  * more complicated.
 1160  *
 1161  * Arguments
 1162  *      keg  The keg we should initialize
 1163  *
 1164  * Returns
 1165  *      Nothing
 1166  */
 1167 static void
 1168 keg_large_init(uma_keg_t keg)
 1169 {
 1170         int pages;
 1171 
 1172         KASSERT(keg != NULL, ("Keg is null in keg_large_init"));
 1173         KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0,
 1174             ("keg_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY keg"));
 1175 
 1176         pages = keg->uk_size / UMA_SLAB_SIZE;
 1177 
 1178         /* Account for remainder */
 1179         if ((pages * UMA_SLAB_SIZE) < keg->uk_size)
 1180                 pages++;
 1181 
 1182         keg->uk_ppera = pages;
 1183         keg->uk_ipers = 1;
 1184 
 1185         keg->uk_flags |= UMA_ZONE_OFFPAGE;
 1186         if ((keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
 1187                 keg->uk_flags |= UMA_ZONE_HASH;
 1188 
 1189         keg->uk_rsize = keg->uk_size;
 1190 }
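
/*
 * Worked example (editor's addition), assuming UMA_SLAB_SIZE == 4096:
 * a 10000-byte item gives pages = 10000 / 4096 == 2 with a remainder,
 * so uk_ppera becomes 3 and each three-page slab holds exactly one item
 * (uk_ipers == 1), with the slab header kept OFFPAGE.
 */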
 1191 
 1192 static void
 1193 keg_cachespread_init(uma_keg_t keg)
 1194 {
 1195         int alignsize;
 1196         int trailer;
 1197         int pages;
 1198         int rsize;
 1199 
 1200         alignsize = keg->uk_align + 1;
 1201         rsize = keg->uk_size;
 1202         /*
 1203          * We want one item to start on every align boundary in a page.  To
 1204          * do this we will span pages.  We will also extend the item by the
 1205          * size of align if it is an even multiple of align.  Otherwise, it
 1206          * would fall on the same boundary every time.
 1207          */
 1208         if (rsize & keg->uk_align)
 1209                 rsize = (rsize & ~keg->uk_align) + alignsize;
 1210         if ((rsize & alignsize) == 0)
 1211                 rsize += alignsize;
 1212         trailer = rsize - keg->uk_size;
 1213         pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE;
 1214         pages = MIN(pages, (128 * 1024) / PAGE_SIZE);
 1215         keg->uk_rsize = rsize;
 1216         keg->uk_ppera = pages;
 1217         keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
 1218         keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
 1219         KASSERT(keg->uk_ipers <= uma_max_ipers,
 1220             ("keg_small_init: keg->uk_ipers too high(%d) increase max_ipers",
 1221             keg->uk_ipers));
 1222 }
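
/*
 * Worked example (editor's addition), assuming PAGE_SIZE == 4096 and
 * 64-byte cache lines (uk_align == 63, alignsize == 64): for
 * uk_size == 128, rsize is already aligned but is an even multiple of
 * 64, so it grows to 192 (trailer == 64).  Then
 * pages = (192 * 64) / 4096 == 3 and
 * uk_ipers = (3 * 4096 + 64) / 192 == 64.  Items start 192 bytes
 * apart, and since gcd(192, 4096) == 64 their starting offsets cycle
 * through all 64 cache-line boundaries of a page, spreading the zone's
 * items evenly across the cache.
 */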
 1223 
 1224 /*
 1225  * Keg header ctor.  This initializes all fields, locks, etc.  And inserts
 1226  * the keg onto the global keg list.
 1227  *
 1228  * Arguments/Returns follow uma_ctor specifications
 1229  *      udata  Actually uma_kctor_args
 1230  */
 1231 static int
 1232 keg_ctor(void *mem, int size, void *udata, int flags)
 1233 {
 1234         struct uma_kctor_args *arg = udata;
 1235         uma_keg_t keg = mem;
 1236         uma_zone_t zone;
 1237 
 1238         bzero(keg, size);
 1239         keg->uk_size = arg->size;
 1240         keg->uk_init = arg->uminit;
 1241         keg->uk_fini = arg->fini;
 1242         keg->uk_align = arg->align;
 1243         keg->uk_free = 0;
 1244         keg->uk_pages = 0;
 1245         keg->uk_flags = arg->flags;
 1246         keg->uk_allocf = page_alloc;
 1247         keg->uk_freef = page_free;
 1248         keg->uk_recurse = 0;
 1249         keg->uk_slabzone = NULL;
 1250 
 1251         /*
 1252          * The master zone is passed to us at keg-creation time.
 1253          */
 1254         zone = arg->zone;
 1255         keg->uk_name = zone->uz_name;
 1256 
 1257         if (arg->flags & UMA_ZONE_VM)
 1258                 keg->uk_flags |= UMA_ZFLAG_CACHEONLY;
 1259 
 1260         if (arg->flags & UMA_ZONE_ZINIT)
 1261                 keg->uk_init = zero_init;
 1262 
 1263         if (arg->flags & UMA_ZONE_REFCNT || arg->flags & UMA_ZONE_MALLOC)
 1264                 keg->uk_flags |= UMA_ZONE_VTOSLAB;
 1265 
 1266         /*
 1267          * The +UMA_FRITM_SZ added to uk_size is to account for the
 1268          * linkage that is added to the size in keg_small_init().  If
 1269          * we don't account for this here then we may end up in
 1270          * keg_small_init() with a calculated 'ipers' of 0.
 1271          */
 1272         if (keg->uk_flags & UMA_ZONE_REFCNT) {
 1273                 if (keg->uk_flags & UMA_ZONE_CACHESPREAD)
 1274                         keg_cachespread_init(keg);
 1275                 else if ((keg->uk_size+UMA_FRITMREF_SZ) >
 1276                     (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt)))
 1277                         keg_large_init(keg);
 1278                 else
 1279                         keg_small_init(keg);
 1280         } else {
 1281                 if (keg->uk_flags & UMA_ZONE_CACHESPREAD)
 1282                         keg_cachespread_init(keg);
 1283                 else if ((keg->uk_size+UMA_FRITM_SZ) >
 1284                     (UMA_SLAB_SIZE - sizeof(struct uma_slab)))
 1285                         keg_large_init(keg);
 1286                 else
 1287                         keg_small_init(keg);
 1288         }
 1289 
 1290         if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
 1291                 if (keg->uk_flags & UMA_ZONE_REFCNT)
 1292                         keg->uk_slabzone = slabrefzone;
 1293                 else
 1294                         keg->uk_slabzone = slabzone;
 1295         }
 1296 
 1297         /*
 1298          * If we haven't booted yet we need allocations to go through the
 1299          * startup cache until the vm is ready.
 1300          */
 1301         if (keg->uk_ppera == 1) {
 1302 #ifdef UMA_MD_SMALL_ALLOC
 1303                 keg->uk_allocf = uma_small_alloc;
 1304                 keg->uk_freef = uma_small_free;
 1305 #endif
 1306                 if (booted == 0)
 1307                         keg->uk_allocf = startup_alloc;
 1308         }
 1309 
 1310         /*
 1311          * Initialize keg's lock (shared among zones).
 1312          */
 1313         if (arg->flags & UMA_ZONE_MTXCLASS)
 1314                 KEG_LOCK_INIT(keg, 1);
 1315         else
 1316                 KEG_LOCK_INIT(keg, 0);
 1317 
 1318         /*
 1319          * If we're putting the slab header in the actual page we need to
 1320          * figure out where in each page it goes.  This calculates a right
 1321          * justified offset into the memory on an ALIGN_PTR boundary.
 1322          */
 1323         if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
 1324                 u_int totsize;
 1325 
 1326                 /* Size of the slab struct and free list */
 1327                 if (keg->uk_flags & UMA_ZONE_REFCNT)
 1328                         totsize = sizeof(struct uma_slab_refcnt) +
 1329                             keg->uk_ipers * UMA_FRITMREF_SZ;
 1330                 else
 1331                         totsize = sizeof(struct uma_slab) +
 1332                             keg->uk_ipers * UMA_FRITM_SZ;
 1333 
 1334                 if (totsize & UMA_ALIGN_PTR)
 1335                         totsize = (totsize & ~UMA_ALIGN_PTR) +
 1336                             (UMA_ALIGN_PTR + 1);
 1337                 keg->uk_pgoff = UMA_SLAB_SIZE - totsize;
 1338 
 1339                 if (keg->uk_flags & UMA_ZONE_REFCNT)
 1340                         totsize = keg->uk_pgoff + sizeof(struct uma_slab_refcnt)
 1341                             + keg->uk_ipers * UMA_FRITMREF_SZ;
 1342                 else
 1343                         totsize = keg->uk_pgoff + sizeof(struct uma_slab)
 1344                             + keg->uk_ipers * UMA_FRITM_SZ;
 1345 
 1346                 /*
  1347                  * The only way the following is possible is if our
  1348                  * UMA_ALIGN_PTR adjustments have made us bigger than
 1349                  * UMA_SLAB_SIZE.  I haven't checked whether this is
 1350                  * mathematically possible for all cases, so we make
 1351                  * sure here anyway.
 1352                  */
 1353                 if (totsize > UMA_SLAB_SIZE) {
 1354                         printf("zone %s ipers %d rsize %d size %d\n",
 1355                             zone->uz_name, keg->uk_ipers, keg->uk_rsize,
 1356                             keg->uk_size);
 1357                         panic("UMA slab won't fit.");
 1358                 }
 1359         }
 1360 
 1361         if (keg->uk_flags & UMA_ZONE_HASH)
 1362                 hash_alloc(&keg->uk_hash);
 1363 
 1364 #ifdef UMA_DEBUG
 1365         printf("UMA: %s(%p) size %d(%d) flags %d ipers %d ppera %d out %d free %d\n",
 1366             zone->uz_name, zone, keg->uk_size, keg->uk_rsize, keg->uk_flags,
 1367             keg->uk_ipers, keg->uk_ppera,
 1368             (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free);
 1369 #endif
 1370 
 1371         LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
 1372 
 1373         mtx_lock(&uma_mtx);
 1374         LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
 1375         mtx_unlock(&uma_mtx);
 1376         return (0);
 1377 }
 1378 
 1379 /*
 1380  * Zone header ctor.  This initializes all fields, locks, etc.
 1381  *
 1382  * Arguments/Returns follow uma_ctor specifications
 1383  *      udata  Actually uma_zctor_args
 1384  */
 1385 static int
 1386 zone_ctor(void *mem, int size, void *udata, int flags)
 1387 {
 1388         struct uma_zctor_args *arg = udata;
 1389         uma_zone_t zone = mem;
 1390         uma_zone_t z;
 1391         uma_keg_t keg;
 1392 
 1393         bzero(zone, size);
 1394         zone->uz_name = arg->name;
 1395         zone->uz_ctor = arg->ctor;
 1396         zone->uz_dtor = arg->dtor;
 1397         zone->uz_slab = zone_fetch_slab;
 1398         zone->uz_init = NULL;
 1399         zone->uz_fini = NULL;
 1400         zone->uz_allocs = 0;
 1401         zone->uz_frees = 0;
 1402         zone->uz_fails = 0;
 1403         zone->uz_fills = zone->uz_count = 0;
 1404         zone->uz_flags = 0;
 1405         keg = arg->keg;
 1406 
 1407         if (arg->flags & UMA_ZONE_SECONDARY) {
 1408                 KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
 1409                 zone->uz_init = arg->uminit;
 1410                 zone->uz_fini = arg->fini;
 1411                 zone->uz_lock = &keg->uk_lock;
 1412                 zone->uz_flags |= UMA_ZONE_SECONDARY;
 1413                 mtx_lock(&uma_mtx);
 1414                 ZONE_LOCK(zone);
 1415                 LIST_FOREACH(z, &keg->uk_zones, uz_link) {
 1416                         if (LIST_NEXT(z, uz_link) == NULL) {
 1417                                 LIST_INSERT_AFTER(z, zone, uz_link);
 1418                                 break;
 1419                         }
 1420                 }
 1421                 ZONE_UNLOCK(zone);
 1422                 mtx_unlock(&uma_mtx);
 1423         } else if (keg == NULL) {
 1424                 if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
 1425                     arg->align, arg->flags)) == NULL)
 1426                         return (ENOMEM);
 1427         } else {
 1428                 struct uma_kctor_args karg;
 1429                 int error;
 1430 
 1431                 /* We should only be here from uma_startup() */
 1432                 karg.size = arg->size;
 1433                 karg.uminit = arg->uminit;
 1434                 karg.fini = arg->fini;
 1435                 karg.align = arg->align;
 1436                 karg.flags = arg->flags;
 1437                 karg.zone = zone;
 1438                 error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
 1439                     flags);
 1440                 if (error)
 1441                         return (error);
 1442         }
 1443         /*
 1444          * Link in the first keg.
 1445          */
 1446         zone->uz_klink.kl_keg = keg;
 1447         LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link);
 1448         zone->uz_lock = &keg->uk_lock;
 1449         zone->uz_size = keg->uk_size;
 1450         zone->uz_flags |= (keg->uk_flags &
 1451             (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));
 1452 
 1453         /*
 1454          * Some internal zones don't have room allocated for the per cpu
 1455          * caches.  If we're internal, bail out here.
 1456          */
 1457         if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
 1458                 KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
 1459                     ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
 1460                 return (0);
 1461         }
 1462 
 1463         if (keg->uk_flags & UMA_ZONE_MAXBUCKET)
 1464                 zone->uz_count = BUCKET_MAX;
 1465         else if (keg->uk_ipers <= BUCKET_MAX)
 1466                 zone->uz_count = keg->uk_ipers;
 1467         else
 1468                 zone->uz_count = BUCKET_MAX;
 1469         return (0);
 1470 }
 1471 
 1472 /*
 1473  * Keg header dtor.  This frees all data, destroys locks, frees the hash
 1474  * table and removes the keg from the global list.
 1475  *
 1476  * Arguments/Returns follow uma_dtor specifications
 1477  *      udata  unused
 1478  */
 1479 static void
 1480 keg_dtor(void *arg, int size, void *udata)
 1481 {
 1482         uma_keg_t keg;
 1483 
 1484         keg = (uma_keg_t)arg;
 1485         KEG_LOCK(keg);
 1486         if (keg->uk_free != 0) {
 1487                 printf("Freed UMA keg was not empty (%d items). "
 1488                     "Lost %d pages of memory.\n",
 1489                     keg->uk_free, keg->uk_pages);
 1490         }
 1491         KEG_UNLOCK(keg);
 1492 
 1493         hash_free(&keg->uk_hash);
 1494 
 1495         KEG_LOCK_FINI(keg);
 1496 }
 1497 
 1498 /*
 1499  * Zone header dtor.
 1500  *
 1501  * Arguments/Returns follow uma_dtor specifications
 1502  *      udata  unused
 1503  */
 1504 static void
 1505 zone_dtor(void *arg, int size, void *udata)
 1506 {
 1507         uma_klink_t klink;
 1508         uma_zone_t zone;
 1509         uma_keg_t keg;
 1510 
 1511         zone = (uma_zone_t)arg;
 1512         keg = zone_first_keg(zone);
 1513 
 1514         if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
 1515                 cache_drain(zone);
 1516 
 1517         mtx_lock(&uma_mtx);
 1518         LIST_REMOVE(zone, uz_link);
 1519         mtx_unlock(&uma_mtx);
 1520         /*
 1521          * XXX: there is a race here: the zone can be drained,
 1522          * but then the zone lock is released and the zone is
 1523          * refilled again before we remove it.  We don't care
 1524          * about that for now.
 1525          */
 1526         zone_drain_wait(zone, M_WAITOK);
 1527         /*
 1528          * Unlink all of our kegs.
 1529          */
 1530         while ((klink = LIST_FIRST(&zone->uz_kegs)) != NULL) {
 1531                 klink->kl_keg = NULL;
 1532                 LIST_REMOVE(klink, kl_link);
 1533                 if (klink == &zone->uz_klink)
 1534                         continue;
 1535                 free(klink, M_TEMP);
 1536         }
 1537         /*
 1538          * We only destroy kegs from non-secondary zones.
 1539          */
 1540         if ((zone->uz_flags & UMA_ZONE_SECONDARY) == 0)  {
 1541                 mtx_lock(&uma_mtx);
 1542                 LIST_REMOVE(keg, uk_link);
 1543                 mtx_unlock(&uma_mtx);
 1544                 zone_free_item(kegs, keg, NULL, SKIP_NONE,
 1545                     ZFREE_STATFREE);
 1546         }
 1547 }
 1548 
 1549 /*
 1550  * Traverses every zone in the system and calls a callback
 1551  *
 1552  * Arguments:
 1553  *      zfunc  A pointer to a function which accepts a zone
 1554  *              as an argument.
 1555  *
 1556  * Returns:
 1557  *      Nothing
 1558  */
 1559 static void
 1560 zone_foreach(void (*zfunc)(uma_zone_t))
 1561 {
 1562         uma_keg_t keg;
 1563         uma_zone_t zone;
 1564 
 1565         mtx_lock(&uma_mtx);
 1566         LIST_FOREACH(keg, &uma_kegs, uk_link) {
 1567                 LIST_FOREACH(zone, &keg->uk_zones, uz_link)
 1568                         zfunc(zone);
 1569         }
 1570         mtx_unlock(&uma_mtx);
 1571 }
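
/*
 * Example (illustrative; not part of the original file): a minimal
 * zone_foreach() callback.  uma_reclaim() below uses this interface
 * with zone_drain; the counter here is hypothetical.
 */
static int zone_total;

static void
zone_count_one(uma_zone_t zone)
{

	zone_total++;		/* called once per zone, under uma_mtx */
}

/* zone_foreach(zone_count_one) would tally every zone in the system. */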
 1572 
 1573 /* Public functions */
 1574 /* See uma.h */
 1575 void
 1576 uma_startup(void *bootmem, int boot_pages)
 1577 {
 1578         struct uma_zctor_args args;
 1579         uma_slab_t slab;
 1580         u_int slabsize;
 1581         u_int objsize, totsize, wsize;
 1582         int i;
 1583 
 1584 #ifdef UMA_DEBUG
 1585         printf("Creating uma keg headers zone and keg.\n");
 1586 #endif
 1587         mtx_init(&uma_mtx, "UMA lock", NULL, MTX_DEF);
 1588 
 1589         /*
 1590          * Figure out the maximum number of items-per-slab we'll have if
 1591          * we're using the OFFPAGE slab header to track free items, given
 1592          * all possible object sizes and the maximum desired wastage
 1593          * (UMA_MAX_WASTE).
 1594          *
 1595          * We iterate until we find an object size for
 1596          * which the calculated wastage in keg_small_init() will be
 1597          * enough to warrant OFFPAGE.  Since wasted space versus objsize
 1598          * is an overall increasing see-saw function, we find the smallest
 1599          * objsize such that the wastage is always acceptable for objects
 1600          * with that objsize or smaller.  Since a smaller objsize always
 1601          * generates a larger possible uma_max_ipers, we use this computed
 1602          * objsize to calculate the largest ipers possible.  Since the
 1603          * ipers calculated for OFFPAGE slab headers is always larger than
 1604          * the ipers initially calculated in keg_small_init(), we use
 1605          * the former's equation (UMA_SLAB_SIZE / keg->uk_rsize) to
 1606          * obtain the maximum ipers possible for offpage slab headers.
 1607          *
 1608          * It should be noted that ipers versus objsize is an inversely
 1609          * proportional function which drops off rather quickly; as
 1610          * long as our UMA_MAX_WASTE is such that the objsize we calculate
 1611          * falls into the portion of the inverse relation AFTER the steep
 1612          * falloff, uma_max_ipers shouldn't be too high (~10 on i386).
 1613          *
 1614          * Note that we have 8 bits (1 byte) to use as a freelist index
 1615          * inside the actual slab header itself and this is enough to
 1616          * accommodate us.  In the worst case, a UMA_SMALLEST_UNIT-sized
 1617          * object with an offpage slab header would have ipers =
 1618          * UMA_SLAB_SIZE / UMA_SMALLEST_UNIT (currently = 256), which is
 1619          * 1 greater than what our byte-sized freelist index can
 1620          * accommodate, but we know that this situation never occurs:
 1621          * for UMA_SMALLEST_UNIT-sized objects we will never calculate
 1622          * that we need to go to offpage slab headers.  Or, if we do,
 1623          * we trap that condition below and panic in the INVARIANTS case.
 1624          */
 1625         wsize = UMA_SLAB_SIZE - sizeof(struct uma_slab) - UMA_MAX_WASTE;
 1626         totsize = wsize;
 1627         objsize = UMA_SMALLEST_UNIT;
 1628         while (totsize >= wsize) {
 1629                 totsize = (UMA_SLAB_SIZE - sizeof(struct uma_slab)) /
 1630                     (objsize + UMA_FRITM_SZ);
 1631                 totsize *= (UMA_FRITM_SZ + objsize);
 1632                 objsize++;
 1633         }
 1634         if (objsize > UMA_SMALLEST_UNIT)
 1635                 objsize--;
 1636         uma_max_ipers = MAX(UMA_SLAB_SIZE / objsize, 64);
 1637 
 1638         wsize = UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt) - UMA_MAX_WASTE;
 1639         totsize = wsize;
 1640         objsize = UMA_SMALLEST_UNIT;
 1641         while (totsize >= wsize) {
 1642                 totsize = (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt)) /
 1643                     (objsize + UMA_FRITMREF_SZ);
 1644                 totsize *= (UMA_FRITMREF_SZ + objsize);
 1645                 objsize++;
 1646         }
 1647         if (objsize > UMA_SMALLEST_UNIT)
 1648                 objsize--;
 1649         uma_max_ipers_ref = MAX(UMA_SLAB_SIZE / objsize, 64);
 1650 
 1651         KASSERT((uma_max_ipers_ref <= 255) && (uma_max_ipers <= 255),
 1652             ("uma_startup: calculated uma_max_ipers values too large!"));
 1653 
 1654 #ifdef UMA_DEBUG
 1655         printf("Calculated uma_max_ipers (for OFFPAGE) is %d\n", uma_max_ipers);
 1656         printf("Calculated uma_max_ipers_slab (for OFFPAGE) is %d\n",
 1657             uma_max_ipers_ref);
 1658 #endif
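
/*
 * Example (illustrative; not part of the original file): the fixed-point
 * iteration above in miniature, as a standalone program.  The constants
 * are stand-ins chosen for the sketch -- the real UMA_SLAB_SIZE,
 * UMA_MAX_WASTE, UMA_FRITM_SZ, and sizeof(struct uma_slab) are fixed by
 * the kernel configuration.
 */
#include <stdio.h>

#define SLAB_SIZE	4096	/* stand-in for UMA_SLAB_SIZE */
#define SLAB_HDR	32	/* assumed sizeof(struct uma_slab) */
#define MAX_WASTE	256	/* assumed UMA_MAX_WASTE */
#define FRITM_SZ	1	/* assumed freelist-entry size */
#define SMALLEST_UNIT	16	/* stand-in for UMA_SMALLEST_UNIT */

int
main(void)
{
	unsigned wsize, totsize, objsize;

	wsize = SLAB_SIZE - SLAB_HDR - MAX_WASTE;
	totsize = wsize;
	objsize = SMALLEST_UNIT;
	while (totsize >= wsize) {
		/* Bytes actually used when items plus indices must fit. */
		totsize = (SLAB_SIZE - SLAB_HDR) / (objsize + FRITM_SZ);
		totsize *= (objsize + FRITM_SZ);
		objsize++;
	}
	if (objsize > SMALLEST_UNIT)
		objsize--;
	printf("objsize at the wastage boundary: %u; max ipers: %u\n",
	    objsize, SLAB_SIZE / objsize);
	return (0);
}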
 1659 
 1660         /* "manually" create the initial zone */
 1661         args.name = "UMA Kegs";
 1662         args.size = sizeof(struct uma_keg);
 1663         args.ctor = keg_ctor;
 1664         args.dtor = keg_dtor;
 1665         args.uminit = zero_init;
 1666         args.fini = NULL;
 1667         args.keg = &masterkeg;
 1668         args.align = 32 - 1;
 1669         args.flags = UMA_ZFLAG_INTERNAL;
 1670         /* The initial zone has no per-CPU queues so it's smaller */
 1671         zone_ctor(kegs, sizeof(struct uma_zone), &args, M_WAITOK);
 1672 
 1673 #ifdef UMA_DEBUG
 1674         printf("Filling boot free list.\n");
 1675 #endif
 1676         for (i = 0; i < boot_pages; i++) {
 1677                 slab = (uma_slab_t)((u_int8_t *)bootmem + (i * UMA_SLAB_SIZE));
 1678                 slab->us_data = (u_int8_t *)slab;
 1679                 slab->us_flags = UMA_SLAB_BOOT;
 1680                 LIST_INSERT_HEAD(&uma_boot_pages, slab, us_link);
 1681         }
 1682         mtx_init(&uma_boot_pages_mtx, "UMA boot pages", NULL, MTX_DEF);
 1683 
 1684 #ifdef UMA_DEBUG
 1685         printf("Creating uma zone headers zone and keg.\n");
 1686 #endif
 1687         args.name = "UMA Zones";
 1688         args.size = sizeof(struct uma_zone) +
 1689             (sizeof(struct uma_cache) * (mp_maxid + 1));
 1690         args.ctor = zone_ctor;
 1691         args.dtor = zone_dtor;
 1692         args.uminit = zero_init;
 1693         args.fini = NULL;
 1694         args.keg = NULL;
 1695         args.align = 32 - 1;
 1696         args.flags = UMA_ZFLAG_INTERNAL;
 1697         /* The initial zone has no per-CPU queues so it's smaller */
 1698         zone_ctor(zones, sizeof(struct uma_zone), &args, M_WAITOK);
 1699 
 1703 #ifdef UMA_DEBUG
 1704         printf("Creating slab and hash zones.\n");
 1705 #endif
 1706 
 1707         /*
 1708          * This is the max number of free list items we'll have with
 1709          * offpage slabs.
 1710          */
 1711         slabsize = uma_max_ipers * UMA_FRITM_SZ;
 1712         slabsize += sizeof(struct uma_slab);
 1713 
 1714         /* Now make a zone for slab headers */
 1715         slabzone = uma_zcreate("UMA Slabs",
 1716                                 slabsize,
 1717                                 NULL, NULL, NULL, NULL,
 1718                                 UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
 1719 
 1720         /*
 1721          * We also create a zone for the bigger slabs with reference
 1722          * counts in them, to accommodate UMA_ZONE_REFCNT zones.
 1723          */
 1724         slabsize = uma_max_ipers_ref * UMA_FRITMREF_SZ;
 1725         slabsize += sizeof(struct uma_slab_refcnt);
 1726         slabrefzone = uma_zcreate("UMA RCntSlabs",
 1727                                   slabsize,
 1728                                   NULL, NULL, NULL, NULL,
 1729                                   UMA_ALIGN_PTR,
 1730                                   UMA_ZFLAG_INTERNAL);
 1731 
 1732         hashzone = uma_zcreate("UMA Hash",
 1733             sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
 1734             NULL, NULL, NULL, NULL,
 1735             UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
 1736 
 1737         bucket_init();
 1738 
 1739 #if defined(UMA_MD_SMALL_ALLOC) && !defined(UMA_MD_SMALL_ALLOC_NEEDS_VM)
 1740         booted = 1;
 1741 #endif
 1742 
 1743 #ifdef UMA_DEBUG
 1744         printf("UMA startup complete.\n");
 1745 #endif
 1746 }
 1747 
 1748 /* see uma.h */
 1749 void
 1750 uma_startup2(void)
 1751 {
 1752         booted = 1;
 1753         bucket_enable();
 1754 #ifdef UMA_DEBUG
 1755         printf("UMA startup2 complete.\n");
 1756 #endif
 1757 }
 1758 
 1759 /*
 1760  * Initialize our callout handle
 1761  *
 1762  */
 1763 
 1764 static void
 1765 uma_startup3(void)
 1766 {
 1767 #ifdef UMA_DEBUG
 1768         printf("Starting callout.\n");
 1769 #endif
 1770         callout_init(&uma_callout, CALLOUT_MPSAFE);
 1771         callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
 1772 #ifdef UMA_DEBUG
 1773         printf("UMA startup3 complete.\n");
 1774 #endif
 1775 }
 1776 
 1777 static uma_keg_t
 1778 uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
 1779                 int align, u_int32_t flags)
 1780 {
 1781         struct uma_kctor_args args;
 1782 
 1783         args.size = size;
 1784         args.uminit = uminit;
 1785         args.fini = fini;
 1786         args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
 1787         args.flags = flags;
 1788         args.zone = zone;
 1789         return (zone_alloc_item(kegs, &args, M_WAITOK));
 1790 }
 1791 
 1792 /* See uma.h */
 1793 void
 1794 uma_set_align(int align)
 1795 {
 1796 
 1797         if (align != UMA_ALIGN_CACHE)
 1798                 uma_align_cache = align;
 1799 }
 1800 
 1801 /* See uma.h */
 1802 uma_zone_t
 1803 uma_zcreate(char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
 1804                 uma_init uminit, uma_fini fini, int align, u_int32_t flags)
 1805 
 1806 {
 1807         struct uma_zctor_args args;
 1808 
 1809         /* This stuff is essential for the zone ctor */
 1810         args.name = name;
 1811         args.size = size;
 1812         args.ctor = ctor;
 1813         args.dtor = dtor;
 1814         args.uminit = uminit;
 1815         args.fini = fini;
 1816         args.align = align;
 1817         args.flags = flags;
 1818         args.keg = NULL;
 1819 
 1820         return (zone_alloc_item(zones, &args, M_WAITOK));
 1821 }
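
/*
 * Example (illustrative; not part of the original file): typical zone
 * creation and use by a subsystem.  "foo", foozone, and foo_ctor are
 * hypothetical names.
 */
static uma_zone_t foozone;

struct foo {
	int	f_state;
};

static int
foo_ctor(void *mem, int size, void *arg, int flags)
{
	struct foo *f = mem;

	f->f_state = 0;			/* runs on every allocation */
	return (0);
}

static void
foo_example(void)
{
	struct foo *f;

	foozone = uma_zcreate("foo", sizeof(struct foo), foo_ctor, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
	f = uma_zalloc(foozone, M_WAITOK);
	/* ... use f ... */
	uma_zfree(foozone, f);
	uma_zdestroy(foozone);
}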
 1822 
 1823 /* See uma.h */
 1824 uma_zone_t
 1825 uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
 1826                     uma_init zinit, uma_fini zfini, uma_zone_t master)
 1827 {
 1828         struct uma_zctor_args args;
 1829         uma_keg_t keg;
 1830 
 1831         keg = zone_first_keg(master);
 1832         args.name = name;
 1833         args.size = keg->uk_size;
 1834         args.ctor = ctor;
 1835         args.dtor = dtor;
 1836         args.uminit = zinit;
 1837         args.fini = zfini;
 1838         args.align = keg->uk_align;
 1839         args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
 1840         args.keg = keg;
 1841 
 1842         /* XXX Attaches only one keg of potentially many. */
 1843         return (zone_alloc_item(zones, &args, M_WAITOK));
 1844 }
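
/*
 * Example (illustrative; not part of the original file): a secondary
 * zone layered over a master so both share one keg of equally-sized
 * items but apply different ctors/inits.  The names are hypothetical;
 * the mbuf packet zone is the classic in-tree user of this interface.
 */
static uma_zone_t rawzone, cookedzone;

static void
zsecond_example(void)
{

	rawzone = uma_zcreate("raw", 256, NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	cookedzone = uma_zsecond_create("cooked", NULL, NULL, NULL, NULL,
	    rawzone);
}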
 1845 
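/*
 * Lock a pair of zones in a single global order (by address) so that
 * concurrent callers cannot deadlock; MTX_DUPOK tells WITNESS that
 * holding two locks of the same class here is intentional.
 */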
 1846 static void
 1847 zone_lock_pair(uma_zone_t a, uma_zone_t b)
 1848 {
 1849         if (a < b) {
 1850                 ZONE_LOCK(a);
 1851                 mtx_lock_flags(b->uz_lock, MTX_DUPOK);
 1852         } else {
 1853                 ZONE_LOCK(b);
 1854                 mtx_lock_flags(a->uz_lock, MTX_DUPOK);
 1855         }
 1856 }
 1857 
 1858 static void
 1859 zone_unlock_pair(uma_zone_t a, uma_zone_t b)
 1860 {
 1861 
 1862         ZONE_UNLOCK(a);
 1863         ZONE_UNLOCK(b);
 1864 }
 1865 
 1866 int
 1867 uma_zsecond_add(uma_zone_t zone, uma_zone_t master)
 1868 {
 1869         uma_klink_t klink;
 1870         uma_klink_t kl;
 1871         int error;
 1872 
 1873         error = 0;
 1874         klink = malloc(sizeof(*klink), M_TEMP, M_WAITOK | M_ZERO);
 1875 
 1876         zone_lock_pair(zone, master);
 1877         /*
 1878          * zone must use vtoslab() to resolve objects and must already be
 1879          * a secondary.
 1880          */
 1881         if ((zone->uz_flags & (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY))
 1882             != (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) {
 1883                 error = EINVAL;
 1884                 goto out;
 1885         }
 1886         /*
 1887          * The new master must also use vtoslab().
 1888          */
 1889         if ((master->uz_flags & UMA_ZONE_VTOSLAB) != UMA_ZONE_VTOSLAB) {
 1890                 error = EINVAL;
 1891                 goto out;
 1892         }
 1893         /*
 1894          * Both must either be refcnt, or not be refcnt.
 1895          */
 1896         if ((zone->uz_flags & UMA_ZONE_REFCNT) !=
 1897             (master->uz_flags & UMA_ZONE_REFCNT)) {
 1898                 error = EINVAL;
 1899                 goto out;
 1900         }
 1901         /*
 1902          * The underlying object must be the same size.  rsize
 1903          * may be different.
 1904          */
 1905         if (master->uz_size != zone->uz_size) {
 1906                 error = E2BIG;
 1907                 goto out;
 1908         }
 1909         /*
 1910          * Put it at the end of the list.
 1911          */
 1912         klink->kl_keg = zone_first_keg(master);
 1913         LIST_FOREACH(kl, &zone->uz_kegs, kl_link) {
 1914                 if (LIST_NEXT(kl, kl_link) == NULL) {
 1915                         LIST_INSERT_AFTER(kl, klink, kl_link);
 1916                         break;
 1917                 }
 1918         }
 1919         klink = NULL;
 1920         zone->uz_flags |= UMA_ZFLAG_MULTI;
 1921         zone->uz_slab = zone_fetch_slab_multi;
 1922 
 1923 out:
 1924         zone_unlock_pair(zone, master);
 1925         if (klink != NULL)
 1926                 free(klink, M_TEMP);
 1927 
 1928         return (error);
 1929 }
 1930 
 1931 
 1932 /* See uma.h */
 1933 void
 1934 uma_zdestroy(uma_zone_t zone)
 1935 {
 1936 
 1937         zone_free_item(zones, zone, NULL, SKIP_NONE, ZFREE_STATFREE);
 1938 }
 1939 
 1940 /* See uma.h */
 1941 void *
 1942 uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
 1943 {
 1944         void *item;
 1945         uma_cache_t cache;
 1946         uma_bucket_t bucket;
 1947         int cpu;
 1948 
 1949         /* This is the fast path allocation */
 1950 #ifdef UMA_DEBUG_ALLOC_1
 1951         printf("Allocating one item from %s(%p)\n", zone->uz_name, zone);
 1952 #endif
 1953         CTR3(KTR_UMA, "uma_zalloc_arg thread %x zone %s flags %d", curthread,
 1954             zone->uz_name, flags);
 1955 
 1956         if (flags & M_WAITOK) {
 1957                 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
 1958                     "uma_zalloc_arg: zone \"%s\"", zone->uz_name);
 1959         }
 1960 
 1961         /*
 1962          * If possible, allocate from the per-CPU cache.  There are two
 1963          * requirements for safe access to the per-CPU cache: (1) the thread
 1964          * accessing the cache must not be preempted or yield during access,
 1965          * and (2) the thread must not migrate CPUs without switching which
 1966          * cache it accesses.  We rely on a critical section to prevent
 1967          * preemption and migration.  We release the critical section in
 1968          * order to acquire the zone mutex if we are unable to allocate from
 1969          * the current cache; when we re-acquire the critical section, we
 1970          * must detect and handle migration if it has occurred.
 1971          */
 1972 zalloc_restart:
 1973         critical_enter();
 1974         cpu = curcpu;
 1975         cache = &zone->uz_cpu[cpu];
 1976 
 1977 zalloc_start:
 1978         bucket = cache->uc_allocbucket;
 1979 
 1980         if (bucket) {
 1981                 if (bucket->ub_cnt > 0) {
 1982                         bucket->ub_cnt--;
 1983                         item = bucket->ub_bucket[bucket->ub_cnt];
 1984 #ifdef INVARIANTS
 1985                         bucket->ub_bucket[bucket->ub_cnt] = NULL;
 1986 #endif
 1987                         KASSERT(item != NULL,
 1988                             ("uma_zalloc: Bucket pointer mangled."));
 1989                         cache->uc_allocs++;
 1990                         critical_exit();
 1991 #ifdef INVARIANTS
 1992                         ZONE_LOCK(zone);
 1993                         uma_dbg_alloc(zone, NULL, item);
 1994                         ZONE_UNLOCK(zone);
 1995 #endif
 1996                         if (zone->uz_ctor != NULL) {
 1997                                 if (zone->uz_ctor(item, zone->uz_size,
 1998                                     udata, flags) != 0) {
 1999                                         zone_free_item(zone, item, udata,
 2000                                             SKIP_DTOR, ZFREE_STATFAIL |
 2001                                             ZFREE_STATFREE);
 2002                                         return (NULL);
 2003                                 }
 2004                         }
 2005                         if (flags & M_ZERO)
 2006                                 bzero(item, zone->uz_size);
 2007                         return (item);
 2008                 } else if (cache->uc_freebucket) {
 2009                         /*
 2010                          * We have run out of items in our allocbucket.
 2011                          * See if we can switch with our free bucket.
 2012                          */
 2013                         if (cache->uc_freebucket->ub_cnt > 0) {
 2014 #ifdef UMA_DEBUG_ALLOC
 2015                                 printf("uma_zalloc: Swapping empty with"
 2016                                     " alloc.\n");
 2017 #endif
 2018                                 bucket = cache->uc_freebucket;
 2019                                 cache->uc_freebucket = cache->uc_allocbucket;
 2020                                 cache->uc_allocbucket = bucket;
 2021 
 2022                                 goto zalloc_start;
 2023                         }
 2024                 }
 2025         }
 2026         /*
 2027          * The attempt to retrieve the item from the per-CPU cache failed, so
 2028          * we must go back to the zone.  This requires the zone lock, so we
 2029          * must drop the critical section, then re-acquire it when we go back
 2030          * to the cache.  Since the critical section is released, we may be
 2031          * preempted or migrate.  As such, make sure not to maintain any
 2032          * thread-local state specific to the cache from prior to releasing
 2033          * the critical section.
 2034          */
 2035         critical_exit();
 2036         ZONE_LOCK(zone);
 2037         critical_enter();
 2038         cpu = curcpu;
 2039         cache = &zone->uz_cpu[cpu];
 2040         bucket = cache->uc_allocbucket;
 2041         if (bucket != NULL) {
 2042                 if (bucket->ub_cnt > 0) {
 2043                         ZONE_UNLOCK(zone);
 2044                         goto zalloc_start;
 2045                 }
 2046                 bucket = cache->uc_freebucket;
 2047                 if (bucket != NULL && bucket->ub_cnt > 0) {
 2048                         ZONE_UNLOCK(zone);
 2049                         goto zalloc_start;
 2050                 }
 2051         }
 2052 
 2053         /* Since we have locked the zone we may as well send back our stats */
 2054         zone->uz_allocs += cache->uc_allocs;
 2055         cache->uc_allocs = 0;
 2056         zone->uz_frees += cache->uc_frees;
 2057         cache->uc_frees = 0;
 2058 
 2059         /* Our old one is now a free bucket */
 2060         if (cache->uc_allocbucket) {
 2061                 KASSERT(cache->uc_allocbucket->ub_cnt == 0,
 2062                     ("uma_zalloc_arg: Freeing a non free bucket."));
 2063                 LIST_INSERT_HEAD(&zone->uz_free_bucket,
 2064                     cache->uc_allocbucket, ub_link);
 2065                 cache->uc_allocbucket = NULL;
 2066         }
 2067 
 2068         /* Check the free list for a new alloc bucket */
 2069         if ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) {
 2070                 KASSERT(bucket->ub_cnt != 0,
 2071                     ("uma_zalloc_arg: Returning an empty bucket."));
 2072 
 2073                 LIST_REMOVE(bucket, ub_link);
 2074                 cache->uc_allocbucket = bucket;
 2075                 ZONE_UNLOCK(zone);
 2076                 goto zalloc_start;
 2077         }
 2078         /* We are no longer associated with this CPU. */
 2079         critical_exit();
 2080 
 2081         /* Bump up our uz_count so we get here less often */
 2082         if (zone->uz_count < BUCKET_MAX)
 2083                 zone->uz_count++;
 2084 
 2085         /*
 2086          * Now let's just fill a bucket and put it on the free list.  If that
 2087          * works we'll restart the allocation from the beginning.
 2088          */
 2089         if (zone_alloc_bucket(zone, flags)) {
 2090                 ZONE_UNLOCK(zone);
 2091                 goto zalloc_restart;
 2092         }
 2093         ZONE_UNLOCK(zone);
 2094         /*
 2095          * We may not be able to get a bucket so return an actual item.
 2096          */
 2097 #ifdef UMA_DEBUG
 2098         printf("uma_zalloc_arg: Bucketzone returned NULL\n");
 2099 #endif
 2100 
 2101         item = zone_alloc_item(zone, udata, flags);
 2102         return (item);
 2103 }
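
/*
 * Example (illustrative; not part of the original file): the per-CPU
 * access pattern that the fast path above relies on, reduced to its
 * skeleton.  The counters array is hypothetical; critical_enter(),
 * critical_exit(), curcpu, and MAXCPU are the real kernel primitives.
 */
static u_long event_counters[MAXCPU];

static void
count_event(void)
{

	critical_enter();		/* no preemption, no migration */
	event_counters[curcpu]++;	/* curcpu is stable in here */
	critical_exit();		/* we may migrate after this */
}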
 2104 
 2105 static uma_slab_t
 2106 keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int flags)
 2107 {
 2108         uma_slab_t slab;
 2109 
 2110         mtx_assert(&keg->uk_lock, MA_OWNED);
 2111         slab = NULL;
 2112 
 2113         for (;;) {
 2114                 /*
 2115                  * Find a slab with some space.  Prefer slabs that are partially
 2116                  * used over those that are totally full.  This helps to reduce
 2117                  * fragmentation.
 2118                  */
 2119                 if (keg->uk_free != 0) {
 2120                         if (!LIST_EMPTY(&keg->uk_part_slab)) {
 2121                                 slab = LIST_FIRST(&keg->uk_part_slab);
 2122                         } else {
 2123                                 slab = LIST_FIRST(&keg->uk_free_slab);
 2124                                 LIST_REMOVE(slab, us_link);
 2125                                 LIST_INSERT_HEAD(&keg->uk_part_slab, slab,
 2126                                     us_link);
 2127                         }
 2128                         MPASS(slab->us_keg == keg);
 2129                         return (slab);
 2130                 }
 2131 
 2132                 /*
 2133                  * M_NOVM means don't ask the VM for more pages at all!
 2134                  */
 2135                 if (flags & M_NOVM)
 2136                         break;
 2137 
 2138                 if (keg->uk_maxpages && keg->uk_pages >= keg->uk_maxpages) {
 2139                         keg->uk_flags |= UMA_ZFLAG_FULL;
 2140                         /*
 2141                          * If this is not a multi-zone, set the FULL bit.
 2142                          * Otherwise slab_multi() takes care of it.
 2143                          */
 2144                         if ((zone->uz_flags & UMA_ZFLAG_MULTI) == 0)
 2145                                 zone->uz_flags |= UMA_ZFLAG_FULL;
 2146                         if (flags & M_NOWAIT)
 2147                                 break;
 2148                         msleep(keg, &keg->uk_lock, PVM, "keglimit", 0);
 2149                         continue;
 2150                 }
 2151                 keg->uk_recurse++;
 2152                 slab = keg_alloc_slab(keg, zone, flags);
 2153                 keg->uk_recurse--;
 2154                 /*
 2155                  * If we got a slab here it's safe to mark it partially used
 2156                  * and return.  We assume that the caller is going to remove
 2157                  * at least one item.
 2158                  */
 2159                 if (slab) {
 2160                         MPASS(slab->us_keg == keg);
 2161                         LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
 2162                         return (slab);
 2163                 }
 2164                 /*
 2165                  * We might not have been able to get a slab but another cpu
 2166                  * could have while we were unlocked.  Check again before we
 2167                  * fail.
 2168                  */
 2169                 flags |= M_NOVM;
 2170         }
 2171         return (slab);
 2172 }
 2173 
 2174 static inline void
 2175 zone_relock(uma_zone_t zone, uma_keg_t keg)
 2176 {
 2177         if (zone->uz_lock != &keg->uk_lock) {
 2178                 KEG_UNLOCK(keg);
 2179                 ZONE_LOCK(zone);
 2180         }
 2181 }
 2182 
 2183 static inline void
 2184 keg_relock(uma_keg_t keg, uma_zone_t zone)
 2185 {
 2186         if (zone->uz_lock != &keg->uk_lock) {
 2187                 ZONE_UNLOCK(zone);
 2188                 KEG_LOCK(keg);
 2189         }
 2190 }
 2191 
 2192 static uma_slab_t
 2193 zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int flags)
 2194 {
 2195         uma_slab_t slab;
 2196 
 2197         if (keg == NULL)
 2198                 keg = zone_first_keg(zone);
 2199         /*
 2200          * This is to prevent us from recursively trying to allocate
 2201          * buckets.  The problem is that if an allocation forces us to
 2202          * grab a new bucket we will call page_alloc, which will go off
 2203          * and cause the vm to allocate vm_map_entries.  If we need new
 2204          * buckets there too we will recurse in kmem_alloc and bad
 2205          * things happen.  So instead we return a NULL bucket, and make
 2206          * the code that allocates buckets smart enough to deal with it.
 2207          */
 2208         if (keg->uk_flags & UMA_ZFLAG_BUCKET && keg->uk_recurse != 0)
 2209                 return (NULL);
 2210 
 2211         for (;;) {
 2212                 slab = keg_fetch_slab(keg, zone, flags);
 2213                 if (slab)
 2214                         return (slab);
 2215                 if (flags & (M_NOWAIT | M_NOVM))
 2216                         break;
 2217         }
 2218         return (NULL);
 2219 }
 2220 
 2221 /*
 2222  * zone_fetch_slab_multi:  Fetches a slab from one available keg.  Returns
 2223  * with the keg locked.  Caller must call zone_relock() afterwards if the
 2224  * zone lock is required.  If NULL is returned, the zone lock is held.
 2225  *
 2226  * The last pointer is used to seed the search.  It is not required.
 2227  */
 2228 static uma_slab_t
 2229 zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int rflags)
 2230 {
 2231         uma_klink_t klink;
 2232         uma_slab_t slab;
 2233         uma_keg_t keg;
 2234         int flags;
 2235         int empty;
 2236         int full;
 2237 
 2238         /*
 2239          * Don't wait on the first pass.  This will skip limit tests
 2240          * as well.  We don't want to block if we can find a provider
 2241          * without blocking.
 2242          */
 2243         flags = (rflags & ~M_WAITOK) | M_NOWAIT;
 2244         /*
 2245          * Use the last slab allocated as a hint for where to start
 2246          * the search.
 2247          */
 2248         if (last) {
 2249                 slab = keg_fetch_slab(last, zone, flags);
 2250                 if (slab)
 2251                         return (slab);
 2252                 zone_relock(zone, last);
 2253                 last = NULL;
 2254         }
 2255         /*
 2256          * Loop until we have a slab in case of transient failures
 2257          * while M_WAITOK is specified.  I'm not sure this is 100%
 2258          * required but we've done it for so long now.
 2259          */
 2260         for (;;) {
 2261                 empty = 0;
 2262                 full = 0;
 2263                 /*
 2264                  * Search the available kegs for slabs.  Be careful to hold the
 2265                  * correct lock while calling into the keg layer.
 2266                  */
 2267                 LIST_FOREACH(klink, &zone->uz_kegs, kl_link) {
 2268                         keg = klink->kl_keg;
 2269                         keg_relock(keg, zone);
 2270                         if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) {
 2271                                 slab = keg_fetch_slab(keg, zone, flags);
 2272                                 if (slab)
 2273                                         return (slab);
 2274                         }
 2275                         if (keg->uk_flags & UMA_ZFLAG_FULL)
 2276                                 full++;
 2277                         else
 2278                                 empty++;
 2279                         zone_relock(zone, keg);
 2280                 }
 2281                 if (rflags & (M_NOWAIT | M_NOVM))
 2282                         break;
 2283                 flags = rflags;
 2284                 /*
 2285                  * All kegs are full.  XXX We can't atomically check all kegs
 2286                  * and sleep so just sleep for a short period and retry.
 2287                  */
 2288                 if (full && !empty) {
 2289                         zone->uz_flags |= UMA_ZFLAG_FULL;
 2290                         msleep(zone, zone->uz_lock, PVM, "zonelimit", hz/100);
 2291                         zone->uz_flags &= ~UMA_ZFLAG_FULL;
 2292                         continue;
 2293                 }
 2294         }
 2295         return (NULL);
 2296 }
 2297 
 2298 static void *
 2299 slab_alloc_item(uma_zone_t zone, uma_slab_t slab)
 2300 {
 2301         uma_keg_t keg;
 2302         uma_slabrefcnt_t slabref;
 2303         void *item;
 2304         u_int8_t freei;
 2305 
 2306         keg = slab->us_keg;
 2307         mtx_assert(&keg->uk_lock, MA_OWNED);
 2308 
 2309         freei = slab->us_firstfree;
 2310         if (keg->uk_flags & UMA_ZONE_REFCNT) {
 2311                 slabref = (uma_slabrefcnt_t)slab;
 2312                 slab->us_firstfree = slabref->us_freelist[freei].us_item;
 2313         } else {
 2314                 slab->us_firstfree = slab->us_freelist[freei].us_item;
 2315         }
 2316         item = slab->us_data + (keg->uk_rsize * freei);
 2317 
 2318         slab->us_freecount--;
 2319         keg->uk_free--;
 2320 #ifdef INVARIANTS
 2321         uma_dbg_alloc(zone, slab, item);
 2322 #endif
 2323         /* Move this slab to the full list */
 2324         if (slab->us_freecount == 0) {
 2325                 LIST_REMOVE(slab, us_link);
 2326                 LIST_INSERT_HEAD(&keg->uk_full_slab, slab, us_link);
 2327         }
 2328 
 2329         return (item);
 2330 }
 2331 
 2332 static int
 2333 zone_alloc_bucket(uma_zone_t zone, int flags)
 2334 {
 2335         uma_bucket_t bucket;
 2336         uma_slab_t slab;
 2337         uma_keg_t keg;
 2338         int16_t saved;
 2339         int max, origflags = flags;
 2340 
 2341         /*
 2342          * Try this zone's free list first so we don't allocate extra buckets.
 2343          */
 2344         if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
 2345                 KASSERT(bucket->ub_cnt == 0,
 2346                     ("zone_alloc_bucket: Bucket on free list is not empty."));
 2347                 LIST_REMOVE(bucket, ub_link);
 2348         } else {
 2349                 int bflags;
 2350 
 2351                 bflags = (flags & ~M_ZERO);
 2352                 if (zone->uz_flags & UMA_ZFLAG_CACHEONLY)
 2353                         bflags |= M_NOVM;
 2354 
 2355                 ZONE_UNLOCK(zone);
 2356                 bucket = bucket_alloc(zone->uz_count, bflags);
 2357                 ZONE_LOCK(zone);
 2358         }
 2359 
 2360         if (bucket == NULL) {
 2361                 return (0);
 2362         }
 2363 
 2364 #ifdef SMP
 2365         /*
 2366          * This code is here to limit the number of simultaneous bucket fills
 2367          * for any given zone to the number of per cpu caches in this zone. This
 2368          * is done so that we don't allocate more memory than we really need.
 2369          */
 2370         if (zone->uz_fills >= mp_ncpus)
 2371                 goto done;
 2372 
 2373 #endif
 2374         zone->uz_fills++;
 2375 
 2376         max = MIN(bucket->ub_entries, zone->uz_count);
 2377         /* Try to keep the buckets totally full */
 2378         saved = bucket->ub_cnt;
 2379         slab = NULL;
 2380         keg = NULL;
 2381         while (bucket->ub_cnt < max &&
 2382             (slab = zone->uz_slab(zone, keg, flags)) != NULL) {
 2383                 keg = slab->us_keg;
 2384                 while (slab->us_freecount && bucket->ub_cnt < max) {
 2385                         bucket->ub_bucket[bucket->ub_cnt++] =
 2386                             slab_alloc_item(zone, slab);
 2387                 }
 2388 
 2389                 /* Don't block on the next fill */
 2390                 flags |= M_NOWAIT;
 2391         }
 2392         if (slab)
 2393                 zone_relock(zone, keg);
 2394 
 2395         /*
 2396          * We unlock here because we need to call the zone's init.
 2397          * It should be safe to unlock because the slab dealt with
 2398          * above is already on the appropriate list within the keg
 2399          * and the bucket we filled is not yet on any list, so we
 2400          * own it.
 2401          */
 2402         if (zone->uz_init != NULL) {
 2403                 int i;
 2404 
 2405                 ZONE_UNLOCK(zone);
 2406                 for (i = saved; i < bucket->ub_cnt; i++)
 2407                         if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
 2408                             origflags) != 0)
 2409                                 break;
 2410                 /*
 2411                  * If we couldn't initialize the whole bucket, put the
 2412                  * rest back onto the freelist.
 2413                  */
 2414                 if (i != bucket->ub_cnt) {
 2415                         int j;
 2416 
 2417                         for (j = i; j < bucket->ub_cnt; j++) {
 2418                                 zone_free_item(zone, bucket->ub_bucket[j],
 2419                                     NULL, SKIP_FINI, 0);
 2420 #ifdef INVARIANTS
 2421                                 bucket->ub_bucket[j] = NULL;
 2422 #endif
 2423                         }
 2424                         bucket->ub_cnt = i;
 2425                 }
 2426                 ZONE_LOCK(zone);
 2427         }
 2428 
 2429         zone->uz_fills--;
 2430         if (bucket->ub_cnt != 0) {
 2431                 LIST_INSERT_HEAD(&zone->uz_full_bucket,
 2432                     bucket, ub_link);
 2433                 return (1);
 2434         }
 2435 #ifdef SMP
 2436 done:
 2437 #endif
 2438         bucket_free(bucket);
 2439 
 2440         return (0);
 2441 }
 2442 /*
 2443  * Allocates an item for an internal zone
 2444  *
 2445  * Arguments
 2446  *      zone   The zone to alloc for.
 2447  *      udata  The data to be passed to the constructor.
 2448  *      flags  M_WAITOK, M_NOWAIT, M_ZERO.
 2449  *
 2450  * Returns
 2451  *      NULL if there is no memory and M_NOWAIT is set
 2452  *      An item if successful
 2453  */
 2454 
 2455 static void *
 2456 zone_alloc_item(uma_zone_t zone, void *udata, int flags)
 2457 {
 2458         uma_slab_t slab;
 2459         void *item;
 2460 
 2461         item = NULL;
 2462 
 2463 #ifdef UMA_DEBUG_ALLOC
 2464         printf("INTERNAL: Allocating one item from %s(%p)\n", zone->uz_name, zone);
 2465 #endif
 2466         ZONE_LOCK(zone);
 2467 
 2468         slab = zone->uz_slab(zone, NULL, flags);
 2469         if (slab == NULL) {
 2470                 zone->uz_fails++;
 2471                 ZONE_UNLOCK(zone);
 2472                 return (NULL);
 2473         }
 2474 
 2475         item = slab_alloc_item(zone, slab);
 2476 
 2477         zone_relock(zone, slab->us_keg);
 2478         zone->uz_allocs++;
 2479         ZONE_UNLOCK(zone);
 2480 
 2481         /*
 2482          * We have to call both the zone's init (not the keg's init)
 2483          * and the zone's ctor.  This is because the item is going from
 2484          * a keg slab directly to the user, and the user is expecting it
 2485          * to be both zone-init'd as well as zone-ctor'd.
 2486          */
 2487         if (zone->uz_init != NULL) {
 2488                 if (zone->uz_init(item, zone->uz_size, flags) != 0) {
 2489                         zone_free_item(zone, item, udata, SKIP_FINI,
 2490                             ZFREE_STATFAIL | ZFREE_STATFREE);
 2491                         return (NULL);
 2492                 }
 2493         }
 2494         if (zone->uz_ctor != NULL) {
 2495                 if (zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
 2496                         zone_free_item(zone, item, udata, SKIP_DTOR,
 2497                             ZFREE_STATFAIL | ZFREE_STATFREE);
 2498                         return (NULL);
 2499                 }
 2500         }
 2501         if (flags & M_ZERO)
 2502                 bzero(item, zone->uz_size);
 2503 
 2504         return (item);
 2505 }
 2506 
 2507 /* See uma.h */
 2508 void
 2509 uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
 2510 {
 2511         uma_cache_t cache;
 2512         uma_bucket_t bucket;
 2513         int bflags;
 2514         int cpu;
 2515 
 2516 #ifdef UMA_DEBUG_ALLOC_1
 2517         printf("Freeing item %p to %s(%p)\n", item, zone->uz_name, zone);
 2518 #endif
 2519         CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
 2520             zone->uz_name);
 2521 
 2522         if (zone->uz_dtor)
 2523                 zone->uz_dtor(item, zone->uz_size, udata);
 2524 
 2525 #ifdef INVARIANTS
 2526         ZONE_LOCK(zone);
 2527         if (zone->uz_flags & UMA_ZONE_MALLOC)
 2528                 uma_dbg_free(zone, udata, item);
 2529         else
 2530                 uma_dbg_free(zone, NULL, item);
 2531         ZONE_UNLOCK(zone);
 2532 #endif
 2533         /*
 2534          * The race here is acceptable.  If we miss it we'll just have to wait
 2535          * a little longer for the limits to be reset.
 2536          */
 2537         if (zone->uz_flags & UMA_ZFLAG_FULL)
 2538                 goto zfree_internal;
 2539 
 2540         /*
 2541          * If possible, free to the per-CPU cache.  There are two
 2542          * requirements for safe access to the per-CPU cache: (1) the thread
 2543          * accessing the cache must not be preempted or yield during access,
 2544          * and (2) the thread must not migrate CPUs without switching which
 2545          * cache it accesses.  We rely on a critical section to prevent
 2546          * preemption and migration.  We release the critical section in
 2547          * order to acquire the zone mutex if we are unable to free to the
 2548          * current cache; when we re-acquire the critical section, we must
 2549          * detect and handle migration if it has occurred.
 2550          */
 2551 zfree_restart:
 2552         critical_enter();
 2553         cpu = curcpu;
 2554         cache = &zone->uz_cpu[cpu];
 2555 
 2556 zfree_start:
 2557         bucket = cache->uc_freebucket;
 2558 
 2559         if (bucket) {
 2560                 /*
 2561                  * Do we have room in our bucket? It is OK for this uz count
 2562                  * check to be slightly out of sync.
 2563                  */
 2564 
 2565                 if (bucket->ub_cnt < bucket->ub_entries) {
 2566                         KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
 2567                             ("uma_zfree: Freeing to non free bucket index."));
 2568                         bucket->ub_bucket[bucket->ub_cnt] = item;
 2569                         bucket->ub_cnt++;
 2570                         cache->uc_frees++;
 2571                         critical_exit();
 2572                         return;
 2573                 } else if (cache->uc_allocbucket) {
 2574 #ifdef UMA_DEBUG_ALLOC
 2575                         printf("uma_zfree: Swapping buckets.\n");
 2576 #endif
 2577                         /*
 2578                          * We have run out of space in our freebucket.
 2579                          * See if we can switch with our alloc bucket.
 2580                          */
 2581                         if (cache->uc_allocbucket->ub_cnt <
 2582                             cache->uc_freebucket->ub_cnt) {
 2583                                 bucket = cache->uc_freebucket;
 2584                                 cache->uc_freebucket = cache->uc_allocbucket;
 2585                                 cache->uc_allocbucket = bucket;
 2586                                 goto zfree_start;
 2587                         }
 2588                 }
 2589         }
 2590         /*
 2591          * We can get here for two reasons:
 2592          *
 2593          * 1) The buckets are NULL
 2594          * 2) The alloc and free buckets are both somewhat full.
 2595          *
 2596          * We must go back to the zone, which requires acquiring the zone lock,
 2597          * which in turn means we must release and re-acquire the critical
 2598          * section.  Since the critical section is released, we may be
 2599          * preempted or migrate.  As such, make sure not to maintain any
 2600          * thread-local state specific to the cache from prior to releasing
 2601          * the critical section.
 2602          */
 2603         critical_exit();
 2604         ZONE_LOCK(zone);
 2605         critical_enter();
 2606         cpu = curcpu;
 2607         cache = &zone->uz_cpu[cpu];
 2608         if (cache->uc_freebucket != NULL) {
 2609                 if (cache->uc_freebucket->ub_cnt <
 2610                     cache->uc_freebucket->ub_entries) {
 2611                         ZONE_UNLOCK(zone);
 2612                         goto zfree_start;
 2613                 }
 2614                 if (cache->uc_allocbucket != NULL &&
 2615                     (cache->uc_allocbucket->ub_cnt <
 2616                     cache->uc_freebucket->ub_cnt)) {
 2617                         ZONE_UNLOCK(zone);
 2618                         goto zfree_start;
 2619                 }
 2620         }
 2621 
 2622         /* Since we have locked the zone we may as well send back our stats */
 2623         zone->uz_allocs += cache->uc_allocs;
 2624         cache->uc_allocs = 0;
 2625         zone->uz_frees += cache->uc_frees;
 2626         cache->uc_frees = 0;
 2627 
 2628         bucket = cache->uc_freebucket;
 2629         cache->uc_freebucket = NULL;
 2630 
 2631         /* Can we throw this on the zone full list? */
 2632         if (bucket != NULL) {
 2633 #ifdef UMA_DEBUG_ALLOC
 2634                 printf("uma_zfree: Putting old bucket on the free list.\n");
 2635 #endif
 2636                 /* ub_cnt points one past the last filled slot */
 2637                 KASSERT(bucket->ub_cnt != 0,
 2638                     ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n"));
 2639                 LIST_INSERT_HEAD(&zone->uz_full_bucket,
 2640                     bucket, ub_link);
 2641         }
 2642         if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
 2643                 LIST_REMOVE(bucket, ub_link);
 2644                 ZONE_UNLOCK(zone);
 2645                 cache->uc_freebucket = bucket;
 2646                 goto zfree_start;
 2647         }
 2648         /* We are no longer associated with this CPU. */
 2649         critical_exit();
 2650 
 2651         /* And the zone.. */
 2652         ZONE_UNLOCK(zone);
 2653 
 2654 #ifdef UMA_DEBUG_ALLOC
 2655         printf("uma_zfree: Allocating new free bucket.\n");
 2656 #endif
 2657         bflags = M_NOWAIT;
 2658 
 2659         if (zone->uz_flags & UMA_ZFLAG_CACHEONLY)
 2660                 bflags |= M_NOVM;
 2661         bucket = bucket_alloc(zone->uz_count, bflags);
 2662         if (bucket) {
 2663                 ZONE_LOCK(zone);
 2664                 LIST_INSERT_HEAD(&zone->uz_free_bucket,
 2665                     bucket, ub_link);
 2666                 ZONE_UNLOCK(zone);
 2667                 goto zfree_restart;
 2668         }
 2669 
 2670         /*
 2671          * If nothing else caught this, we'll just do an internal free.
 2672          */
 2673 zfree_internal:
 2674         zone_free_item(zone, item, udata, SKIP_DTOR, ZFREE_STATFREE);
 2675 
 2676         return;
 2677 }
 2678 
 2679 /*
 2680  * Frees an item to an INTERNAL zone or allocates a free bucket
 2681  *
 2682  * Arguments:
 2683  *      zone   The zone to free to
 2684  *      item   The item we're freeing
 2685  *      udata  User supplied data for the dtor
 2686  *      skip   Skip dtors and finis
 2687  */
 2688 static void
 2689 zone_free_item(uma_zone_t zone, void *item, void *udata,
 2690     enum zfreeskip skip, int flags)
 2691 {
 2692         uma_slab_t slab;
 2693         uma_slabrefcnt_t slabref;
 2694         uma_keg_t keg;
 2695         u_int8_t *mem;
 2696         u_int8_t freei;
 2697         int clearfull;
 2698 
 2699         if (skip < SKIP_DTOR && zone->uz_dtor)
 2700                 zone->uz_dtor(item, zone->uz_size, udata);
 2701 
 2702         if (skip < SKIP_FINI && zone->uz_fini)
 2703                 zone->uz_fini(item, zone->uz_size);
 2704 
 2705         ZONE_LOCK(zone);
 2706 
 2707         if (flags & ZFREE_STATFAIL)
 2708                 zone->uz_fails++;
 2709         if (flags & ZFREE_STATFREE)
 2710                 zone->uz_frees++;
 2711 
 2712         if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
 2713                 mem = (u_int8_t *)((unsigned long)item & (~UMA_SLAB_MASK));
 2714                 keg = zone_first_keg(zone); /* Must only be one. */
 2715                 if (zone->uz_flags & UMA_ZONE_HASH) {
 2716                         slab = hash_sfind(&keg->uk_hash, mem);
 2717                 } else {
 2718                         mem += keg->uk_pgoff;
 2719                         slab = (uma_slab_t)mem;
 2720                 }
 2721         } else {
 2722                 /* This prevents redundant lookups via free(). */
 2723                 if ((zone->uz_flags & UMA_ZONE_MALLOC) && udata != NULL)
 2724                         slab = (uma_slab_t)udata;
 2725                 else
 2726                         slab = vtoslab((vm_offset_t)item);
 2727                 keg = slab->us_keg;
 2728                 keg_relock(keg, zone);
 2729         }
 2730         MPASS(keg == slab->us_keg);
 2731 
 2732         /* Do we need to remove from any lists? */
 2733         if (slab->us_freecount+1 == keg->uk_ipers) {
 2734                 LIST_REMOVE(slab, us_link);
 2735                 LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
 2736         } else if (slab->us_freecount == 0) {
 2737                 LIST_REMOVE(slab, us_link);
 2738                 LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
 2739         }
 2740 
 2741         /* Slab management stuff */
 2742         freei = ((unsigned long)item - (unsigned long)slab->us_data)
 2743                 / keg->uk_rsize;
 2744 
 2745 #ifdef INVARIANTS
 2746         if (!skip)
 2747                 uma_dbg_free(zone, slab, item);
 2748 #endif
 2749 
 2750         if (keg->uk_flags & UMA_ZONE_REFCNT) {
 2751                 slabref = (uma_slabrefcnt_t)slab;
 2752                 slabref->us_freelist[freei].us_item = slab->us_firstfree;
 2753         } else {
 2754                 slab->us_freelist[freei].us_item = slab->us_firstfree;
 2755         }
 2756         slab->us_firstfree = freei;
 2757         slab->us_freecount++;
 2758 
 2759         /* Zone statistics */
 2760         keg->uk_free++;
 2761 
 2762         clearfull = 0;
 2763         if (keg->uk_flags & UMA_ZFLAG_FULL) {
 2764                 if (keg->uk_pages < keg->uk_maxpages) {
 2765                         keg->uk_flags &= ~UMA_ZFLAG_FULL;
 2766                         clearfull = 1;
 2767                 }
 2768 
 2769                 /* 
 2770                  * We can handle one more allocation. Since we're clearing ZFLAG_FULL,
 2771                  * wake up all procs blocked on pages. This should be uncommon, so 
 2772                  * keeping this simple for now (rather than adding count of blocked 
 2773                  * threads etc).
 2774                  */
 2775                 wakeup(keg);
 2776         }
 2777         if (clearfull) {
 2778                 zone_relock(zone, keg);
 2779                 zone->uz_flags &= ~UMA_ZFLAG_FULL;
 2780                 wakeup(zone);
 2781                 ZONE_UNLOCK(zone);
 2782         } else
 2783                 KEG_UNLOCK(keg);
 2784 }
 2785 
 2786 /* See uma.h */
 2787 void
 2788 uma_zone_set_max(uma_zone_t zone, int nitems)
 2789 {
 2790         uma_keg_t keg;
 2791 
 2792         ZONE_LOCK(zone);
 2793         keg = zone_first_keg(zone);
 2794         keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera;
 2795         if (keg->uk_maxpages * keg->uk_ipers < nitems)
 2796                 keg->uk_maxpages += keg->uk_ppera;
 2797 
 2798         ZONE_UNLOCK(zone);
 2799 }
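
/*
 * Worked example (illustrative; the keg values are hypothetical): with
 * uk_ipers = 50 items per slab and uk_ppera = 1 page per slab, capping
 * a zone at 120 items gives uk_maxpages = (120 / 50) * 1 = 2; since
 * 2 * 50 = 100 < 120, one more page is added, so the effective limit
 * is 3 slabs, i.e. 150 items.  Limits always round up to whole slabs.
 */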
 2800 
 2801 /* See uma.h */
 2802 void
 2803 uma_zone_set_init(uma_zone_t zone, uma_init uminit)
 2804 {
 2805         uma_keg_t keg;
 2806 
 2807         ZONE_LOCK(zone);
 2808         keg = zone_first_keg(zone);
 2809         KASSERT(keg->uk_pages == 0,
 2810             ("uma_zone_set_init on non-empty keg"));
 2811         keg->uk_init = uminit;
 2812         ZONE_UNLOCK(zone);
 2813 }
 2814 
 2815 /* See uma.h */
 2816 void
 2817 uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
 2818 {
 2819         uma_keg_t keg;
 2820 
 2821         ZONE_LOCK(zone);
 2822         keg = zone_first_keg(zone);
 2823         KASSERT(keg->uk_pages == 0,
 2824             ("uma_zone_set_fini on non-empty keg"));
 2825         keg->uk_fini = fini;
 2826         ZONE_UNLOCK(zone);
 2827 }
 2828 
 2829 /* See uma.h */
 2830 void
 2831 uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
 2832 {
 2833         ZONE_LOCK(zone);
 2834         KASSERT(zone_first_keg(zone)->uk_pages == 0,
 2835             ("uma_zone_set_zinit on non-empty keg"));
 2836         zone->uz_init = zinit;
 2837         ZONE_UNLOCK(zone);
 2838 }
 2839 
 2840 /* See uma.h */
 2841 void
 2842 uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
 2843 {
 2844         ZONE_LOCK(zone);
 2845         KASSERT(zone_first_keg(zone)->uk_pages == 0,
 2846             ("uma_zone_set_zfini on non-empty keg"));
 2847         zone->uz_fini = zfini;
 2848         ZONE_UNLOCK(zone);
 2849 }
 2850 
 2851 /* See uma.h */
 2852 /* XXX uk_freef is not actually used with the zone locked */
 2853 void
 2854 uma_zone_set_freef(uma_zone_t zone, uma_free freef)
 2855 {
 2856 
 2857         ZONE_LOCK(zone);
 2858         zone_first_keg(zone)->uk_freef = freef;
 2859         ZONE_UNLOCK(zone);
 2860 }
 2861 
 2862 /* See uma.h */
 2863 /* XXX uk_allocf is not actually used with the zone locked */
 2864 void
 2865 uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
 2866 {
 2867         uma_keg_t keg;
 2868 
 2869         ZONE_LOCK(zone);
 2870         keg = zone_first_keg(zone);
 2871         keg->uk_flags |= UMA_ZFLAG_PRIVALLOC;
 2872         keg->uk_allocf = allocf;
 2873         ZONE_UNLOCK(zone);
 2874 }
 2875 
 2876 /* See uma.h */
 2877 int
 2878 uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int count)
 2879 {
 2880         uma_keg_t keg;
 2881         vm_offset_t kva;
 2882         int pages;
 2883 
 2884         keg = zone_first_keg(zone);
 2885         pages = count / keg->uk_ipers;
 2886 
 2887         if (pages * keg->uk_ipers < count)
 2888                 pages++;
 2889 
 2890         kva = kmem_alloc_nofault(kernel_map, pages * UMA_SLAB_SIZE);
 2891 
 2892         if (kva == 0)
 2893                 return (0);
 2894         if (obj == NULL) {
 2895                 obj = vm_object_allocate(OBJT_DEFAULT,
 2896                     pages);
 2897         } else {
 2898                 VM_OBJECT_LOCK_INIT(obj, "uma object");
 2899                 _vm_object_allocate(OBJT_DEFAULT,
 2900                     pages, obj);
 2901         }
 2902         ZONE_LOCK(zone);
 2903         keg->uk_kva = kva;
 2904         keg->uk_obj = obj;
 2905         keg->uk_maxpages = pages;
 2906         keg->uk_allocf = obj_alloc;
 2907         keg->uk_flags |= UMA_ZONE_NOFREE | UMA_ZFLAG_PRIVALLOC;
 2908         ZONE_UNLOCK(zone);
 2909         return (1);
 2910 }
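
/*
 * Example (illustrative; not part of the original file): backing a zone
 * with a private VM object so that its pages come from a preallocated
 * KVA range.  The count is hypothetical; a return of 0 means the KVA
 * reservation failed.
 */
static void
set_obj_example(uma_zone_t zone)
{

	if (uma_zone_set_obj(zone, NULL, 4096) == 0)
		printf("uma_zone_set_obj: could not reserve KVA\n");
}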
 2911 
 2912 /* See uma.h */
 2913 void
 2914 uma_prealloc(uma_zone_t zone, int items)
 2915 {
 2916         int slabs;
 2917         uma_slab_t slab;
 2918         uma_keg_t keg;
 2919 
 2920         keg = zone_first_keg(zone);
 2921         ZONE_LOCK(zone);
 2922         slabs = items / keg->uk_ipers;
 2923         if (slabs * keg->uk_ipers < items)
 2924                 slabs++;
 2925         while (slabs > 0) {
 2926                 slab = keg_alloc_slab(keg, zone, M_WAITOK);
 2927                 if (slab == NULL)
 2928                         break;
 2929                 MPASS(slab->us_keg == keg);
 2930                 LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
 2931                 slabs--;
 2932         }
 2933         ZONE_UNLOCK(zone);
 2934 }
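
/*
 * Example (illustrative; not part of the original file): pairing
 * uma_zone_set_max() with uma_prealloc() to both cap a zone and back
 * part of that cap with slabs up front.  The numbers are hypothetical.
 */
static void
prealloc_example(uma_zone_t zone)
{

	uma_zone_set_max(zone, 1024);	/* never exceed 1024 items */
	uma_prealloc(zone, 256);	/* allocate slabs for 256 now */
}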
 2935 
 2936 /* See uma.h */
 2937 u_int32_t *
 2938 uma_find_refcnt(uma_zone_t zone, void *item)
 2939 {
 2940         uma_slabrefcnt_t slabref;
 2941         uma_keg_t keg;
 2942         u_int32_t *refcnt;
 2943         int idx;
 2944 
 2945         slabref = (uma_slabrefcnt_t)vtoslab((vm_offset_t)item &
 2946             (~UMA_SLAB_MASK));
 2947         KASSERT(slabref != NULL && slabref->us_keg->uk_flags & UMA_ZONE_REFCNT,
 2948             ("uma_find_refcnt(): zone possibly not UMA_ZONE_REFCNT"));
 2949         keg = slabref->us_keg;
 2950         idx = ((unsigned long)item - (unsigned long)slabref->us_data)
 2951             / keg->uk_rsize;
 2952         refcnt = &slabref->us_freelist[idx].us_refcnt;
 2953         return (refcnt);
 2954 }
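
/*
 * Example (editorial sketch): a zone created with UMA_ZONE_REFCNT keeps one
 * u_int32_t per item in the slab header; mbuf clusters are the in-tree user
 * of this mechanism.  refzone is hypothetical, and the caller owns the
 * count's semantics entirely.
 */
static void
refcnt_example(uma_zone_t refzone)
{
        u_int32_t *refs;
        void *buf;

        buf = uma_zalloc(refzone, M_WAITOK);
        refs = uma_find_refcnt(refzone, buf);
        *refs = 2;                              /* two holders of buf */
        if (atomic_fetchadd_int((u_int *)refs, -1) == 1)
                uma_zfree(refzone, buf);        /* not reached: one ref left */
        if (atomic_fetchadd_int((u_int *)refs, -1) == 1)
                uma_zfree(refzone, buf);        /* last ref: item goes back */
}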
 2955 
 2956 /* See uma.h */
 2957 void
 2958 uma_reclaim(void)
 2959 {
 2960 #ifdef UMA_DEBUG
 2961         printf("UMA: vm asked us to release pages!\n");
 2962 #endif
 2963         bucket_enable();
 2964         zone_foreach(zone_drain);
 2965         /*
 2966          * The slab and bucket zones were visited early in the walk above;
 2967          * drain them again now that the other zones have returned slabs and
 2968          * buckets to them, so pages that only just became empty are freed.
 2969          */
 2970         zone_drain(slabzone);
 2971         zone_drain(slabrefzone);
 2972         bucket_zone_drain();
 2973 }
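
/*
 * Editorial sketch: uma_reclaim() is normally driven by the page daemon
 * when free pages run short, but a subsystem that has just released a
 * large cache may nudge it as well.  Treat the vm_page_count_min()
 * trigger below as illustrative rather than prescribed.
 */
static void
maybe_reclaim(void)
{

        if (vm_page_count_min())
                uma_reclaim();          /* drain caches, free empty slabs */
}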
 2974 
 2975 /* See uma.h */
 2976 int
 2977 uma_zone_exhausted(uma_zone_t zone)
 2978 {
 2979         int full;
 2980 
 2981         ZONE_LOCK(zone);
 2982         full = (zone->uz_flags & UMA_ZFLAG_FULL);
 2983         ZONE_UNLOCK(zone);
 2984         return (full);
 2985 }
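
/*
 * Example (editorial sketch): fail a request early instead of stalling when
 * a capped zone is full; the swap pager performs a similar check on its
 * metadata zone.  my_zone is hypothetical.
 */
static int
my_alloc_precheck(uma_zone_t my_zone)
{

        if (uma_zone_exhausted(my_zone)) {
                printf("my_zone exhausted; consider raising its limit\n");
                return (ENOMEM);
        }
        return (0);
}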
 2986 
 2987 int
 2988 uma_zone_exhausted_nolock(uma_zone_t zone)
 2989 {
 2990         return (zone->uz_flags & UMA_ZFLAG_FULL);
 2991 }
 2992 
 2993 void *
 2994 uma_large_malloc(int size, int wait)
 2995 {
 2996         void *mem;
 2997         uma_slab_t slab;
 2998         u_int8_t flags;
 2999 
 3000         slab = zone_alloc_item(slabzone, NULL, wait);
 3001         if (slab == NULL)
 3002                 return (NULL);
 3003         mem = page_alloc(NULL, size, &flags, wait);
 3004         if (mem) {
 3005                 vsetslab((vm_offset_t)mem, slab);
 3006                 slab->us_data = mem;
 3007                 slab->us_flags = flags | UMA_SLAB_MALLOC;
 3008                 slab->us_size = size;
 3009         } else {
 3010                 zone_free_item(slabzone, slab, NULL, SKIP_NONE,
 3011                     ZFREE_STATFAIL | ZFREE_STATFREE);
 3012         }
 3013 
 3014         return (mem);
 3015 }
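
/*
 * Editorial sketch of the caller's view, simplified from kern_malloc.c:
 * requests beyond the largest malloc(9) bucket (KMEM_ZMAX, 64 KB in this
 * release) bypass the zones; the item is whole pages, and the slabzone
 * header lets free() recover the size later via vtoslab().
 * pick_malloc_zone() is a hypothetical stand-in for the kmemzones lookup.
 */
static void *
malloc_sketch(unsigned long size, int flags)
{

        if (size <= 65536)              /* KMEM_ZMAX */
                return (uma_zalloc(pick_malloc_zone(size), flags));
        size = roundup(size, PAGE_SIZE);
        return (uma_large_malloc(size, flags));
}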
 3016 
 3017 void
 3018 uma_large_free(uma_slab_t slab)
 3019 {
 3020         vsetobj((vm_offset_t)slab->us_data, kmem_object);
 3021         page_free(slab->us_data, slab->us_size, slab->us_flags);
 3022         zone_free_item(slabzone, slab, NULL, SKIP_NONE, ZFREE_STATFREE);
 3023 }
 3024 
 3025 void
 3026 uma_print_stats(void)
 3027 {
 3028         zone_foreach(uma_print_zone);
 3029 }
 3030 
 3031 static void
 3032 slab_print(uma_slab_t slab)
 3033 {
 3034         printf("slab: keg %p, data %p, freecount %d, firstfree %d\n",
 3035                 slab->us_keg, slab->us_data, slab->us_freecount,
 3036                 slab->us_firstfree);
 3037 }
 3038 
 3039 static void
 3040 cache_print(uma_cache_t cache)
 3041 {
 3042         printf("alloc: %p(%d), free: %p(%d)\n",
 3043                 cache->uc_allocbucket,
 3044                 cache->uc_allocbucket ? cache->uc_allocbucket->ub_cnt : 0,
 3045                 cache->uc_freebucket,
 3046                 cache->uc_freebucket ? cache->uc_freebucket->ub_cnt : 0);
 3047 }
 3048 
 3049 static void
 3050 uma_print_keg(uma_keg_t keg)
 3051 {
 3052         uma_slab_t slab;
 3053 
 3054         printf("keg: %s(%p) size %d(%d) flags %d ipers %d ppera %d "
 3055             "out %d free %d limit %d\n",
 3056             keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags,
 3057             keg->uk_ipers, keg->uk_ppera,
 3058             (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free,
 3059             (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers);
 3060         printf("Part slabs:\n");
 3061         LIST_FOREACH(slab, &keg->uk_part_slab, us_link)
 3062                 slab_print(slab);
 3063         printf("Free slabs:\n");
 3064         LIST_FOREACH(slab, &keg->uk_free_slab, us_link)
 3065                 slab_print(slab);
 3066         printf("Full slabs:\n");
 3067         LIST_FOREACH(slab, &keg->uk_full_slab, us_link)
 3068                 slab_print(slab);
 3069 }
 3070 
 3071 void
 3072 uma_print_zone(uma_zone_t zone)
 3073 {
 3074         uma_cache_t cache;
 3075         uma_klink_t kl;
 3076         int i;
 3077 
 3078         printf("zone: %s(%p) size %d flags %d\n",
 3079             zone->uz_name, zone, zone->uz_size, zone->uz_flags);
 3080         LIST_FOREACH(kl, &zone->uz_kegs, kl_link)
 3081                 uma_print_keg(kl->kl_keg);
 3082         for (i = 0; i <= mp_maxid; i++) {
 3083                 if (CPU_ABSENT(i))
 3084                         continue;
 3085                 cache = &zone->uz_cpu[i];
 3086                 printf("CPU %d Cache:\n", i);
 3087                 cache_print(cache);
 3088         }
 3089 }
 3090 
 3091 #ifdef DDB
 3092 /*
 3093  * Generate statistics across both the zone and its per-CPU caches.  Return
 3094  * each desired statistic if the corresponding pointer is non-NULL.
 3095  *
 3096  * Note: does not update the zone statistics, as it can't safely clear the
 3097  * per-CPU cache statistic.
 3098  *
 3099  * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't
 3100  * safe from off-CPU; we should modify the caches to track this information
 3101  * directly so that we don't have to.
 3102  */
 3103 static void
 3104 uma_zone_sumstat(uma_zone_t z, int *cachefreep, u_int64_t *allocsp,
 3105     u_int64_t *freesp)
 3106 {
 3107         uma_cache_t cache;
 3108         u_int64_t allocs, frees;
 3109         int cachefree, cpu;
 3110 
 3111         allocs = frees = 0;
 3112         cachefree = 0;
 3113         for (cpu = 0; cpu <= mp_maxid; cpu++) {
 3114                 if (CPU_ABSENT(cpu))
 3115                         continue;
 3116                 cache = &z->uz_cpu[cpu];
 3117                 if (cache->uc_allocbucket != NULL)
 3118                         cachefree += cache->uc_allocbucket->ub_cnt;
 3119                 if (cache->uc_freebucket != NULL)
 3120                         cachefree += cache->uc_freebucket->ub_cnt;
 3121                 allocs += cache->uc_allocs;
 3122                 frees += cache->uc_frees;
 3123         }
 3124         allocs += z->uz_allocs;
 3125         frees += z->uz_frees;
 3126         if (cachefreep != NULL)
 3127                 *cachefreep = cachefree;
 3128         if (allocsp != NULL)
 3129                 *allocsp = allocs;
 3130         if (freesp != NULL)
 3131                 *freesp = frees;
 3132 }
 3133 #endif /* DDB */
 3134 
 3135 static int
 3136 sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS)
 3137 {
 3138         uma_keg_t kz;
 3139         uma_zone_t z;
 3140         int count;
 3141 
 3142         count = 0;
 3143         mtx_lock(&uma_mtx);
 3144         LIST_FOREACH(kz, &uma_kegs, uk_link) {
 3145                 LIST_FOREACH(z, &kz->uk_zones, uz_link)
 3146                         count++;
 3147         }
 3148         mtx_unlock(&uma_mtx);
 3149         return (sysctl_handle_int(oidp, &count, 0, req));
 3150 }
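
/*
 * Editorial sketch: the handler above is published as the vm.zone_count
 * sysctl, so userland can read the zone count directly.  What follows is a
 * stand-alone userland program, not kernel code:
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
        int count;
        size_t len = sizeof(count);

        if (sysctlbyname("vm.zone_count", &count, &len, NULL, 0) == -1) {
                perror("vm.zone_count");
                return (1);
        }
        printf("%d UMA zones\n", count);
        return (0);
}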
 3151 
 3152 static int
 3153 sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
 3154 {
 3155         struct uma_stream_header ush;
 3156         struct uma_type_header uth;
 3157         struct uma_percpu_stat ups;
 3158         uma_bucket_t bucket;
 3159         struct sbuf sbuf;
 3160         uma_cache_t cache;
 3161         uma_klink_t kl;
 3162         uma_keg_t kz;
 3163         uma_zone_t z;
 3164         uma_keg_t k;
 3165         char *buffer;
 3166         int buflen, count, error, i;
 3167 
 3168         mtx_lock(&uma_mtx);
 3169 restart:
 3170         mtx_assert(&uma_mtx, MA_OWNED);
 3171         count = 0;
 3172         LIST_FOREACH(kz, &uma_kegs, uk_link) {
 3173                 LIST_FOREACH(z, &kz->uk_zones, uz_link)
 3174                         count++;
 3175         }
 3176         mtx_unlock(&uma_mtx);
 3177 
 3178         buflen = sizeof(ush) + count * (sizeof(uth) + sizeof(ups) *
 3179             (mp_maxid + 1)) + 1;
 3180         buffer = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
 3181 
 3182         mtx_lock(&uma_mtx);
 3183         i = 0;
 3184         LIST_FOREACH(kz, &uma_kegs, uk_link) {
 3185                 LIST_FOREACH(z, &kz->uk_zones, uz_link)
 3186                         i++;
 3187         }
 3188         if (i > count) {
 3189                 free(buffer, M_TEMP);
 3190                 goto restart;
 3191         }
 3192         count = i;
 3193 
 3194         sbuf_new(&sbuf, buffer, buflen, SBUF_FIXEDLEN);
 3195 
 3196         /*
 3197          * Insert stream header.
 3198          */
 3199         bzero(&ush, sizeof(ush));
 3200         ush.ush_version = UMA_STREAM_VERSION;
 3201         ush.ush_maxcpus = (mp_maxid + 1);
 3202         ush.ush_count = count;
 3203         if (sbuf_bcat(&sbuf, &ush, sizeof(ush)) < 0) {
 3204                 mtx_unlock(&uma_mtx);
 3205                 error = ENOMEM;
 3206                 goto out;
 3207         }
 3208 
 3209         LIST_FOREACH(kz, &uma_kegs, uk_link) {
 3210                 LIST_FOREACH(z, &kz->uk_zones, uz_link) {
 3211                         bzero(&uth, sizeof(uth));
 3212                         ZONE_LOCK(z);
 3213                         strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
 3214                         uth.uth_align = kz->uk_align;
 3215                         uth.uth_size = kz->uk_size;
 3216                         uth.uth_rsize = kz->uk_rsize;
 3217                         LIST_FOREACH(kl, &z->uz_kegs, kl_link) {
 3218                                 k = kl->kl_keg;
 3219                                 uth.uth_maxpages += k->uk_maxpages;
 3220                                 uth.uth_pages += k->uk_pages;
 3221                                 uth.uth_keg_free += k->uk_free;
 3222                                 uth.uth_limit = (k->uk_maxpages / k->uk_ppera)
 3223                                     * k->uk_ipers;
 3224                         }
 3225 
 3226                         /*
 3227                          * A zone is secondary if it is not the first entry
 3228                          * on the keg's zone list.
 3229                          */
 3230                         if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
 3231                             (LIST_FIRST(&kz->uk_zones) != z))
 3232                                 uth.uth_zone_flags = UTH_ZONE_SECONDARY;
 3233 
 3234                         LIST_FOREACH(bucket, &z->uz_full_bucket, ub_link)
 3235                                 uth.uth_zone_free += bucket->ub_cnt;
 3236                         uth.uth_allocs = z->uz_allocs;
 3237                         uth.uth_frees = z->uz_frees;
 3238                         uth.uth_fails = z->uz_fails;
 3239                         if (sbuf_bcat(&sbuf, &uth, sizeof(uth)) < 0) {
 3240                                 ZONE_UNLOCK(z);
 3241                                 mtx_unlock(&uma_mtx);
 3242                                 error = ENOMEM;
 3243                                 goto out;
 3244                         }
 3245                         /*
 3246                          * While it is not normally safe to access the cache
 3247                          * bucket pointers while not on the CPU that owns the
 3248                          * cache, we only allow the pointers to be exchanged
 3249                          * without the zone lock held, not invalidated, so
 3250                          * accept the possible race associated with bucket
 3251                          * exchange during monitoring.
 3252                          */
 3253                         for (i = 0; i < (mp_maxid + 1); i++) {
 3254                                 bzero(&ups, sizeof(ups));
 3255                                 if (kz->uk_flags & UMA_ZFLAG_INTERNAL)
 3256                                         goto skip;
 3257                                 if (CPU_ABSENT(i))
 3258                                         goto skip;
 3259                                 cache = &z->uz_cpu[i];
 3260                                 if (cache->uc_allocbucket != NULL)
 3261                                         ups.ups_cache_free +=
 3262                                             cache->uc_allocbucket->ub_cnt;
 3263                                 if (cache->uc_freebucket != NULL)
 3264                                         ups.ups_cache_free +=
 3265                                             cache->uc_freebucket->ub_cnt;
 3266                                 ups.ups_allocs = cache->uc_allocs;
 3267                                 ups.ups_frees = cache->uc_frees;
 3268 skip:
 3269                                 if (sbuf_bcat(&sbuf, &ups, sizeof(ups)) < 0) {
 3270                                         ZONE_UNLOCK(z);
 3271                                         mtx_unlock(&uma_mtx);
 3272                                         error = ENOMEM;
 3273                                         goto out;
 3274                                 }
 3275                         }
 3276                         ZONE_UNLOCK(z);
 3277                 }
 3278         }
 3279         mtx_unlock(&uma_mtx);
 3280         sbuf_finish(&sbuf);
 3281         error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf));
 3282 out:
 3283         free(buffer, M_TEMP);
 3284         return (error);
 3285 }
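
/*
 * Editorial sketch: the stream built above is published as vm.zone_stats
 * and is what libmemstat(3) consumes.  A minimal stand-alone userland
 * reader follows, assuming the stream structures in <vm/uma.h> are visible
 * to userland; a zone may appear between the two sysctl calls, so a robust
 * reader would retry on a grown buffer.
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <vm/uma.h>             /* uma_stream_header and friends */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
        struct uma_stream_header *ush;
        struct uma_type_header *uth;
        char *buf, *p;
        size_t len;
        uint32_t i;

        if (sysctlbyname("vm.zone_stats", NULL, &len, NULL, 0) == -1)
                return (1);
        if ((buf = malloc(len)) == NULL ||
            sysctlbyname("vm.zone_stats", buf, &len, NULL, 0) == -1)
                return (1);
        p = buf;
        ush = (struct uma_stream_header *)p;
        p += sizeof(*ush);
        for (i = 0; i < ush->ush_count; i++) {
                uth = (struct uma_type_header *)p;
                printf("%s: %ju allocs, %ju frees\n", uth->uth_name,
                    (uintmax_t)uth->uth_allocs, (uintmax_t)uth->uth_frees);
                /* Skip this type's back-to-back per-CPU records. */
                p += sizeof(*uth) +
                    ush->ush_maxcpus * sizeof(struct uma_percpu_stat);
        }
        free(buf);
        return (0);
}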
 3286 
 3287 #ifdef DDB
 3288 DB_SHOW_COMMAND(uma, db_show_uma)
 3289 {
 3290         u_int64_t allocs, frees;
 3291         uma_bucket_t bucket;
 3292         uma_keg_t kz;
 3293         uma_zone_t z;
 3294         int cachefree;
 3295 
 3296         db_printf("%18s %8s %8s %8s %12s\n", "Zone", "Size", "Used", "Free",
 3297             "Requests");
 3298         LIST_FOREACH(kz, &uma_kegs, uk_link) {
 3299                 LIST_FOREACH(z, &kz->uk_zones, uz_link) {
 3300                         if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
 3301                                 allocs = z->uz_allocs;
 3302                                 frees = z->uz_frees;
 3303                                 cachefree = 0;
 3304                         } else
 3305                                 uma_zone_sumstat(z, &cachefree, &allocs,
 3306                                     &frees);
 3307                         if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
 3308                             (LIST_FIRST(&kz->uk_zones) != z)))
 3309                                 cachefree += kz->uk_free;
 3310                         LIST_FOREACH(bucket, &z->uz_full_bucket, ub_link)
 3311                                 cachefree += bucket->ub_cnt;
 3312                         db_printf("%18s %8ju %8jd %8d %12ju\n", z->uz_name,
 3313                             (uintmax_t)kz->uk_size,
 3314                             (intmax_t)(allocs - frees), cachefree,
 3315                             (uintmax_t)allocs);
 3316                 }
 3317         }
 3318 }
 3319 #endif
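
/*
 * Editorial note: from the in-kernel debugger the command above is invoked
 * as "db> show uma" and prints one row per zone using the
 * Zone/Size/Used/Free/Requests columns formatted by the db_printf() calls.
 */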
