The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/vm/uma_core.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
    3  * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
    4  * Copyright (c) 2004-2006 Robert N. M. Watson
    5  * All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice unmodified, this list of conditions, and the following
   12  *    disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   27  */
   28 
   29 /*
   30  * uma_core.c  Implementation of the Universal Memory allocator
   31  *
   32  * This allocator is intended to replace the multitude of similar object caches
   33  * in the standard FreeBSD kernel.  The intent is to be flexible as well as
   34  * effecient.  A primary design goal is to return unused memory to the rest of
   35  * the system.  This will make the system as a whole more flexible due to the
   36  * ability to move memory to subsystems which most need it instead of leaving
   37  * pools of reserved memory unused.
   38  *
   39  * The basic ideas stem from similar slab/zone based allocators whose algorithms
   40  * are well known.
   41  *
   42  */
   43 
   44 /*
   45  * TODO:
   46  *      - Improve memory usage for large allocations
   47  *      - Investigate cache size adjustments
   48  */
   49 
   50 #include <sys/cdefs.h>
   51 __FBSDID("$FreeBSD$");
   52 
   53 /* I should really use ktr.. */
   54 /*
   55 #define UMA_DEBUG 1
   56 #define UMA_DEBUG_ALLOC 1
   57 #define UMA_DEBUG_ALLOC_1 1
   58 */
   59 
   60 #include "opt_ddb.h"
   61 #include "opt_param.h"
   62 #include "opt_vm.h"
   63 
   64 #include <sys/param.h>
   65 #include <sys/systm.h>
   66 #include <sys/bitset.h>
   67 #include <sys/eventhandler.h>
   68 #include <sys/kernel.h>
   69 #include <sys/types.h>
   70 #include <sys/queue.h>
   71 #include <sys/malloc.h>
   72 #include <sys/ktr.h>
   73 #include <sys/lock.h>
   74 #include <sys/sysctl.h>
   75 #include <sys/mutex.h>
   76 #include <sys/proc.h>
   77 #include <sys/rwlock.h>
   78 #include <sys/sbuf.h>
   79 #include <sys/sched.h>
   80 #include <sys/smp.h>
   81 #include <sys/vmmeter.h>
   82 
   83 #include <vm/vm.h>
   84 #include <vm/vm_object.h>
   85 #include <vm/vm_page.h>
   86 #include <vm/vm_pageout.h>
   87 #include <vm/vm_param.h>
   88 #include <vm/vm_map.h>
   89 #include <vm/vm_kern.h>
   90 #include <vm/vm_extern.h>
   91 #include <vm/uma.h>
   92 #include <vm/uma_int.h>
   93 #include <vm/uma_dbg.h>
   94 
   95 #include <ddb/ddb.h>
   96 
   97 #ifdef DEBUG_MEMGUARD
   98 #include <vm/memguard.h>
   99 #endif
  100 
  101 /*
  102  * This is the zone and keg from which all zones are spawned.  The idea is that
  103  * even the zone & keg heads are allocated from the allocator, so we use the
  104  * bss section to bootstrap us.
  105  */
  106 static struct uma_keg masterkeg;
  107 static struct uma_zone masterzone_k;
  108 static struct uma_zone masterzone_z;
  109 static uma_zone_t kegs = &masterzone_k;
  110 static uma_zone_t zones = &masterzone_z;
  111 
  112 /* This is the zone from which all of uma_slab_t's are allocated. */
  113 static uma_zone_t slabzone;
  114 static uma_zone_t slabrefzone;  /* With refcounters (for UMA_ZONE_REFCNT) */
  115 
  116 /*
  117  * The initial hash tables come out of this zone so they can be allocated
  118  * prior to malloc coming up.
  119  */
  120 static uma_zone_t hashzone;
  121 
  122 /* The boot-time adjusted value for cache line alignment. */
  123 int uma_align_cache = 64 - 1;
  124 
  125 static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
  126 
  127 /*
  128  * Are we allowed to allocate buckets?
  129  */
  130 static int bucketdisable = 1;
  131 
  132 /* Linked list of all kegs in the system */
  133 static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);
  134 
  135 /* Linked list of all cache-only zones in the system */
  136 static LIST_HEAD(,uma_zone) uma_cachezones =
  137     LIST_HEAD_INITIALIZER(uma_cachezones);
  138 
  139 /* This RW lock protects the keg list */
  140 static struct rwlock_padalign uma_rwlock;
  141 
  142 /* Linked list of boot time pages */
  143 static LIST_HEAD(,uma_slab) uma_boot_pages =
  144     LIST_HEAD_INITIALIZER(uma_boot_pages);
  145 
  146 /* This mutex protects the boot time pages list */
  147 static struct mtx_padalign uma_boot_pages_mtx;
  148 
  149 static struct sx uma_drain_lock;
  150 
  151 /* Is the VM done starting up? */
  152 static int booted = 0;
  153 #define UMA_STARTUP     1
  154 #define UMA_STARTUP2    2
  155 
  156 /*
  157  * Only mbuf clusters use ref zones.  Just provide enough references
  158  * to support the one user.  New code should not use the ref facility.
  159  */
  160 static const u_int uma_max_ipers_ref = PAGE_SIZE / MCLBYTES;
  161 
  162 /*
  163  * This is the handle used to schedule events that need to happen
  164  * outside of the allocation fast path.
  165  */
  166 static struct callout uma_callout;
  167 #define UMA_TIMEOUT     20              /* Seconds for callout interval. */
  168 
  169 /*
  170  * This structure is passed as the zone ctor arg so that I don't have to create
  171  * a special allocation function just for zones.
  172  */
  173 struct uma_zctor_args {
  174         const char *name;
  175         size_t size;
  176         uma_ctor ctor;
  177         uma_dtor dtor;
  178         uma_init uminit;
  179         uma_fini fini;
  180         uma_import import;
  181         uma_release release;
  182         void *arg;
  183         uma_keg_t keg;
  184         int align;
  185         uint32_t flags;
  186 };
  187 
  188 struct uma_kctor_args {
  189         uma_zone_t zone;
  190         size_t size;
  191         uma_init uminit;
  192         uma_fini fini;
  193         int align;
  194         uint32_t flags;
  195 };
  196 
  197 struct uma_bucket_zone {
  198         uma_zone_t      ubz_zone;
  199         char            *ubz_name;
  200         int             ubz_entries;    /* Number of items it can hold. */
  201         int             ubz_maxsize;    /* Maximum allocation size per-item. */
  202 };
  203 
  204 /*
  205  * Compute the actual number of bucket entries to pack them in power
  206  * of two sizes for more efficient space utilization.
  207  */
  208 #define BUCKET_SIZE(n)                                          \
  209     (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *))
  210 
  211 #define BUCKET_MAX      BUCKET_SIZE(256)
  212 
  213 struct uma_bucket_zone bucket_zones[] = {
  214         { NULL, "4 Bucket", BUCKET_SIZE(4), 4096 },
  215         { NULL, "6 Bucket", BUCKET_SIZE(6), 3072 },
  216         { NULL, "8 Bucket", BUCKET_SIZE(8), 2048 },
  217         { NULL, "12 Bucket", BUCKET_SIZE(12), 1536 },
  218         { NULL, "16 Bucket", BUCKET_SIZE(16), 1024 },
  219         { NULL, "32 Bucket", BUCKET_SIZE(32), 512 },
  220         { NULL, "64 Bucket", BUCKET_SIZE(64), 256 },
  221         { NULL, "128 Bucket", BUCKET_SIZE(128), 128 },
  222         { NULL, "256 Bucket", BUCKET_SIZE(256), 64 },
  223         { NULL, NULL, 0}
  224 };
  225 
  226 /*
  227  * Flags and enumerations to be passed to internal functions.
  228  */
  229 enum zfreeskip { SKIP_NONE = 0, SKIP_DTOR, SKIP_FINI };
  230 
  231 /* Prototypes.. */
  232 
  233 static void *noobj_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
  234 static void *page_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
  235 static void *startup_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
  236 static void page_free(void *, vm_size_t, uint8_t);
  237 static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int);
  238 static void cache_drain(uma_zone_t);
  239 static void bucket_drain(uma_zone_t, uma_bucket_t);
  240 static void bucket_cache_drain(uma_zone_t zone);
  241 static int keg_ctor(void *, int, void *, int);
  242 static void keg_dtor(void *, int, void *);
  243 static int zone_ctor(void *, int, void *, int);
  244 static void zone_dtor(void *, int, void *);
  245 static int zero_init(void *, int, int);
  246 static void keg_small_init(uma_keg_t keg);
  247 static void keg_large_init(uma_keg_t keg);
  248 static void zone_foreach(void (*zfunc)(uma_zone_t));
  249 static void zone_timeout(uma_zone_t zone);
  250 static int hash_alloc(struct uma_hash *);
  251 static int hash_expand(struct uma_hash *, struct uma_hash *);
  252 static void hash_free(struct uma_hash *hash);
  253 static void uma_timeout(void *);
  254 static void uma_startup3(void);
  255 static void *zone_alloc_item(uma_zone_t, void *, int);
  256 static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
  257 static void bucket_enable(void);
  258 static void bucket_init(void);
  259 static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
  260 static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
  261 static void bucket_zone_drain(void);
  262 static uma_bucket_t zone_alloc_bucket(uma_zone_t zone, void *, int flags);
  263 static uma_slab_t zone_fetch_slab(uma_zone_t zone, uma_keg_t last, int flags);
  264 static uma_slab_t zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int flags);
  265 static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
  266 static void slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item);
  267 static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
  268     uma_fini fini, int align, uint32_t flags);
  269 static int zone_import(uma_zone_t zone, void **bucket, int max, int flags);
  270 static void zone_release(uma_zone_t zone, void **bucket, int cnt);
  271 static void uma_zero_item(void *item, uma_zone_t zone);
  272 
  273 void uma_print_zone(uma_zone_t);
  274 void uma_print_stats(void);
  275 static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
  276 static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
  277 
  278 SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
  279 
  280 SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT,
  281     0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");
  282 
  283 SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
  284     0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");
  285 
  286 static int zone_warnings = 1;
  287 TUNABLE_INT("vm.zone_warnings", &zone_warnings);
  288 SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RW, &zone_warnings, 0,
  289     "Warn when UMA zones becomes full");
  290 
  291 /*
  292  * This routine checks to see whether or not it's safe to enable buckets.
  293  */
  294 static void
  295 bucket_enable(void)
  296 {
  297         bucketdisable = vm_page_count_min();
  298 }
  299 
  300 /*
  301  * Initialize bucket_zones, the array of zones of buckets of various sizes.
  302  *
  303  * For each zone, calculate the memory required for each bucket, consisting
  304  * of the header and an array of pointers.
  305  */
  306 static void
  307 bucket_init(void)
  308 {
  309         struct uma_bucket_zone *ubz;
  310         int size;
  311 
  312         for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) {
  313                 size = roundup(sizeof(struct uma_bucket), sizeof(void *));
  314                 size += sizeof(void *) * ubz->ubz_entries;
  315                 ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
  316                     NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
  317                     UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET);
  318         }
  319 }
  320 
  321 /*
  322  * Given a desired number of entries for a bucket, return the zone from which
  323  * to allocate the bucket.
  324  */
  325 static struct uma_bucket_zone *
  326 bucket_zone_lookup(int entries)
  327 {
  328         struct uma_bucket_zone *ubz;
  329 
  330         for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
  331                 if (ubz->ubz_entries >= entries)
  332                         return (ubz);
  333         ubz--;
  334         return (ubz);
  335 }
  336 
  337 static int
  338 bucket_select(int size)
  339 {
  340         struct uma_bucket_zone *ubz;
  341 
  342         ubz = &bucket_zones[0];
  343         if (size > ubz->ubz_maxsize)
  344                 return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1);
  345 
  346         for (; ubz->ubz_entries != 0; ubz++)
  347                 if (ubz->ubz_maxsize < size)
  348                         break;
  349         ubz--;
  350         return (ubz->ubz_entries);
  351 }
  352 
  353 static uma_bucket_t
  354 bucket_alloc(uma_zone_t zone, void *udata, int flags)
  355 {
  356         struct uma_bucket_zone *ubz;
  357         uma_bucket_t bucket;
  358 
  359         /*
  360          * This is to stop us from allocating per cpu buckets while we're
  361          * running out of vm.boot_pages.  Otherwise, we would exhaust the
  362          * boot pages.  This also prevents us from allocating buckets in
  363          * low memory situations.
  364          */
  365         if (bucketdisable)
  366                 return (NULL);
  367         /*
  368          * To limit bucket recursion we store the original zone flags
  369          * in a cookie passed via zalloc_arg/zfree_arg.  This allows the
  370          * NOVM flag to persist even through deep recursions.  We also
  371          * store ZFLAG_BUCKET once we have recursed attempting to allocate
  372          * a bucket for a bucket zone so we do not allow infinite bucket
  373          * recursion.  This cookie will even persist to frees of unused
  374          * buckets via the allocation path or bucket allocations in the
  375          * free path.
  376          */
  377         if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
  378                 udata = (void *)(uintptr_t)zone->uz_flags;
  379         else {
  380                 if ((uintptr_t)udata & UMA_ZFLAG_BUCKET)
  381                         return (NULL);
  382                 udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET);
  383         }
  384         if ((uintptr_t)udata & UMA_ZFLAG_CACHEONLY)
  385                 flags |= M_NOVM;
  386         ubz = bucket_zone_lookup(zone->uz_count);
  387         if (ubz->ubz_zone == zone && (ubz + 1)->ubz_entries != 0)
  388                 ubz++;
  389         bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags);
  390         if (bucket) {
  391 #ifdef INVARIANTS
  392                 bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
  393 #endif
  394                 bucket->ub_cnt = 0;
  395                 bucket->ub_entries = ubz->ubz_entries;
  396         }
  397 
  398         return (bucket);
  399 }
  400 
  401 static void
  402 bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata)
  403 {
  404         struct uma_bucket_zone *ubz;
  405 
  406         KASSERT(bucket->ub_cnt == 0,
  407             ("bucket_free: Freeing a non free bucket."));
  408         if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
  409                 udata = (void *)(uintptr_t)zone->uz_flags;
  410         ubz = bucket_zone_lookup(bucket->ub_entries);
  411         uma_zfree_arg(ubz->ubz_zone, bucket, udata);
  412 }
  413 
  414 static void
  415 bucket_zone_drain(void)
  416 {
  417         struct uma_bucket_zone *ubz;
  418 
  419         for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
  420                 zone_drain(ubz->ubz_zone);
  421 }
  422 
  423 static void
  424 zone_log_warning(uma_zone_t zone)
  425 {
  426         static const struct timeval warninterval = { 300, 0 };
  427 
  428         if (!zone_warnings || zone->uz_warning == NULL)
  429                 return;
  430 
  431         if (ratecheck(&zone->uz_ratecheck, &warninterval))
  432                 printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning);
  433 }
  434 
  435 static void
  436 zone_foreach_keg(uma_zone_t zone, void (*kegfn)(uma_keg_t))
  437 {
  438         uma_klink_t klink;
  439 
  440         LIST_FOREACH(klink, &zone->uz_kegs, kl_link)
  441                 kegfn(klink->kl_keg);
  442 }
  443 
  444 /*
  445  * Routine called by timeout which is used to fire off some time interval
  446  * based calculations.  (stats, hash size, etc.)
  447  *
  448  * Arguments:
  449  *      arg   Unused
  450  *
  451  * Returns:
  452  *      Nothing
  453  */
  454 static void
  455 uma_timeout(void *unused)
  456 {
  457         bucket_enable();
  458         zone_foreach(zone_timeout);
  459 
  460         /* Reschedule this event */
  461         callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
  462 }
  463 
  464 /*
  465  * Routine to perform timeout driven calculations.  This expands the
  466  * hashes and does per cpu statistics aggregation.
  467  *
  468  *  Returns nothing.
  469  */
  470 static void
  471 keg_timeout(uma_keg_t keg)
  472 {
  473 
  474         KEG_LOCK(keg);
  475         /*
  476          * Expand the keg hash table.
  477          *
  478          * This is done if the number of slabs is larger than the hash size.
  479          * What I'm trying to do here is completely reduce collisions.  This
  480          * may be a little aggressive.  Should I allow for two collisions max?
  481          */
  482         if (keg->uk_flags & UMA_ZONE_HASH &&
  483             keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) {
  484                 struct uma_hash newhash;
  485                 struct uma_hash oldhash;
  486                 int ret;
  487 
  488                 /*
  489                  * This is so involved because allocating and freeing
  490                  * while the keg lock is held will lead to deadlock.
  491                  * I have to do everything in stages and check for
  492                  * races.
  493                  */
  494                 newhash = keg->uk_hash;
  495                 KEG_UNLOCK(keg);
  496                 ret = hash_alloc(&newhash);
  497                 KEG_LOCK(keg);
  498                 if (ret) {
  499                         if (hash_expand(&keg->uk_hash, &newhash)) {
  500                                 oldhash = keg->uk_hash;
  501                                 keg->uk_hash = newhash;
  502                         } else
  503                                 oldhash = newhash;
  504 
  505                         KEG_UNLOCK(keg);
  506                         hash_free(&oldhash);
  507                         return;
  508                 }
  509         }
  510         KEG_UNLOCK(keg);
  511 }
  512 
  513 static void
  514 zone_timeout(uma_zone_t zone)
  515 {
  516 
  517         zone_foreach_keg(zone, &keg_timeout);
  518 }
  519 
  520 /*
  521  * Allocate and zero fill the next sized hash table from the appropriate
  522  * backing store.
  523  *
  524  * Arguments:
  525  *      hash  A new hash structure with the old hash size in uh_hashsize
  526  *
  527  * Returns:
  528  *      1 on sucess and 0 on failure.
  529  */
  530 static int
  531 hash_alloc(struct uma_hash *hash)
  532 {
  533         int oldsize;
  534         int alloc;
  535 
  536         oldsize = hash->uh_hashsize;
  537 
  538         /* We're just going to go to a power of two greater */
  539         if (oldsize)  {
  540                 hash->uh_hashsize = oldsize * 2;
  541                 alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
  542                 hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
  543                     M_UMAHASH, M_NOWAIT);
  544         } else {
  545                 alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
  546                 hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
  547                     M_WAITOK);
  548                 hash->uh_hashsize = UMA_HASH_SIZE_INIT;
  549         }
  550         if (hash->uh_slab_hash) {
  551                 bzero(hash->uh_slab_hash, alloc);
  552                 hash->uh_hashmask = hash->uh_hashsize - 1;
  553                 return (1);
  554         }
  555 
  556         return (0);
  557 }
  558 
  559 /*
  560  * Expands the hash table for HASH zones.  This is done from zone_timeout
  561  * to reduce collisions.  This must not be done in the regular allocation
  562  * path, otherwise, we can recurse on the vm while allocating pages.
  563  *
  564  * Arguments:
  565  *      oldhash  The hash you want to expand
  566  *      newhash  The hash structure for the new table
  567  *
  568  * Returns:
  569  *      Nothing
  570  *
  571  * Discussion:
  572  */
  573 static int
  574 hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
  575 {
  576         uma_slab_t slab;
  577         int hval;
  578         int i;
  579 
  580         if (!newhash->uh_slab_hash)
  581                 return (0);
  582 
  583         if (oldhash->uh_hashsize >= newhash->uh_hashsize)
  584                 return (0);
  585 
  586         /*
  587          * I need to investigate hash algorithms for resizing without a
  588          * full rehash.
  589          */
  590 
  591         for (i = 0; i < oldhash->uh_hashsize; i++)
  592                 while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) {
  593                         slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]);
  594                         SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink);
  595                         hval = UMA_HASH(newhash, slab->us_data);
  596                         SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
  597                             slab, us_hlink);
  598                 }
  599 
  600         return (1);
  601 }
  602 
  603 /*
  604  * Free the hash bucket to the appropriate backing store.
  605  *
  606  * Arguments:
  607  *      slab_hash  The hash bucket we're freeing
  608  *      hashsize   The number of entries in that hash bucket
  609  *
  610  * Returns:
  611  *      Nothing
  612  */
  613 static void
  614 hash_free(struct uma_hash *hash)
  615 {
  616         if (hash->uh_slab_hash == NULL)
  617                 return;
  618         if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
  619                 zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE);
  620         else
  621                 free(hash->uh_slab_hash, M_UMAHASH);
  622 }
  623 
  624 /*
  625  * Frees all outstanding items in a bucket
  626  *
  627  * Arguments:
  628  *      zone   The zone to free to, must be unlocked.
  629  *      bucket The free/alloc bucket with items, cpu queue must be locked.
  630  *
  631  * Returns:
  632  *      Nothing
  633  */
  634 
  635 static void
  636 bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
  637 {
  638         int i;
  639 
  640         if (bucket == NULL)
  641                 return;
  642 
  643         if (zone->uz_fini)
  644                 for (i = 0; i < bucket->ub_cnt; i++) 
  645                         zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
  646         zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
  647         bucket->ub_cnt = 0;
  648 }
  649 
  650 /*
  651  * Drains the per cpu caches for a zone.
  652  *
  653  * NOTE: This may only be called while the zone is being turn down, and not
  654  * during normal operation.  This is necessary in order that we do not have
  655  * to migrate CPUs to drain the per-CPU caches.
  656  *
  657  * Arguments:
  658  *      zone     The zone to drain, must be unlocked.
  659  *
  660  * Returns:
  661  *      Nothing
  662  */
  663 static void
  664 cache_drain(uma_zone_t zone)
  665 {
  666         uma_cache_t cache;
  667         int cpu;
  668 
  669         /*
  670          * XXX: It is safe to not lock the per-CPU caches, because we're
  671          * tearing down the zone anyway.  I.e., there will be no further use
  672          * of the caches at this point.
  673          *
  674          * XXX: It would good to be able to assert that the zone is being
  675          * torn down to prevent improper use of cache_drain().
  676          *
  677          * XXX: We lock the zone before passing into bucket_cache_drain() as
  678          * it is used elsewhere.  Should the tear-down path be made special
  679          * there in some form?
  680          */
  681         CPU_FOREACH(cpu) {
  682                 cache = &zone->uz_cpu[cpu];
  683                 bucket_drain(zone, cache->uc_allocbucket);
  684                 bucket_drain(zone, cache->uc_freebucket);
  685                 if (cache->uc_allocbucket != NULL)
  686                         bucket_free(zone, cache->uc_allocbucket, NULL);
  687                 if (cache->uc_freebucket != NULL)
  688                         bucket_free(zone, cache->uc_freebucket, NULL);
  689                 cache->uc_allocbucket = cache->uc_freebucket = NULL;
  690         }
  691         ZONE_LOCK(zone);
  692         bucket_cache_drain(zone);
  693         ZONE_UNLOCK(zone);
  694 }
  695 
  696 static void
  697 cache_shrink(uma_zone_t zone)
  698 {
  699 
  700         if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
  701                 return;
  702 
  703         ZONE_LOCK(zone);
  704         zone->uz_count = (zone->uz_count_min + zone->uz_count) / 2;
  705         ZONE_UNLOCK(zone);
  706 }
  707 
  708 static void
  709 cache_drain_safe_cpu(uma_zone_t zone)
  710 {
  711         uma_cache_t cache;
  712         uma_bucket_t b1, b2;
  713 
  714         if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
  715                 return;
  716 
  717         b1 = b2 = NULL;
  718         ZONE_LOCK(zone);
  719         critical_enter();
  720         cache = &zone->uz_cpu[curcpu];
  721         if (cache->uc_allocbucket) {
  722                 if (cache->uc_allocbucket->ub_cnt != 0)
  723                         LIST_INSERT_HEAD(&zone->uz_buckets,
  724                             cache->uc_allocbucket, ub_link);
  725                 else
  726                         b1 = cache->uc_allocbucket;
  727                 cache->uc_allocbucket = NULL;
  728         }
  729         if (cache->uc_freebucket) {
  730                 if (cache->uc_freebucket->ub_cnt != 0)
  731                         LIST_INSERT_HEAD(&zone->uz_buckets,
  732                             cache->uc_freebucket, ub_link);
  733                 else
  734                         b2 = cache->uc_freebucket;
  735                 cache->uc_freebucket = NULL;
  736         }
  737         critical_exit();
  738         ZONE_UNLOCK(zone);
  739         if (b1)
  740                 bucket_free(zone, b1, NULL);
  741         if (b2)
  742                 bucket_free(zone, b2, NULL);
  743 }
  744 
  745 /*
  746  * Safely drain per-CPU caches of a zone(s) to alloc bucket.
  747  * This is an expensive call because it needs to bind to all CPUs
  748  * one by one and enter a critical section on each of them in order
  749  * to safely access their cache buckets.
  750  * Zone lock must not be held on call this function.
  751  */
  752 static void
  753 cache_drain_safe(uma_zone_t zone)
  754 {
  755         int cpu;
  756 
  757         /*
  758          * Polite bucket sizes shrinking was not enouth, shrink aggressively.
  759          */
  760         if (zone)
  761                 cache_shrink(zone);
  762         else
  763                 zone_foreach(cache_shrink);
  764 
  765         CPU_FOREACH(cpu) {
  766                 thread_lock(curthread);
  767                 sched_bind(curthread, cpu);
  768                 thread_unlock(curthread);
  769 
  770                 if (zone)
  771                         cache_drain_safe_cpu(zone);
  772                 else
  773                         zone_foreach(cache_drain_safe_cpu);
  774         }
  775         thread_lock(curthread);
  776         sched_unbind(curthread);
  777         thread_unlock(curthread);
  778 }
  779 
  780 /*
  781  * Drain the cached buckets from a zone.  Expects a locked zone on entry.
  782  */
  783 static void
  784 bucket_cache_drain(uma_zone_t zone)
  785 {
  786         uma_bucket_t bucket;
  787 
  788         /*
  789          * Drain the bucket queues and free the buckets, we just keep two per
  790          * cpu (alloc/free).
  791          */
  792         while ((bucket = LIST_FIRST(&zone->uz_buckets)) != NULL) {
  793                 LIST_REMOVE(bucket, ub_link);
  794                 ZONE_UNLOCK(zone);
  795                 bucket_drain(zone, bucket);
  796                 bucket_free(zone, bucket, NULL);
  797                 ZONE_LOCK(zone);
  798         }
  799 
  800         /*
  801          * Shrink further bucket sizes.  Price of single zone lock collision
  802          * is probably lower then price of global cache drain.
  803          */
  804         if (zone->uz_count > zone->uz_count_min)
  805                 zone->uz_count--;
  806 }
  807 
  808 static void
  809 keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
  810 {
  811         uint8_t *mem;
  812         int i;
  813         uint8_t flags;
  814 
  815         mem = slab->us_data;
  816         flags = slab->us_flags;
  817         i = start;
  818         if (keg->uk_fini != NULL) {
  819                 for (i--; i > -1; i--)
  820                         keg->uk_fini(slab->us_data + (keg->uk_rsize * i),
  821                             keg->uk_size);
  822         }
  823         if (keg->uk_flags & UMA_ZONE_OFFPAGE)
  824                 zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
  825 #ifdef UMA_DEBUG
  826         printf("%s: Returning %d bytes.\n", keg->uk_name,
  827             PAGE_SIZE * keg->uk_ppera);
  828 #endif
  829         keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags);
  830 }
  831 
  832 /*
  833  * Frees pages from a keg back to the system.  This is done on demand from
  834  * the pageout daemon.
  835  *
  836  * Returns nothing.
  837  */
  838 static void
  839 keg_drain(uma_keg_t keg)
  840 {
  841         struct slabhead freeslabs = { 0 };
  842         uma_slab_t slab;
  843         uma_slab_t n;
  844 
  845         /*
  846          * We don't want to take pages from statically allocated kegs at this
  847          * time
  848          */
  849         if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
  850                 return;
  851 
  852 #ifdef UMA_DEBUG
  853         printf("%s free items: %u\n", keg->uk_name, keg->uk_free);
  854 #endif
  855         KEG_LOCK(keg);
  856         if (keg->uk_free == 0)
  857                 goto finished;
  858 
  859         slab = LIST_FIRST(&keg->uk_free_slab);
  860         while (slab) {
  861                 n = LIST_NEXT(slab, us_link);
  862 
  863                 /* We have no where to free these to */
  864                 if (slab->us_flags & UMA_SLAB_BOOT) {
  865                         slab = n;
  866                         continue;
  867                 }
  868 
  869                 LIST_REMOVE(slab, us_link);
  870                 keg->uk_pages -= keg->uk_ppera;
  871                 keg->uk_free -= keg->uk_ipers;
  872 
  873                 if (keg->uk_flags & UMA_ZONE_HASH)
  874                         UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data);
  875 
  876                 SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
  877 
  878                 slab = n;
  879         }
  880 finished:
  881         KEG_UNLOCK(keg);
  882 
  883         while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
  884                 SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
  885                 keg_free_slab(keg, slab, keg->uk_ipers);
  886         }
  887 }
  888 
  889 static void
  890 zone_drain_wait(uma_zone_t zone, int waitok)
  891 {
  892 
  893         /*
  894          * Set draining to interlock with zone_dtor() so we can release our
  895          * locks as we go.  Only dtor() should do a WAITOK call since it
  896          * is the only call that knows the structure will still be available
  897          * when it wakes up.
  898          */
  899         ZONE_LOCK(zone);
  900         while (zone->uz_flags & UMA_ZFLAG_DRAINING) {
  901                 if (waitok == M_NOWAIT)
  902                         goto out;
  903                 msleep(zone, zone->uz_lockptr, PVM, "zonedrain", 1);
  904         }
  905         zone->uz_flags |= UMA_ZFLAG_DRAINING;
  906         bucket_cache_drain(zone);
  907         ZONE_UNLOCK(zone);
  908         /*
  909          * The DRAINING flag protects us from being freed while
  910          * we're running.  Normally the uma_rwlock would protect us but we
  911          * must be able to release and acquire the right lock for each keg.
  912          */
  913         zone_foreach_keg(zone, &keg_drain);
  914         ZONE_LOCK(zone);
  915         zone->uz_flags &= ~UMA_ZFLAG_DRAINING;
  916         wakeup(zone);
  917 out:
  918         ZONE_UNLOCK(zone);
  919 }
  920 
  921 void
  922 zone_drain(uma_zone_t zone)
  923 {
  924 
  925         zone_drain_wait(zone, M_NOWAIT);
  926 }
  927 
  928 /*
  929  * Allocate a new slab for a keg.  This does not insert the slab onto a list.
  930  *
  931  * Arguments:
  932  *      wait  Shall we wait?
  933  *
  934  * Returns:
  935  *      The slab that was allocated or NULL if there is no memory and the
  936  *      caller specified M_NOWAIT.
  937  */
  938 static uma_slab_t
  939 keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int wait)
  940 {
  941         uma_slabrefcnt_t slabref;
  942         uma_alloc allocf;
  943         uma_slab_t slab;
  944         uint8_t *mem;
  945         uint8_t flags;
  946         int i;
  947 
  948         mtx_assert(&keg->uk_lock, MA_OWNED);
  949         slab = NULL;
  950         mem = NULL;
  951 
  952 #ifdef UMA_DEBUG
  953         printf("alloc_slab:  Allocating a new slab for %s\n", keg->uk_name);
  954 #endif
  955         allocf = keg->uk_allocf;
  956         KEG_UNLOCK(keg);
  957 
  958         if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
  959                 slab = zone_alloc_item(keg->uk_slabzone, NULL, wait);
  960                 if (slab == NULL)
  961                         goto out;
  962         }
  963 
  964         /*
  965          * This reproduces the old vm_zone behavior of zero filling pages the
  966          * first time they are added to a zone.
  967          *
  968          * Malloced items are zeroed in uma_zalloc.
  969          */
  970 
  971         if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
  972                 wait |= M_ZERO;
  973         else
  974                 wait &= ~M_ZERO;
  975 
  976         if (keg->uk_flags & UMA_ZONE_NODUMP)
  977                 wait |= M_NODUMP;
  978 
  979         /* zone is passed for legacy reasons. */
  980         mem = allocf(zone, keg->uk_ppera * PAGE_SIZE, &flags, wait);
  981         if (mem == NULL) {
  982                 if (keg->uk_flags & UMA_ZONE_OFFPAGE)
  983                         zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
  984                 slab = NULL;
  985                 goto out;
  986         }
  987 
  988         /* Point the slab into the allocated memory */
  989         if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
  990                 slab = (uma_slab_t )(mem + keg->uk_pgoff);
  991 
  992         if (keg->uk_flags & UMA_ZONE_VTOSLAB)
  993                 for (i = 0; i < keg->uk_ppera; i++)
  994                         vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);
  995 
  996         slab->us_keg = keg;
  997         slab->us_data = mem;
  998         slab->us_freecount = keg->uk_ipers;
  999         slab->us_flags = flags;
 1000         BIT_FILL(SLAB_SETSIZE, &slab->us_free);
 1001 #ifdef INVARIANTS
 1002         BIT_ZERO(SLAB_SETSIZE, &slab->us_debugfree);
 1003 #endif
 1004         if (keg->uk_flags & UMA_ZONE_REFCNT) {
 1005                 slabref = (uma_slabrefcnt_t)slab;
 1006                 for (i = 0; i < keg->uk_ipers; i++)
 1007                         slabref->us_refcnt[i] = 0;
 1008         }
 1009 
 1010         if (keg->uk_init != NULL) {
 1011                 for (i = 0; i < keg->uk_ipers; i++)
 1012                         if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
 1013                             keg->uk_size, wait) != 0)
 1014                                 break;
 1015                 if (i != keg->uk_ipers) {
 1016                         keg_free_slab(keg, slab, i);
 1017                         slab = NULL;
 1018                         goto out;
 1019                 }
 1020         }
 1021 out:
 1022         KEG_LOCK(keg);
 1023 
 1024         if (slab != NULL) {
 1025                 if (keg->uk_flags & UMA_ZONE_HASH)
 1026                         UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
 1027 
 1028                 keg->uk_pages += keg->uk_ppera;
 1029                 keg->uk_free += keg->uk_ipers;
 1030         }
 1031 
 1032         return (slab);
 1033 }
 1034 
 1035 /*
 1036  * This function is intended to be used early on in place of page_alloc() so
 1037  * that we may use the boot time page cache to satisfy allocations before
 1038  * the VM is ready.
 1039  */
 1040 static void *
 1041 startup_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag, int wait)
 1042 {
 1043         uma_keg_t keg;
 1044         uma_slab_t tmps;
 1045         int pages, check_pages;
 1046 
 1047         keg = zone_first_keg(zone);
 1048         pages = howmany(bytes, PAGE_SIZE);
 1049         check_pages = pages - 1;
 1050         KASSERT(pages > 0, ("startup_alloc can't reserve 0 pages\n"));
 1051 
 1052         /*
 1053          * Check our small startup cache to see if it has pages remaining.
 1054          */
 1055         mtx_lock(&uma_boot_pages_mtx);
 1056 
 1057         /* First check if we have enough room. */
 1058         tmps = LIST_FIRST(&uma_boot_pages);
 1059         while (tmps != NULL && check_pages-- > 0)
 1060                 tmps = LIST_NEXT(tmps, us_link);
 1061         if (tmps != NULL) {
 1062                 /*
 1063                  * It's ok to lose tmps references.  The last one will
 1064                  * have tmps->us_data pointing to the start address of
 1065                  * "pages" contiguous pages of memory.
 1066                  */
 1067                 while (pages-- > 0) {
 1068                         tmps = LIST_FIRST(&uma_boot_pages);
 1069                         LIST_REMOVE(tmps, us_link);
 1070                 }
 1071                 mtx_unlock(&uma_boot_pages_mtx);
 1072                 *pflag = tmps->us_flags;
 1073                 return (tmps->us_data);
 1074         }
 1075         mtx_unlock(&uma_boot_pages_mtx);
 1076         if (booted < UMA_STARTUP2)
 1077                 panic("UMA: Increase vm.boot_pages");
 1078         /*
 1079          * Now that we've booted reset these users to their real allocator.
 1080          */
 1081 #ifdef UMA_MD_SMALL_ALLOC
 1082         keg->uk_allocf = (keg->uk_ppera > 1) ? page_alloc : uma_small_alloc;
 1083 #else
 1084         keg->uk_allocf = page_alloc;
 1085 #endif
 1086         return keg->uk_allocf(zone, bytes, pflag, wait);
 1087 }
 1088 
 1089 /*
 1090  * Allocates a number of pages from the system
 1091  *
 1092  * Arguments:
 1093  *      bytes  The number of bytes requested
 1094  *      wait  Shall we wait?
 1095  *
 1096  * Returns:
 1097  *      A pointer to the alloced memory or possibly
 1098  *      NULL if M_NOWAIT is set.
 1099  */
 1100 static void *
 1101 page_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag, int wait)
 1102 {
 1103         void *p;        /* Returned page */
 1104 
 1105         *pflag = UMA_SLAB_KMEM;
 1106         p = (void *) kmem_malloc(kmem_arena, bytes, wait);
 1107 
 1108         return (p);
 1109 }
 1110 
 1111 /*
 1112  * Allocates a number of pages from within an object
 1113  *
 1114  * Arguments:
 1115  *      bytes  The number of bytes requested
 1116  *      wait   Shall we wait?
 1117  *
 1118  * Returns:
 1119  *      A pointer to the alloced memory or possibly
 1120  *      NULL if M_NOWAIT is set.
 1121  */
 1122 static void *
 1123 noobj_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *flags, int wait)
 1124 {
 1125         TAILQ_HEAD(, vm_page) alloctail;
 1126         u_long npages;
 1127         vm_offset_t retkva, zkva;
 1128         vm_page_t p, p_next;
 1129         uma_keg_t keg;
 1130 
 1131         TAILQ_INIT(&alloctail);
 1132         keg = zone_first_keg(zone);
 1133 
 1134         npages = howmany(bytes, PAGE_SIZE);
 1135         while (npages > 0) {
 1136                 p = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT |
 1137                     VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);
 1138                 if (p != NULL) {
 1139                         /*
 1140                          * Since the page does not belong to an object, its
 1141                          * listq is unused.
 1142                          */
 1143                         TAILQ_INSERT_TAIL(&alloctail, p, listq);
 1144                         npages--;
 1145                         continue;
 1146                 }
 1147                 if (wait & M_WAITOK) {
 1148                         VM_WAIT;
 1149                         continue;
 1150                 }
 1151 
 1152                 /*
 1153                  * Page allocation failed, free intermediate pages and
 1154                  * exit.
 1155                  */
 1156                 TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
 1157                         vm_page_unwire(p, 0);
 1158                         vm_page_free(p); 
 1159                 }
 1160                 return (NULL);
 1161         }
 1162         *flags = UMA_SLAB_PRIV;
 1163         zkva = keg->uk_kva +
 1164             atomic_fetchadd_long(&keg->uk_offset, round_page(bytes));
 1165         retkva = zkva;
 1166         TAILQ_FOREACH(p, &alloctail, listq) {
 1167                 pmap_qenter(zkva, &p, 1);
 1168                 zkva += PAGE_SIZE;
 1169         }
 1170 
 1171         return ((void *)retkva);
 1172 }
 1173 
 1174 /*
 1175  * Frees a number of pages to the system
 1176  *
 1177  * Arguments:
 1178  *      mem   A pointer to the memory to be freed
 1179  *      size  The size of the memory being freed
 1180  *      flags The original p->us_flags field
 1181  *
 1182  * Returns:
 1183  *      Nothing
 1184  */
 1185 static void
 1186 page_free(void *mem, vm_size_t size, uint8_t flags)
 1187 {
 1188         struct vmem *vmem;
 1189 
 1190         if (flags & UMA_SLAB_KMEM)
 1191                 vmem = kmem_arena;
 1192         else if (flags & UMA_SLAB_KERNEL)
 1193                 vmem = kernel_arena;
 1194         else
 1195                 panic("UMA: page_free used with invalid flags %d", flags);
 1196 
 1197         kmem_free(vmem, (vm_offset_t)mem, size);
 1198 }
 1199 
 1200 /*
 1201  * Zero fill initializer
 1202  *
 1203  * Arguments/Returns follow uma_init specifications
 1204  */
 1205 static int
 1206 zero_init(void *mem, int size, int flags)
 1207 {
 1208         bzero(mem, size);
 1209         return (0);
 1210 }
 1211 
 1212 /*
 1213  * Finish creating a small uma keg.  This calculates ipers, and the keg size.
 1214  *
 1215  * Arguments
 1216  *      keg  The zone we should initialize
 1217  *
 1218  * Returns
 1219  *      Nothing
 1220  */
 1221 static void
 1222 keg_small_init(uma_keg_t keg)
 1223 {
 1224         u_int rsize;
 1225         u_int memused;
 1226         u_int wastedspace;
 1227         u_int shsize;
 1228         u_int slabsize;
 1229 
 1230         if (keg->uk_flags & UMA_ZONE_PCPU) {
 1231                 u_int ncpus = mp_ncpus ? mp_ncpus : MAXCPU;
 1232 
 1233                 slabsize = sizeof(struct pcpu);
 1234                 keg->uk_ppera = howmany(ncpus * sizeof(struct pcpu),
 1235                     PAGE_SIZE);
 1236         } else {
 1237                 slabsize = UMA_SLAB_SIZE;
 1238                 keg->uk_ppera = 1;
 1239         }
 1240 
 1241         /*
 1242          * Calculate the size of each allocation (rsize) according to
 1243          * alignment.  If the requested size is smaller than we have
 1244          * allocation bits for we round it up.
 1245          */
 1246         rsize = keg->uk_size;
 1247         if (rsize < slabsize / SLAB_SETSIZE)
 1248                 rsize = slabsize / SLAB_SETSIZE;
 1249         if (rsize & keg->uk_align)
 1250                 rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);
 1251         keg->uk_rsize = rsize;
 1252 
 1253         KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 ||
 1254             keg->uk_rsize < sizeof(struct pcpu),
 1255             ("%s: size %u too large", __func__, keg->uk_rsize));
 1256 
 1257         if (keg->uk_flags & UMA_ZONE_REFCNT)
 1258                 rsize += sizeof(uint32_t);
 1259 
 1260         if (keg->uk_flags & UMA_ZONE_OFFPAGE)
 1261                 shsize = 0;
 1262         else 
 1263                 shsize = sizeof(struct uma_slab);
 1264 
 1265         keg->uk_ipers = (slabsize - shsize) / rsize;
 1266         KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
 1267             ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
 1268 
 1269         memused = keg->uk_ipers * rsize + shsize;
 1270         wastedspace = slabsize - memused;
 1271 
 1272         /*
 1273          * We can't do OFFPAGE if we're internal or if we've been
 1274          * asked to not go to the VM for buckets.  If we do this we
 1275          * may end up going to the VM  for slabs which we do not
 1276          * want to do if we're UMA_ZFLAG_CACHEONLY as a result
 1277          * of UMA_ZONE_VM, which clearly forbids it.
 1278          */
 1279         if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
 1280             (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
 1281                 return;
 1282 
 1283         /*
 1284          * See if using an OFFPAGE slab will limit our waste.  Only do
 1285          * this if it permits more items per-slab.
 1286          *
 1287          * XXX We could try growing slabsize to limit max waste as well.
 1288          * Historically this was not done because the VM could not
 1289          * efficiently handle contiguous allocations.
 1290          */
 1291         if ((wastedspace >= slabsize / UMA_MAX_WASTE) &&
 1292             (keg->uk_ipers < (slabsize / keg->uk_rsize))) {
 1293                 keg->uk_ipers = slabsize / keg->uk_rsize;
 1294                 KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
 1295                     ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
 1296 #ifdef UMA_DEBUG
 1297                 printf("UMA decided we need offpage slab headers for "
 1298                     "keg: %s, calculated wastedspace = %d, "
 1299                     "maximum wasted space allowed = %d, "
 1300                     "calculated ipers = %d, "
 1301                     "new wasted space = %d\n", keg->uk_name, wastedspace,
 1302                     slabsize / UMA_MAX_WASTE, keg->uk_ipers,
 1303                     slabsize - keg->uk_ipers * keg->uk_rsize);
 1304 #endif
 1305                 keg->uk_flags |= UMA_ZONE_OFFPAGE;
 1306         }
 1307 
 1308         if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
 1309             (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
 1310                 keg->uk_flags |= UMA_ZONE_HASH;
 1311 }
 1312 
 1313 /*
 1314  * Finish creating a large (> UMA_SLAB_SIZE) uma kegs.  Just give in and do
 1315  * OFFPAGE for now.  When I can allow for more dynamic slab sizes this will be
 1316  * more complicated.
 1317  *
 1318  * Arguments
 1319  *      keg  The keg we should initialize
 1320  *
 1321  * Returns
 1322  *      Nothing
 1323  */
 1324 static void
 1325 keg_large_init(uma_keg_t keg)
 1326 {
 1327         u_int shsize;
 1328 
 1329         KASSERT(keg != NULL, ("Keg is null in keg_large_init"));
 1330         KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0,
 1331             ("keg_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY keg"));
 1332         KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
 1333             ("%s: Cannot large-init a UMA_ZONE_PCPU keg", __func__));
 1334 
 1335         keg->uk_ppera = howmany(keg->uk_size, PAGE_SIZE);
 1336         keg->uk_ipers = 1;
 1337         keg->uk_rsize = keg->uk_size;
 1338 
 1339         /* We can't do OFFPAGE if we're internal, bail out here. */
 1340         if (keg->uk_flags & UMA_ZFLAG_INTERNAL)
 1341                 return;
 1342 
 1343         /* Check whether we have enough space to not do OFFPAGE. */
 1344         if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0) {
 1345                 shsize = sizeof(struct uma_slab);
 1346                 if (keg->uk_flags & UMA_ZONE_REFCNT)
 1347                         shsize += keg->uk_ipers * sizeof(uint32_t);
 1348                 if (shsize & UMA_ALIGN_PTR)
 1349                         shsize = (shsize & ~UMA_ALIGN_PTR) +
 1350                             (UMA_ALIGN_PTR + 1);
 1351 
 1352                 if ((PAGE_SIZE * keg->uk_ppera) - keg->uk_rsize < shsize)
 1353                         keg->uk_flags |= UMA_ZONE_OFFPAGE;
 1354         }
 1355 
 1356         if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
 1357             (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
 1358                 keg->uk_flags |= UMA_ZONE_HASH;
 1359 }
 1360 
 1361 static void
 1362 keg_cachespread_init(uma_keg_t keg)
 1363 {
 1364         int alignsize;
 1365         int trailer;
 1366         int pages;
 1367         int rsize;
 1368 
 1369         KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
 1370             ("%s: Cannot cachespread-init a UMA_ZONE_PCPU keg", __func__));
 1371 
 1372         alignsize = keg->uk_align + 1;
 1373         rsize = keg->uk_size;
 1374         /*
 1375          * We want one item to start on every align boundary in a page.  To
 1376          * do this we will span pages.  We will also extend the item by the
 1377          * size of align if it is an even multiple of align.  Otherwise, it
 1378          * would fall on the same boundary every time.
 1379          */
 1380         if (rsize & keg->uk_align)
 1381                 rsize = (rsize & ~keg->uk_align) + alignsize;
 1382         if ((rsize & alignsize) == 0)
 1383                 rsize += alignsize;
 1384         trailer = rsize - keg->uk_size;
 1385         pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE;
 1386         pages = MIN(pages, (128 * 1024) / PAGE_SIZE);
 1387         keg->uk_rsize = rsize;
 1388         keg->uk_ppera = pages;
 1389         keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
 1390         keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
 1391         KASSERT(keg->uk_ipers <= SLAB_SETSIZE,
 1392             ("%s: keg->uk_ipers too high(%d) increase max_ipers", __func__,
 1393             keg->uk_ipers));
 1394 }
 1395 
 1396 /*
 1397  * Keg header ctor.  This initializes all fields, locks, etc.  And inserts
 1398  * the keg onto the global keg list.
 1399  *
 1400  * Arguments/Returns follow uma_ctor specifications
 1401  *      udata  Actually uma_kctor_args
 1402  */
 1403 static int
 1404 keg_ctor(void *mem, int size, void *udata, int flags)
 1405 {
 1406         struct uma_kctor_args *arg = udata;
 1407         uma_keg_t keg = mem;
 1408         uma_zone_t zone;
 1409 
 1410         bzero(keg, size);
 1411         keg->uk_size = arg->size;
 1412         keg->uk_init = arg->uminit;
 1413         keg->uk_fini = arg->fini;
 1414         keg->uk_align = arg->align;
 1415         keg->uk_free = 0;
 1416         keg->uk_reserve = 0;
 1417         keg->uk_pages = 0;
 1418         keg->uk_flags = arg->flags;
 1419         keg->uk_allocf = page_alloc;
 1420         keg->uk_freef = page_free;
 1421         keg->uk_slabzone = NULL;
 1422 
 1423         /*
 1424          * The master zone is passed to us at keg-creation time.
 1425          */
 1426         zone = arg->zone;
 1427         keg->uk_name = zone->uz_name;
 1428 
 1429         if (arg->flags & UMA_ZONE_VM)
 1430                 keg->uk_flags |= UMA_ZFLAG_CACHEONLY;
 1431 
 1432         if (arg->flags & UMA_ZONE_ZINIT)
 1433                 keg->uk_init = zero_init;
 1434 
 1435         if (arg->flags & UMA_ZONE_REFCNT || arg->flags & UMA_ZONE_MALLOC)
 1436                 keg->uk_flags |= UMA_ZONE_VTOSLAB;
 1437 
 1438         if (arg->flags & UMA_ZONE_PCPU)
 1439 #ifdef SMP
 1440                 keg->uk_flags |= UMA_ZONE_OFFPAGE;
 1441 #else
 1442                 keg->uk_flags &= ~UMA_ZONE_PCPU;
 1443 #endif
 1444 
 1445         if (keg->uk_flags & UMA_ZONE_CACHESPREAD) {
 1446                 keg_cachespread_init(keg);
 1447         } else if (keg->uk_flags & UMA_ZONE_REFCNT) {
 1448                 if (keg->uk_size >
 1449                     (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt) -
 1450                     sizeof(uint32_t)))
 1451                         keg_large_init(keg);
 1452                 else
 1453                         keg_small_init(keg);
 1454         } else {
 1455                 if (keg->uk_size > (UMA_SLAB_SIZE - sizeof(struct uma_slab)))
 1456                         keg_large_init(keg);
 1457                 else
 1458                         keg_small_init(keg);
 1459         }
 1460 
 1461         if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
 1462                 if (keg->uk_flags & UMA_ZONE_REFCNT) {
 1463                         if (keg->uk_ipers > uma_max_ipers_ref)
 1464                                 panic("Too many ref items per zone: %d > %d\n",
 1465                                     keg->uk_ipers, uma_max_ipers_ref);
 1466                         keg->uk_slabzone = slabrefzone;
 1467                 } else
 1468                         keg->uk_slabzone = slabzone;
 1469         }
 1470 
 1471         /*
 1472          * If we haven't booted yet we need allocations to go through the
 1473          * startup cache until the vm is ready.
 1474          */
 1475         if (keg->uk_ppera == 1) {
 1476 #ifdef UMA_MD_SMALL_ALLOC
 1477                 keg->uk_allocf = uma_small_alloc;
 1478                 keg->uk_freef = uma_small_free;
 1479 
 1480                 if (booted < UMA_STARTUP)
 1481                         keg->uk_allocf = startup_alloc;
 1482 #else
 1483                 if (booted < UMA_STARTUP2)
 1484                         keg->uk_allocf = startup_alloc;
 1485 #endif
 1486         } else if (booted < UMA_STARTUP2 &&
 1487             (keg->uk_flags & UMA_ZFLAG_INTERNAL))
 1488                 keg->uk_allocf = startup_alloc;
 1489 
 1490         /*
 1491          * Initialize keg's lock
 1492          */
 1493         KEG_LOCK_INIT(keg, (arg->flags & UMA_ZONE_MTXCLASS));
 1494 
 1495         /*
 1496          * If we're putting the slab header in the actual page we need to
 1497          * figure out where in each page it goes.  This calculates a right
 1498          * justified offset into the memory on an ALIGN_PTR boundary.
 1499          */
 1500         if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
 1501                 u_int totsize;
 1502 
 1503                 /* Size of the slab struct and free list */
 1504                 totsize = sizeof(struct uma_slab);
 1505 
 1506                 /* Size of the reference counts. */
 1507                 if (keg->uk_flags & UMA_ZONE_REFCNT)
 1508                         totsize += keg->uk_ipers * sizeof(uint32_t);
 1509 
 1510                 if (totsize & UMA_ALIGN_PTR)
 1511                         totsize = (totsize & ~UMA_ALIGN_PTR) +
 1512                             (UMA_ALIGN_PTR + 1);
 1513                 keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - totsize;
 1514 
 1515                 /*
 1516                  * The only way the following is possible is if with our
 1517                  * UMA_ALIGN_PTR adjustments we are now bigger than
 1518                  * UMA_SLAB_SIZE.  I haven't checked whether this is
 1519                  * mathematically possible for all cases, so we make
 1520                  * sure here anyway.
 1521                  */
 1522                 totsize = keg->uk_pgoff + sizeof(struct uma_slab);
 1523                 if (keg->uk_flags & UMA_ZONE_REFCNT)
 1524                         totsize += keg->uk_ipers * sizeof(uint32_t);
 1525                 if (totsize > PAGE_SIZE * keg->uk_ppera) {
 1526                         printf("zone %s ipers %d rsize %d size %d\n",
 1527                             zone->uz_name, keg->uk_ipers, keg->uk_rsize,
 1528                             keg->uk_size);
 1529                         panic("UMA slab won't fit.");
 1530                 }
 1531         }
 1532 
 1533         if (keg->uk_flags & UMA_ZONE_HASH)
 1534                 hash_alloc(&keg->uk_hash);
 1535 
 1536 #ifdef UMA_DEBUG
 1537         printf("UMA: %s(%p) size %d(%d) flags %#x ipers %d ppera %d out %d free %d\n",
 1538             zone->uz_name, zone, keg->uk_size, keg->uk_rsize, keg->uk_flags,
 1539             keg->uk_ipers, keg->uk_ppera,
 1540             (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free,
 1541             keg->uk_free);
 1542 #endif
 1543 
 1544         LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
 1545 
 1546         rw_wlock(&uma_rwlock);
 1547         LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
 1548         rw_wunlock(&uma_rwlock);
 1549         return (0);
 1550 }
 1551 
 1552 /*
 1553  * Zone header ctor.  This initializes all fields, locks, etc.
 1554  *
 1555  * Arguments/Returns follow uma_ctor specifications
 1556  *      udata  Actually uma_zctor_args
 1557  */
 1558 static int
 1559 zone_ctor(void *mem, int size, void *udata, int flags)
 1560 {
 1561         struct uma_zctor_args *arg = udata;
 1562         uma_zone_t zone = mem;
 1563         uma_zone_t z;
 1564         uma_keg_t keg;
 1565 
 1566         bzero(zone, size);
 1567         zone->uz_name = arg->name;
 1568         zone->uz_ctor = arg->ctor;
 1569         zone->uz_dtor = arg->dtor;
 1570         zone->uz_slab = zone_fetch_slab;
 1571         zone->uz_init = NULL;
 1572         zone->uz_fini = NULL;
 1573         zone->uz_allocs = 0;
 1574         zone->uz_frees = 0;
 1575         zone->uz_fails = 0;
 1576         zone->uz_sleeps = 0;
 1577         zone->uz_count = 0;
 1578         zone->uz_count_min = 0;
 1579         zone->uz_flags = 0;
 1580         zone->uz_warning = NULL;
 1581         timevalclear(&zone->uz_ratecheck);
 1582         keg = arg->keg;
 1583 
 1584         ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS));
 1585 
 1586         /*
 1587          * This is a pure cache zone, no kegs.
 1588          */
 1589         if (arg->import) {
 1590                 if (arg->flags & UMA_ZONE_VM)
 1591                         arg->flags |= UMA_ZFLAG_CACHEONLY;
 1592                 zone->uz_flags = arg->flags;
 1593                 zone->uz_size = arg->size;
 1594                 zone->uz_import = arg->import;
 1595                 zone->uz_release = arg->release;
 1596                 zone->uz_arg = arg->arg;
 1597                 zone->uz_lockptr = &zone->uz_lock;
 1598                 rw_wlock(&uma_rwlock);
 1599                 LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link);
 1600                 rw_wunlock(&uma_rwlock);
 1601                 goto out;
 1602         }
 1603 
 1604         /*
 1605          * Use the regular zone/keg/slab allocator.
 1606          */
 1607         zone->uz_import = (uma_import)zone_import;
 1608         zone->uz_release = (uma_release)zone_release;
 1609         zone->uz_arg = zone; 
 1610 
 1611         if (arg->flags & UMA_ZONE_SECONDARY) {
 1612                 KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
 1613                 zone->uz_init = arg->uminit;
 1614                 zone->uz_fini = arg->fini;
 1615                 zone->uz_lockptr = &keg->uk_lock;
 1616                 zone->uz_flags |= UMA_ZONE_SECONDARY;
 1617                 rw_wlock(&uma_rwlock);
 1618                 ZONE_LOCK(zone);
 1619                 LIST_FOREACH(z, &keg->uk_zones, uz_link) {
 1620                         if (LIST_NEXT(z, uz_link) == NULL) {
 1621                                 LIST_INSERT_AFTER(z, zone, uz_link);
 1622                                 break;
 1623                         }
 1624                 }
 1625                 ZONE_UNLOCK(zone);
 1626                 rw_wunlock(&uma_rwlock);
 1627         } else if (keg == NULL) {
 1628                 if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
 1629                     arg->align, arg->flags)) == NULL)
 1630                         return (ENOMEM);
 1631         } else {
 1632                 struct uma_kctor_args karg;
 1633                 int error;
 1634 
 1635                 /* We should only be here from uma_startup() */
 1636                 karg.size = arg->size;
 1637                 karg.uminit = arg->uminit;
 1638                 karg.fini = arg->fini;
 1639                 karg.align = arg->align;
 1640                 karg.flags = arg->flags;
 1641                 karg.zone = zone;
 1642                 error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
 1643                     flags);
 1644                 if (error)
 1645                         return (error);
 1646         }
 1647 
 1648         /*
 1649          * Link in the first keg.
 1650          */
 1651         zone->uz_klink.kl_keg = keg;
 1652         LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link);
 1653         zone->uz_lockptr = &keg->uk_lock;
 1654         zone->uz_size = keg->uk_size;
 1655         zone->uz_flags |= (keg->uk_flags &
 1656             (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));
 1657 
 1658         /*
 1659          * Some internal zones don't have room allocated for the per cpu
 1660          * caches.  If we're internal, bail out here.
 1661          */
 1662         if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
 1663                 KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
 1664                     ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
 1665                 return (0);
 1666         }
 1667 
 1668 out:
 1669         if ((arg->flags & UMA_ZONE_MAXBUCKET) == 0)
 1670                 zone->uz_count = bucket_select(zone->uz_size);
 1671         else
 1672                 zone->uz_count = BUCKET_MAX;
 1673         zone->uz_count_min = zone->uz_count;
 1674 
 1675         return (0);
 1676 }
 1677 
 1678 /*
 1679  * Keg header dtor.  This frees all data, destroys locks, frees the hash
 1680  * table and removes the keg from the global list.
 1681  *
 1682  * Arguments/Returns follow uma_dtor specifications
 1683  *      udata  unused
 1684  */
 1685 static void
 1686 keg_dtor(void *arg, int size, void *udata)
 1687 {
 1688         uma_keg_t keg;
 1689 
 1690         keg = (uma_keg_t)arg;
 1691         KEG_LOCK(keg);
 1692         if (keg->uk_free != 0) {
 1693                 printf("Freed UMA keg (%s) was not empty (%d items). "
 1694                     " Lost %d pages of memory.\n",
 1695                     keg->uk_name ? keg->uk_name : "",
 1696                     keg->uk_free, keg->uk_pages);
 1697         }
 1698         KEG_UNLOCK(keg);
 1699 
 1700         hash_free(&keg->uk_hash);
 1701 
 1702         KEG_LOCK_FINI(keg);
 1703 }
 1704 
 1705 /*
 1706  * Zone header dtor.
 1707  *
 1708  * Arguments/Returns follow uma_dtor specifications
 1709  *      udata  unused
 1710  */
 1711 static void
 1712 zone_dtor(void *arg, int size, void *udata)
 1713 {
 1714         uma_klink_t klink;
 1715         uma_zone_t zone;
 1716         uma_keg_t keg;
 1717 
 1718         zone = (uma_zone_t)arg;
 1719         keg = zone_first_keg(zone);
 1720 
 1721         if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
 1722                 cache_drain(zone);
 1723 
 1724         rw_wlock(&uma_rwlock);
 1725         LIST_REMOVE(zone, uz_link);
 1726         rw_wunlock(&uma_rwlock);
 1727         /*
 1728          * XXX there are some races here where
 1729          * the zone can be drained but zone lock
 1730          * released and then refilled before we
 1731          * remove it... we dont care for now
 1732          */
 1733         zone_drain_wait(zone, M_WAITOK);
 1734         /*
 1735          * Unlink all of our kegs.
 1736          */
 1737         while ((klink = LIST_FIRST(&zone->uz_kegs)) != NULL) {
 1738                 klink->kl_keg = NULL;
 1739                 LIST_REMOVE(klink, kl_link);
 1740                 if (klink == &zone->uz_klink)
 1741                         continue;
 1742                 free(klink, M_TEMP);
 1743         }
 1744         /*
 1745          * We only destroy kegs from non secondary zones.
 1746          */
 1747         if (keg != NULL && (zone->uz_flags & UMA_ZONE_SECONDARY) == 0)  {
 1748                 rw_wlock(&uma_rwlock);
 1749                 LIST_REMOVE(keg, uk_link);
 1750                 rw_wunlock(&uma_rwlock);
 1751                 zone_free_item(kegs, keg, NULL, SKIP_NONE);
 1752         }
 1753         ZONE_LOCK_FINI(zone);
 1754 }
 1755 
 1756 /*
 1757  * Traverses every zone in the system and calls a callback
 1758  *
 1759  * Arguments:
 1760  *      zfunc  A pointer to a function which accepts a zone
 1761  *              as an argument.
 1762  *
 1763  * Returns:
 1764  *      Nothing
 1765  */
 1766 static void
 1767 zone_foreach(void (*zfunc)(uma_zone_t))
 1768 {
 1769         uma_keg_t keg;
 1770         uma_zone_t zone;
 1771 
 1772         rw_rlock(&uma_rwlock);
 1773         LIST_FOREACH(keg, &uma_kegs, uk_link) {
 1774                 LIST_FOREACH(zone, &keg->uk_zones, uz_link)
 1775                         zfunc(zone);
 1776         }
 1777         rw_runlock(&uma_rwlock);
 1778 }
 1779 
 1780 /* Public functions */
 1781 /* See uma.h */
 1782 void
 1783 uma_startup(void *bootmem, int boot_pages)
 1784 {
 1785         struct uma_zctor_args args;
 1786         uma_slab_t slab;
 1787         u_int slabsize;
 1788         int i;
 1789 
 1790 #ifdef UMA_DEBUG
 1791         printf("Creating uma keg headers zone and keg.\n");
 1792 #endif
 1793         rw_init(&uma_rwlock, "UMA lock");
 1794 
 1795         /* "manually" create the initial zone */
 1796         memset(&args, 0, sizeof(args));
 1797         args.name = "UMA Kegs";
 1798         args.size = sizeof(struct uma_keg);
 1799         args.ctor = keg_ctor;
 1800         args.dtor = keg_dtor;
 1801         args.uminit = zero_init;
 1802         args.fini = NULL;
 1803         args.keg = &masterkeg;
 1804         args.align = 32 - 1;
 1805         args.flags = UMA_ZFLAG_INTERNAL;
 1806         /* The initial zone has no Per cpu queues so it's smaller */
 1807         zone_ctor(kegs, sizeof(struct uma_zone), &args, M_WAITOK);
 1808 
 1809 #ifdef UMA_DEBUG
 1810         printf("Filling boot free list.\n");
 1811 #endif
 1812         for (i = 0; i < boot_pages; i++) {
 1813                 slab = (uma_slab_t)((uint8_t *)bootmem + (i * UMA_SLAB_SIZE));
 1814                 slab->us_data = (uint8_t *)slab;
 1815                 slab->us_flags = UMA_SLAB_BOOT;
 1816                 LIST_INSERT_HEAD(&uma_boot_pages, slab, us_link);
 1817         }
 1818         mtx_init(&uma_boot_pages_mtx, "UMA boot pages", NULL, MTX_DEF);
 1819 
 1820 #ifdef UMA_DEBUG
 1821         printf("Creating uma zone headers zone and keg.\n");
 1822 #endif
 1823         args.name = "UMA Zones";
 1824         args.size = sizeof(struct uma_zone) +
 1825             (sizeof(struct uma_cache) * (mp_maxid + 1));
 1826         args.ctor = zone_ctor;
 1827         args.dtor = zone_dtor;
 1828         args.uminit = zero_init;
 1829         args.fini = NULL;
 1830         args.keg = NULL;
 1831         args.align = 32 - 1;
 1832         args.flags = UMA_ZFLAG_INTERNAL;
 1833         /* The initial zone has no Per cpu queues so it's smaller */
 1834         zone_ctor(zones, sizeof(struct uma_zone), &args, M_WAITOK);
 1835 
 1836 #ifdef UMA_DEBUG
 1837         printf("Initializing pcpu cache locks.\n");
 1838 #endif
 1839 #ifdef UMA_DEBUG
 1840         printf("Creating slab and hash zones.\n");
 1841 #endif
 1842 
 1843         /* Now make a zone for slab headers */
 1844         slabzone = uma_zcreate("UMA Slabs",
 1845                                 sizeof(struct uma_slab),
 1846                                 NULL, NULL, NULL, NULL,
 1847                                 UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
 1848 
 1849         /*
 1850          * We also create a zone for the bigger slabs with reference
 1851          * counts in them, to accomodate UMA_ZONE_REFCNT zones.
 1852          */
 1853         slabsize = sizeof(struct uma_slab_refcnt);
 1854         slabsize += uma_max_ipers_ref * sizeof(uint32_t);
 1855         slabrefzone = uma_zcreate("UMA RCntSlabs",
 1856                                   slabsize,
 1857                                   NULL, NULL, NULL, NULL,
 1858                                   UMA_ALIGN_PTR,
 1859                                   UMA_ZFLAG_INTERNAL);
 1860 
 1861         hashzone = uma_zcreate("UMA Hash",
 1862             sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
 1863             NULL, NULL, NULL, NULL,
 1864             UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
 1865 
 1866         bucket_init();
 1867 
 1868         booted = UMA_STARTUP;
 1869 
 1870 #ifdef UMA_DEBUG
 1871         printf("UMA startup complete.\n");
 1872 #endif
 1873 }
 1874 
 1875 /* see uma.h */
 1876 void
 1877 uma_startup2(void)
 1878 {
 1879         booted = UMA_STARTUP2;
 1880         bucket_enable();
 1881         sx_init(&uma_drain_lock, "umadrain");
 1882 #ifdef UMA_DEBUG
 1883         printf("UMA startup2 complete.\n");
 1884 #endif
 1885 }
 1886 
 1887 /*
 1888  * Initialize our callout handle
 1889  *
 1890  */
 1891 
 1892 static void
 1893 uma_startup3(void)
 1894 {
 1895 #ifdef UMA_DEBUG
 1896         printf("Starting callout.\n");
 1897 #endif
 1898         callout_init(&uma_callout, 1);
 1899         callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
 1900 #ifdef UMA_DEBUG
 1901         printf("UMA startup3 complete.\n");
 1902 #endif
 1903 }
 1904 
 1905 static uma_keg_t
 1906 uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
 1907                 int align, uint32_t flags)
 1908 {
 1909         struct uma_kctor_args args;
 1910 
 1911         args.size = size;
 1912         args.uminit = uminit;
 1913         args.fini = fini;
 1914         args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
 1915         args.flags = flags;
 1916         args.zone = zone;
 1917         return (zone_alloc_item(kegs, &args, M_WAITOK));
 1918 }
 1919 
 1920 /* See uma.h */
 1921 void
 1922 uma_set_align(int align)
 1923 {
 1924 
 1925         if (align != UMA_ALIGN_CACHE)
 1926                 uma_align_cache = align;
 1927 }
 1928 
 1929 /* See uma.h */
 1930 uma_zone_t
 1931 uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
 1932                 uma_init uminit, uma_fini fini, int align, uint32_t flags)
 1933 
 1934 {
 1935         struct uma_zctor_args args;
 1936         uma_zone_t res;
 1937         bool locked;
 1938 
 1939         KASSERT(powerof2(align + 1), ("invalid zone alignment %d for \"%s\"",
 1940             align, name));
 1941 
 1942         /* This stuff is essential for the zone ctor */
 1943         memset(&args, 0, sizeof(args));
 1944         args.name = name;
 1945         args.size = size;
 1946         args.ctor = ctor;
 1947         args.dtor = dtor;
 1948         args.uminit = uminit;
 1949         args.fini = fini;
 1950         args.align = align;
 1951         args.flags = flags;
 1952         args.keg = NULL;
 1953 
 1954         if (booted < UMA_STARTUP2) {
 1955                 locked = false;
 1956         } else {
 1957                 sx_slock(&uma_drain_lock);
 1958                 locked = true;
 1959         }
 1960         res = zone_alloc_item(zones, &args, M_WAITOK);
 1961         if (locked)
 1962                 sx_sunlock(&uma_drain_lock);
 1963         return (res);
 1964 }
 1965 
 1966 /* See uma.h */
 1967 uma_zone_t
 1968 uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
 1969                     uma_init zinit, uma_fini zfini, uma_zone_t master)
 1970 {
 1971         struct uma_zctor_args args;
 1972         uma_keg_t keg;
 1973         uma_zone_t res;
 1974         bool locked;
 1975 
 1976         keg = zone_first_keg(master);
 1977         memset(&args, 0, sizeof(args));
 1978         args.name = name;
 1979         args.size = keg->uk_size;
 1980         args.ctor = ctor;
 1981         args.dtor = dtor;
 1982         args.uminit = zinit;
 1983         args.fini = zfini;
 1984         args.align = keg->uk_align;
 1985         args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
 1986         args.keg = keg;
 1987 
 1988         if (booted < UMA_STARTUP2) {
 1989                 locked = false;
 1990         } else {
 1991                 sx_slock(&uma_drain_lock);
 1992                 locked = true;
 1993         }
 1994         /* XXX Attaches only one keg of potentially many. */
 1995         res = zone_alloc_item(zones, &args, M_WAITOK);
 1996         if (locked)
 1997                 sx_sunlock(&uma_drain_lock);
 1998         return (res);
 1999 }
 2000 
 2001 /* See uma.h */
 2002 uma_zone_t
 2003 uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
 2004                     uma_init zinit, uma_fini zfini, uma_import zimport,
 2005                     uma_release zrelease, void *arg, int flags)
 2006 {
 2007         struct uma_zctor_args args;
 2008 
 2009         memset(&args, 0, sizeof(args));
 2010         args.name = name;
 2011         args.size = size;
 2012         args.ctor = ctor;
 2013         args.dtor = dtor;
 2014         args.uminit = zinit;
 2015         args.fini = zfini;
 2016         args.import = zimport;
 2017         args.release = zrelease;
 2018         args.arg = arg;
 2019         args.align = 0;
 2020         args.flags = flags;
 2021 
 2022         return (zone_alloc_item(zones, &args, M_WAITOK));
 2023 }
 2024 
 2025 static void
 2026 zone_lock_pair(uma_zone_t a, uma_zone_t b)
 2027 {
 2028         if (a < b) {
 2029                 ZONE_LOCK(a);
 2030                 mtx_lock_flags(b->uz_lockptr, MTX_DUPOK);
 2031         } else {
 2032                 ZONE_LOCK(b);
 2033                 mtx_lock_flags(a->uz_lockptr, MTX_DUPOK);
 2034         }
 2035 }
 2036 
 2037 static void
 2038 zone_unlock_pair(uma_zone_t a, uma_zone_t b)
 2039 {
 2040 
 2041         ZONE_UNLOCK(a);
 2042         ZONE_UNLOCK(b);
 2043 }
 2044 
 2045 int
 2046 uma_zsecond_add(uma_zone_t zone, uma_zone_t master)
 2047 {
 2048         uma_klink_t klink;
 2049         uma_klink_t kl;
 2050         int error;
 2051 
 2052         error = 0;
 2053         klink = malloc(sizeof(*klink), M_TEMP, M_WAITOK | M_ZERO);
 2054 
 2055         zone_lock_pair(zone, master);
 2056         /*
 2057          * zone must use vtoslab() to resolve objects and must already be
 2058          * a secondary.
 2059          */
 2060         if ((zone->uz_flags & (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY))
 2061             != (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) {
 2062                 error = EINVAL;
 2063                 goto out;
 2064         }
 2065         /*
 2066          * The new master must also use vtoslab().
 2067          */
 2068         if ((zone->uz_flags & UMA_ZONE_VTOSLAB) != UMA_ZONE_VTOSLAB) {
 2069                 error = EINVAL;
 2070                 goto out;
 2071         }
 2072         /*
 2073          * Both must either be refcnt, or not be refcnt.
 2074          */
 2075         if ((zone->uz_flags & UMA_ZONE_REFCNT) !=
 2076             (master->uz_flags & UMA_ZONE_REFCNT)) {
 2077                 error = EINVAL;
 2078                 goto out;
 2079         }
 2080         /*
 2081          * The underlying object must be the same size.  rsize
 2082          * may be different.
 2083          */
 2084         if (master->uz_size != zone->uz_size) {
 2085                 error = E2BIG;
 2086                 goto out;
 2087         }
 2088         /*
 2089          * Put it at the end of the list.
 2090          */
 2091         klink->kl_keg = zone_first_keg(master);
 2092         LIST_FOREACH(kl, &zone->uz_kegs, kl_link) {
 2093                 if (LIST_NEXT(kl, kl_link) == NULL) {
 2094                         LIST_INSERT_AFTER(kl, klink, kl_link);
 2095                         break;
 2096                 }
 2097         }
 2098         klink = NULL;
 2099         zone->uz_flags |= UMA_ZFLAG_MULTI;
 2100         zone->uz_slab = zone_fetch_slab_multi;
 2101 
 2102 out:
 2103         zone_unlock_pair(zone, master);
 2104         if (klink != NULL)
 2105                 free(klink, M_TEMP);
 2106 
 2107         return (error);
 2108 }
 2109 
 2110 
 2111 /* See uma.h */
 2112 void
 2113 uma_zdestroy(uma_zone_t zone)
 2114 {
 2115 
 2116         sx_slock(&uma_drain_lock);
 2117         zone_free_item(zones, zone, NULL, SKIP_NONE);
 2118         sx_sunlock(&uma_drain_lock);
 2119 }
 2120 
 2121 /* See uma.h */
 2122 void *
 2123 uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
 2124 {
 2125         void *item;
 2126         uma_cache_t cache;
 2127         uma_bucket_t bucket;
 2128         int lockfail;
 2129         int cpu;
 2130 
 2131         /* This is the fast path allocation */
 2132 #ifdef UMA_DEBUG_ALLOC_1
 2133         printf("Allocating one item from %s(%p)\n", zone->uz_name, zone);
 2134 #endif
 2135         CTR3(KTR_UMA, "uma_zalloc_arg thread %x zone %s flags %d", curthread,
 2136             zone->uz_name, flags);
 2137 
 2138         if (flags & M_WAITOK) {
 2139                 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
 2140                     "uma_zalloc_arg: zone \"%s\"", zone->uz_name);
 2141         }
 2142 #ifdef DEBUG_MEMGUARD
 2143         if (memguard_cmp_zone(zone)) {
 2144                 item = memguard_alloc(zone->uz_size, flags);
 2145                 if (item != NULL) {
 2146                         /*
 2147                          * Avoid conflict with the use-after-free
 2148                          * protecting infrastructure from INVARIANTS.
 2149                          */
 2150                         if (zone->uz_init != NULL &&
 2151                             zone->uz_init != mtrash_init &&
 2152                             zone->uz_init(item, zone->uz_size, flags) != 0)
 2153                                 return (NULL);
 2154                         if (zone->uz_ctor != NULL &&
 2155                             zone->uz_ctor != mtrash_ctor &&
 2156                             zone->uz_ctor(item, zone->uz_size, udata,
 2157                             flags) != 0) {
 2158                                 zone->uz_fini(item, zone->uz_size);
 2159                                 return (NULL);
 2160                         }
 2161                         return (item);
 2162                 }
 2163                 /* This is unfortunate but should not be fatal. */
 2164         }
 2165 #endif
 2166         /*
 2167          * If possible, allocate from the per-CPU cache.  There are two
 2168          * requirements for safe access to the per-CPU cache: (1) the thread
 2169          * accessing the cache must not be preempted or yield during access,
 2170          * and (2) the thread must not migrate CPUs without switching which
 2171          * cache it accesses.  We rely on a critical section to prevent
 2172          * preemption and migration.  We release the critical section in
 2173          * order to acquire the zone mutex if we are unable to allocate from
 2174          * the current cache; when we re-acquire the critical section, we
 2175          * must detect and handle migration if it has occurred.
 2176          */
 2177         critical_enter();
 2178         cpu = curcpu;
 2179         cache = &zone->uz_cpu[cpu];
 2180 
 2181 zalloc_start:
 2182         bucket = cache->uc_allocbucket;
 2183         if (bucket != NULL && bucket->ub_cnt > 0) {
 2184                 bucket->ub_cnt--;
 2185                 item = bucket->ub_bucket[bucket->ub_cnt];
 2186 #ifdef INVARIANTS
 2187                 bucket->ub_bucket[bucket->ub_cnt] = NULL;
 2188 #endif
 2189                 KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled."));
 2190                 cache->uc_allocs++;
 2191                 critical_exit();
 2192                 if (zone->uz_ctor != NULL &&
 2193                     zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
 2194                         atomic_add_long(&zone->uz_fails, 1);
 2195                         zone_free_item(zone, item, udata, SKIP_DTOR);
 2196                         return (NULL);
 2197                 }
 2198 #ifdef INVARIANTS
 2199                 uma_dbg_alloc(zone, NULL, item);
 2200 #endif
 2201                 if (flags & M_ZERO)
 2202                         uma_zero_item(item, zone);
 2203                 return (item);
 2204         }
 2205 
 2206         /*
 2207          * We have run out of items in our alloc bucket.
 2208          * See if we can switch with our free bucket.
 2209          */
 2210         bucket = cache->uc_freebucket;
 2211         if (bucket != NULL && bucket->ub_cnt > 0) {
 2212 #ifdef UMA_DEBUG_ALLOC
 2213                 printf("uma_zalloc: Swapping empty with alloc.\n");
 2214 #endif
 2215                 cache->uc_freebucket = cache->uc_allocbucket;
 2216                 cache->uc_allocbucket = bucket;
 2217                 goto zalloc_start;
 2218         }
 2219 
 2220         /*
 2221          * Discard any empty allocation bucket while we hold no locks.
 2222          */
 2223         bucket = cache->uc_allocbucket;
 2224         cache->uc_allocbucket = NULL;
 2225         critical_exit();
 2226         if (bucket != NULL)
 2227                 bucket_free(zone, bucket, udata);
 2228 
 2229         /* Short-circuit for zones without buckets and low memory. */
 2230         if (zone->uz_count == 0 || bucketdisable)
 2231                 goto zalloc_item;
 2232 
 2233         /*
 2234          * Attempt to retrieve the item from the per-CPU cache has failed, so
 2235          * we must go back to the zone.  This requires the zone lock, so we
 2236          * must drop the critical section, then re-acquire it when we go back
 2237          * to the cache.  Since the critical section is released, we may be
 2238          * preempted or migrate.  As such, make sure not to maintain any
 2239          * thread-local state specific to the cache from prior to releasing
 2240          * the critical section.
 2241          */
 2242         lockfail = 0;
 2243         if (ZONE_TRYLOCK(zone) == 0) {
 2244                 /* Record contention to size the buckets. */
 2245                 ZONE_LOCK(zone);
 2246                 lockfail = 1;
 2247         }
 2248         critical_enter();
 2249         cpu = curcpu;
 2250         cache = &zone->uz_cpu[cpu];
 2251 
 2252         /*
 2253          * Since we have locked the zone we may as well send back our stats.
 2254          */
 2255         atomic_add_long(&zone->uz_allocs, cache->uc_allocs);
 2256         atomic_add_long(&zone->uz_frees, cache->uc_frees);
 2257         cache->uc_allocs = 0;
 2258         cache->uc_frees = 0;
 2259 
 2260         /* See if we lost the race to fill the cache. */
 2261         if (cache->uc_allocbucket != NULL) {
 2262                 ZONE_UNLOCK(zone);
 2263                 goto zalloc_start;
 2264         }
 2265 
 2266         /*
 2267          * Check the zone's cache of buckets.
 2268          */
 2269         if ((bucket = LIST_FIRST(&zone->uz_buckets)) != NULL) {
 2270                 KASSERT(bucket->ub_cnt != 0,
 2271                     ("uma_zalloc_arg: Returning an empty bucket."));
 2272 
 2273                 LIST_REMOVE(bucket, ub_link);
 2274                 cache->uc_allocbucket = bucket;
 2275                 ZONE_UNLOCK(zone);
 2276                 goto zalloc_start;
 2277         }
 2278         /* We are no longer associated with this CPU. */
 2279         critical_exit();
 2280 
 2281         /*
 2282          * We bump the uz count when the cache size is insufficient to
 2283          * handle the working set.
 2284          */
 2285         if (lockfail && zone->uz_count < BUCKET_MAX)
 2286                 zone->uz_count++;
 2287         ZONE_UNLOCK(zone);
 2288 
 2289         /*
 2290          * Now lets just fill a bucket and put it on the free list.  If that
 2291          * works we'll restart the allocation from the begining and it
 2292          * will use the just filled bucket.
 2293          */
 2294         bucket = zone_alloc_bucket(zone, udata, flags);
 2295         if (bucket != NULL) {
 2296                 ZONE_LOCK(zone);
 2297                 critical_enter();
 2298                 cpu = curcpu;
 2299                 cache = &zone->uz_cpu[cpu];
 2300                 /*
 2301                  * See if we lost the race or were migrated.  Cache the
 2302                  * initialized bucket to make this less likely or claim
 2303                  * the memory directly.
 2304                  */
 2305                 if (cache->uc_allocbucket == NULL)
 2306                         cache->uc_allocbucket = bucket;
 2307                 else
 2308                         LIST_INSERT_HEAD(&zone->uz_buckets, bucket, ub_link);
 2309                 ZONE_UNLOCK(zone);
 2310                 goto zalloc_start;
 2311         }
 2312 
 2313         /*
 2314          * We may not be able to get a bucket so return an actual item.
 2315          */
 2316 #ifdef UMA_DEBUG
 2317         printf("uma_zalloc_arg: Bucketzone returned NULL\n");
 2318 #endif
 2319 
 2320 zalloc_item:
 2321         item = zone_alloc_item(zone, udata, flags);
 2322 
 2323         return (item);
 2324 }
 2325 
 2326 static uma_slab_t
 2327 keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int flags)
 2328 {
 2329         uma_slab_t slab;
 2330         int reserve;
 2331 
 2332         mtx_assert(&keg->uk_lock, MA_OWNED);
 2333         slab = NULL;
 2334         reserve = 0;
 2335         if ((flags & M_USE_RESERVE) == 0)
 2336                 reserve = keg->uk_reserve;
 2337 
 2338         for (;;) {
 2339                 /*
 2340                  * Find a slab with some space.  Prefer slabs that are partially
 2341                  * used over those that are totally full.  This helps to reduce
 2342                  * fragmentation.
 2343                  */
 2344                 if (keg->uk_free > reserve) {
 2345                         if (!LIST_EMPTY(&keg->uk_part_slab)) {
 2346                                 slab = LIST_FIRST(&keg->uk_part_slab);
 2347                         } else {
 2348                                 slab = LIST_FIRST(&keg->uk_free_slab);
 2349                                 LIST_REMOVE(slab, us_link);
 2350                                 LIST_INSERT_HEAD(&keg->uk_part_slab, slab,
 2351                                     us_link);
 2352                         }
 2353                         MPASS(slab->us_keg == keg);
 2354                         return (slab);
 2355                 }
 2356 
 2357                 /*
 2358                  * M_NOVM means don't ask at all!
 2359                  */
 2360                 if (flags & M_NOVM)
 2361                         break;
 2362 
 2363                 if (keg->uk_maxpages && keg->uk_pages >= keg->uk_maxpages) {
 2364                         keg->uk_flags |= UMA_ZFLAG_FULL;
 2365                         /*
 2366                          * If this is not a multi-zone, set the FULL bit.
 2367                          * Otherwise slab_multi() takes care of it.
 2368                          */
 2369                         if ((zone->uz_flags & UMA_ZFLAG_MULTI) == 0) {
 2370                                 zone->uz_flags |= UMA_ZFLAG_FULL;
 2371                                 zone_log_warning(zone);
 2372                         }
 2373                         if (flags & M_NOWAIT)
 2374                                 break;
 2375                         zone->uz_sleeps++;
 2376                         msleep(keg, &keg->uk_lock, PVM, "keglimit", 0);
 2377                         continue;
 2378                 }
 2379                 slab = keg_alloc_slab(keg, zone, flags);
 2380                 /*
 2381                  * If we got a slab here it's safe to mark it partially used
 2382                  * and return.  We assume that the caller is going to remove
 2383                  * at least one item.
 2384                  */
 2385                 if (slab) {
 2386                         MPASS(slab->us_keg == keg);
 2387                         LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
 2388                         return (slab);
 2389                 }
 2390                 /*
 2391                  * We might not have been able to get a slab but another cpu
 2392                  * could have while we were unlocked.  Check again before we
 2393                  * fail.
 2394                  */
 2395                 flags |= M_NOVM;
 2396         }
 2397         return (slab);
 2398 }
 2399 
 2400 static uma_slab_t
 2401 zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int flags)
 2402 {
 2403         uma_slab_t slab;
 2404 
 2405         if (keg == NULL) {
 2406                 keg = zone_first_keg(zone);
 2407                 KEG_LOCK(keg);
 2408         }
 2409 
 2410         for (;;) {
 2411                 slab = keg_fetch_slab(keg, zone, flags);
 2412                 if (slab)
 2413                         return (slab);
 2414                 if (flags & (M_NOWAIT | M_NOVM))
 2415                         break;
 2416         }
 2417         KEG_UNLOCK(keg);
 2418         return (NULL);
 2419 }
 2420 
 2421 /*
 2422  * uma_zone_fetch_slab_multi:  Fetches a slab from one available keg.  Returns
 2423  * with the keg locked.  On NULL no lock is held.
 2424  *
 2425  * The last pointer is used to seed the search.  It is not required.
 2426  */
 2427 static uma_slab_t
 2428 zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int rflags)
 2429 {
 2430         uma_klink_t klink;
 2431         uma_slab_t slab;
 2432         uma_keg_t keg;
 2433         int flags;
 2434         int empty;
 2435         int full;
 2436 
 2437         /*
 2438          * Don't wait on the first pass.  This will skip limit tests
 2439          * as well.  We don't want to block if we can find a provider
 2440          * without blocking.
 2441          */
 2442         flags = (rflags & ~M_WAITOK) | M_NOWAIT;
 2443         /*
 2444          * Use the last slab allocated as a hint for where to start
 2445          * the search.
 2446          */
 2447         if (last != NULL) {
 2448                 slab = keg_fetch_slab(last, zone, flags);
 2449                 if (slab)
 2450                         return (slab);
 2451                 KEG_UNLOCK(last);
 2452         }
 2453         /*
 2454          * Loop until we have a slab incase of transient failures
 2455          * while M_WAITOK is specified.  I'm not sure this is 100%
 2456          * required but we've done it for so long now.
 2457          */
 2458         for (;;) {
 2459                 empty = 0;
 2460                 full = 0;
 2461                 /*
 2462                  * Search the available kegs for slabs.  Be careful to hold the
 2463                  * correct lock while calling into the keg layer.
 2464                  */
 2465                 LIST_FOREACH(klink, &zone->uz_kegs, kl_link) {
 2466                         keg = klink->kl_keg;
 2467                         KEG_LOCK(keg);
 2468                         if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) {
 2469                                 slab = keg_fetch_slab(keg, zone, flags);
 2470                                 if (slab)
 2471                                         return (slab);
 2472                         }
 2473                         if (keg->uk_flags & UMA_ZFLAG_FULL)
 2474                                 full++;
 2475                         else
 2476                                 empty++;
 2477                         KEG_UNLOCK(keg);
 2478                 }
 2479                 if (rflags & (M_NOWAIT | M_NOVM))
 2480                         break;
 2481                 flags = rflags;
 2482                 /*
 2483                  * All kegs are full.  XXX We can't atomically check all kegs
 2484                  * and sleep so just sleep for a short period and retry.
 2485                  */
 2486                 if (full && !empty) {
 2487                         ZONE_LOCK(zone);
 2488                         zone->uz_flags |= UMA_ZFLAG_FULL;
 2489                         zone->uz_sleeps++;
 2490                         zone_log_warning(zone);
 2491                         msleep(zone, zone->uz_lockptr, PVM,
 2492                             "zonelimit", hz/100);
 2493                         zone->uz_flags &= ~UMA_ZFLAG_FULL;
 2494                         ZONE_UNLOCK(zone);
 2495                         continue;
 2496                 }
 2497         }
 2498         return (NULL);
 2499 }
 2500 
 2501 static void *
 2502 slab_alloc_item(uma_keg_t keg, uma_slab_t slab)
 2503 {
 2504         void *item;
 2505         uint8_t freei;
 2506 
 2507         MPASS(keg == slab->us_keg);
 2508         mtx_assert(&keg->uk_lock, MA_OWNED);
 2509 
 2510         freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1;
 2511         BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free);
 2512         item = slab->us_data + (keg->uk_rsize * freei);
 2513         slab->us_freecount--;
 2514         keg->uk_free--;
 2515 
 2516         /* Move this slab to the full list */
 2517         if (slab->us_freecount == 0) {
 2518                 LIST_REMOVE(slab, us_link);
 2519                 LIST_INSERT_HEAD(&keg->uk_full_slab, slab, us_link);
 2520         }
 2521 
 2522         return (item);
 2523 }
 2524 
 2525 static int
 2526 zone_import(uma_zone_t zone, void **bucket, int max, int flags)
 2527 {
 2528         uma_slab_t slab;
 2529         uma_keg_t keg;
 2530         int i;
 2531 
 2532         slab = NULL;
 2533         keg = NULL;
 2534         /* Try to keep the buckets totally full */
 2535         for (i = 0; i < max; ) {
 2536                 if ((slab = zone->uz_slab(zone, keg, flags)) == NULL)
 2537                         break;
 2538                 keg = slab->us_keg;
 2539                 while (slab->us_freecount && i < max) { 
 2540                         bucket[i++] = slab_alloc_item(keg, slab);
 2541                         if (keg->uk_free <= keg->uk_reserve)
 2542                                 break;
 2543                 }
 2544                 /* Don't grab more than one slab at a time. */
 2545                 flags &= ~M_WAITOK;
 2546                 flags |= M_NOWAIT;
 2547         }
 2548         if (slab != NULL)
 2549                 KEG_UNLOCK(keg);
 2550 
 2551         return i;
 2552 }
 2553 
 2554 static uma_bucket_t
 2555 zone_alloc_bucket(uma_zone_t zone, void *udata, int flags)
 2556 {
 2557         uma_bucket_t bucket;
 2558         int max;
 2559 
 2560         /* Don't wait for buckets, preserve caller's NOVM setting. */
 2561         bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM));
 2562         if (bucket == NULL)
 2563                 return (NULL);
 2564 
 2565         max = MIN(bucket->ub_entries, zone->uz_count);
 2566         bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket,
 2567             max, flags);
 2568 
 2569         /*
 2570          * Initialize the memory if necessary.
 2571          */
 2572         if (bucket->ub_cnt != 0 && zone->uz_init != NULL) {
 2573                 int i;
 2574 
 2575                 for (i = 0; i < bucket->ub_cnt; i++)
 2576                         if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
 2577                             flags) != 0)
 2578                                 break;
 2579                 /*
 2580                  * If we couldn't initialize the whole bucket, put the
 2581                  * rest back onto the freelist.
 2582                  */
 2583                 if (i != bucket->ub_cnt) {
 2584                         zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i],
 2585                             bucket->ub_cnt - i);
 2586 #ifdef INVARIANTS
 2587                         bzero(&bucket->ub_bucket[i],
 2588                             sizeof(void *) * (bucket->ub_cnt - i));
 2589 #endif
 2590                         bucket->ub_cnt = i;
 2591                 }
 2592         }
 2593 
 2594         if (bucket->ub_cnt == 0) {
 2595                 bucket_free(zone, bucket, udata);
 2596                 atomic_add_long(&zone->uz_fails, 1);
 2597                 return (NULL);
 2598         }
 2599 
 2600         return (bucket);
 2601 }
 2602 
 2603 /*
 2604  * Allocates a single item from a zone.
 2605  *
 2606  * Arguments
 2607  *      zone   The zone to alloc for.
 2608  *      udata  The data to be passed to the constructor.
 2609  *      flags  M_WAITOK, M_NOWAIT, M_ZERO.
 2610  *
 2611  * Returns
 2612  *      NULL if there is no memory and M_NOWAIT is set
 2613  *      An item if successful
 2614  */
 2615 
 2616 static void *
 2617 zone_alloc_item(uma_zone_t zone, void *udata, int flags)
 2618 {
 2619         void *item;
 2620 
 2621         item = NULL;
 2622 
 2623 #ifdef UMA_DEBUG_ALLOC
 2624         printf("INTERNAL: Allocating one item from %s(%p)\n", zone->uz_name, zone);
 2625 #endif
 2626         if (zone->uz_import(zone->uz_arg, &item, 1, flags) != 1)
 2627                 goto fail;
 2628         atomic_add_long(&zone->uz_allocs, 1);
 2629 
 2630         /*
 2631          * We have to call both the zone's init (not the keg's init)
 2632          * and the zone's ctor.  This is because the item is going from
 2633          * a keg slab directly to the user, and the user is expecting it
 2634          * to be both zone-init'd as well as zone-ctor'd.
 2635          */
 2636         if (zone->uz_init != NULL) {
 2637                 if (zone->uz_init(item, zone->uz_size, flags) != 0) {
 2638                         zone_free_item(zone, item, udata, SKIP_FINI);
 2639                         goto fail;
 2640                 }
 2641         }
 2642         if (zone->uz_ctor != NULL) {
 2643                 if (zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
 2644                         zone_free_item(zone, item, udata, SKIP_DTOR);
 2645                         goto fail;
 2646                 }
 2647         }
 2648 #ifdef INVARIANTS
 2649         uma_dbg_alloc(zone, NULL, item);
 2650 #endif
 2651         if (flags & M_ZERO)
 2652                 uma_zero_item(item, zone);
 2653 
 2654         return (item);
 2655 
 2656 fail:
 2657         atomic_add_long(&zone->uz_fails, 1);
 2658         return (NULL);
 2659 }
 2660 
 2661 /* See uma.h */
 2662 void
 2663 uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
 2664 {
 2665         uma_cache_t cache;
 2666         uma_bucket_t bucket;
 2667         int lockfail;
 2668         int cpu;
 2669 
 2670 #ifdef UMA_DEBUG_ALLOC_1
 2671         printf("Freeing item %p to %s(%p)\n", item, zone->uz_name, zone);
 2672 #endif
 2673         CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
 2674             zone->uz_name);
 2675 
 2676         /* uma_zfree(..., NULL) does nothing, to match free(9). */
 2677         if (item == NULL)
 2678                 return;
 2679 #ifdef DEBUG_MEMGUARD
 2680         if (is_memguard_addr(item)) {
 2681                 if (zone->uz_dtor != NULL && zone->uz_dtor != mtrash_dtor)
 2682                         zone->uz_dtor(item, zone->uz_size, udata);
 2683                 if (zone->uz_fini != NULL && zone->uz_fini != mtrash_fini)
 2684                         zone->uz_fini(item, zone->uz_size);
 2685                 memguard_free(item);
 2686                 return;
 2687         }
 2688 #endif
 2689 #ifdef INVARIANTS
 2690         if (zone->uz_flags & UMA_ZONE_MALLOC)
 2691                 uma_dbg_free(zone, udata, item);
 2692         else
 2693                 uma_dbg_free(zone, NULL, item);
 2694 #endif
 2695         if (zone->uz_dtor != NULL)
 2696                 zone->uz_dtor(item, zone->uz_size, udata);
 2697 
 2698         /*
 2699          * The race here is acceptable.  If we miss it we'll just have to wait
 2700          * a little longer for the limits to be reset.
 2701          */
 2702         if (zone->uz_flags & UMA_ZFLAG_FULL)
 2703                 goto zfree_item;
 2704 
 2705         /*
 2706          * If possible, free to the per-CPU cache.  There are two
 2707          * requirements for safe access to the per-CPU cache: (1) the thread
 2708          * accessing the cache must not be preempted or yield during access,
 2709          * and (2) the thread must not migrate CPUs without switching which
 2710          * cache it accesses.  We rely on a critical section to prevent
 2711          * preemption and migration.  We release the critical section in
 2712          * order to acquire the zone mutex if we are unable to free to the
 2713          * current cache; when we re-acquire the critical section, we must
 2714          * detect and handle migration if it has occurred.
 2715          */
 2716 zfree_restart:
 2717         critical_enter();
 2718         cpu = curcpu;
 2719         cache = &zone->uz_cpu[cpu];
 2720 
 2721 zfree_start:
 2722         /*
 2723          * Try to free into the allocbucket first to give LIFO ordering
 2724          * for cache-hot datastructures.  Spill over into the freebucket
 2725          * if necessary.  Alloc will swap them if one runs dry.
 2726          */
 2727         bucket = cache->uc_allocbucket;
 2728         if (bucket == NULL || bucket->ub_cnt >= bucket->ub_entries)
 2729                 bucket = cache->uc_freebucket;
 2730         if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
 2731                 KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
 2732                     ("uma_zfree: Freeing to non free bucket index."));
 2733                 bucket->ub_bucket[bucket->ub_cnt] = item;
 2734                 bucket->ub_cnt++;
 2735                 cache->uc_frees++;
 2736                 critical_exit();
 2737                 return;
 2738         }
 2739 
 2740         /*
 2741          * We must go back the zone, which requires acquiring the zone lock,
 2742          * which in turn means we must release and re-acquire the critical
 2743          * section.  Since the critical section is released, we may be
 2744          * preempted or migrate.  As such, make sure not to maintain any
 2745          * thread-local state specific to the cache from prior to releasing
 2746          * the critical section.
 2747          */
 2748         critical_exit();
 2749         if (zone->uz_count == 0 || bucketdisable)
 2750                 goto zfree_item;
 2751 
 2752         lockfail = 0;
 2753         if (ZONE_TRYLOCK(zone) == 0) {
 2754                 /* Record contention to size the buckets. */
 2755                 ZONE_LOCK(zone);
 2756                 lockfail = 1;
 2757         }
 2758         critical_enter();
 2759         cpu = curcpu;
 2760         cache = &zone->uz_cpu[cpu];
 2761 
 2762         /*
 2763          * Since we have locked the zone we may as well send back our stats.
 2764          */
 2765         atomic_add_long(&zone->uz_allocs, cache->uc_allocs);
 2766         atomic_add_long(&zone->uz_frees, cache->uc_frees);
 2767         cache->uc_allocs = 0;
 2768         cache->uc_frees = 0;
 2769 
 2770         bucket = cache->uc_freebucket;
 2771         if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
 2772                 ZONE_UNLOCK(zone);
 2773                 goto zfree_start;
 2774         }
 2775         cache->uc_freebucket = NULL;
 2776         /* We are no longer associated with this CPU. */
 2777         critical_exit();
 2778 
 2779         /* Can we throw this on the zone full list? */
 2780         if (bucket != NULL) {
 2781 #ifdef UMA_DEBUG_ALLOC
 2782                 printf("uma_zfree: Putting old bucket on the free list.\n");
 2783 #endif
 2784                 /* ub_cnt is pointing to the last free item */
 2785                 KASSERT(bucket->ub_cnt != 0,
 2786                     ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n"));
 2787                 LIST_INSERT_HEAD(&zone->uz_buckets, bucket, ub_link);
 2788         }
 2789 
 2790         /*
 2791          * We bump the uz count when the cache size is insufficient to
 2792          * handle the working set.
 2793          */
 2794         if (lockfail && zone->uz_count < BUCKET_MAX)
 2795                 zone->uz_count++;
 2796         ZONE_UNLOCK(zone);
 2797 
 2798 #ifdef UMA_DEBUG_ALLOC
 2799         printf("uma_zfree: Allocating new free bucket.\n");
 2800 #endif
 2801         bucket = bucket_alloc(zone, udata, M_NOWAIT);
 2802         if (bucket) {
 2803                 critical_enter();
 2804                 cpu = curcpu;
 2805                 cache = &zone->uz_cpu[cpu];
 2806                 if (cache->uc_freebucket == NULL) {
 2807                         cache->uc_freebucket = bucket;
 2808                         goto zfree_start;
 2809                 }
 2810                 /*
 2811                  * We lost the race, start over.  We have to drop our
 2812                  * critical section to free the bucket.
 2813                  */
 2814                 critical_exit();
 2815                 bucket_free(zone, bucket, udata);
 2816                 goto zfree_restart;
 2817         }
 2818 
 2819         /*
 2820          * If nothing else caught this, we'll just do an internal free.
 2821          */
 2822 zfree_item:
 2823         zone_free_item(zone, item, udata, SKIP_DTOR);
 2824 
 2825         return;
 2826 }
 2827 
 2828 static void
 2829 slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item)
 2830 {
 2831         uint8_t freei;
 2832 
 2833         mtx_assert(&keg->uk_lock, MA_OWNED);
 2834         MPASS(keg == slab->us_keg);
 2835 
 2836         /* Do we need to remove from any lists? */
 2837         if (slab->us_freecount+1 == keg->uk_ipers) {
 2838                 LIST_REMOVE(slab, us_link);
 2839                 LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
 2840         } else if (slab->us_freecount == 0) {
 2841                 LIST_REMOVE(slab, us_link);
 2842                 LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
 2843         }
 2844 
 2845         /* Slab management. */
 2846         freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
 2847         BIT_SET(SLAB_SETSIZE, freei, &slab->us_free);
 2848         slab->us_freecount++;
 2849 
 2850         /* Keg statistics. */
 2851         keg->uk_free++;
 2852 }
 2853 
 2854 static void
 2855 zone_release(uma_zone_t zone, void **bucket, int cnt)
 2856 {
 2857         void *item;
 2858         uma_slab_t slab;
 2859         uma_keg_t keg;
 2860         uint8_t *mem;
 2861         int clearfull;
 2862         int i;
 2863 
 2864         clearfull = 0;
 2865         keg = zone_first_keg(zone);
 2866         KEG_LOCK(keg);
 2867         for (i = 0; i < cnt; i++) {
 2868                 item = bucket[i];
 2869                 if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
 2870                         mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
 2871                         if (zone->uz_flags & UMA_ZONE_HASH) {
 2872                                 slab = hash_sfind(&keg->uk_hash, mem);
 2873                         } else {
 2874                                 mem += keg->uk_pgoff;
 2875                                 slab = (uma_slab_t)mem;
 2876                         }
 2877                 } else {
 2878                         slab = vtoslab((vm_offset_t)item);
 2879                         if (slab->us_keg != keg) {
 2880                                 KEG_UNLOCK(keg);
 2881                                 keg = slab->us_keg;
 2882                                 KEG_LOCK(keg);
 2883                         }
 2884                 }
 2885                 slab_free_item(keg, slab, item);
 2886                 if (keg->uk_flags & UMA_ZFLAG_FULL) {
 2887                         if (keg->uk_pages < keg->uk_maxpages) {
 2888                                 keg->uk_flags &= ~UMA_ZFLAG_FULL;
 2889                                 clearfull = 1;
 2890                         }
 2891 
 2892                         /* 
 2893                          * We can handle one more allocation. Since we're
 2894                          * clearing ZFLAG_FULL, wake up all procs blocked
 2895                          * on pages. This should be uncommon, so keeping this
 2896                          * simple for now (rather than adding count of blocked 
 2897                          * threads etc).
 2898                          */
 2899                         wakeup(keg);
 2900                 }
 2901         }
 2902         KEG_UNLOCK(keg);
 2903         if (clearfull) {
 2904                 ZONE_LOCK(zone);
 2905                 zone->uz_flags &= ~UMA_ZFLAG_FULL;
 2906                 wakeup(zone);
 2907                 ZONE_UNLOCK(zone);
 2908         }
 2909 
 2910 }
 2911 
 2912 /*
 2913  * Frees a single item to any zone.
 2914  *
 2915  * Arguments:
 2916  *      zone   The zone to free to
 2917  *      item   The item we're freeing
 2918  *      udata  User supplied data for the dtor
 2919  *      skip   Skip dtors and finis
 2920  */
 2921 static void
 2922 zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
 2923 {
 2924 
 2925 #ifdef INVARIANTS
 2926         if (skip == SKIP_NONE) {
 2927                 if (zone->uz_flags & UMA_ZONE_MALLOC)
 2928                         uma_dbg_free(zone, udata, item);
 2929                 else
 2930                         uma_dbg_free(zone, NULL, item);
 2931         }
 2932 #endif
 2933         if (skip < SKIP_DTOR && zone->uz_dtor)
 2934                 zone->uz_dtor(item, zone->uz_size, udata);
 2935 
 2936         if (skip < SKIP_FINI && zone->uz_fini)
 2937                 zone->uz_fini(item, zone->uz_size);
 2938 
 2939         atomic_add_long(&zone->uz_frees, 1);
 2940         zone->uz_release(zone->uz_arg, &item, 1);
 2941 }
 2942 
 2943 /* See uma.h */
 2944 int
 2945 uma_zone_set_max(uma_zone_t zone, int nitems)
 2946 {
 2947         uma_keg_t keg;
 2948 
 2949         keg = zone_first_keg(zone);
 2950         if (keg == NULL)
 2951                 return (0);
 2952         KEG_LOCK(keg);
 2953         keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera;
 2954         if (keg->uk_maxpages * keg->uk_ipers < nitems)
 2955                 keg->uk_maxpages += keg->uk_ppera;
 2956         nitems = (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers;
 2957         KEG_UNLOCK(keg);
 2958 
 2959         return (nitems);
 2960 }
 2961 
 2962 /* See uma.h */
 2963 int
 2964 uma_zone_get_max(uma_zone_t zone)
 2965 {
 2966         int nitems;
 2967         uma_keg_t keg;
 2968 
 2969         keg = zone_first_keg(zone);
 2970         if (keg == NULL)
 2971                 return (0);
 2972         KEG_LOCK(keg);
 2973         nitems = (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers;
 2974         KEG_UNLOCK(keg);
 2975 
 2976         return (nitems);
 2977 }
 2978 
 2979 /* See uma.h */
 2980 void
 2981 uma_zone_set_warning(uma_zone_t zone, const char *warning)
 2982 {
 2983 
 2984         ZONE_LOCK(zone);
 2985         zone->uz_warning = warning;
 2986         ZONE_UNLOCK(zone);
 2987 }
 2988 
 2989 /* See uma.h */
 2990 int
 2991 uma_zone_get_cur(uma_zone_t zone)
 2992 {
 2993         int64_t nitems;
 2994         u_int i;
 2995 
 2996         ZONE_LOCK(zone);
 2997         nitems = zone->uz_allocs - zone->uz_frees;
 2998         CPU_FOREACH(i) {
 2999                 /*
 3000                  * See the comment in sysctl_vm_zone_stats() regarding the
 3001                  * safety of accessing the per-cpu caches. With the zone lock
 3002                  * held, it is safe, but can potentially result in stale data.
 3003                  */
 3004                 nitems += zone->uz_cpu[i].uc_allocs -
 3005                     zone->uz_cpu[i].uc_frees;
 3006         }
 3007         ZONE_UNLOCK(zone);
 3008 
 3009         return (nitems < 0 ? 0 : nitems);
 3010 }
 3011 
 3012 /* See uma.h */
 3013 void
 3014 uma_zone_set_init(uma_zone_t zone, uma_init uminit)
 3015 {
 3016         uma_keg_t keg;
 3017 
 3018         keg = zone_first_keg(zone);
 3019         KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type"));
 3020         KEG_LOCK(keg);
 3021         KASSERT(keg->uk_pages == 0,
 3022             ("uma_zone_set_init on non-empty keg"));
 3023         keg->uk_init = uminit;
 3024         KEG_UNLOCK(keg);
 3025 }
 3026 
 3027 /* See uma.h */
 3028 void
 3029 uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
 3030 {
 3031         uma_keg_t keg;
 3032 
 3033         keg = zone_first_keg(zone);
 3034         KASSERT(keg != NULL, ("uma_zone_set_fini: Invalid zone type"));
 3035         KEG_LOCK(keg);
 3036         KASSERT(keg->uk_pages == 0,
 3037             ("uma_zone_set_fini on non-empty keg"));
 3038         keg->uk_fini = fini;
 3039         KEG_UNLOCK(keg);
 3040 }
 3041 
 3042 /* See uma.h */
 3043 void
 3044 uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
 3045 {
 3046 
 3047         ZONE_LOCK(zone);
 3048         KASSERT(zone_first_keg(zone)->uk_pages == 0,
 3049             ("uma_zone_set_zinit on non-empty keg"));
 3050         zone->uz_init = zinit;
 3051         ZONE_UNLOCK(zone);
 3052 }
 3053 
 3054 /* See uma.h */
 3055 void
 3056 uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
 3057 {
 3058 
 3059         ZONE_LOCK(zone);
 3060         KASSERT(zone_first_keg(zone)->uk_pages == 0,
 3061             ("uma_zone_set_zfini on non-empty keg"));
 3062         zone->uz_fini = zfini;
 3063         ZONE_UNLOCK(zone);
 3064 }
 3065 
 3066 /* See uma.h */
 3067 /* XXX uk_freef is not actually used with the zone locked */
 3068 void
 3069 uma_zone_set_freef(uma_zone_t zone, uma_free freef)
 3070 {
 3071         uma_keg_t keg;
 3072 
 3073         keg = zone_first_keg(zone);
 3074         KASSERT(keg != NULL, ("uma_zone_set_freef: Invalid zone type"));
 3075         KEG_LOCK(keg);
 3076         keg->uk_freef = freef;
 3077         KEG_UNLOCK(keg);
 3078 }
 3079 
 3080 /* See uma.h */
 3081 /* XXX uk_allocf is not actually used with the zone locked */
 3082 void
 3083 uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
 3084 {
 3085         uma_keg_t keg;
 3086 
 3087         keg = zone_first_keg(zone);
 3088         KEG_LOCK(keg);
 3089         keg->uk_allocf = allocf;
 3090         KEG_UNLOCK(keg);
 3091 }
 3092 
 3093 /* See uma.h */
 3094 void
 3095 uma_zone_reserve(uma_zone_t zone, int items)
 3096 {
 3097         uma_keg_t keg;
 3098 
 3099         keg = zone_first_keg(zone);
 3100         if (keg == NULL)
 3101                 return;
 3102         KEG_LOCK(keg);
 3103         keg->uk_reserve = items;
 3104         KEG_UNLOCK(keg);
 3105 
 3106         return;
 3107 }
 3108 
 3109 /* See uma.h */
 3110 int
 3111 uma_zone_reserve_kva(uma_zone_t zone, int count)
 3112 {
 3113         uma_keg_t keg;
 3114         vm_offset_t kva;
 3115         u_int pages;
 3116 
 3117         keg = zone_first_keg(zone);
 3118         if (keg == NULL)
 3119                 return (0);
 3120         pages = count / keg->uk_ipers;
 3121 
 3122         if (pages * keg->uk_ipers < count)
 3123                 pages++;
 3124         pages *= keg->uk_ppera;
 3125 
 3126 #ifdef UMA_MD_SMALL_ALLOC
 3127         if (keg->uk_ppera > 1) {
 3128 #else
 3129         if (1) {
 3130 #endif
 3131                 kva = kva_alloc((vm_size_t)pages * PAGE_SIZE);
 3132                 if (kva == 0)
 3133                         return (0);
 3134         } else
 3135                 kva = 0;
 3136         KEG_LOCK(keg);
 3137         keg->uk_kva = kva;
 3138         keg->uk_offset = 0;
 3139         keg->uk_maxpages = pages;
 3140 #ifdef UMA_MD_SMALL_ALLOC
 3141         keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc;
 3142 #else
 3143         keg->uk_allocf = noobj_alloc;
 3144 #endif
 3145         keg->uk_flags |= UMA_ZONE_NOFREE;
 3146         KEG_UNLOCK(keg);
 3147 
 3148         return (1);
 3149 }
 3150 
 3151 /* See uma.h */
 3152 void
 3153 uma_prealloc(uma_zone_t zone, int items)
 3154 {
 3155         int slabs;
 3156         uma_slab_t slab;
 3157         uma_keg_t keg;
 3158 
 3159         keg = zone_first_keg(zone);
 3160         if (keg == NULL)
 3161                 return;
 3162         KEG_LOCK(keg);
 3163         slabs = items / keg->uk_ipers;
 3164         if (slabs * keg->uk_ipers < items)
 3165                 slabs++;
 3166         while (slabs > 0) {
 3167                 slab = keg_alloc_slab(keg, zone, M_WAITOK);
 3168                 if (slab == NULL)
 3169                         break;
 3170                 MPASS(slab->us_keg == keg);
 3171                 LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
 3172                 slabs--;
 3173         }
 3174         KEG_UNLOCK(keg);
 3175 }
 3176 
 3177 /* See uma.h */
 3178 uint32_t *
 3179 uma_find_refcnt(uma_zone_t zone, void *item)
 3180 {
 3181         uma_slabrefcnt_t slabref;
 3182         uma_slab_t slab;
 3183         uma_keg_t keg;
 3184         uint32_t *refcnt;
 3185         int idx;
 3186 
 3187         slab = vtoslab((vm_offset_t)item & (~UMA_SLAB_MASK));
 3188         slabref = (uma_slabrefcnt_t)slab;
 3189         keg = slab->us_keg;
 3190         KASSERT(keg->uk_flags & UMA_ZONE_REFCNT,
 3191             ("uma_find_refcnt(): zone possibly not UMA_ZONE_REFCNT"));
 3192         idx = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
 3193         refcnt = &slabref->us_refcnt[idx];
 3194         return refcnt;
 3195 }
 3196 
 3197 /* See uma.h */
 3198 static void
 3199 uma_reclaim_locked(bool kmem_danger)
 3200 {
 3201 
 3202 #ifdef UMA_DEBUG
 3203         printf("UMA: vm asked us to release pages!\n");
 3204 #endif
 3205         sx_assert(&uma_drain_lock, SA_XLOCKED);
 3206         bucket_enable();
 3207         zone_foreach(zone_drain);
 3208         if (vm_page_count_min() || kmem_danger) {
 3209                 cache_drain_safe(NULL);
 3210                 zone_foreach(zone_drain);
 3211         }
 3212         /*
 3213          * Some slabs may have been freed but this zone will be visited early
 3214          * we visit again so that we can free pages that are empty once other
 3215          * zones are drained.  We have to do the same for buckets.
 3216          */
 3217         zone_drain(slabzone);
 3218         zone_drain(slabrefzone);
 3219         bucket_zone_drain();
 3220 }
 3221 
 3222 void
 3223 uma_reclaim(void)
 3224 {
 3225 
 3226         sx_xlock(&uma_drain_lock);
 3227         uma_reclaim_locked(false);
 3228         sx_xunlock(&uma_drain_lock);
 3229 }
 3230 
 3231 static int uma_reclaim_needed;
 3232 
 3233 void
 3234 uma_reclaim_wakeup(void)
 3235 {
 3236 
 3237         uma_reclaim_needed = 1;
 3238         wakeup(&uma_reclaim_needed);
 3239 }
 3240 
 3241 void
 3242 uma_reclaim_worker(void *arg __unused)
 3243 {
 3244 
 3245         sx_xlock(&uma_drain_lock);
 3246         for (;;) {
 3247                 sx_sleep(&uma_reclaim_needed, &uma_drain_lock, PVM,
 3248                     "umarcl", 0);
 3249                 if (uma_reclaim_needed) {
 3250                         uma_reclaim_needed = 0;
 3251                         sx_xunlock(&uma_drain_lock);
 3252                         EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_KMEM);
 3253                         sx_xlock(&uma_drain_lock);
 3254                         uma_reclaim_locked(true);
 3255                 }
 3256         }
 3257 }
 3258 
 3259 /* See uma.h */
 3260 int
 3261 uma_zone_exhausted(uma_zone_t zone)
 3262 {
 3263         int full;
 3264 
 3265         ZONE_LOCK(zone);
 3266         full = (zone->uz_flags & UMA_ZFLAG_FULL);
 3267         ZONE_UNLOCK(zone);
 3268         return (full);  
 3269 }
 3270 
 3271 int
 3272 uma_zone_exhausted_nolock(uma_zone_t zone)
 3273 {
 3274         return (zone->uz_flags & UMA_ZFLAG_FULL);
 3275 }
 3276 
 3277 void *
 3278 uma_large_malloc(vm_size_t size, int wait)
 3279 {
 3280         void *mem;
 3281         uma_slab_t slab;
 3282         uint8_t flags;
 3283 
 3284         slab = zone_alloc_item(slabzone, NULL, wait);
 3285         if (slab == NULL)
 3286                 return (NULL);
 3287         mem = page_alloc(NULL, size, &flags, wait);
 3288         if (mem) {
 3289                 vsetslab((vm_offset_t)mem, slab);
 3290                 slab->us_data = mem;
 3291                 slab->us_flags = flags | UMA_SLAB_MALLOC;
 3292                 slab->us_size = size;
 3293         } else {
 3294                 zone_free_item(slabzone, slab, NULL, SKIP_NONE);
 3295         }
 3296 
 3297         return (mem);
 3298 }
 3299 
 3300 void
 3301 uma_large_free(uma_slab_t slab)
 3302 {
 3303 
 3304         page_free(slab->us_data, slab->us_size, slab->us_flags);
 3305         zone_free_item(slabzone, slab, NULL, SKIP_NONE);
 3306 }
 3307 
 3308 static void
 3309 uma_zero_item(void *item, uma_zone_t zone)
 3310 {
 3311 
 3312         if (zone->uz_flags & UMA_ZONE_PCPU) {
 3313                 for (int i = 0; i < mp_ncpus; i++)
 3314                         bzero(zpcpu_get_cpu(item, i), zone->uz_size);
 3315         } else
 3316                 bzero(item, zone->uz_size);
 3317 }
 3318 
 3319 void
 3320 uma_print_stats(void)
 3321 {
 3322         zone_foreach(uma_print_zone);
 3323 }
 3324 
 3325 static void
 3326 slab_print(uma_slab_t slab)
 3327 {
 3328         printf("slab: keg %p, data %p, freecount %d\n",
 3329                 slab->us_keg, slab->us_data, slab->us_freecount);
 3330 }
 3331 
 3332 static void
 3333 cache_print(uma_cache_t cache)
 3334 {
 3335         printf("alloc: %p(%d), free: %p(%d)\n",
 3336                 cache->uc_allocbucket,
 3337                 cache->uc_allocbucket?cache->uc_allocbucket->ub_cnt:0,
 3338                 cache->uc_freebucket,
 3339                 cache->uc_freebucket?cache->uc_freebucket->ub_cnt:0);
 3340 }
 3341 
 3342 static void
 3343 uma_print_keg(uma_keg_t keg)
 3344 {
 3345         uma_slab_t slab;
 3346 
 3347         printf("keg: %s(%p) size %d(%d) flags %#x ipers %d ppera %d "
 3348             "out %d free %d limit %d\n",
 3349             keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags,
 3350             keg->uk_ipers, keg->uk_ppera,
 3351             (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free,
 3352             keg->uk_free, (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers);
 3353         printf("Part slabs:\n");
 3354         LIST_FOREACH(slab, &keg->uk_part_slab, us_link)
 3355                 slab_print(slab);
 3356         printf("Free slabs:\n");
 3357         LIST_FOREACH(slab, &keg->uk_free_slab, us_link)
 3358                 slab_print(slab);
 3359         printf("Full slabs:\n");
 3360         LIST_FOREACH(slab, &keg->uk_full_slab, us_link)
 3361                 slab_print(slab);
 3362 }
 3363 
 3364 void
 3365 uma_print_zone(uma_zone_t zone)
 3366 {
 3367         uma_cache_t cache;
 3368         uma_klink_t kl;
 3369         int i;
 3370 
 3371         printf("zone: %s(%p) size %d flags %#x\n",
 3372             zone->uz_name, zone, zone->uz_size, zone->uz_flags);
 3373         LIST_FOREACH(kl, &zone->uz_kegs, kl_link)
 3374                 uma_print_keg(kl->kl_keg);
 3375         CPU_FOREACH(i) {
 3376                 cache = &zone->uz_cpu[i];
 3377                 printf("CPU %d Cache:\n", i);
 3378                 cache_print(cache);
 3379         }
 3380 }
 3381 
 3382 #ifdef DDB
 3383 /*
 3384  * Generate statistics across both the zone and its per-cpu cache's.  Return
 3385  * desired statistics if the pointer is non-NULL for that statistic.
 3386  *
 3387  * Note: does not update the zone statistics, as it can't safely clear the
 3388  * per-CPU cache statistic.
 3389  *
 3390  * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't
 3391  * safe from off-CPU; we should modify the caches to track this information
 3392  * directly so that we don't have to.
 3393  */
 3394 static void
 3395 uma_zone_sumstat(uma_zone_t z, int *cachefreep, uint64_t *allocsp,
 3396     uint64_t *freesp, uint64_t *sleepsp)
 3397 {
 3398         uma_cache_t cache;
 3399         uint64_t allocs, frees, sleeps;
 3400         int cachefree, cpu;
 3401 
 3402         allocs = frees = sleeps = 0;
 3403         cachefree = 0;
 3404         CPU_FOREACH(cpu) {
 3405                 cache = &z->uz_cpu[cpu];
 3406                 if (cache->uc_allocbucket != NULL)
 3407                         cachefree += cache->uc_allocbucket->ub_cnt;
 3408                 if (cache->uc_freebucket != NULL)
 3409                         cachefree += cache->uc_freebucket->ub_cnt;
 3410                 allocs += cache->uc_allocs;
 3411                 frees += cache->uc_frees;
 3412         }
 3413         allocs += z->uz_allocs;
 3414         frees += z->uz_frees;
 3415         sleeps += z->uz_sleeps;
 3416         if (cachefreep != NULL)
 3417                 *cachefreep = cachefree;
 3418         if (allocsp != NULL)
 3419                 *allocsp = allocs;
 3420         if (freesp != NULL)
 3421                 *freesp = frees;
 3422         if (sleepsp != NULL)
 3423                 *sleepsp = sleeps;
 3424 }
 3425 #endif /* DDB */
 3426 
 3427 static int
 3428 sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS)
 3429 {
 3430         uma_keg_t kz;
 3431         uma_zone_t z;
 3432         int count;
 3433 
 3434         count = 0;
 3435         rw_rlock(&uma_rwlock);
 3436         LIST_FOREACH(kz, &uma_kegs, uk_link) {
 3437                 LIST_FOREACH(z, &kz->uk_zones, uz_link)
 3438                         count++;
 3439         }
 3440         rw_runlock(&uma_rwlock);
 3441         return (sysctl_handle_int(oidp, &count, 0, req));
 3442 }
 3443 
 3444 static int
 3445 sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
 3446 {
 3447         struct uma_stream_header ush;
 3448         struct uma_type_header uth;
 3449         struct uma_percpu_stat ups;
 3450         uma_bucket_t bucket;
 3451         struct sbuf sbuf;
 3452         uma_cache_t cache;
 3453         uma_klink_t kl;
 3454         uma_keg_t kz;
 3455         uma_zone_t z;
 3456         uma_keg_t k;
 3457         int count, error, i;
 3458 
 3459         error = sysctl_wire_old_buffer(req, 0);
 3460         if (error != 0)
 3461                 return (error);
 3462         sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
 3463 
 3464         count = 0;
 3465         rw_rlock(&uma_rwlock);
 3466         LIST_FOREACH(kz, &uma_kegs, uk_link) {
 3467                 LIST_FOREACH(z, &kz->uk_zones, uz_link)
 3468                         count++;
 3469         }
 3470 
 3471         /*
 3472          * Insert stream header.
 3473          */
 3474         bzero(&ush, sizeof(ush));
 3475         ush.ush_version = UMA_STREAM_VERSION;
 3476         ush.ush_maxcpus = (mp_maxid + 1);
 3477         ush.ush_count = count;
 3478         (void)sbuf_bcat(&sbuf, &ush, sizeof(ush));
 3479 
 3480         LIST_FOREACH(kz, &uma_kegs, uk_link) {
 3481                 LIST_FOREACH(z, &kz->uk_zones, uz_link) {
 3482                         bzero(&uth, sizeof(uth));
 3483                         ZONE_LOCK(z);
 3484                         strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
 3485                         uth.uth_align = kz->uk_align;
 3486                         uth.uth_size = kz->uk_size;
 3487                         uth.uth_rsize = kz->uk_rsize;
 3488                         LIST_FOREACH(kl, &z->uz_kegs, kl_link) {
 3489                                 k = kl->kl_keg;
 3490                                 uth.uth_maxpages += k->uk_maxpages;
 3491                                 uth.uth_pages += k->uk_pages;
 3492                                 uth.uth_keg_free += k->uk_free;
 3493                                 uth.uth_limit = (k->uk_maxpages / k->uk_ppera)
 3494                                     * k->uk_ipers;
 3495                         }
 3496 
 3497                         /*
 3498                          * A zone is secondary is it is not the first entry
 3499                          * on the keg's zone list.
 3500                          */
 3501                         if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
 3502                             (LIST_FIRST(&kz->uk_zones) != z))
 3503                                 uth.uth_zone_flags = UTH_ZONE_SECONDARY;
 3504 
 3505                         LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
 3506                                 uth.uth_zone_free += bucket->ub_cnt;
 3507                         uth.uth_allocs = z->uz_allocs;
 3508                         uth.uth_frees = z->uz_frees;
 3509                         uth.uth_fails = z->uz_fails;
 3510                         uth.uth_sleeps = z->uz_sleeps;
 3511                         (void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
 3512                         /*
 3513                          * While it is not normally safe to access the cache
 3514                          * bucket pointers while not on the CPU that owns the
 3515                          * cache, we only allow the pointers to be exchanged
 3516                          * without the zone lock held, not invalidated, so
 3517                          * accept the possible race associated with bucket
 3518                          * exchange during monitoring.
 3519                          */
 3520                         for (i = 0; i < (mp_maxid + 1); i++) {
 3521                                 bzero(&ups, sizeof(ups));
 3522                                 if (kz->uk_flags & UMA_ZFLAG_INTERNAL)
 3523                                         goto skip;
 3524                                 if (CPU_ABSENT(i))
 3525                                         goto skip;
 3526                                 cache = &z->uz_cpu[i];
 3527                                 if (cache->uc_allocbucket != NULL)
 3528                                         ups.ups_cache_free +=
 3529                                             cache->uc_allocbucket->ub_cnt;
 3530                                 if (cache->uc_freebucket != NULL)
 3531                                         ups.ups_cache_free +=
 3532                                             cache->uc_freebucket->ub_cnt;
 3533                                 ups.ups_allocs = cache->uc_allocs;
 3534                                 ups.ups_frees = cache->uc_frees;
 3535 skip:
 3536                                 (void)sbuf_bcat(&sbuf, &ups, sizeof(ups));
 3537                         }
 3538                         ZONE_UNLOCK(z);
 3539                 }
 3540         }
 3541         rw_runlock(&uma_rwlock);
 3542         error = sbuf_finish(&sbuf);
 3543         sbuf_delete(&sbuf);
 3544         return (error);
 3545 }
 3546 
 3547 int
 3548 sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS)
 3549 {
 3550         uma_zone_t zone = *(uma_zone_t *)arg1;
 3551         int error, max, old;
 3552 
 3553         old = max = uma_zone_get_max(zone);
 3554         error = sysctl_handle_int(oidp, &max, 0, req);
 3555         if (error || !req->newptr)
 3556                 return (error);
 3557 
 3558         if (max < old)
 3559                 return (EINVAL);
 3560 
 3561         uma_zone_set_max(zone, max);
 3562 
 3563         return (0);
 3564 }
 3565 
 3566 int
 3567 sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS)
 3568 {
 3569         uma_zone_t zone = *(uma_zone_t *)arg1;
 3570         int cur;
 3571 
 3572         cur = uma_zone_get_cur(zone);
 3573         return (sysctl_handle_int(oidp, &cur, 0, req));
 3574 }
 3575 
 3576 #ifdef DDB
 3577 DB_SHOW_COMMAND(uma, db_show_uma)
 3578 {
 3579         uint64_t allocs, frees, sleeps;
 3580         uma_bucket_t bucket;
 3581         uma_keg_t kz;
 3582         uma_zone_t z;
 3583         int cachefree;
 3584 
 3585         db_printf("%18s %8s %8s %8s %12s %8s %8s\n", "Zone", "Size", "Used",
 3586             "Free", "Requests", "Sleeps", "Bucket");
 3587         LIST_FOREACH(kz, &uma_kegs, uk_link) {
 3588                 LIST_FOREACH(z, &kz->uk_zones, uz_link) {
 3589                         if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
 3590                                 allocs = z->uz_allocs;
 3591                                 frees = z->uz_frees;
 3592                                 sleeps = z->uz_sleeps;
 3593                                 cachefree = 0;
 3594                         } else
 3595                                 uma_zone_sumstat(z, &cachefree, &allocs,
 3596                                     &frees, &sleeps);
 3597                         if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
 3598                             (LIST_FIRST(&kz->uk_zones) != z)))
 3599                                 cachefree += kz->uk_free;
 3600                         LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
 3601                                 cachefree += bucket->ub_cnt;
 3602                         db_printf("%18s %8ju %8jd %8d %12ju %8ju %8u\n",
 3603                             z->uz_name, (uintmax_t)kz->uk_size,
 3604                             (intmax_t)(allocs - frees), cachefree,
 3605                             (uintmax_t)allocs, sleeps, z->uz_count);
 3606                         if (db_pager_quit)
 3607                                 return;
 3608                 }
 3609         }
 3610 }
 3611 
 3612 DB_SHOW_COMMAND(umacache, db_show_umacache)
 3613 {
 3614         uint64_t allocs, frees;
 3615         uma_bucket_t bucket;
 3616         uma_zone_t z;
 3617         int cachefree;
 3618 
 3619         db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
 3620             "Requests", "Bucket");
 3621         LIST_FOREACH(z, &uma_cachezones, uz_link) {
 3622                 uma_zone_sumstat(z, &cachefree, &allocs, &frees, NULL);
 3623                 LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
 3624                         cachefree += bucket->ub_cnt;
 3625                 db_printf("%18s %8ju %8jd %8d %12ju %8u\n",
 3626                     z->uz_name, (uintmax_t)z->uz_size,
 3627                     (intmax_t)(allocs - frees), cachefree,
 3628                     (uintmax_t)allocs, z->uz_count);
 3629                 if (db_pager_quit)
 3630                         return;
 3631         }
 3632 }
 3633 #endif

Cache object: 117f6aff5690b36447d4b45b1f495ee8


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.