FreeBSD/Linux Kernel Cross Reference
sys/vm/uma_core.c


    1 /*-
    2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
    3  *
    4  * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
    5  * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
    6  * Copyright (c) 2004-2006 Robert N. M. Watson
    7  * All rights reserved.
    8  *
    9  * Redistribution and use in source and binary forms, with or without
   10  * modification, are permitted provided that the following conditions
   11  * are met:
   12  * 1. Redistributions of source code must retain the above copyright
   13  *    notice unmodified, this list of conditions, and the following
   14  *    disclaimer.
   15  * 2. Redistributions in binary form must reproduce the above copyright
   16  *    notice, this list of conditions and the following disclaimer in the
   17  *    documentation and/or other materials provided with the distribution.
   18  *
   19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   20  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   21  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   22  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   23  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   24  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   28  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   29  */
   30 
   31 /*
    32  * uma_core.c  Implementation of the Universal Memory Allocator
   33  *
   34  * This allocator is intended to replace the multitude of similar object caches
   35  * in the standard FreeBSD kernel.  The intent is to be flexible as well as
   36  * efficient.  A primary design goal is to return unused memory to the rest of
   37  * the system.  This will make the system as a whole more flexible due to the
   38  * ability to move memory to subsystems which most need it instead of leaving
   39  * pools of reserved memory unused.
   40  *
   41  * The basic ideas stem from similar slab/zone based allocators whose algorithms
   42  * are well known.
   43  *
   44  */
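/*
 * Editor's sketch (not part of this file): minimal consumer-side use of
 * the public API implemented here.  The "foo" type, zone name, and helper
 * functions are hypothetical; uma_zcreate(), uma_zalloc(), uma_zfree(),
 * and uma_zdestroy() are the real entry points declared in <vm/uma.h>.
 */
#if 0
#include <sys/param.h>
#include <sys/malloc.h>
#include <vm/uma.h>

struct foo {
	int	f_state;
};

static uma_zone_t foo_zone;

static void
foo_sysinit(void)
{
	/* No ctor/dtor/uminit/fini callbacks; items arrive uninitialized. */
	foo_zone = uma_zcreate("foo", sizeof(struct foo),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
}

static struct foo *
foo_alloc(void)
{
	/* M_ZERO asks the allocator to zero the item on each allocation. */
	return (uma_zalloc(foo_zone, M_WAITOK | M_ZERO));
}

static void
foo_free(struct foo *fp)
{
	uma_zfree(foo_zone, fp);
}

static void
foo_sysuninit(void)
{
	uma_zdestroy(foo_zone);
}
#endif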
   45 
   46 /*
   47  * TODO:
   48  *      - Improve memory usage for large allocations
   49  *      - Investigate cache size adjustments
   50  */
   51 
   52 #include <sys/cdefs.h>
   53 __FBSDID("$FreeBSD: releng/12.0/sys/vm/uma_core.c 340401 2018-11-13 18:21:47Z markj $");
   54 
   55 #include "opt_ddb.h"
   56 #include "opt_param.h"
   57 #include "opt_vm.h"
   58 
   59 #include <sys/param.h>
   60 #include <sys/systm.h>
   61 #include <sys/bitset.h>
   62 #include <sys/domainset.h>
   63 #include <sys/eventhandler.h>
   64 #include <sys/kernel.h>
   65 #include <sys/types.h>
   66 #include <sys/limits.h>
   67 #include <sys/queue.h>
   68 #include <sys/malloc.h>
   69 #include <sys/ktr.h>
   70 #include <sys/lock.h>
   71 #include <sys/sysctl.h>
   72 #include <sys/mutex.h>
   73 #include <sys/proc.h>
   74 #include <sys/random.h>
   75 #include <sys/rwlock.h>
   76 #include <sys/sbuf.h>
   77 #include <sys/sched.h>
   78 #include <sys/smp.h>
   79 #include <sys/taskqueue.h>
   80 #include <sys/vmmeter.h>
   81 
   82 #include <vm/vm.h>
   83 #include <vm/vm_domainset.h>
   84 #include <vm/vm_object.h>
   85 #include <vm/vm_page.h>
   86 #include <vm/vm_pageout.h>
   87 #include <vm/vm_param.h>
   88 #include <vm/vm_phys.h>
   89 #include <vm/vm_pagequeue.h>
   90 #include <vm/vm_map.h>
   91 #include <vm/vm_kern.h>
   92 #include <vm/vm_extern.h>
   93 #include <vm/uma.h>
   94 #include <vm/uma_int.h>
   95 #include <vm/uma_dbg.h>
   96 
   97 #include <ddb/ddb.h>
   98 
   99 #ifdef DEBUG_MEMGUARD
  100 #include <vm/memguard.h>
  101 #endif
  102 
  103 /*
   104  * These are the zones from which all other zones and kegs are spawned.
  105  */
  106 static uma_zone_t kegs;
  107 static uma_zone_t zones;
  108 
  109 /* This is the zone from which all offpage uma_slab_ts are allocated. */
  110 static uma_zone_t slabzone;
  111 
  112 /*
  113  * The initial hash tables come out of this zone so they can be allocated
  114  * prior to malloc coming up.
  115  */
  116 static uma_zone_t hashzone;
  117 
  118 /* The boot-time adjusted value for cache line alignment. */
  119 int uma_align_cache = 64 - 1;
  120 
  121 static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
  122 
  123 /*
  124  * Are we allowed to allocate buckets?
  125  */
  126 static int bucketdisable = 1;
  127 
  128 /* Linked list of all kegs in the system */
  129 static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);
  130 
  131 /* Linked list of all cache-only zones in the system */
  132 static LIST_HEAD(,uma_zone) uma_cachezones =
  133     LIST_HEAD_INITIALIZER(uma_cachezones);
  134 
  135 /* This RW lock protects the keg list */
  136 static struct rwlock_padalign __exclusive_cache_line uma_rwlock;
  137 
  138 /*
   139  * Pointer to and counter for the pool of pages that is preallocated
   140  * at startup to bootstrap UMA.
  141  */
  142 static char *bootmem;
  143 static int boot_pages;
  144 
  145 static struct sx uma_drain_lock;
  146 
  147 /* kmem soft limit. */
  148 static unsigned long uma_kmem_limit = LONG_MAX;
  149 static volatile unsigned long uma_kmem_total;
  150 
  151 /* Is the VM done starting up? */
  152 static enum { BOOT_COLD = 0, BOOT_STRAPPED, BOOT_PAGEALLOC, BOOT_BUCKETS,
  153     BOOT_RUNNING } booted = BOOT_COLD;
  154 
  155 /*
  156  * This is the handle used to schedule events that need to happen
  157  * outside of the allocation fast path.
  158  */
  159 static struct callout uma_callout;
  160 #define UMA_TIMEOUT     20              /* Seconds for callout interval. */
  161 
  162 /*
  163  * This structure is passed as the zone ctor arg so that I don't have to create
  164  * a special allocation function just for zones.
  165  */
  166 struct uma_zctor_args {
  167         const char *name;
  168         size_t size;
  169         uma_ctor ctor;
  170         uma_dtor dtor;
  171         uma_init uminit;
  172         uma_fini fini;
  173         uma_import import;
  174         uma_release release;
  175         void *arg;
  176         uma_keg_t keg;
  177         int align;
  178         uint32_t flags;
  179 };
  180 
  181 struct uma_kctor_args {
  182         uma_zone_t zone;
  183         size_t size;
  184         uma_init uminit;
  185         uma_fini fini;
  186         int align;
  187         uint32_t flags;
  188 };
  189 
  190 struct uma_bucket_zone {
  191         uma_zone_t      ubz_zone;
  192         char            *ubz_name;
  193         int             ubz_entries;    /* Number of items it can hold. */
  194         int             ubz_maxsize;    /* Maximum allocation size per-item. */
  195 };
  196 
  197 /*
   198  * Compute the number of bucket entries that fit in a given number of
   199  * pointer-sized slots, packing buckets into power-of-two sizes.
  200  */
  201 #define BUCKET_SIZE(n)                                          \
  202     (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *))
  203 
  204 #define BUCKET_MAX      BUCKET_SIZE(256)
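/*
 * Editor's note: a worked instance of BUCKET_SIZE(), assuming LP64
 * (8-byte pointers) and, hypothetically, a 16-byte struct uma_bucket
 * header:
 *
 *	BUCKET_SIZE(4)   = ((8 * 4)   - 16) / 8 = 2 entries
 *	BUCKET_SIZE(128) = ((8 * 128) - 16) / 8 = 126 entries
 *
 * Each "N Bucket" zone below therefore sizes its items to exactly N
 * pointer slots, with the header occupying the first slots.
 */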
  205 
  206 struct uma_bucket_zone bucket_zones[] = {
  207         { NULL, "4 Bucket", BUCKET_SIZE(4), 4096 },
  208         { NULL, "6 Bucket", BUCKET_SIZE(6), 3072 },
  209         { NULL, "8 Bucket", BUCKET_SIZE(8), 2048 },
  210         { NULL, "12 Bucket", BUCKET_SIZE(12), 1536 },
  211         { NULL, "16 Bucket", BUCKET_SIZE(16), 1024 },
  212         { NULL, "32 Bucket", BUCKET_SIZE(32), 512 },
  213         { NULL, "64 Bucket", BUCKET_SIZE(64), 256 },
  214         { NULL, "128 Bucket", BUCKET_SIZE(128), 128 },
  215         { NULL, "256 Bucket", BUCKET_SIZE(256), 64 },
  216         { NULL, NULL, 0}
  217 };
  218 
  219 /*
  220  * Flags and enumerations to be passed to internal functions.
  221  */
  222 enum zfreeskip { SKIP_NONE = 0, SKIP_DTOR, SKIP_FINI };
  223 
  224 #define UMA_ANYDOMAIN   -1      /* Special value for domain search. */
  225 
   226 /* Prototypes. */
  227 
  228 int     uma_startup_count(int);
  229 void    uma_startup(void *, int);
  230 void    uma_startup1(void);
  231 void    uma_startup2(void);
  232 
  233 static void *noobj_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
  234 static void *page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
  235 static void *pcpu_page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
  236 static void *startup_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
  237 static void page_free(void *, vm_size_t, uint8_t);
  238 static void pcpu_page_free(void *, vm_size_t, uint8_t);
  239 static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int, int);
  240 static void cache_drain(uma_zone_t);
  241 static void bucket_drain(uma_zone_t, uma_bucket_t);
  242 static void bucket_cache_drain(uma_zone_t zone);
  243 static int keg_ctor(void *, int, void *, int);
  244 static void keg_dtor(void *, int, void *);
  245 static int zone_ctor(void *, int, void *, int);
  246 static void zone_dtor(void *, int, void *);
  247 static int zero_init(void *, int, int);
  248 static void keg_small_init(uma_keg_t keg);
  249 static void keg_large_init(uma_keg_t keg);
  250 static void zone_foreach(void (*zfunc)(uma_zone_t));
  251 static void zone_timeout(uma_zone_t zone);
  252 static int hash_alloc(struct uma_hash *);
  253 static int hash_expand(struct uma_hash *, struct uma_hash *);
  254 static void hash_free(struct uma_hash *hash);
  255 static void uma_timeout(void *);
  256 static void uma_startup3(void);
  257 static void *zone_alloc_item(uma_zone_t, void *, int, int);
  258 static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
  259 static void bucket_enable(void);
  260 static void bucket_init(void);
  261 static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
  262 static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
  263 static void bucket_zone_drain(void);
  264 static uma_bucket_t zone_alloc_bucket(uma_zone_t, void *, int, int);
  265 static uma_slab_t zone_fetch_slab(uma_zone_t, uma_keg_t, int, int);
  266 static uma_slab_t zone_fetch_slab_multi(uma_zone_t, uma_keg_t, int, int);
  267 static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
  268 static void slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item);
  269 static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
  270     uma_fini fini, int align, uint32_t flags);
  271 static int zone_import(uma_zone_t, void **, int, int, int);
  272 static void zone_release(uma_zone_t, void **, int);
  273 static void uma_zero_item(void *, uma_zone_t);
  274 
  275 void uma_print_zone(uma_zone_t);
  276 void uma_print_stats(void);
  277 static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
  278 static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
  279 
  280 #ifdef INVARIANTS
  281 static bool uma_dbg_kskip(uma_keg_t keg, void *mem);
  282 static bool uma_dbg_zskip(uma_zone_t zone, void *mem);
  283 static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
  284 static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item);
  285 
  286 static SYSCTL_NODE(_vm, OID_AUTO, debug, CTLFLAG_RD, 0,
  287     "Memory allocation debugging");
  288 
  289 static u_int dbg_divisor = 1;
  290 SYSCTL_UINT(_vm_debug, OID_AUTO, divisor,
  291     CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &dbg_divisor, 0,
  292     "Debug & thrash every this item in memory allocator");
  293 
  294 static counter_u64_t uma_dbg_cnt = EARLY_COUNTER;
  295 static counter_u64_t uma_skip_cnt = EARLY_COUNTER;
  296 SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, trashed, CTLFLAG_RD,
  297     &uma_dbg_cnt, "memory items debugged");
  298 SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, skipped, CTLFLAG_RD,
  299     &uma_skip_cnt, "memory items skipped, not debugged");
  300 #endif
  301 
  302 SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
  303 
  304 SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT,
  305     0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");
  306 
  307 SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
  308     0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");
  309 
  310 static int zone_warnings = 1;
  311 SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RWTUN, &zone_warnings, 0,
  312     "Warn when UMA zones becomes full");
  313 
  314 /* Adjust bytes under management by UMA. */
  315 static inline void
  316 uma_total_dec(unsigned long size)
  317 {
  318 
  319         atomic_subtract_long(&uma_kmem_total, size);
  320 }
  321 
  322 static inline void
  323 uma_total_inc(unsigned long size)
  324 {
  325 
  326         if (atomic_fetchadd_long(&uma_kmem_total, size) > uma_kmem_limit)
  327                 uma_reclaim_wakeup();
  328 }
  329 
  330 /*
  331  * This routine checks to see whether or not it's safe to enable buckets.
  332  */
  333 static void
  334 bucket_enable(void)
  335 {
  336         bucketdisable = vm_page_count_min();
  337 }
  338 
  339 /*
  340  * Initialize bucket_zones, the array of zones of buckets of various sizes.
  341  *
  342  * For each zone, calculate the memory required for each bucket, consisting
  343  * of the header and an array of pointers.
  344  */
  345 static void
  346 bucket_init(void)
  347 {
  348         struct uma_bucket_zone *ubz;
  349         int size;
  350 
  351         for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) {
  352                 size = roundup(sizeof(struct uma_bucket), sizeof(void *));
  353                 size += sizeof(void *) * ubz->ubz_entries;
  354                 ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
  355                     NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
  356                     UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET | UMA_ZONE_NUMA);
  357         }
  358 }
  359 
  360 /*
  361  * Given a desired number of entries for a bucket, return the zone from which
  362  * to allocate the bucket.
  363  */
  364 static struct uma_bucket_zone *
  365 bucket_zone_lookup(int entries)
  366 {
  367         struct uma_bucket_zone *ubz;
  368 
  369         for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
  370                 if (ubz->ubz_entries >= entries)
  371                         return (ubz);
  372         ubz--;
  373         return (ubz);
  374 }
  375 
  376 static int
  377 bucket_select(int size)
  378 {
  379         struct uma_bucket_zone *ubz;
  380 
  381         ubz = &bucket_zones[0];
  382         if (size > ubz->ubz_maxsize)
  383                 return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1);
  384 
  385         for (; ubz->ubz_entries != 0; ubz++)
  386                 if (ubz->ubz_maxsize < size)
  387                         break;
  388         ubz--;
  389         return (ubz->ubz_entries);
  390 }
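/*
 * Editor's note: a worked example of bucket_select() against the
 * bucket_zones[] table above.  For a zone with 700-byte items, the loop
 * walks the table until ubz_maxsize drops below the item size ("32
 * Bucket", maxsize 512), steps back one entry, and returns the entry
 * count of the "16 Bucket" zone.  For items larger than 4096 bytes the
 * first branch instead scales the smallest zone's entry count down in
 * proportion to the item size, but never below one.
 */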
  391 
  392 static uma_bucket_t
  393 bucket_alloc(uma_zone_t zone, void *udata, int flags)
  394 {
  395         struct uma_bucket_zone *ubz;
  396         uma_bucket_t bucket;
  397 
  398         /*
   399          * This stops us from allocating per-CPU buckets while the boot-time
   400          * page cache (vm.boot_pages) is still in use, since we would
   401          * otherwise exhaust the boot pages.  It also prevents us from
   402          * allocating buckets in low memory situations.
  403          */
  404         if (bucketdisable)
  405                 return (NULL);
  406         /*
  407          * To limit bucket recursion we store the original zone flags
  408          * in a cookie passed via zalloc_arg/zfree_arg.  This allows the
  409          * NOVM flag to persist even through deep recursions.  We also
  410          * store ZFLAG_BUCKET once we have recursed attempting to allocate
  411          * a bucket for a bucket zone so we do not allow infinite bucket
  412          * recursion.  This cookie will even persist to frees of unused
  413          * buckets via the allocation path or bucket allocations in the
  414          * free path.
  415          */
  416         if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
  417                 udata = (void *)(uintptr_t)zone->uz_flags;
  418         else {
  419                 if ((uintptr_t)udata & UMA_ZFLAG_BUCKET)
  420                         return (NULL);
  421                 udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET);
  422         }
  423         if ((uintptr_t)udata & UMA_ZFLAG_CACHEONLY)
  424                 flags |= M_NOVM;
  425         ubz = bucket_zone_lookup(zone->uz_count);
  426         if (ubz->ubz_zone == zone && (ubz + 1)->ubz_entries != 0)
  427                 ubz++;
  428         bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags);
  429         if (bucket) {
  430 #ifdef INVARIANTS
  431                 bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
  432 #endif
  433                 bucket->ub_cnt = 0;
  434                 bucket->ub_entries = ubz->ubz_entries;
  435         }
  436 
  437         return (bucket);
  438 }
  439 
  440 static void
  441 bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata)
  442 {
  443         struct uma_bucket_zone *ubz;
  444 
  445         KASSERT(bucket->ub_cnt == 0,
  446             ("bucket_free: Freeing a non free bucket."));
  447         if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
  448                 udata = (void *)(uintptr_t)zone->uz_flags;
  449         ubz = bucket_zone_lookup(bucket->ub_entries);
  450         uma_zfree_arg(ubz->ubz_zone, bucket, udata);
  451 }
  452 
  453 static void
  454 bucket_zone_drain(void)
  455 {
  456         struct uma_bucket_zone *ubz;
  457 
  458         for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
  459                 zone_drain(ubz->ubz_zone);
  460 }
  461 
  462 static void
  463 zone_log_warning(uma_zone_t zone)
  464 {
  465         static const struct timeval warninterval = { 300, 0 };
  466 
  467         if (!zone_warnings || zone->uz_warning == NULL)
  468                 return;
  469 
  470         if (ratecheck(&zone->uz_ratecheck, &warninterval))
  471                 printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning);
  472 }
  473 
  474 static inline void
  475 zone_maxaction(uma_zone_t zone)
  476 {
  477 
  478         if (zone->uz_maxaction.ta_func != NULL)
  479                 taskqueue_enqueue(taskqueue_thread, &zone->uz_maxaction);
  480 }
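/*
 * Editor's note: both notification paths above are driven through the
 * public API.  A sketch (the zone variable is hypothetical):
 *
 *	uma_zone_set_warning(foo_zone, "out of foo objects");
 *
 * zone_log_warning() then rate-limits that message to one per
 * warninterval (300 seconds).  Similarly, uma_zone_set_maxaction()
 * registers the task that zone_maxaction() enqueues on taskqueue_thread
 * when the zone hits its limit.
 */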
  481 
  482 static void
  483 zone_foreach_keg(uma_zone_t zone, void (*kegfn)(uma_keg_t))
  484 {
  485         uma_klink_t klink;
  486 
  487         LIST_FOREACH(klink, &zone->uz_kegs, kl_link)
  488                 kegfn(klink->kl_keg);
  489 }
  490 
  491 /*
   492  * Routine called from a timeout to fire off time-interval-based
   493  * calculations (stats, hash size, etc.).
  494  *
  495  * Arguments:
  496  *      arg   Unused
  497  *
  498  * Returns:
  499  *      Nothing
  500  */
  501 static void
  502 uma_timeout(void *unused)
  503 {
  504         bucket_enable();
  505         zone_foreach(zone_timeout);
  506 
  507         /* Reschedule this event */
  508         callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
  509 }
  510 
  511 /*
   512  * Routine to perform timeout driven calculations.  At present this
   513  * just expands the keg hash tables as needed.
  514  *
  515  *  Returns nothing.
  516  */
  517 static void
  518 keg_timeout(uma_keg_t keg)
  519 {
  520 
  521         KEG_LOCK(keg);
  522         /*
  523          * Expand the keg hash table.
  524          *
  525          * This is done if the number of slabs is larger than the hash size.
   526          * What I'm trying to do here is eliminate collisions entirely.  This
   527          * may be a little aggressive.  Should I allow for two collisions max?
  528          */
  529         if (keg->uk_flags & UMA_ZONE_HASH &&
  530             keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) {
  531                 struct uma_hash newhash;
  532                 struct uma_hash oldhash;
  533                 int ret;
  534 
  535                 /*
  536                  * This is so involved because allocating and freeing
  537                  * while the keg lock is held will lead to deadlock.
  538                  * I have to do everything in stages and check for
  539                  * races.
  540                  */
  541                 newhash = keg->uk_hash;
  542                 KEG_UNLOCK(keg);
  543                 ret = hash_alloc(&newhash);
  544                 KEG_LOCK(keg);
  545                 if (ret) {
  546                         if (hash_expand(&keg->uk_hash, &newhash)) {
  547                                 oldhash = keg->uk_hash;
  548                                 keg->uk_hash = newhash;
  549                         } else
  550                                 oldhash = newhash;
  551 
  552                         KEG_UNLOCK(keg);
  553                         hash_free(&oldhash);
  554                         return;
  555                 }
  556         }
  557         KEG_UNLOCK(keg);
  558 }
  559 
  560 static void
  561 zone_timeout(uma_zone_t zone)
  562 {
  563 
  564         zone_foreach_keg(zone, &keg_timeout);
  565 }
  566 
  567 /*
   568  * Allocate and zero fill the next-larger hash table from the appropriate
  569  * backing store.
  570  *
  571  * Arguments:
  572  *      hash  A new hash structure with the old hash size in uh_hashsize
  573  *
  574  * Returns:
  575  *      1 on success and 0 on failure.
  576  */
  577 static int
  578 hash_alloc(struct uma_hash *hash)
  579 {
  580         int oldsize;
  581         int alloc;
  582 
  583         oldsize = hash->uh_hashsize;
  584 
  585         /* We're just going to go to a power of two greater */
  586         if (oldsize)  {
  587                 hash->uh_hashsize = oldsize * 2;
  588                 alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
  589                 hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
  590                     M_UMAHASH, M_NOWAIT);
  591         } else {
  592                 alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
  593                 hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
  594                     UMA_ANYDOMAIN, M_WAITOK);
  595                 hash->uh_hashsize = UMA_HASH_SIZE_INIT;
  596         }
  597         if (hash->uh_slab_hash) {
  598                 bzero(hash->uh_slab_hash, alloc);
  599                 hash->uh_hashmask = hash->uh_hashsize - 1;
  600                 return (1);
  601         }
  602 
  603         return (0);
  604 }
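/*
 * Editor's note: growth is a straight doubling.  A table that starts at
 * UMA_HASH_SIZE_INIT entries grows to 2x, 4x, 8x, ... that size, and
 * uh_hashmask is kept at uh_hashsize - 1 so that UMA_HASH() can select a
 * bucket with a mask instead of a modulo.
 */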
  605 
  606 /*
  607  * Expands the hash table for HASH zones.  This is done from zone_timeout
  608  * to reduce collisions.  This must not be done in the regular allocation
  609  * path, otherwise, we can recurse on the vm while allocating pages.
  610  *
  611  * Arguments:
  612  *      oldhash  The hash you want to expand
  613  *      newhash  The hash structure for the new table
  614  *
   615  * Returns:
   616  *      0 if the expansion was not performed (no new table, or the new
   617  *      table is not larger than the old one); 1 if the slabs were
   618  *      rehashed into the new table.
   619  */
  620 static int
  621 hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
  622 {
  623         uma_slab_t slab;
  624         int hval;
  625         int i;
  626 
  627         if (!newhash->uh_slab_hash)
  628                 return (0);
  629 
  630         if (oldhash->uh_hashsize >= newhash->uh_hashsize)
  631                 return (0);
  632 
  633         /*
  634          * I need to investigate hash algorithms for resizing without a
  635          * full rehash.
  636          */
  637 
  638         for (i = 0; i < oldhash->uh_hashsize; i++)
  639                 while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) {
  640                         slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]);
  641                         SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink);
  642                         hval = UMA_HASH(newhash, slab->us_data);
  643                         SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
  644                             slab, us_hlink);
  645                 }
  646 
  647         return (1);
  648 }
  649 
  650 /*
  651  * Free the hash bucket to the appropriate backing store.
  652  *
  653  * Arguments:
   654  *      hash  The hash structure whose uh_slab_hash bucket array
   655  *            we're freeing
  656  *
  657  * Returns:
  658  *      Nothing
  659  */
  660 static void
  661 hash_free(struct uma_hash *hash)
  662 {
  663         if (hash->uh_slab_hash == NULL)
  664                 return;
  665         if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
  666                 zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE);
  667         else
  668                 free(hash->uh_slab_hash, M_UMAHASH);
  669 }
  670 
  671 /*
  672  * Frees all outstanding items in a bucket
  673  *
  674  * Arguments:
  675  *      zone   The zone to free to, must be unlocked.
  676  *      bucket The free/alloc bucket with items, cpu queue must be locked.
  677  *
  678  * Returns:
  679  *      Nothing
  680  */
  681 
  682 static void
  683 bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
  684 {
  685         int i;
  686 
  687         if (bucket == NULL)
  688                 return;
  689 
  690         if (zone->uz_fini)
  691                 for (i = 0; i < bucket->ub_cnt; i++) 
  692                         zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
  693         zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
  694         bucket->ub_cnt = 0;
  695 }
  696 
  697 /*
  698  * Drains the per cpu caches for a zone.
  699  *
   700  * NOTE: This may only be called while the zone is being torn down, and not
   701  * during normal operation.  This is necessary so that we do not have
   702  * to migrate CPUs to drain the per-CPU caches.
  703  *
  704  * Arguments:
  705  *      zone     The zone to drain, must be unlocked.
  706  *
  707  * Returns:
  708  *      Nothing
  709  */
  710 static void
  711 cache_drain(uma_zone_t zone)
  712 {
  713         uma_cache_t cache;
  714         int cpu;
  715 
  716         /*
  717          * XXX: It is safe to not lock the per-CPU caches, because we're
  718          * tearing down the zone anyway.  I.e., there will be no further use
  719          * of the caches at this point.
  720          *
   721          * XXX: It would be good to be able to assert that the zone is being
   722          * torn down to prevent improper use of cache_drain().
  723          *
  724          * XXX: We lock the zone before passing into bucket_cache_drain() as
  725          * it is used elsewhere.  Should the tear-down path be made special
  726          * there in some form?
  727          */
  728         CPU_FOREACH(cpu) {
  729                 cache = &zone->uz_cpu[cpu];
  730                 bucket_drain(zone, cache->uc_allocbucket);
  731                 bucket_drain(zone, cache->uc_freebucket);
  732                 if (cache->uc_allocbucket != NULL)
  733                         bucket_free(zone, cache->uc_allocbucket, NULL);
  734                 if (cache->uc_freebucket != NULL)
  735                         bucket_free(zone, cache->uc_freebucket, NULL);
  736                 cache->uc_allocbucket = cache->uc_freebucket = NULL;
  737         }
  738         ZONE_LOCK(zone);
  739         bucket_cache_drain(zone);
  740         ZONE_UNLOCK(zone);
  741 }
  742 
  743 static void
  744 cache_shrink(uma_zone_t zone)
  745 {
  746 
  747         if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
  748                 return;
  749 
  750         ZONE_LOCK(zone);
  751         zone->uz_count = (zone->uz_count_min + zone->uz_count) / 2;
  752         ZONE_UNLOCK(zone);
  753 }
  754 
  755 static void
  756 cache_drain_safe_cpu(uma_zone_t zone)
  757 {
  758         uma_cache_t cache;
  759         uma_bucket_t b1, b2;
  760         int domain;
  761 
  762         if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
  763                 return;
  764 
  765         b1 = b2 = NULL;
  766         ZONE_LOCK(zone);
  767         critical_enter();
  768         if (zone->uz_flags & UMA_ZONE_NUMA)
  769                 domain = PCPU_GET(domain);
  770         else
  771                 domain = 0;
  772         cache = &zone->uz_cpu[curcpu];
  773         if (cache->uc_allocbucket) {
  774                 if (cache->uc_allocbucket->ub_cnt != 0)
  775                         LIST_INSERT_HEAD(&zone->uz_domain[domain].uzd_buckets,
  776                             cache->uc_allocbucket, ub_link);
  777                 else
  778                         b1 = cache->uc_allocbucket;
  779                 cache->uc_allocbucket = NULL;
  780         }
  781         if (cache->uc_freebucket) {
  782                 if (cache->uc_freebucket->ub_cnt != 0)
  783                         LIST_INSERT_HEAD(&zone->uz_domain[domain].uzd_buckets,
  784                             cache->uc_freebucket, ub_link);
  785                 else
  786                         b2 = cache->uc_freebucket;
  787                 cache->uc_freebucket = NULL;
  788         }
  789         critical_exit();
  790         ZONE_UNLOCK(zone);
  791         if (b1)
  792                 bucket_free(zone, b1, NULL);
  793         if (b2)
  794                 bucket_free(zone, b2, NULL);
  795 }
  796 
  797 /*
   798  * Safely drain the per-CPU caches of a zone (or of all zones) into the
   799  * per-domain bucket lists.  This is an expensive call because it needs
   800  * to bind to all CPUs one by one and enter a critical section on each
   801  * of them in order to safely access their cache buckets.
   802  * The zone lock must not be held when calling this function.
  803  */
  804 static void
  805 cache_drain_safe(uma_zone_t zone)
  806 {
  807         int cpu;
  808 
  809         /*
   810          * Polite bucket size shrinking was not enough; shrink aggressively.
  811          */
  812         if (zone)
  813                 cache_shrink(zone);
  814         else
  815                 zone_foreach(cache_shrink);
  816 
  817         CPU_FOREACH(cpu) {
  818                 thread_lock(curthread);
  819                 sched_bind(curthread, cpu);
  820                 thread_unlock(curthread);
  821 
  822                 if (zone)
  823                         cache_drain_safe_cpu(zone);
  824                 else
  825                         zone_foreach(cache_drain_safe_cpu);
  826         }
  827         thread_lock(curthread);
  828         sched_unbind(curthread);
  829         thread_unlock(curthread);
  830 }
  831 
  832 /*
  833  * Drain the cached buckets from a zone.  Expects a locked zone on entry.
  834  */
  835 static void
  836 bucket_cache_drain(uma_zone_t zone)
  837 {
  838         uma_zone_domain_t zdom;
  839         uma_bucket_t bucket;
  840         int i;
  841 
  842         /*
  843          * Drain the bucket queues and free the buckets.
  844          */
  845         for (i = 0; i < vm_ndomains; i++) {
  846                 zdom = &zone->uz_domain[i];
  847                 while ((bucket = LIST_FIRST(&zdom->uzd_buckets)) != NULL) {
  848                         LIST_REMOVE(bucket, ub_link);
  849                         ZONE_UNLOCK(zone);
  850                         bucket_drain(zone, bucket);
  851                         bucket_free(zone, bucket, NULL);
  852                         ZONE_LOCK(zone);
  853                 }
  854         }
  855 
  856         /*
   857          * Shrink bucket sizes further.  The price of a single zone lock
   858          * collision is probably lower than the price of a global cache drain.
  859          */
  860         if (zone->uz_count > zone->uz_count_min)
  861                 zone->uz_count--;
  862 }
  863 
  864 static void
  865 keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
  866 {
  867         uint8_t *mem;
  868         int i;
  869         uint8_t flags;
  870 
  871         CTR4(KTR_UMA, "keg_free_slab keg %s(%p) slab %p, returning %d bytes",
  872             keg->uk_name, keg, slab, PAGE_SIZE * keg->uk_ppera);
  873 
  874         mem = slab->us_data;
  875         flags = slab->us_flags;
  876         i = start;
  877         if (keg->uk_fini != NULL) {
  878                 for (i--; i > -1; i--)
  879 #ifdef INVARIANTS
  880                 /*
  881                  * trash_fini implies that dtor was trash_dtor. trash_fini
  882                  * would check that memory hasn't been modified since free,
  883                  * which executed trash_dtor.
   884                  * That's why we need to run the uma_dbg_kskip() check here,
   885                  * even though we don't make the skip check for other
   886                  * init/fini invocations.
  887                  */
  888                 if (!uma_dbg_kskip(keg, slab->us_data + (keg->uk_rsize * i)) ||
  889                     keg->uk_fini != trash_fini)
  890 #endif
  891                         keg->uk_fini(slab->us_data + (keg->uk_rsize * i),
  892                             keg->uk_size);
  893         }
  894         if (keg->uk_flags & UMA_ZONE_OFFPAGE)
  895                 zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
  896         keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags);
  897         uma_total_dec(PAGE_SIZE * keg->uk_ppera);
  898 }
  899 
  900 /*
  901  * Frees pages from a keg back to the system.  This is done on demand from
  902  * the pageout daemon.
  903  *
  904  * Returns nothing.
  905  */
  906 static void
  907 keg_drain(uma_keg_t keg)
  908 {
  909         struct slabhead freeslabs = { 0 };
  910         uma_domain_t dom;
  911         uma_slab_t slab, tmp;
  912         int i;
  913 
  914         /*
   915          * We don't want to take pages from statically allocated kegs at this
   916          * time.
  917          */
  918         if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
  919                 return;
  920 
  921         CTR3(KTR_UMA, "keg_drain %s(%p) free items: %u",
  922             keg->uk_name, keg, keg->uk_free);
  923         KEG_LOCK(keg);
  924         if (keg->uk_free == 0)
  925                 goto finished;
  926 
  927         for (i = 0; i < vm_ndomains; i++) {
  928                 dom = &keg->uk_domain[i];
  929                 LIST_FOREACH_SAFE(slab, &dom->ud_free_slab, us_link, tmp) {
  930                         /* We have nowhere to free these to. */
  931                         if (slab->us_flags & UMA_SLAB_BOOT)
  932                                 continue;
  933 
  934                         LIST_REMOVE(slab, us_link);
  935                         keg->uk_pages -= keg->uk_ppera;
  936                         keg->uk_free -= keg->uk_ipers;
  937 
  938                         if (keg->uk_flags & UMA_ZONE_HASH)
  939                                 UMA_HASH_REMOVE(&keg->uk_hash, slab,
  940                                     slab->us_data);
  941 
  942                         SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
  943                 }
  944         }
  945 
  946 finished:
  947         KEG_UNLOCK(keg);
  948 
  949         while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
  950                 SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
  951                 keg_free_slab(keg, slab, keg->uk_ipers);
  952         }
  953 }
  954 
  955 static void
  956 zone_drain_wait(uma_zone_t zone, int waitok)
  957 {
  958 
  959         /*
  960          * Set draining to interlock with zone_dtor() so we can release our
   961          * locks as we go.  Only dtor() should do a WAITOK call since it
   962          * is the only caller that knows the structure will still be available
  963          * when it wakes up.
  964          */
  965         ZONE_LOCK(zone);
  966         while (zone->uz_flags & UMA_ZFLAG_DRAINING) {
  967                 if (waitok == M_NOWAIT)
  968                         goto out;
  969                 msleep(zone, zone->uz_lockptr, PVM, "zonedrain", 1);
  970         }
  971         zone->uz_flags |= UMA_ZFLAG_DRAINING;
  972         bucket_cache_drain(zone);
  973         ZONE_UNLOCK(zone);
  974         /*
  975          * The DRAINING flag protects us from being freed while
  976          * we're running.  Normally the uma_rwlock would protect us but we
  977          * must be able to release and acquire the right lock for each keg.
  978          */
  979         zone_foreach_keg(zone, &keg_drain);
  980         ZONE_LOCK(zone);
  981         zone->uz_flags &= ~UMA_ZFLAG_DRAINING;
  982         wakeup(zone);
  983 out:
  984         ZONE_UNLOCK(zone);
  985 }
  986 
  987 void
  988 zone_drain(uma_zone_t zone)
  989 {
  990 
  991         zone_drain_wait(zone, M_NOWAIT);
  992 }
  993 
  994 /*
  995  * Allocate a new slab for a keg.  This does not insert the slab onto a list.
  996  * If the allocation was successful, the keg lock will be held upon return,
  997  * otherwise the keg will be left unlocked.
  998  *
  999  * Arguments:
 1000  *      wait  Shall we wait?
 1001  *
 1002  * Returns:
 1003  *      The slab that was allocated or NULL if there is no memory and the
 1004  *      caller specified M_NOWAIT.
 1005  */
 1006 static uma_slab_t
 1007 keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int domain, int wait)
 1008 {
 1009         uma_alloc allocf;
 1010         uma_slab_t slab;
 1011         unsigned long size;
 1012         uint8_t *mem;
 1013         uint8_t flags;
 1014         int i;
 1015 
 1016         KASSERT(domain >= 0 && domain < vm_ndomains,
 1017             ("keg_alloc_slab: domain %d out of range", domain));
 1018         mtx_assert(&keg->uk_lock, MA_OWNED);
 1019 
 1020         allocf = keg->uk_allocf;
 1021         KEG_UNLOCK(keg);
 1022 
 1023         slab = NULL;
 1024         mem = NULL;
 1025         if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
 1026                 slab = zone_alloc_item(keg->uk_slabzone, NULL, domain, wait);
 1027                 if (slab == NULL)
 1028                         goto out;
 1029         }
 1030 
 1031         /*
 1032          * This reproduces the old vm_zone behavior of zero filling pages the
 1033          * first time they are added to a zone.
 1034          *
 1035          * Malloced items are zeroed in uma_zalloc.
 1036          */
 1037 
 1038         if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
 1039                 wait |= M_ZERO;
 1040         else
 1041                 wait &= ~M_ZERO;
 1042 
 1043         if (keg->uk_flags & UMA_ZONE_NODUMP)
 1044                 wait |= M_NODUMP;
 1045 
 1046         /* zone is passed for legacy reasons. */
 1047         size = keg->uk_ppera * PAGE_SIZE;
 1048         mem = allocf(zone, size, domain, &flags, wait);
 1049         if (mem == NULL) {
 1050                 if (keg->uk_flags & UMA_ZONE_OFFPAGE)
 1051                         zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
 1052                 slab = NULL;
 1053                 goto out;
 1054         }
 1055         uma_total_inc(size);
 1056 
 1057         /* Point the slab into the allocated memory */
 1058         if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
 1059                 slab = (uma_slab_t )(mem + keg->uk_pgoff);
 1060 
 1061         if (keg->uk_flags & UMA_ZONE_VTOSLAB)
 1062                 for (i = 0; i < keg->uk_ppera; i++)
 1063                         vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);
 1064 
 1065         slab->us_keg = keg;
 1066         slab->us_data = mem;
 1067         slab->us_freecount = keg->uk_ipers;
 1068         slab->us_flags = flags;
 1069         slab->us_domain = domain;
 1070         BIT_FILL(SLAB_SETSIZE, &slab->us_free);
 1071 #ifdef INVARIANTS
 1072         BIT_ZERO(SLAB_SETSIZE, &slab->us_debugfree);
 1073 #endif
 1074 
 1075         if (keg->uk_init != NULL) {
 1076                 for (i = 0; i < keg->uk_ipers; i++)
 1077                         if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
 1078                             keg->uk_size, wait) != 0)
 1079                                 break;
 1080                 if (i != keg->uk_ipers) {
 1081                         keg_free_slab(keg, slab, i);
 1082                         slab = NULL;
 1083                         goto out;
 1084                 }
 1085         }
 1086         KEG_LOCK(keg);
 1087 
 1088         CTR3(KTR_UMA, "keg_alloc_slab: allocated slab %p for %s(%p)",
 1089             slab, keg->uk_name, keg);
 1090 
 1091         if (keg->uk_flags & UMA_ZONE_HASH)
 1092                 UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
 1093 
 1094         keg->uk_pages += keg->uk_ppera;
 1095         keg->uk_free += keg->uk_ipers;
 1096 
 1097 out:
 1098         return (slab);
 1099 }
 1100 
 1101 /*
 1102  * This function is intended to be used early on in place of page_alloc() so
 1103  * that we may use the boot time page cache to satisfy allocations before
 1104  * the VM is ready.
 1105  */
 1106 static void *
 1107 startup_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
 1108     int wait)
 1109 {
 1110         uma_keg_t keg;
 1111         void *mem;
 1112         int pages;
 1113 
 1114         keg = zone_first_keg(zone);
 1115 
 1116         /*
  1117          * If we are in BOOT_BUCKETS or higher, then switch to the real
  1118          * allocator.  Zones with page-sized slabs switch at BOOT_PAGEALLOC.
 1119          */
 1120         switch (booted) {
 1121                 case BOOT_COLD:
 1122                 case BOOT_STRAPPED:
 1123                         break;
 1124                 case BOOT_PAGEALLOC:
 1125                         if (keg->uk_ppera > 1)
 1126                                 break;
 1127                 case BOOT_BUCKETS:
 1128                 case BOOT_RUNNING:
 1129 #ifdef UMA_MD_SMALL_ALLOC
 1130                         keg->uk_allocf = (keg->uk_ppera > 1) ?
 1131                             page_alloc : uma_small_alloc;
 1132 #else
 1133                         keg->uk_allocf = page_alloc;
 1134 #endif
 1135                         return keg->uk_allocf(zone, bytes, domain, pflag, wait);
 1136         }
 1137 
 1138         /*
 1139          * Check our small startup cache to see if it has pages remaining.
 1140          */
 1141         pages = howmany(bytes, PAGE_SIZE);
 1142         KASSERT(pages > 0, ("%s can't reserve 0 pages", __func__));
 1143         if (pages > boot_pages)
 1144                 panic("UMA zone \"%s\": Increase vm.boot_pages", zone->uz_name);
 1145 #ifdef DIAGNOSTIC
 1146         printf("%s from \"%s\", %d boot pages left\n", __func__, zone->uz_name,
 1147             boot_pages);
 1148 #endif
 1149         mem = bootmem;
 1150         boot_pages -= pages;
 1151         bootmem += pages * PAGE_SIZE;
 1152         *pflag = UMA_SLAB_BOOT;
 1153 
 1154         return (mem);
 1155 }
 1156 
 1157 /*
 1158  * Allocates a number of pages from the system
 1159  *
 1160  * Arguments:
 1161  *      bytes  The number of bytes requested
 1162  *      wait  Shall we wait?
 1163  *
 1164  * Returns:
  1165  *      A pointer to the allocated memory or possibly
  1166  *      NULL if M_NOWAIT is set.
 1167  */
 1168 static void *
 1169 page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
 1170     int wait)
 1171 {
 1172         void *p;        /* Returned page */
 1173 
 1174         *pflag = UMA_SLAB_KERNEL;
 1175         p = (void *)kmem_malloc_domainset(DOMAINSET_FIXED(domain), bytes, wait);
 1176 
 1177         return (p);
 1178 }
 1179 
 1180 static void *
 1181 pcpu_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
 1182     int wait)
 1183 {
 1184         struct pglist alloctail;
 1185         vm_offset_t addr, zkva;
 1186         int cpu, flags;
 1187         vm_page_t p, p_next;
 1188 #ifdef NUMA
 1189         struct pcpu *pc;
 1190 #endif
 1191 
 1192         MPASS(bytes == (mp_maxid + 1) * PAGE_SIZE);
 1193 
 1194         TAILQ_INIT(&alloctail);
 1195         flags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
 1196             malloc2vm_flags(wait);
 1197         *pflag = UMA_SLAB_KERNEL;
 1198         for (cpu = 0; cpu <= mp_maxid; cpu++) {
 1199                 if (CPU_ABSENT(cpu)) {
 1200                         p = vm_page_alloc(NULL, 0, flags);
 1201                 } else {
 1202 #ifndef NUMA
 1203                         p = vm_page_alloc(NULL, 0, flags);
 1204 #else
 1205                         pc = pcpu_find(cpu);
 1206                         p = vm_page_alloc_domain(NULL, 0, pc->pc_domain, flags);
 1207                         if (__predict_false(p == NULL))
 1208                                 p = vm_page_alloc(NULL, 0, flags);
 1209 #endif
 1210                 }
 1211                 if (__predict_false(p == NULL))
 1212                         goto fail;
 1213                 TAILQ_INSERT_TAIL(&alloctail, p, listq);
 1214         }
 1215         if ((addr = kva_alloc(bytes)) == 0)
 1216                 goto fail;
 1217         zkva = addr;
 1218         TAILQ_FOREACH(p, &alloctail, listq) {
 1219                 pmap_qenter(zkva, &p, 1);
 1220                 zkva += PAGE_SIZE;
 1221         }
 1222         return ((void*)addr);
 1223  fail:
 1224         TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
 1225                 vm_page_unwire(p, PQ_NONE);
 1226                 vm_page_free(p);
 1227         }
 1228         return (NULL);
 1229 }
 1230 
 1231 /*
  1232  * Allocates a number of pages not belonging to a VM object
 1233  *
 1234  * Arguments:
 1235  *      bytes  The number of bytes requested
 1236  *      wait   Shall we wait?
 1237  *
 1238  * Returns:
  1239  *      A pointer to the allocated memory or possibly
  1240  *      NULL if M_NOWAIT is set.
 1241  */
 1242 static void *
 1243 noobj_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags,
 1244     int wait)
 1245 {
 1246         TAILQ_HEAD(, vm_page) alloctail;
 1247         u_long npages;
 1248         vm_offset_t retkva, zkva;
 1249         vm_page_t p, p_next;
 1250         uma_keg_t keg;
 1251 
 1252         TAILQ_INIT(&alloctail);
 1253         keg = zone_first_keg(zone);
 1254 
 1255         npages = howmany(bytes, PAGE_SIZE);
 1256         while (npages > 0) {
 1257                 p = vm_page_alloc_domain(NULL, 0, domain, VM_ALLOC_INTERRUPT |
 1258                     VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
 1259                     ((wait & M_WAITOK) != 0 ? VM_ALLOC_WAITOK :
 1260                     VM_ALLOC_NOWAIT));
 1261                 if (p != NULL) {
 1262                         /*
 1263                          * Since the page does not belong to an object, its
 1264                          * listq is unused.
 1265                          */
 1266                         TAILQ_INSERT_TAIL(&alloctail, p, listq);
 1267                         npages--;
 1268                         continue;
 1269                 }
 1270                 /*
 1271                  * Page allocation failed, free intermediate pages and
 1272                  * exit.
 1273                  */
 1274                 TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
 1275                         vm_page_unwire(p, PQ_NONE);
 1276                         vm_page_free(p); 
 1277                 }
 1278                 return (NULL);
 1279         }
 1280         *flags = UMA_SLAB_PRIV;
 1281         zkva = keg->uk_kva +
 1282             atomic_fetchadd_long(&keg->uk_offset, round_page(bytes));
 1283         retkva = zkva;
 1284         TAILQ_FOREACH(p, &alloctail, listq) {
 1285                 pmap_qenter(zkva, &p, 1);
 1286                 zkva += PAGE_SIZE;
 1287         }
 1288 
 1289         return ((void *)retkva);
 1290 }
 1291 
 1292 /*
 1293  * Frees a number of pages to the system
 1294  *
 1295  * Arguments:
 1296  *      mem   A pointer to the memory to be freed
 1297  *      size  The size of the memory being freed
 1298  *      flags The original p->us_flags field
 1299  *
 1300  * Returns:
 1301  *      Nothing
 1302  */
 1303 static void
 1304 page_free(void *mem, vm_size_t size, uint8_t flags)
 1305 {
 1306 
 1307         if ((flags & UMA_SLAB_KERNEL) == 0)
 1308                 panic("UMA: page_free used with invalid flags %x", flags);
 1309 
 1310         kmem_free((vm_offset_t)mem, size);
 1311 }
 1312 
 1313 /*
 1314  * Frees pcpu zone allocations
 1315  *
 1316  * Arguments:
 1317  *      mem   A pointer to the memory to be freed
 1318  *      size  The size of the memory being freed
 1319  *      flags The original p->us_flags field
 1320  *
 1321  * Returns:
 1322  *      Nothing
 1323  */
 1324 static void
 1325 pcpu_page_free(void *mem, vm_size_t size, uint8_t flags)
 1326 {
 1327         vm_offset_t sva, curva;
 1328         vm_paddr_t paddr;
 1329         vm_page_t m;
 1330 
 1331         MPASS(size == (mp_maxid+1)*PAGE_SIZE);
 1332         sva = (vm_offset_t)mem;
 1333         for (curva = sva; curva < sva + size; curva += PAGE_SIZE) {
 1334                 paddr = pmap_kextract(curva);
 1335                 m = PHYS_TO_VM_PAGE(paddr);
 1336                 vm_page_unwire(m, PQ_NONE);
 1337                 vm_page_free(m);
 1338         }
 1339         pmap_qremove(sva, size >> PAGE_SHIFT);
 1340         kva_free(sva, size);
 1341 }
 1342 
 1343 
 1344 /*
 1345  * Zero fill initializer
 1346  *
 1347  * Arguments/Returns follow uma_init specifications
 1348  */
 1349 static int
 1350 zero_init(void *mem, int size, int flags)
 1351 {
 1352         bzero(mem, size);
 1353         return (0);
 1354 }
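/*
 * Editor's note: zero_init() is installed as the keg's init callback when
 * a zone is created with UMA_ZONE_ZINIT (see keg_ctor() below).  A sketch
 * (the zone name and struct are hypothetical):
 *
 *	zone = uma_zcreate("bar", sizeof(struct bar), NULL, NULL,
 *	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
 *
 * Items are then zeroed once, when their backing slab is created, instead
 * of on every allocation as M_ZERO would require.
 */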
 1355 
 1356 /*
  1357  * Finish creating a small uma keg.  This calculates ipers and the keg size.
  1358  *
  1359  * Arguments
  1360  *      keg  The keg we should initialize
 1361  *
 1362  * Returns
 1363  *      Nothing
 1364  */
 1365 static void
 1366 keg_small_init(uma_keg_t keg)
 1367 {
 1368         u_int rsize;
 1369         u_int memused;
 1370         u_int wastedspace;
 1371         u_int shsize;
 1372         u_int slabsize;
 1373 
 1374         if (keg->uk_flags & UMA_ZONE_PCPU) {
 1375                 u_int ncpus = (mp_maxid + 1) ? (mp_maxid + 1) : MAXCPU;
 1376 
 1377                 slabsize = UMA_PCPU_ALLOC_SIZE;
 1378                 keg->uk_ppera = ncpus;
 1379         } else {
 1380                 slabsize = UMA_SLAB_SIZE;
 1381                 keg->uk_ppera = 1;
 1382         }
 1383 
 1384         /*
 1385          * Calculate the size of each allocation (rsize) according to
  1386          * alignment.  If the requested size is smaller than the minimum for
  1387          * which we track per-item allocation bits, we round it up.
 1388          */
 1389         rsize = keg->uk_size;
 1390         if (rsize < slabsize / SLAB_SETSIZE)
 1391                 rsize = slabsize / SLAB_SETSIZE;
 1392         if (rsize & keg->uk_align)
 1393                 rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);
 1394         keg->uk_rsize = rsize;
 1395 
 1396         KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 ||
 1397             keg->uk_rsize < UMA_PCPU_ALLOC_SIZE,
 1398             ("%s: size %u too large", __func__, keg->uk_rsize));
 1399 
 1400         if (keg->uk_flags & UMA_ZONE_OFFPAGE)
 1401                 shsize = 0;
 1402         else 
 1403                 shsize = sizeof(struct uma_slab);
 1404 
 1405         if (rsize <= slabsize - shsize)
 1406                 keg->uk_ipers = (slabsize - shsize) / rsize;
 1407         else {
  1408                 /* Handle the special case of one item per slab, so that
  1409                  * the alignment requirement can be relaxed. */
 1410                 KASSERT(keg->uk_size <= slabsize - shsize,
 1411                     ("%s: size %u greater than slab", __func__, keg->uk_size));
 1412                 keg->uk_ipers = 1;
 1413         }
 1414         KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
 1415             ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
 1416 
 1417         memused = keg->uk_ipers * rsize + shsize;
 1418         wastedspace = slabsize - memused;
 1419 
 1420         /*
  1421          * We can't do OFFPAGE if we're internal or if we've been
  1422          * asked not to go to the VM for buckets.  If we were to do
  1423          * so, we might end up going to the VM for slabs, which we do
  1424          * not want when we're UMA_ZFLAG_CACHEONLY as a result of
  1425          * UMA_ZONE_VM, which clearly forbids it.
 1426          */
 1427         if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
 1428             (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
 1429                 return;
 1430 
 1431         /*
 1432          * See if using an OFFPAGE slab will limit our waste.  Only do
 1433          * this if it permits more items per-slab.
 1434          *
 1435          * XXX We could try growing slabsize to limit max waste as well.
 1436          * Historically this was not done because the VM could not
 1437          * efficiently handle contiguous allocations.
 1438          */
 1439         if ((wastedspace >= slabsize / UMA_MAX_WASTE) &&
 1440             (keg->uk_ipers < (slabsize / keg->uk_rsize))) {
 1441                 keg->uk_ipers = slabsize / keg->uk_rsize;
 1442                 KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
 1443                     ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
 1444                 CTR6(KTR_UMA, "UMA decided we need offpage slab headers for "
 1445                     "keg: %s(%p), calculated wastedspace = %d, "
 1446                     "maximum wasted space allowed = %d, "
 1447                     "calculated ipers = %d, "
 1448                     "new wasted space = %d\n", keg->uk_name, keg, wastedspace,
 1449                     slabsize / UMA_MAX_WASTE, keg->uk_ipers,
 1450                     slabsize - keg->uk_ipers * keg->uk_rsize);
 1451                 keg->uk_flags |= UMA_ZONE_OFFPAGE;
 1452         }
 1453 
 1454         if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
 1455             (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
 1456                 keg->uk_flags |= UMA_ZONE_HASH;
 1457 }
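/*
 * Editor's note: a worked example of the sizing above.  Assume a
 * hypothetical keg with 100-byte items, 8-byte alignment (uk_align ==
 * 7), a 4096-byte slab, and an in-line slab header of, say, 80 bytes:
 *
 *	rsize = (100 & ~7) + 8 = 104
 *	ipers = (4096 - 80) / 104 = 38
 *	wastedspace = 4096 - (38 * 104 + 80) = 64
 *
 * 64 bytes of waste is well under slabsize / UMA_MAX_WASTE here, so this
 * keg keeps its slab header in-line rather than going OFFPAGE.
 */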
 1458 
 1459 /*
  1460  * Finish creating a large (> UMA_SLAB_SIZE) uma keg.  Just give in and do
 1461  * OFFPAGE for now.  When I can allow for more dynamic slab sizes this will be
 1462  * more complicated.
 1463  *
 1464  * Arguments
 1465  *      keg  The keg we should initialize
 1466  *
 1467  * Returns
 1468  *      Nothing
 1469  */
 1470 static void
 1471 keg_large_init(uma_keg_t keg)
 1472 {
 1473         u_int shsize;
 1474 
 1475         KASSERT(keg != NULL, ("Keg is null in keg_large_init"));
 1476         KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0,
 1477             ("keg_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY keg"));
 1478         KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
 1479             ("%s: Cannot large-init a UMA_ZONE_PCPU keg", __func__));
 1480 
 1481         keg->uk_ppera = howmany(keg->uk_size, PAGE_SIZE);
 1482         keg->uk_ipers = 1;
 1483         keg->uk_rsize = keg->uk_size;
 1484 
 1485         /* Check whether we have enough space to not do OFFPAGE. */
 1486         if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0) {
 1487                 shsize = sizeof(struct uma_slab);
 1488                 if (shsize & UMA_ALIGN_PTR)
 1489                         shsize = (shsize & ~UMA_ALIGN_PTR) +
 1490                             (UMA_ALIGN_PTR + 1);
 1491 
 1492                 if (PAGE_SIZE * keg->uk_ppera - keg->uk_rsize < shsize) {
 1493                         /*
 1494                          * We can't do OFFPAGE if we're internal; in that
 1495                          * case we need an extra page per allocation to
 1496                          * contain the slab header.
 1497                          */
 1498                         if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) == 0)
 1499                                 keg->uk_flags |= UMA_ZONE_OFFPAGE;
 1500                         else
 1501                                 keg->uk_ppera++;
 1502                 }
 1503         }
 1504 
 1505         if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
 1506             (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
 1507                 keg->uk_flags |= UMA_ZONE_HASH;
 1508 }
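
      /*
       * Illustrative example for keg_large_init() above, with assumed
       * 4096-byte pages: a keg with uk_size 9000 gets uk_ppera =
       * howmany(9000, 4096) = 3 pages, uk_ipers = 1 and uk_rsize = 9000.
       * The 12288-byte allocation leaves 3288 bytes after the item, which
       * is enough for an inline slab header, so OFFPAGE is not needed.
       */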
 1509 
 1510 static void
 1511 keg_cachespread_init(uma_keg_t keg)
 1512 {
 1513         int alignsize;
 1514         int trailer;
 1515         int pages;
 1516         int rsize;
 1517 
 1518         KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
 1519             ("%s: Cannot cachespread-init a UMA_ZONE_PCPU keg", __func__));
 1520 
 1521         alignsize = keg->uk_align + 1;
 1522         rsize = keg->uk_size;
 1523         /*
 1524          * We want one item to start on every align boundary in a page.  To
 1525          * do this we will span pages.  We will also extend the item by the
 1526          * size of align if it is an even multiple of align, since otherwise
 1527          * every item would fall on the same boundary.
 1528          */
 1529         if (rsize & keg->uk_align)
 1530                 rsize = (rsize & ~keg->uk_align) + alignsize;
 1531         if ((rsize & alignsize) == 0)
 1532                 rsize += alignsize;
 1533         trailer = rsize - keg->uk_size;
 1534         pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE;
 1535         pages = MIN(pages, (128 * 1024) / PAGE_SIZE);
 1536         keg->uk_rsize = rsize;
 1537         keg->uk_ppera = pages;
 1538         keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
 1539         keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
 1540         KASSERT(keg->uk_ipers <= SLAB_SETSIZE,
 1541             ("%s: keg->uk_ipers too high (%d), increase max_ipers", __func__,
 1542             keg->uk_ipers));
 1543 }
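
      /*
       * Illustrative example for keg_cachespread_init() above, with
       * assumed 4096-byte pages: a 128-byte item aligned to 64 bytes is
       * an even multiple of the alignment, so rsize becomes 128 + 64 =
       * 192.  Then pages = (192 * 64) / 4096 = 3 and uk_ipers =
       * (3 * 4096 + 64) / 192 = 64; successive items start at offsets
       * 0, 192, 384, ... and cycle through every 64-byte boundary in a
       * page.
       */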
 1544 
 1545 /*
 1546  * Keg header ctor.  This initializes all fields, locks, etc.  And inserts
 1547  * the keg onto the global keg list.
 1548  *
 1549  * Arguments/Returns follow uma_ctor specifications
 1550  *      udata  Actually uma_kctor_args
 1551  */
 1552 static int
 1553 keg_ctor(void *mem, int size, void *udata, int flags)
 1554 {
 1555         struct uma_kctor_args *arg = udata;
 1556         uma_keg_t keg = mem;
 1557         uma_zone_t zone;
 1558 
 1559         bzero(keg, size);
 1560         keg->uk_size = arg->size;
 1561         keg->uk_init = arg->uminit;
 1562         keg->uk_fini = arg->fini;
 1563         keg->uk_align = arg->align;
 1564         keg->uk_free = 0;
 1565         keg->uk_reserve = 0;
 1566         keg->uk_pages = 0;
 1567         keg->uk_flags = arg->flags;
 1568         keg->uk_slabzone = NULL;
 1569 
 1570         /*
 1571          * We use a global round-robin policy by default.  Zones with
 1572          * UMA_ZONE_NUMA set will use first-touch instead, in which case the
 1573          * iterator is never run.
 1574          */
 1575         keg->uk_dr.dr_policy = DOMAINSET_RR();
 1576         keg->uk_dr.dr_iter = 0;
 1577 
 1578         /*
 1579          * The master zone is passed to us at keg-creation time.
 1580          */
 1581         zone = arg->zone;
 1582         keg->uk_name = zone->uz_name;
 1583 
 1584         if (arg->flags & UMA_ZONE_VM)
 1585                 keg->uk_flags |= UMA_ZFLAG_CACHEONLY;
 1586 
 1587         if (arg->flags & UMA_ZONE_ZINIT)
 1588                 keg->uk_init = zero_init;
 1589 
 1590         if (arg->flags & UMA_ZONE_MALLOC)
 1591                 keg->uk_flags |= UMA_ZONE_VTOSLAB;
 1592 
 1593         if (arg->flags & UMA_ZONE_PCPU)
 1594 #ifdef SMP
 1595                 keg->uk_flags |= UMA_ZONE_OFFPAGE;
 1596 #else
 1597                 keg->uk_flags &= ~UMA_ZONE_PCPU;
 1598 #endif
 1599 
 1600         if (keg->uk_flags & UMA_ZONE_CACHESPREAD) {
 1601                 keg_cachespread_init(keg);
 1602         } else {
 1603                 if (keg->uk_size > UMA_SLAB_SPACE)
 1604                         keg_large_init(keg);
 1605                 else
 1606                         keg_small_init(keg);
 1607         }
 1608 
 1609         if (keg->uk_flags & UMA_ZONE_OFFPAGE)
 1610                 keg->uk_slabzone = slabzone;
 1611 
 1612         /*
 1613          * If we haven't booted yet we need allocations to go through the
 1614          * startup cache until the vm is ready.
 1615          */
 1616         if (booted < BOOT_PAGEALLOC)
 1617                 keg->uk_allocf = startup_alloc;
 1618 #ifdef UMA_MD_SMALL_ALLOC
 1619         else if (keg->uk_ppera == 1)
 1620                 keg->uk_allocf = uma_small_alloc;
 1621 #endif
 1622         else if (keg->uk_flags & UMA_ZONE_PCPU)
 1623                 keg->uk_allocf = pcpu_page_alloc;
 1624         else
 1625                 keg->uk_allocf = page_alloc;
 1626 #ifdef UMA_MD_SMALL_ALLOC
 1627         if (keg->uk_ppera == 1)
 1628                 keg->uk_freef = uma_small_free;
 1629         else
 1630 #endif
 1631         if (keg->uk_flags & UMA_ZONE_PCPU)
 1632                 keg->uk_freef = pcpu_page_free;
 1633         else
 1634                 keg->uk_freef = page_free;
 1635 
 1636         /*
 1637          * Initialize keg's lock
 1638          */
 1639         KEG_LOCK_INIT(keg, (arg->flags & UMA_ZONE_MTXCLASS));
 1640 
 1641         /*
 1642          * If we're putting the slab header in the actual page we need to
 1643          * figure out where in each page it goes.  This calculates a right
 1644          * justified offset into the memory on an ALIGN_PTR boundary.
 1645          */
 1646         if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
 1647                 u_int totsize;
 1648 
 1649                 /* Size of the slab struct and free list */
 1650                 totsize = sizeof(struct uma_slab);
 1651 
 1652                 if (totsize & UMA_ALIGN_PTR)
 1653                         totsize = (totsize & ~UMA_ALIGN_PTR) +
 1654                             (UMA_ALIGN_PTR + 1);
 1655                 keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - totsize;
 1656 
 1657                 /*
 1658                  * The only way the following is possible is if our
 1659                  * UMA_ALIGN_PTR adjustments have made us bigger than
 1660                  * UMA_SLAB_SIZE.  We haven't verified that this is
 1661                  * mathematically impossible in all cases, so we make
 1662                  * sure here anyway.
 1663                  */
 1664                 totsize = keg->uk_pgoff + sizeof(struct uma_slab);
 1665                 if (totsize > PAGE_SIZE * keg->uk_ppera) {
 1666                         printf("zone %s ipers %d rsize %d size %d\n",
 1667                             zone->uz_name, keg->uk_ipers, keg->uk_rsize,
 1668                             keg->uk_size);
 1669                         panic("UMA slab won't fit.");
 1670                 }
 1671         }
 1672 
 1673         if (keg->uk_flags & UMA_ZONE_HASH)
 1674                 hash_alloc(&keg->uk_hash);
 1675 
 1676         CTR5(KTR_UMA, "keg_ctor %p zone %s(%p) out %d free %d\n",
 1677             keg, zone->uz_name, zone,
 1678             (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free,
 1679             keg->uk_free);
 1680 
 1681         LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
 1682 
 1683         rw_wlock(&uma_rwlock);
 1684         LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
 1685         rw_wunlock(&uma_rwlock);
 1686         return (0);
 1687 }
 1688 
 1689 /*
 1690  * Zone header ctor.  This initializes all fields, locks, etc.
 1691  *
 1692  * Arguments/Returns follow uma_ctor specifications
 1693  *      udata  Actually uma_zctor_args
 1694  */
 1695 static int
 1696 zone_ctor(void *mem, int size, void *udata, int flags)
 1697 {
 1698         struct uma_zctor_args *arg = udata;
 1699         uma_zone_t zone = mem;
 1700         uma_zone_t z;
 1701         uma_keg_t keg;
 1702 
 1703         bzero(zone, size);
 1704         zone->uz_name = arg->name;
 1705         zone->uz_ctor = arg->ctor;
 1706         zone->uz_dtor = arg->dtor;
 1707         zone->uz_slab = zone_fetch_slab;
 1708         zone->uz_init = NULL;
 1709         zone->uz_fini = NULL;
 1710         zone->uz_allocs = 0;
 1711         zone->uz_frees = 0;
 1712         zone->uz_fails = 0;
 1713         zone->uz_sleeps = 0;
 1714         zone->uz_count = 0;
 1715         zone->uz_count_min = 0;
 1716         zone->uz_flags = 0;
 1717         zone->uz_warning = NULL;
 1718         /* The domain structures follow the cpu structures. */
 1719         zone->uz_domain = (struct uma_zone_domain *)&zone->uz_cpu[mp_ncpus];
 1720         timevalclear(&zone->uz_ratecheck);
 1721         keg = arg->keg;
 1722 
 1723         ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS));
 1724 
 1725         /*
 1726          * This is a pure cache zone, no kegs.
 1727          */
 1728         if (arg->import) {
 1729                 if (arg->flags & UMA_ZONE_VM)
 1730                         arg->flags |= UMA_ZFLAG_CACHEONLY;
 1731                 zone->uz_flags = arg->flags;
 1732                 zone->uz_size = arg->size;
 1733                 zone->uz_import = arg->import;
 1734                 zone->uz_release = arg->release;
 1735                 zone->uz_arg = arg->arg;
 1736                 zone->uz_lockptr = &zone->uz_lock;
 1737                 rw_wlock(&uma_rwlock);
 1738                 LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link);
 1739                 rw_wunlock(&uma_rwlock);
 1740                 goto out;
 1741         }
 1742 
 1743         /*
 1744          * Use the regular zone/keg/slab allocator.
 1745          */
 1746         zone->uz_import = (uma_import)zone_import;
 1747         zone->uz_release = (uma_release)zone_release;
 1748         zone->uz_arg = zone; 
 1749 
 1750         if (arg->flags & UMA_ZONE_SECONDARY) {
 1751                 KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
 1752                 zone->uz_init = arg->uminit;
 1753                 zone->uz_fini = arg->fini;
 1754                 zone->uz_lockptr = &keg->uk_lock;
 1755                 zone->uz_flags |= UMA_ZONE_SECONDARY;
 1756                 rw_wlock(&uma_rwlock);
 1757                 ZONE_LOCK(zone);
 1758                 LIST_FOREACH(z, &keg->uk_zones, uz_link) {
 1759                         if (LIST_NEXT(z, uz_link) == NULL) {
 1760                                 LIST_INSERT_AFTER(z, zone, uz_link);
 1761                                 break;
 1762                         }
 1763                 }
 1764                 ZONE_UNLOCK(zone);
 1765                 rw_wunlock(&uma_rwlock);
 1766         } else if (keg == NULL) {
 1767                 if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
 1768                     arg->align, arg->flags)) == NULL)
 1769                         return (ENOMEM);
 1770         } else {
 1771                 struct uma_kctor_args karg;
 1772                 int error;
 1773 
 1774                 /* We should only be here from uma_startup() */
 1775                 karg.size = arg->size;
 1776                 karg.uminit = arg->uminit;
 1777                 karg.fini = arg->fini;
 1778                 karg.align = arg->align;
 1779                 karg.flags = arg->flags;
 1780                 karg.zone = zone;
 1781                 error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
 1782                     flags);
 1783                 if (error)
 1784                         return (error);
 1785         }
 1786 
 1787         /*
 1788          * Link in the first keg.
 1789          */
 1790         zone->uz_klink.kl_keg = keg;
 1791         LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link);
 1792         zone->uz_lockptr = &keg->uk_lock;
 1793         zone->uz_size = keg->uk_size;
 1794         zone->uz_flags |= (keg->uk_flags &
 1795             (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));
 1796 
 1797         /*
 1798          * Some internal zones don't have room allocated for the per cpu
 1799          * caches.  If we're internal, bail out here.
 1800          */
 1801         if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
 1802                 KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
 1803                     ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
 1804                 return (0);
 1805         }
 1806 
 1807 out:
 1808         KASSERT((arg->flags & (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET)) !=
 1809             (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET),
 1810             ("Invalid zone flag combination"));
 1811         if ((arg->flags & UMA_ZONE_MAXBUCKET) != 0)
 1812                 zone->uz_count = BUCKET_MAX;
 1813         else if ((arg->flags & UMA_ZONE_NOBUCKET) != 0)
 1814                 zone->uz_count = 0;
 1815         else
 1816                 zone->uz_count = bucket_select(zone->uz_size);
 1817         zone->uz_count_min = zone->uz_count;
 1818 
 1819         return (0);
 1820 }
 1821 
 1822 /*
 1823  * Keg header dtor.  This frees all data, destroys locks, frees the hash
 1824  * table and removes the keg from the global list.
 1825  *
 1826  * Arguments/Returns follow uma_dtor specifications
 1827  *      udata  unused
 1828  */
 1829 static void
 1830 keg_dtor(void *arg, int size, void *udata)
 1831 {
 1832         uma_keg_t keg;
 1833 
 1834         keg = (uma_keg_t)arg;
 1835         KEG_LOCK(keg);
 1836         if (keg->uk_free != 0) {
 1837                 printf("Freed UMA keg (%s) was not empty (%d items).  "
 1838                     "Lost %d pages of memory.\n",
 1839                     keg->uk_name ? keg->uk_name : "",
 1840                     keg->uk_free, keg->uk_pages);
 1841         }
 1842         KEG_UNLOCK(keg);
 1843 
 1844         hash_free(&keg->uk_hash);
 1845 
 1846         KEG_LOCK_FINI(keg);
 1847 }
 1848 
 1849 /*
 1850  * Zone header dtor.
 1851  *
 1852  * Arguments/Returns follow uma_dtor specifications
 1853  *      udata  unused
 1854  */
 1855 static void
 1856 zone_dtor(void *arg, int size, void *udata)
 1857 {
 1858         uma_klink_t klink;
 1859         uma_zone_t zone;
 1860         uma_keg_t keg;
 1861 
 1862         zone = (uma_zone_t)arg;
 1863         keg = zone_first_keg(zone);
 1864 
 1865         if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
 1866                 cache_drain(zone);
 1867 
 1868         rw_wlock(&uma_rwlock);
 1869         LIST_REMOVE(zone, uz_link);
 1870         rw_wunlock(&uma_rwlock);
 1871         /*
 1872          * XXX there are some races here where the
 1873          * zone can be drained but the zone lock
 1874          * released and then refilled before we
 1875          * remove it... we don't care for now.
 1876          */
 1877         zone_drain_wait(zone, M_WAITOK);
 1878         /*
 1879          * Unlink all of our kegs.
 1880          */
 1881         while ((klink = LIST_FIRST(&zone->uz_kegs)) != NULL) {
 1882                 klink->kl_keg = NULL;
 1883                 LIST_REMOVE(klink, kl_link);
 1884                 if (klink == &zone->uz_klink)
 1885                         continue;
 1886                 free(klink, M_TEMP);
 1887         }
 1888         /*
 1889          * We only destroy kegs from non-secondary zones.
 1890          */
 1891         if (keg != NULL && (zone->uz_flags & UMA_ZONE_SECONDARY) == 0)  {
 1892                 rw_wlock(&uma_rwlock);
 1893                 LIST_REMOVE(keg, uk_link);
 1894                 rw_wunlock(&uma_rwlock);
 1895                 zone_free_item(kegs, keg, NULL, SKIP_NONE);
 1896         }
 1897         ZONE_LOCK_FINI(zone);
 1898 }
 1899 
 1900 /*
 1901  * Traverses every zone in the system and calls a callback
 1902  *
 1903  * Arguments:
 1904  *      zfunc  A pointer to a function which accepts a zone
 1905  *              as an argument.
 1906  *
 1907  * Returns:
 1908  *      Nothing
 1909  */
 1910 static void
 1911 zone_foreach(void (*zfunc)(uma_zone_t))
 1912 {
 1913         uma_keg_t keg;
 1914         uma_zone_t zone;
 1915 
 1916         rw_rlock(&uma_rwlock);
 1917         LIST_FOREACH(keg, &uma_kegs, uk_link) {
 1918                 LIST_FOREACH(zone, &keg->uk_zones, uz_link)
 1919                         zfunc(zone);
 1920         }
 1921         rw_runlock(&uma_rwlock);
 1922 }
 1923 
 1924 /*
 1925  * Count how many pages we need to bootstrap.  The VM supplies
 1926  * its need for early zones in the argument; we add our own zones,
 1927  * which consist of the UMA Slab, UMA Hash and 9 Bucket zones.  The
 1928  * zone of zones and zone of kegs are accounted for separately.
 1929  */
 1930 #define UMA_BOOT_ZONES  11
 1931 /* Zone of zones and zone of kegs have arbitrary alignment. */
 1932 #define UMA_BOOT_ALIGN  32
 1933 static int zsize, ksize;
 1934 int
 1935 uma_startup_count(int vm_zones)
 1936 {
 1937         int zones, pages;
 1938 
 1939         ksize = sizeof(struct uma_keg) +
 1940             (sizeof(struct uma_domain) * vm_ndomains);
 1941         zsize = sizeof(struct uma_zone) +
 1942             (sizeof(struct uma_cache) * (mp_maxid + 1)) +
 1943             (sizeof(struct uma_zone_domain) * vm_ndomains);
 1944 
 1945         /*
 1946          * Memory for the zone of kegs and its keg,
 1947          * and for zone of zones.
 1948          */
 1949         pages = howmany(roundup(zsize, CACHE_LINE_SIZE) * 2 +
 1950             roundup(ksize, CACHE_LINE_SIZE), PAGE_SIZE);
 1951 
 1952 #ifdef  UMA_MD_SMALL_ALLOC
 1953         zones = UMA_BOOT_ZONES;
 1954 #else
 1955         zones = UMA_BOOT_ZONES + vm_zones;
 1956         vm_zones = 0;
 1957 #endif
 1958 
 1959         /* Memory for the rest of startup zones, UMA and VM, ... */
 1960         if (zsize > UMA_SLAB_SPACE)
 1961                 pages += (zones + vm_zones) *
 1962                     howmany(roundup2(zsize, UMA_BOOT_ALIGN), UMA_SLAB_SIZE);
 1963         else if (roundup2(zsize, UMA_BOOT_ALIGN) > UMA_SLAB_SPACE)
 1964                 pages += zones;
 1965         else
 1966                 pages += howmany(zones,
 1967                     UMA_SLAB_SPACE / roundup2(zsize, UMA_BOOT_ALIGN));
 1968 
 1969         /* ... and their kegs. Note that zone of zones allocates a keg! */
 1970         pages += howmany(zones + 1,
 1971             UMA_SLAB_SPACE / roundup2(ksize, UMA_BOOT_ALIGN));
 1972 
 1973         /*
 1974          * Most startup zones are not going to be offpage; that is
 1975          * why we use UMA_SLAB_SPACE instead of UMA_SLAB_SIZE in all
 1976          * calculations.  Some large bucket zones will be offpage, and
 1977          * thus will allocate hashes.  We take the conservative approach
 1978          * and assume that all zones may allocate a hash.  This may give
 1979          * us some positive inaccuracy, usually an extra single page.
 1980          */
 1981         pages += howmany(zones, UMA_SLAB_SPACE /
 1982             (sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT));
 1983 
 1984         return (pages);
 1985 }
 1986 
 1987 void
 1988 uma_startup(void *mem, int npages)
 1989 {
 1990         struct uma_zctor_args args;
 1991         uma_keg_t masterkeg;
 1992         uintptr_t m;
 1993 
 1994 #ifdef DIAGNOSTIC
 1995         printf("Entering %s with %d boot pages configured\n", __func__, npages);
 1996 #endif
 1997 
 1998         rw_init(&uma_rwlock, "UMA lock");
 1999 
 2000         /* Use bootpages memory for the zone of zones and zone of kegs. */
 2001         m = (uintptr_t)mem;
 2002         zones = (uma_zone_t)m;
 2003         m += roundup(zsize, CACHE_LINE_SIZE);
 2004         kegs = (uma_zone_t)m;
 2005         m += roundup(zsize, CACHE_LINE_SIZE);
 2006         masterkeg = (uma_keg_t)m;
 2007         m += roundup(ksize, CACHE_LINE_SIZE);
 2008         m = roundup(m, PAGE_SIZE);
 2009         npages -= (m - (uintptr_t)mem) / PAGE_SIZE;
 2010         mem = (void *)m;
 2011 
 2012         /* "manually" create the initial zone */
 2013         memset(&args, 0, sizeof(args));
 2014         args.name = "UMA Kegs";
 2015         args.size = ksize;
 2016         args.ctor = keg_ctor;
 2017         args.dtor = keg_dtor;
 2018         args.uminit = zero_init;
 2019         args.fini = NULL;
 2020         args.keg = masterkeg;
 2021         args.align = UMA_BOOT_ALIGN - 1;
 2022         args.flags = UMA_ZFLAG_INTERNAL;
 2023         zone_ctor(kegs, zsize, &args, M_WAITOK);
 2024 
 2025         bootmem = mem;
 2026         boot_pages = npages;
 2027 
 2028         args.name = "UMA Zones";
 2029         args.size = zsize;
 2030         args.ctor = zone_ctor;
 2031         args.dtor = zone_dtor;
 2032         args.uminit = zero_init;
 2033         args.fini = NULL;
 2034         args.keg = NULL;
 2035         args.align = UMA_BOOT_ALIGN - 1;
 2036         args.flags = UMA_ZFLAG_INTERNAL;
 2037         zone_ctor(zones, zsize, &args, M_WAITOK);
 2038 
 2039         /* Now make a zone for slab headers */
 2040         slabzone = uma_zcreate("UMA Slabs",
 2041                                 sizeof(struct uma_slab),
 2042                                 NULL, NULL, NULL, NULL,
 2043                                 UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
 2044 
 2045         hashzone = uma_zcreate("UMA Hash",
 2046             sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
 2047             NULL, NULL, NULL, NULL,
 2048             UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
 2049 
 2050         bucket_init();
 2051 
 2052         booted = BOOT_STRAPPED;
 2053 }
 2054 
 2055 void
 2056 uma_startup1(void)
 2057 {
 2058 
 2059 #ifdef DIAGNOSTIC
 2060         printf("Entering %s with %d boot pages left\n", __func__, boot_pages);
 2061 #endif
 2062         booted = BOOT_PAGEALLOC;
 2063 }
 2064 
 2065 void
 2066 uma_startup2(void)
 2067 {
 2068 
 2069 #ifdef DIAGNOSTIC
 2070         printf("Entering %s with %d boot pages left\n", __func__, boot_pages);
 2071 #endif
 2072         booted = BOOT_BUCKETS;
 2073         sx_init(&uma_drain_lock, "umadrain");
 2074         bucket_enable();
 2075 }
 2076 
 2077 /*
 2078  * Initialize our callout handle.
 2079  */
 2081 static void
 2082 uma_startup3(void)
 2083 {
 2084 
 2085 #ifdef INVARIANTS
 2086         TUNABLE_INT_FETCH("vm.debug.divisor", &dbg_divisor);
 2087         uma_dbg_cnt = counter_u64_alloc(M_WAITOK);
 2088         uma_skip_cnt = counter_u64_alloc(M_WAITOK);
 2089 #endif
 2090         callout_init(&uma_callout, 1);
 2091         callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
 2092         booted = BOOT_RUNNING;
 2093 }
 2094 
 2095 static uma_keg_t
 2096 uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
 2097                 int align, uint32_t flags)
 2098 {
 2099         struct uma_kctor_args args;
 2100 
 2101         args.size = size;
 2102         args.uminit = uminit;
 2103         args.fini = fini;
 2104         args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
 2105         args.flags = flags;
 2106         args.zone = zone;
 2107         return (zone_alloc_item(kegs, &args, UMA_ANYDOMAIN, M_WAITOK));
 2108 }
 2109 
 2110 /* Public functions */
 2111 /* See uma.h */
 2112 void
 2113 uma_set_align(int align)
 2114 {
 2115 
 2116         if (align != UMA_ALIGN_CACHE)
 2117                 uma_align_cache = align;
 2118 }
 2119 
 2120 /* See uma.h */
 2121 uma_zone_t
 2122 uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
 2123                 uma_init uminit, uma_fini fini, int align, uint32_t flags)
 2124 
 2125 {
 2126         struct uma_zctor_args args;
 2127         uma_zone_t res;
 2128         bool locked;
 2129 
 2130         KASSERT(powerof2(align + 1), ("invalid zone alignment %d for \"%s\"",
 2131             align, name));
 2132 
 2133         /* This stuff is essential for the zone ctor */
 2134         memset(&args, 0, sizeof(args));
 2135         args.name = name;
 2136         args.size = size;
 2137         args.ctor = ctor;
 2138         args.dtor = dtor;
 2139         args.uminit = uminit;
 2140         args.fini = fini;
 2141 #ifdef  INVARIANTS
 2142         /*
 2143          * If a zone is being created with an empty constructor and
 2144          * destructor, supply UMA's trash constructor and destructor,
 2145          * which check for memory use after free.
 2146          */
 2147         if ((!(flags & (UMA_ZONE_ZINIT | UMA_ZONE_NOFREE))) &&
 2148             ctor == NULL && dtor == NULL && uminit == NULL && fini == NULL) {
 2149                 args.ctor = trash_ctor;
 2150                 args.dtor = trash_dtor;
 2151                 args.uminit = trash_init;
 2152                 args.fini = trash_fini;
 2153         }
 2154 #endif
 2155         args.align = align;
 2156         args.flags = flags;
 2157         args.keg = NULL;
 2158 
 2159         if (booted < BOOT_BUCKETS) {
 2160                 locked = false;
 2161         } else {
 2162                 sx_slock(&uma_drain_lock);
 2163                 locked = true;
 2164         }
 2165         res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK);
 2166         if (locked)
 2167                 sx_sunlock(&uma_drain_lock);
 2168         return (res);
 2169 }
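
      /*
       * Example usage, a minimal sketch; "struct foo" and the variable
       * names are hypothetical, not part of this file:
       *
       *        foo_zone = uma_zcreate("foo", sizeof(struct foo), NULL, NULL,
       *            NULL, NULL, UMA_ALIGN_PTR, 0);
       *        p = uma_zalloc(foo_zone, M_WAITOK);
       *        ...
       *        uma_zfree(foo_zone, p);
       *        uma_zdestroy(foo_zone);
       */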
 2170 
 2171 /* See uma.h */
 2172 uma_zone_t
 2173 uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
 2174                     uma_init zinit, uma_fini zfini, uma_zone_t master)
 2175 {
 2176         struct uma_zctor_args args;
 2177         uma_keg_t keg;
 2178         uma_zone_t res;
 2179         bool locked;
 2180 
 2181         keg = zone_first_keg(master);
 2182         memset(&args, 0, sizeof(args));
 2183         args.name = name;
 2184         args.size = keg->uk_size;
 2185         args.ctor = ctor;
 2186         args.dtor = dtor;
 2187         args.uminit = zinit;
 2188         args.fini = zfini;
 2189         args.align = keg->uk_align;
 2190         args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
 2191         args.keg = keg;
 2192 
 2193         if (booted < BOOT_BUCKETS) {
 2194                 locked = false;
 2195         } else {
 2196                 sx_slock(&uma_drain_lock);
 2197                 locked = true;
 2198         }
 2199         /* XXX Attaches only one keg of potentially many. */
 2200         res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK);
 2201         if (locked)
 2202                 sx_sunlock(&uma_drain_lock);
 2203         return (res);
 2204 }
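
      /*
       * A secondary zone shares its master's keg (and thus its slabs)
       * while layering its own ctor/dtor on top.  Minimal sketch with
       * hypothetical names:
       *
       *        pkt_zone = uma_zsecond_create("foo packets", foo_pkt_ctor,
       *            foo_pkt_dtor, NULL, NULL, foo_zone);
       */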
 2205 
 2206 /* See uma.h */
 2207 uma_zone_t
 2208 uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
 2209                     uma_init zinit, uma_fini zfini, uma_import zimport,
 2210                     uma_release zrelease, void *arg, int flags)
 2211 {
 2212         struct uma_zctor_args args;
 2213 
 2214         memset(&args, 0, sizeof(args));
 2215         args.name = name;
 2216         args.size = size;
 2217         args.ctor = ctor;
 2218         args.dtor = dtor;
 2219         args.uminit = zinit;
 2220         args.fini = zfini;
 2221         args.import = zimport;
 2222         args.release = zrelease;
 2223         args.arg = arg;
 2224         args.align = 0;
 2225         args.flags = flags;
 2226 
 2227         return (zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK));
 2228 }
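
      /*
       * A cache zone has no keg; items come from the caller's import and
       * release callbacks rather than from slabs.  A minimal sketch of
       * the expected signatures (names are hypothetical):
       *
       *        static int
       *        foo_import(void *arg, void **store, int cnt, int domain,
       *            int flags);
       *
       *        static void
       *        foo_release(void *arg, void **store, int cnt);
       *
       * foo_import fills store[] with up to cnt items and returns the
       * number produced; foo_release hands cnt items in store[] back to
       * the backend.
       */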
 2229 
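      /*
       * Lock two zones in a consistent (address) order so that threads
       * locking the same pair can never deadlock; MTX_DUPOK permits
       * holding two locks of the same class at once.
       */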
 2230 static void
 2231 zone_lock_pair(uma_zone_t a, uma_zone_t b)
 2232 {
 2233         if (a < b) {
 2234                 ZONE_LOCK(a);
 2235                 mtx_lock_flags(b->uz_lockptr, MTX_DUPOK);
 2236         } else {
 2237                 ZONE_LOCK(b);
 2238                 mtx_lock_flags(a->uz_lockptr, MTX_DUPOK);
 2239         }
 2240 }
 2241 
 2242 static void
 2243 zone_unlock_pair(uma_zone_t a, uma_zone_t b)
 2244 {
 2245 
 2246         ZONE_UNLOCK(a);
 2247         ZONE_UNLOCK(b);
 2248 }
 2249 
 2250 int
 2251 uma_zsecond_add(uma_zone_t zone, uma_zone_t master)
 2252 {
 2253         uma_klink_t klink;
 2254         uma_klink_t kl;
 2255         int error;
 2256 
 2257         error = 0;
 2258         klink = malloc(sizeof(*klink), M_TEMP, M_WAITOK | M_ZERO);
 2259 
 2260         zone_lock_pair(zone, master);
 2261         /*
 2262          * zone must use vtoslab() to resolve objects and must already be
 2263          * a secondary zone.
 2264          */
 2265         if ((zone->uz_flags & (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY))
 2266             != (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) {
 2267                 error = EINVAL;
 2268                 goto out;
 2269         }
 2270         /*
 2271          * The new master must also use vtoslab().
 2272          */
 2273         if ((master->uz_flags & UMA_ZONE_VTOSLAB) != UMA_ZONE_VTOSLAB) {
 2274                 error = EINVAL;
 2275                 goto out;
 2276         }
 2277 
 2278         /*
 2279          * The underlying object must be the same size.  rsize
 2280          * may be different.
 2281          */
 2282         if (master->uz_size != zone->uz_size) {
 2283                 error = E2BIG;
 2284                 goto out;
 2285         }
 2286         /*
 2287          * Put it at the end of the list.
 2288          */
 2289         klink->kl_keg = zone_first_keg(master);
 2290         LIST_FOREACH(kl, &zone->uz_kegs, kl_link) {
 2291                 if (LIST_NEXT(kl, kl_link) == NULL) {
 2292                         LIST_INSERT_AFTER(kl, klink, kl_link);
 2293                         break;
 2294                 }
 2295         }
 2296         klink = NULL;
 2297         zone->uz_flags |= UMA_ZFLAG_MULTI;
 2298         zone->uz_slab = zone_fetch_slab_multi;
 2299 
 2300 out:
 2301         zone_unlock_pair(zone, master);
 2302         if (klink != NULL)
 2303                 free(klink, M_TEMP);
 2304 
 2305         return (error);
 2306 }
 2307
 2309 /* See uma.h */
 2310 void
 2311 uma_zdestroy(uma_zone_t zone)
 2312 {
 2313 
 2314         sx_slock(&uma_drain_lock);
 2315         zone_free_item(zones, zone, NULL, SKIP_NONE);
 2316         sx_sunlock(&uma_drain_lock);
 2317 }
 2318 
 2319 void
 2320 uma_zwait(uma_zone_t zone)
 2321 {
 2322         void *item;
 2323 
 2324         item = uma_zalloc_arg(zone, NULL, M_WAITOK);
 2325         uma_zfree(zone, item);
 2326 }
 2327 
 2328 void *
 2329 uma_zalloc_pcpu_arg(uma_zone_t zone, void *udata, int flags)
 2330 {
 2331         void *item;
 2332 #ifdef SMP
 2333         int i;
 2334 
 2335         MPASS(zone->uz_flags & UMA_ZONE_PCPU);
 2336 #endif
 2337         item = uma_zalloc_arg(zone, udata, flags & ~M_ZERO);
 2338         if (item != NULL && (flags & M_ZERO)) {
 2339 #ifdef SMP
 2340                 for (i = 0; i <= mp_maxid; i++)
 2341                         bzero(zpcpu_get_cpu(item, i), zone->uz_size);
 2342 #else
 2343                 bzero(item, zone->uz_size);
 2344 #endif
 2345         }
 2346         return (item);
 2347 }
 2348 
 2349 /*
 2350  * A stub while both regular and pcpu cases are identical.
 2351  */
 2352 void
 2353 uma_zfree_pcpu_arg(uma_zone_t zone, void *item, void *udata)
 2354 {
 2355 
 2356 #ifdef SMP
 2357         MPASS(zone->uz_flags & UMA_ZONE_PCPU);
 2358 #endif
 2359         uma_zfree_arg(zone, item, udata);
 2360 }
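
      /*
       * Per-CPU zone usage, a minimal sketch with hypothetical names;
       * uma_zalloc_pcpu() and zpcpu_get() are assumed to be the uma.h
       * and pcpu.h accessors for such zones:
       *
       *        counters = uma_zalloc_pcpu(pcpu_zone, M_WAITOK | M_ZERO);
       *        ...
       *        c = zpcpu_get(counters);        -- this CPU's copy
       */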
 2361 
 2362 /* See uma.h */
 2363 void *
 2364 uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
 2365 {
 2366         uma_zone_domain_t zdom;
 2367         uma_bucket_t bucket;
 2368         uma_cache_t cache;
 2369         void *item;
 2370         int cpu, domain, lockfail;
 2371 #ifdef INVARIANTS
 2372         bool skipdbg;
 2373 #endif
 2374 
 2375         /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
 2376         random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
 2377 
 2378         /* This is the fast path allocation */
 2379         CTR4(KTR_UMA, "uma_zalloc_arg thread %x zone %s(%p) flags %d",
 2380             curthread, zone->uz_name, zone, flags);
 2381 
 2382         if (flags & M_WAITOK) {
 2383                 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
 2384                     "uma_zalloc_arg: zone \"%s\"", zone->uz_name);
 2385         }
 2386         KASSERT((flags & M_EXEC) == 0, ("uma_zalloc_arg: called with M_EXEC"));
 2387         KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
 2388             ("uma_zalloc_arg: called with spinlock or critical section held"));
 2389         if (zone->uz_flags & UMA_ZONE_PCPU)
 2390                 KASSERT((flags & M_ZERO) == 0, ("allocating from a pcpu zone "
 2391                     "with M_ZERO passed"));
 2392 
 2393 #ifdef DEBUG_MEMGUARD
 2394         if (memguard_cmp_zone(zone)) {
 2395                 item = memguard_alloc(zone->uz_size, flags);
 2396                 if (item != NULL) {
 2397                         if (zone->uz_init != NULL &&
 2398                             zone->uz_init(item, zone->uz_size, flags) != 0)
 2399                                 return (NULL);
 2400                         if (zone->uz_ctor != NULL &&
 2401                             zone->uz_ctor(item, zone->uz_size, udata,
 2402                             flags) != 0) {
 2403                                 zone->uz_fini(item, zone->uz_size);
 2404                                 return (NULL);
 2405                         }
 2406                         return (item);
 2407                 }
 2408                 /* This is unfortunate but should not be fatal. */
 2409         }
 2410 #endif
 2411         /*
 2412          * If possible, allocate from the per-CPU cache.  There are two
 2413          * requirements for safe access to the per-CPU cache: (1) the thread
 2414          * accessing the cache must not be preempted or yield during access,
 2415          * and (2) the thread must not migrate CPUs without switching which
 2416          * cache it accesses.  We rely on a critical section to prevent
 2417          * preemption and migration.  We release the critical section in
 2418          * order to acquire the zone mutex if we are unable to allocate from
 2419          * the current cache; when we re-acquire the critical section, we
 2420          * must detect and handle migration if it has occurred.
 2421          */
 2422 zalloc_restart:
 2423         critical_enter();
 2424         cpu = curcpu;
 2425         cache = &zone->uz_cpu[cpu];
 2426 
 2427 zalloc_start:
 2428         bucket = cache->uc_allocbucket;
 2429         if (bucket != NULL && bucket->ub_cnt > 0) {
 2430                 bucket->ub_cnt--;
 2431                 item = bucket->ub_bucket[bucket->ub_cnt];
 2432 #ifdef INVARIANTS
 2433                 bucket->ub_bucket[bucket->ub_cnt] = NULL;
 2434 #endif
 2435                 KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled."));
 2436                 cache->uc_allocs++;
 2437                 critical_exit();
 2438 #ifdef INVARIANTS
 2439                 skipdbg = uma_dbg_zskip(zone, item);
 2440 #endif
 2441                 if (zone->uz_ctor != NULL &&
 2442 #ifdef INVARIANTS
 2443                     (!skipdbg || zone->uz_ctor != trash_ctor ||
 2444                     zone->uz_dtor != trash_dtor) &&
 2445 #endif
 2446                     zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
 2447                         atomic_add_long(&zone->uz_fails, 1);
 2448                         zone_free_item(zone, item, udata, SKIP_DTOR);
 2449                         return (NULL);
 2450                 }
 2451 #ifdef INVARIANTS
 2452                 if (!skipdbg)
 2453                         uma_dbg_alloc(zone, NULL, item);
 2454 #endif
 2455                 if (flags & M_ZERO)
 2456                         uma_zero_item(item, zone);
 2457                 return (item);
 2458         }
 2459 
 2460         /*
 2461          * We have run out of items in our alloc bucket.
 2462          * See if we can switch with our free bucket.
 2463          */
 2464         bucket = cache->uc_freebucket;
 2465         if (bucket != NULL && bucket->ub_cnt > 0) {
 2466                 CTR2(KTR_UMA,
 2467                     "uma_zalloc: zone %s(%p) swapping empty with alloc",
 2468                     zone->uz_name, zone);
 2469                 cache->uc_freebucket = cache->uc_allocbucket;
 2470                 cache->uc_allocbucket = bucket;
 2471                 goto zalloc_start;
 2472         }
 2473 
 2474         /*
 2475          * Discard any empty allocation bucket while we hold no locks.
 2476          */
 2477         bucket = cache->uc_allocbucket;
 2478         cache->uc_allocbucket = NULL;
 2479         critical_exit();
 2480         if (bucket != NULL)
 2481                 bucket_free(zone, bucket, udata);
 2482 
 2483         if (zone->uz_flags & UMA_ZONE_NUMA) {
 2484                 domain = PCPU_GET(domain);
 2485                 if (VM_DOMAIN_EMPTY(domain))
 2486                         domain = UMA_ANYDOMAIN;
 2487         } else
 2488                 domain = UMA_ANYDOMAIN;
 2489 
 2490         /* Short-circuit for zones without buckets and low memory. */
 2491         if (zone->uz_count == 0 || bucketdisable)
 2492                 goto zalloc_item;
 2493 
 2494         /*
 2495          * The attempt to retrieve the item from the per-CPU cache has failed,
 2496          * so we must go back to the zone.  This requires the zone lock, so we
 2497          * must drop the critical section, then re-acquire it when we go back
 2498          * to the cache.  Since the critical section is released, we may be
 2499          * preempted or migrate.  As such, make sure not to maintain any
 2500          * thread-local state specific to the cache from prior to releasing
 2501          * the critical section.
 2502          */
 2503         lockfail = 0;
 2504         if (ZONE_TRYLOCK(zone) == 0) {
 2505                 /* Record contention to size the buckets. */
 2506                 ZONE_LOCK(zone);
 2507                 lockfail = 1;
 2508         }
 2509         critical_enter();
 2510         cpu = curcpu;
 2511         cache = &zone->uz_cpu[cpu];
 2512 
 2513         /* See if we lost the race to fill the cache. */
 2514         if (cache->uc_allocbucket != NULL) {
 2515                 ZONE_UNLOCK(zone);
 2516                 goto zalloc_start;
 2517         }
 2518 
 2519         /*
 2520          * Check the zone's cache of buckets.
 2521          */
 2522         if (domain == UMA_ANYDOMAIN)
 2523                 zdom = &zone->uz_domain[0];
 2524         else
 2525                 zdom = &zone->uz_domain[domain];
 2526         if ((bucket = LIST_FIRST(&zdom->uzd_buckets)) != NULL) {
 2527                 KASSERT(bucket->ub_cnt != 0,
 2528                     ("uma_zalloc_arg: Returning an empty bucket."));
 2529 
 2530                 LIST_REMOVE(bucket, ub_link);
 2531                 cache->uc_allocbucket = bucket;
 2532                 ZONE_UNLOCK(zone);
 2533                 goto zalloc_start;
 2534         }
 2535         /* We are no longer associated with this CPU. */
 2536         critical_exit();
 2537 
 2538         /*
 2539          * We bump the uz count when the cache size is insufficient to
 2540          * handle the working set.
 2541          */
 2542         if (lockfail && zone->uz_count < BUCKET_MAX)
 2543                 zone->uz_count++;
 2544         ZONE_UNLOCK(zone);
 2545 
 2546         /*
 2547          * Now let's just fill a bucket and put it on the free list.  If that
 2548          * works we'll restart the allocation from the beginning and it
 2549          * will use the just-filled bucket.
 2550          */
 2551         bucket = zone_alloc_bucket(zone, udata, domain, flags);
 2552         CTR3(KTR_UMA, "uma_zalloc: zone %s(%p) bucket zone returned %p",
 2553             zone->uz_name, zone, bucket);
 2554         if (bucket != NULL) {
 2555                 ZONE_LOCK(zone);
 2556                 critical_enter();
 2557                 cpu = curcpu;
 2558                 cache = &zone->uz_cpu[cpu];
 2559                 /*
 2560                  * See if we lost the race or were migrated.  Cache the
 2561                  * initialized bucket to make this less likely or claim
 2562                  * the memory directly.
 2563                  */
 2564                 if (cache->uc_allocbucket == NULL &&
 2565                     ((zone->uz_flags & UMA_ZONE_NUMA) == 0 ||
 2566                     domain == PCPU_GET(domain))) {
 2567                         cache->uc_allocbucket = bucket;
 2568                 } else if ((zone->uz_flags & UMA_ZONE_NOBUCKETCACHE) != 0) {
 2569                         critical_exit();
 2570                         ZONE_UNLOCK(zone);
 2571                         bucket_drain(zone, bucket);
 2572                         bucket_free(zone, bucket, udata);
 2573                         goto zalloc_restart;
 2574                 } else
 2575                         LIST_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link);
 2576                 ZONE_UNLOCK(zone);
 2577                 goto zalloc_start;
 2578         }
 2579 
 2580         /*
 2581          * We may not be able to get a bucket so return an actual item.
 2582          */
 2583 zalloc_item:
 2584         item = zone_alloc_item(zone, udata, domain, flags);
 2585 
 2586         return (item);
 2587 }
 2588 
 2589 void *
 2590 uma_zalloc_domain(uma_zone_t zone, void *udata, int domain, int flags)
 2591 {
 2592 
 2593         /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
 2594         random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
 2595 
 2596         /* This allocation bypasses the per-CPU caches. */
 2597         CTR5(KTR_UMA,
 2598             "uma_zalloc_domain thread %x zone %s(%p) domain %d flags %d",
 2599             curthread, zone->uz_name, zone, domain, flags);
 2600 
 2601         if (flags & M_WAITOK) {
 2602                 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
 2603                     "uma_zalloc_domain: zone \"%s\"", zone->uz_name);
 2604         }
 2605         KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
 2606             ("uma_zalloc_domain: called with spinlock or critical section held"));
 2607 
 2608         return (zone_alloc_item(zone, udata, domain, flags));
 2609 }
 2610 
 2611 /*
 2612  * Find a slab with some space.  Prefer slabs that are partially used over those
 2613  * that are totally full.  This helps to reduce fragmentation.
 2614  *
 2615  * If 'rr' is 1, search all domains starting from 'domain'.  Otherwise check
 2616  * only 'domain'.
 2617  */
 2618 static uma_slab_t
 2619 keg_first_slab(uma_keg_t keg, int domain, bool rr)
 2620 {
 2621         uma_domain_t dom;
 2622         uma_slab_t slab;
 2623         int start;
 2624 
 2625         KASSERT(domain >= 0 && domain < vm_ndomains,
 2626             ("keg_first_slab: domain %d out of range", domain));
 2627 
 2628         slab = NULL;
 2629         start = domain;
 2630         do {
 2631                 dom = &keg->uk_domain[domain];
 2632                 if (!LIST_EMPTY(&dom->ud_part_slab))
 2633                         return (LIST_FIRST(&dom->ud_part_slab));
 2634                 if (!LIST_EMPTY(&dom->ud_free_slab)) {
 2635                         slab = LIST_FIRST(&dom->ud_free_slab);
 2636                         LIST_REMOVE(slab, us_link);
 2637                         LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
 2638                         return (slab);
 2639                 }
 2640                 if (rr)
 2641                         domain = (domain + 1) % vm_ndomains;
 2642         } while (domain != start);
 2643 
 2644         return (NULL);
 2645 }
 2646 
 2647 static uma_slab_t
 2648 keg_fetch_free_slab(uma_keg_t keg, int domain, bool rr, int flags)
 2649 {
 2650         uint32_t reserve;
 2651 
 2652         mtx_assert(&keg->uk_lock, MA_OWNED);
 2653 
 2654         reserve = (flags & M_USE_RESERVE) != 0 ? 0 : keg->uk_reserve;
 2655         if (keg->uk_free <= reserve)
 2656                 return (NULL);
 2657         return (keg_first_slab(keg, domain, rr));
 2658 }
 2659 
 2660 static uma_slab_t
 2661 keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int rdomain, const int flags)
 2662 {
 2663         struct vm_domainset_iter di;
 2664         uma_domain_t dom;
 2665         uma_slab_t slab;
 2666         int aflags, domain;
 2667         bool rr;
 2668 
 2669 restart:
 2670         mtx_assert(&keg->uk_lock, MA_OWNED);
 2671 
 2672         /*
 2673          * Use the keg's policy if upper layers haven't already specified a
 2674          * domain (as happens with first-touch zones).
 2675          *
 2676          * To avoid races we run the iterator with the keg lock held, but that
 2677          * means that we cannot allow the vm_domainset layer to sleep.  Thus,
 2678          * clear M_WAITOK and handle low memory conditions locally.
 2679          */
 2680         rr = rdomain == UMA_ANYDOMAIN;
 2681         if (rr) {
 2682                 aflags = (flags & ~M_WAITOK) | M_NOWAIT;
 2683                 vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain,
 2684                     &aflags);
 2685         } else {
 2686                 aflags = flags;
 2687                 domain = rdomain;
 2688         }
 2689 
 2690         for (;;) {
 2691                 slab = keg_fetch_free_slab(keg, domain, rr, flags);
 2692                 if (slab != NULL) {
 2693                         MPASS(slab->us_keg == keg);
 2694                         return (slab);
 2695                 }
 2696 
 2697                 /*
 2698                  * M_NOVM means don't ask at all!
 2699                  */
 2700                 if (flags & M_NOVM)
 2701                         break;
 2702 
 2703                 if (keg->uk_maxpages && keg->uk_pages >= keg->uk_maxpages) {
 2704                         keg->uk_flags |= UMA_ZFLAG_FULL;
 2705                         /*
 2706                          * If this is not a multi-zone, set the FULL bit.
 2707                          * Otherwise zone_fetch_slab_multi() takes care of it.
 2708                          */
 2709                         if ((zone->uz_flags & UMA_ZFLAG_MULTI) == 0) {
 2710                                 zone->uz_flags |= UMA_ZFLAG_FULL;
 2711                                 zone_log_warning(zone);
 2712                                 zone_maxaction(zone);
 2713                         }
 2714                         if (flags & M_NOWAIT)
 2715                                 return (NULL);
 2716                         zone->uz_sleeps++;
 2717                         msleep(keg, &keg->uk_lock, PVM, "keglimit", 0);
 2718                         continue;
 2719                 }
 2720                 slab = keg_alloc_slab(keg, zone, domain, aflags);
 2721                 /*
 2722                  * If we got a slab here it's safe to mark it partially used
 2723                  * and return.  We assume that the caller is going to remove
 2724                  * at least one item.
 2725                  */
 2726                 if (slab) {
 2727                         MPASS(slab->us_keg == keg);
 2728                         dom = &keg->uk_domain[slab->us_domain];
 2729                         LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
 2730                         return (slab);
 2731                 }
 2732                 KEG_LOCK(keg);
 2733                 if (rr && vm_domainset_iter_policy(&di, &domain) != 0) {
 2734                         if ((flags & M_WAITOK) != 0) {
 2735                                 KEG_UNLOCK(keg);
 2736                                 vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask);
 2737                                 KEG_LOCK(keg);
 2738                                 goto restart;
 2739                         }
 2740                         break;
 2741                 }
 2742         }
 2743 
 2744         /*
 2745          * We might not have been able to get a slab but another cpu
 2746          * could have while we were unlocked.  Check again before we
 2747          * fail.
 2748          */
 2749         if ((slab = keg_fetch_free_slab(keg, domain, rr, flags)) != NULL) {
 2750                 MPASS(slab->us_keg == keg);
 2751                 return (slab);
 2752         }
 2753         return (NULL);
 2754 }
 2755 
 2756 static uma_slab_t
 2757 zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int domain, int flags)
 2758 {
 2759         uma_slab_t slab;
 2760 
 2761         if (keg == NULL) {
 2762                 keg = zone_first_keg(zone);
 2763                 KEG_LOCK(keg);
 2764         }
 2765 
 2766         for (;;) {
 2767                 slab = keg_fetch_slab(keg, zone, domain, flags);
 2768                 if (slab)
 2769                         return (slab);
 2770                 if (flags & (M_NOWAIT | M_NOVM))
 2771                         break;
 2772         }
 2773         KEG_UNLOCK(keg);
 2774         return (NULL);
 2775 }
 2776 
 2777 /*
 2778  * zone_fetch_slab_multi:  Fetches a slab from one available keg.  Returns
 2779  * with the keg locked.  On NULL no lock is held.
 2780  *
 2781  * The last pointer is used to seed the search.  It is not required.
 2782  */
 2783 static uma_slab_t
 2784 zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int domain, int rflags)
 2785 {
 2786         uma_klink_t klink;
 2787         uma_slab_t slab;
 2788         uma_keg_t keg;
 2789         int flags;
 2790         int empty;
 2791         int full;
 2792 
 2793         /*
 2794          * Don't wait on the first pass.  This will skip limit tests
 2795          * as well.  We don't want to sleep in one keg if another
 2796          * keg can satisfy the request without blocking.
 2797          */
 2798         flags = (rflags & ~M_WAITOK) | M_NOWAIT;
 2799         /*
 2800          * Use the last slab allocated as a hint for where to start
 2801          * the search.
 2802          */
 2803         if (last != NULL) {
 2804                 slab = keg_fetch_slab(last, zone, domain, flags);
 2805                 if (slab)
 2806                         return (slab);
 2807                 KEG_UNLOCK(last);
 2808         }
 2809         /*
 2810          * Loop until we have a slab in case of transient failures
 2811          * while M_WAITOK is specified.  It's not clear this is 100%
 2812          * required, but we've done it this way for a long time.
 2813          */
 2814         for (;;) {
 2815                 empty = 0;
 2816                 full = 0;
 2817                 /*
 2818                  * Search the available kegs for slabs.  Be careful to hold the
 2819                  * correct lock while calling into the keg layer.
 2820                  */
 2821                 LIST_FOREACH(klink, &zone->uz_kegs, kl_link) {
 2822                         keg = klink->kl_keg;
 2823                         KEG_LOCK(keg);
 2824                         if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) {
 2825                                 slab = keg_fetch_slab(keg, zone, domain, flags);
 2826                                 if (slab)
 2827                                         return (slab);
 2828                         }
 2829                         if (keg->uk_flags & UMA_ZFLAG_FULL)
 2830                                 full++;
 2831                         else
 2832                                 empty++;
 2833                         KEG_UNLOCK(keg);
 2834                 }
 2835                 if (rflags & (M_NOWAIT | M_NOVM))
 2836                         break;
 2837                 flags = rflags;
 2838                 /*
 2839                  * All kegs are full.  XXX We can't atomically check all kegs
 2840                  * and sleep so just sleep for a short period and retry.
 2841                  */
 2842                 if (full && !empty) {
 2843                         ZONE_LOCK(zone);
 2844                         zone->uz_flags |= UMA_ZFLAG_FULL;
 2845                         zone->uz_sleeps++;
 2846                         zone_log_warning(zone);
 2847                         zone_maxaction(zone);
 2848                         msleep(zone, zone->uz_lockptr, PVM,
 2849                             "zonelimit", hz/100);
 2850                         zone->uz_flags &= ~UMA_ZFLAG_FULL;
 2851                         ZONE_UNLOCK(zone);
 2852                         continue;
 2853                 }
 2854         }
 2855         return (NULL);
 2856 }
 2857 
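      /*
       * Take one item from a slab: find the lowest set bit in the slab's
       * free bitset, claim that index, and move the slab to the full list
       * if this was its last free item.  The keg lock must be held.
       */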
 2858 static void *
 2859 slab_alloc_item(uma_keg_t keg, uma_slab_t slab)
 2860 {
 2861         uma_domain_t dom;
 2862         void *item;
 2863         uint8_t freei;
 2864 
 2865         MPASS(keg == slab->us_keg);
 2866         mtx_assert(&keg->uk_lock, MA_OWNED);
 2867 
 2868         freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1;
 2869         BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free);
 2870         item = slab->us_data + (keg->uk_rsize * freei);
 2871         slab->us_freecount--;
 2872         keg->uk_free--;
 2873 
 2874         /* Move this slab to the full list */
 2875         if (slab->us_freecount == 0) {
 2876                 LIST_REMOVE(slab, us_link);
 2877                 dom = &keg->uk_domain[slab->us_domain];
 2878                 LIST_INSERT_HEAD(&dom->ud_full_slab, slab, us_link);
 2879         }
 2880 
 2881         return (item);
 2882 }
 2883 
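      /*
       * Import items for a keg-backed zone: fetch slabs and carve up to
       * "max" items out of them into the supplied bucket array, returning
       * the number of items actually obtained.
       */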
 2884 static int
 2885 zone_import(uma_zone_t zone, void **bucket, int max, int domain, int flags)
 2886 {
 2887         uma_slab_t slab;
 2888         uma_keg_t keg;
 2889 #ifdef NUMA
 2890         int stripe;
 2891 #endif
 2892         int i;
 2893 
 2894         slab = NULL;
 2895         keg = NULL;
 2896         /* Try to keep the buckets totally full */
 2897         for (i = 0; i < max; ) {
 2898                 if ((slab = zone->uz_slab(zone, keg, domain, flags)) == NULL)
 2899                         break;
 2900                 keg = slab->us_keg;
 2901 #ifdef NUMA
 2902                 stripe = howmany(max, vm_ndomains);
 2903 #endif
 2904                 while (slab->us_freecount && i < max) { 
 2905                         bucket[i++] = slab_alloc_item(keg, slab);
 2906                         if (keg->uk_free <= keg->uk_reserve)
 2907                                 break;
 2908 #ifdef NUMA
 2909                         /*
 2910                          * If the zone is striped we pick a new slab for every
 2911                          * N allocations.  Eliminating this conditional will
 2912                          * instead pick a new domain for each bucket rather
 2913                          * than stripe within each bucket.  The current option
 2914                          * produces more fragmentation and requires more cpu
 2915                          * time but yields better distribution.
 2916                          */
 2917                         if ((zone->uz_flags & UMA_ZONE_NUMA) == 0 &&
 2918                             vm_ndomains > 1 && --stripe == 0)
 2919                                 break;
 2920 #endif
 2921                 }
 2922                 /* Don't block if we allocated any successfully. */
 2923                 flags &= ~M_WAITOK;
 2924                 flags |= M_NOWAIT;
 2925         }
 2926         if (slab != NULL)
 2927                 KEG_UNLOCK(keg);
 2928 
 2929         return (i);
 2930 }
 2931 
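      /*
       * Allocate a bucket and fill it with up to uz_count items using the
       * zone's import function, running uz_init on each item if set.
       * Returns NULL if no items could be obtained.
       */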
 2932 static uma_bucket_t
 2933 zone_alloc_bucket(uma_zone_t zone, void *udata, int domain, int flags)
 2934 {
 2935         uma_bucket_t bucket;
 2936         int max;
 2937 
 2938         CTR1(KTR_UMA, "zone_alloc_bucket: domain %d", domain);
 2939 
 2940         /* Don't wait for buckets, preserve caller's NOVM setting. */
 2941         bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM));
 2942         if (bucket == NULL)
 2943                 return (NULL);
 2944 
 2945         max = MIN(bucket->ub_entries, zone->uz_count);
 2946         bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket,
 2947             max, domain, flags);
 2948 
 2949         /*
 2950          * Initialize the memory if necessary.
 2951          */
 2952         if (bucket->ub_cnt != 0 && zone->uz_init != NULL) {
 2953                 int i;
 2954 
 2955                 for (i = 0; i < bucket->ub_cnt; i++)
 2956                         if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
 2957                             flags) != 0)
 2958                                 break;
 2959                 /*
 2960                  * If we couldn't initialize the whole bucket, put the
 2961                  * rest back onto the freelist.
 2962                  */
 2963                 if (i != bucket->ub_cnt) {
 2964                         zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i],
 2965                             bucket->ub_cnt - i);
 2966 #ifdef INVARIANTS
 2967                         bzero(&bucket->ub_bucket[i],
 2968                             sizeof(void *) * (bucket->ub_cnt - i));
 2969 #endif
 2970                         bucket->ub_cnt = i;
 2971                 }
 2972         }
 2973 
 2974         if (bucket->ub_cnt == 0) {
 2975                 bucket_free(zone, bucket, udata);
 2976                 atomic_add_long(&zone->uz_fails, 1);
 2977                 return (NULL);
 2978         }
 2979 
 2980         return (bucket);
 2981 }
 2982 
 2983 /*
 2984  * Allocates a single item from a zone.
 2985  *
 2986  * Arguments
 2987  *      zone   The zone to alloc for.
 2988  *      udata  The data to be passed to the constructor.
 2989  *      domain The domain to allocate from or UMA_ANYDOMAIN.
 2990  *      flags  M_WAITOK, M_NOWAIT, M_ZERO.
 2991  *
 2992  * Returns
 2993  *      NULL if there is no memory and M_NOWAIT is set
 2994  *      An item if successful
 2995  */
 2996 
 2997 static void *
 2998 zone_alloc_item(uma_zone_t zone, void *udata, int domain, int flags)
 2999 {
 3000         void *item;
 3001 #ifdef INVARIANTS
 3002         bool skipdbg;
 3003 #endif
 3004 
 3005         item = NULL;
 3006 
 3007         if (domain != UMA_ANYDOMAIN) {
 3008                 /* avoid allocs targeting empty domains */
 3009                 if (VM_DOMAIN_EMPTY(domain))
 3010                         domain = UMA_ANYDOMAIN;
 3011         }
 3012         if (zone->uz_import(zone->uz_arg, &item, 1, domain, flags) != 1)
 3013                 goto fail;
 3014         atomic_add_long(&zone->uz_allocs, 1);
 3015 
 3016 #ifdef INVARIANTS
 3017         skipdbg = uma_dbg_zskip(zone, item);
 3018 #endif
 3019         /*
 3020          * We have to call both the zone's init (not the keg's init)
 3021          * and the zone's ctor.  This is because the item is going from
 3022          * a keg slab directly to the user, and the user is expecting it
 3023          * to be both zone-init'd as well as zone-ctor'd.
 3024          */
 3025         if (zone->uz_init != NULL) {
 3026                 if (zone->uz_init(item, zone->uz_size, flags) != 0) {
 3027                         zone_free_item(zone, item, udata, SKIP_FINI);
 3028                         goto fail;
 3029                 }
 3030         }
 3031         if (zone->uz_ctor != NULL &&
 3032 #ifdef INVARIANTS
 3033             (!skipdbg || zone->uz_ctor != trash_ctor ||
 3034             zone->uz_dtor != trash_dtor) &&
 3035 #endif
 3036             zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
 3037                 zone_free_item(zone, item, udata, SKIP_DTOR);
 3038                 goto fail;
 3039         }
 3040 #ifdef INVARIANTS
 3041         if (!skipdbg)
 3042                 uma_dbg_alloc(zone, NULL, item);
 3043 #endif
 3044         if (flags & M_ZERO)
 3045                 uma_zero_item(item, zone);
 3046 
 3047         CTR3(KTR_UMA, "zone_alloc_item item %p from %s(%p)", item,
 3048             zone->uz_name, zone);
 3049 
 3050         return (item);
 3051 
 3052 fail:
 3053         CTR2(KTR_UMA, "zone_alloc_item failed from %s(%p)",
 3054             zone->uz_name, zone);
 3055         atomic_add_long(&zone->uz_fails, 1);
 3056         return (NULL);
 3057 }
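
/*
 * Illustrative consumer sketch (hypothetical "foo" zone, disabled with
 * #if 0; not part of this file).  For an ordinary uma_zcreate() zone the
 * uminit argument becomes the keg init and runs when a slab is created,
 * while the ctor runs on every allocation; the uz_init path above serves
 * zones whose init is attached at the zone layer instead, e.g. via
 * uma_zone_set_zinit().
 */
#if 0
static int
foo_init(void *mem, int size, int flags)
{

	bzero(mem, size);	/* runs once per item, at slab creation */
	return (0);
}

static int
foo_ctor(void *mem, int size, void *arg, int flags)
{
	struct foo *f = mem;

	f->f_refs = 1;		/* runs on every uma_zalloc() */
	return (0);
}

foo_zone = uma_zcreate("foo", sizeof(struct foo), foo_ctor, NULL,
    foo_init, NULL, UMA_ALIGN_PTR, 0);
f = uma_zalloc(foo_zone, M_WAITOK);
#endif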
 3058 
 3059 /* See uma.h */
 3060 void
 3061 uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
 3062 {
 3063         uma_cache_t cache;
 3064         uma_bucket_t bucket;
 3065         uma_zone_domain_t zdom;
 3066         int cpu, domain, lockfail;
 3067 #ifdef INVARIANTS
 3068         bool skipdbg;
 3069 #endif
 3070 
 3071         /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
 3072         random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
 3073 
 3074         CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
 3075             zone->uz_name);
 3076 
 3077         KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
 3078             ("uma_zfree_arg: called with spinlock or critical section held"));
 3079 
 3080         /* uma_zfree(..., NULL) does nothing, to match free(9). */
 3081         if (item == NULL)
 3082                 return;
 3083 #ifdef DEBUG_MEMGUARD
 3084         if (is_memguard_addr(item)) {
 3085                 if (zone->uz_dtor != NULL)
 3086                         zone->uz_dtor(item, zone->uz_size, udata);
 3087                 if (zone->uz_fini != NULL)
 3088                         zone->uz_fini(item, zone->uz_size);
 3089                 memguard_free(item);
 3090                 return;
 3091         }
 3092 #endif
 3093 #ifdef INVARIANTS
 3094         skipdbg = uma_dbg_zskip(zone, item);
 3095         if (skipdbg == false) {
 3096                 if (zone->uz_flags & UMA_ZONE_MALLOC)
 3097                         uma_dbg_free(zone, udata, item);
 3098                 else
 3099                         uma_dbg_free(zone, NULL, item);
 3100         }
 3101         if (zone->uz_dtor != NULL && (!skipdbg ||
 3102             zone->uz_dtor != trash_dtor || zone->uz_ctor != trash_ctor))
 3103 #else
 3104         if (zone->uz_dtor != NULL)
 3105 #endif
 3106                 zone->uz_dtor(item, zone->uz_size, udata);
 3107 
 3108         /*
 3109          * The race here is acceptable.  If we miss it we'll just have to wait
 3110          * a little longer for the limits to be reset.
 3111          */
 3112         if (zone->uz_flags & UMA_ZFLAG_FULL)
 3113                 goto zfree_item;
 3114 
 3115         /*
 3116          * If possible, free to the per-CPU cache.  There are two
 3117          * requirements for safe access to the per-CPU cache: (1) the thread
 3118          * accessing the cache must not be preempted or yield during access,
 3119          * and (2) the thread must not migrate CPUs without switching which
 3120          * cache it accesses.  We rely on a critical section to prevent
 3121          * preemption and migration.  We release the critical section in
 3122          * order to acquire the zone mutex if we are unable to free to the
 3123          * current cache; when we re-acquire the critical section, we must
 3124          * detect and handle migration if it has occurred.
 3125          */
 3126 zfree_restart:
 3127         critical_enter();
 3128         cpu = curcpu;
 3129         cache = &zone->uz_cpu[cpu];
 3130 
 3131 zfree_start:
 3132         /*
 3133          * Try to free into the allocbucket first to give LIFO ordering
 3134          * for cache-hot data structures.  Spill over into the freebucket
 3135          * if necessary.  Alloc will swap them if one runs dry.
 3136          */
 3137         bucket = cache->uc_allocbucket;
 3138         if (bucket == NULL || bucket->ub_cnt >= bucket->ub_entries)
 3139                 bucket = cache->uc_freebucket;
 3140         if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
 3141                 KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
 3142                     ("uma_zfree: Freeing to non free bucket index."));
 3143                 bucket->ub_bucket[bucket->ub_cnt] = item;
 3144                 bucket->ub_cnt++;
 3145                 cache->uc_frees++;
 3146                 critical_exit();
 3147                 return;
 3148         }
 3149 
 3150         /*
 3151          * We must go back to the zone, which requires acquiring the zone lock,
 3152          * which in turn means we must release and re-acquire the critical
 3153          * section.  Since the critical section is released, we may be
 3154          * preempted or migrate.  As such, make sure not to maintain any
 3155          * thread-local state specific to the cache from prior to releasing
 3156          * the critical section.
 3157          */
 3158         critical_exit();
 3159         if (zone->uz_count == 0 || bucketdisable)
 3160                 goto zfree_item;
 3161 
 3162         lockfail = 0;
 3163         if (ZONE_TRYLOCK(zone) == 0) {
 3164                 /* Record contention to size the buckets. */
 3165                 ZONE_LOCK(zone);
 3166                 lockfail = 1;
 3167         }
 3168         critical_enter();
 3169         cpu = curcpu;
 3170         cache = &zone->uz_cpu[cpu];
 3171 
 3172         bucket = cache->uc_freebucket;
 3173         if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
 3174                 ZONE_UNLOCK(zone);
 3175                 goto zfree_start;
 3176         }
 3177         cache->uc_freebucket = NULL;
 3178         /* We are no longer associated with this CPU. */
 3179         critical_exit();
 3180 
 3181         if ((zone->uz_flags & UMA_ZONE_NUMA) != 0) {
 3182                 domain = PCPU_GET(domain);
 3183                 if (VM_DOMAIN_EMPTY(domain))
 3184                         domain = UMA_ANYDOMAIN;
 3185         } else
 3186                 domain = 0;
 3187         zdom = &zone->uz_domain[0];
 3188 
 3189         /* Can we throw this on the zone full list? */
 3190         if (bucket != NULL) {
 3191                 CTR3(KTR_UMA,
 3192                     "uma_zfree: zone %s(%p) putting bucket %p on free list",
 3193                     zone->uz_name, zone, bucket);
 3194                 /* ub_cnt is pointing to the last free item */
 3195                 KASSERT(bucket->ub_cnt != 0,
 3196                     ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n"));
 3197                 if ((zone->uz_flags & UMA_ZONE_NOBUCKETCACHE) != 0) {
 3198                         ZONE_UNLOCK(zone);
 3199                         bucket_drain(zone, bucket);
 3200                         bucket_free(zone, bucket, udata);
 3201                         goto zfree_restart;
 3202                 } else
 3203                         LIST_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link);
 3204         }
 3205 
 3206         /*
 3207          * We bump the uz count when the cache size is insufficient to
 3208          * handle the working set.
 3209          */
 3210         if (lockfail && zone->uz_count < BUCKET_MAX)
 3211                 zone->uz_count++;
 3212         ZONE_UNLOCK(zone);
 3213 
 3214         bucket = bucket_alloc(zone, udata, M_NOWAIT);
 3215         CTR3(KTR_UMA, "uma_zfree: zone %s(%p) allocated bucket %p",
 3216             zone->uz_name, zone, bucket);
 3217         if (bucket) {
 3218                 critical_enter();
 3219                 cpu = curcpu;
 3220                 cache = &zone->uz_cpu[cpu];
 3221                 if (cache->uc_freebucket == NULL &&
 3222                     ((zone->uz_flags & UMA_ZONE_NUMA) == 0 ||
 3223                     domain == PCPU_GET(domain))) {
 3224                         cache->uc_freebucket = bucket;
 3225                         goto zfree_start;
 3226                 }
 3227                 /*
 3228                  * We lost the race, start over.  We have to drop our
 3229                  * critical section to free the bucket.
 3230                  */
 3231                 critical_exit();
 3232                 bucket_free(zone, bucket, udata);
 3233                 goto zfree_restart;
 3234         }
 3235 
 3236         /*
 3237          * If nothing else caught this, we'll just do an internal free.
 3238          */
 3239 zfree_item:
 3240         zone_free_item(zone, item, udata, SKIP_DTOR);
 3241 
 3242         return;
 3243 }
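
/*
 * Illustrative pairing (disabled sketch; "zone", "item", and "udata" are
 * assumed): the udata cookie passed to uma_zfree_arg() is forwarded to the
 * zone's dtor, mirroring how uma_zalloc_arg() forwards it to the ctor.
 */
#if 0
item = uma_zalloc_arg(zone, udata, M_WAITOK);
/* ... use item ... */
uma_zfree_arg(zone, item, udata);	/* udata reaches uz_dtor */
#endif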
 3244 
 3245 void
 3246 uma_zfree_domain(uma_zone_t zone, void *item, void *udata)
 3247 {
 3248 
 3249         /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
 3250         random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
 3251 
 3252         CTR2(KTR_UMA, "uma_zfree_domain thread %x zone %s", curthread,
 3253             zone->uz_name);
 3254 
 3255         KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
 3256             ("uma_zfree_domain: called with spinlock or critical section held"));
 3257 
 3258         /* uma_zfree(..., NULL) does nothing, to match free(9). */
 3259         if (item == NULL)
 3260                 return;
 3261         zone_free_item(zone, item, udata, SKIP_NONE);
 3262 }
 3263 
 3264 static void
 3265 slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item)
 3266 {
 3267         uma_domain_t dom;
 3268         uint8_t freei;
 3269 
 3270         mtx_assert(&keg->uk_lock, MA_OWNED);
 3271         MPASS(keg == slab->us_keg);
 3272 
 3273         dom = &keg->uk_domain[slab->us_domain];
 3274 
 3275         /* Do we need to remove from any lists? */
 3276         if (slab->us_freecount+1 == keg->uk_ipers) {
 3277                 LIST_REMOVE(slab, us_link);
 3278                 LIST_INSERT_HEAD(&dom->ud_free_slab, slab, us_link);
 3279         } else if (slab->us_freecount == 0) {
 3280                 LIST_REMOVE(slab, us_link);
 3281                 LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
 3282         }
 3283 
 3284         /* Slab management. */
 3285         freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
 3286         BIT_SET(SLAB_SETSIZE, freei, &slab->us_free);
 3287         slab->us_freecount++;
 3288 
 3289         /* Keg statistics. */
 3290         keg->uk_free++;
 3291 }
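
/*
 * Worked example (illustrative): with keg->uk_rsize = 256 and an item at
 * slab->us_data + 1536, freei = 1536 / 256 = 6, so bit 6 is set in the
 * slab's free bitset and us_freecount is bumped; the list checks above
 * may also have moved the slab off the full list or onto the free list.
 */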
 3292 
 3293 static void
 3294 zone_release(uma_zone_t zone, void **bucket, int cnt)
 3295 {
 3296         void *item;
 3297         uma_slab_t slab;
 3298         uma_keg_t keg;
 3299         uint8_t *mem;
 3300         int clearfull;
 3301         int i;
 3302 
 3303         clearfull = 0;
 3304         keg = zone_first_keg(zone);
 3305         KEG_LOCK(keg);
 3306         for (i = 0; i < cnt; i++) {
 3307                 item = bucket[i];
 3308                 if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
 3309                         mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
 3310                         if (zone->uz_flags & UMA_ZONE_HASH) {
 3311                                 slab = hash_sfind(&keg->uk_hash, mem);
 3312                         } else {
 3313                                 mem += keg->uk_pgoff;
 3314                                 slab = (uma_slab_t)mem;
 3315                         }
 3316                 } else {
 3317                         slab = vtoslab((vm_offset_t)item);
 3318                         if (slab->us_keg != keg) {
 3319                                 KEG_UNLOCK(keg);
 3320                                 keg = slab->us_keg;
 3321                                 KEG_LOCK(keg);
 3322                         }
 3323                 }
 3324                 slab_free_item(keg, slab, item);
 3325                 if (keg->uk_flags & UMA_ZFLAG_FULL) {
 3326                         if (keg->uk_pages < keg->uk_maxpages) {
 3327                                 keg->uk_flags &= ~UMA_ZFLAG_FULL;
 3328                                 clearfull = 1;
 3329                         }
 3330 
 3331                         /* 
 3332                          * We can handle one more allocation. Since we're
 3333                          * clearing ZFLAG_FULL, wake up all procs blocked
 3334                          * on pages. This should be uncommon, so keeping this
 3335                          * simple for now (rather than adding count of blocked 
 3336                          * threads etc).
 3337                          */
 3338                         wakeup(keg);
 3339                 }
 3340         }
 3341         KEG_UNLOCK(keg);
 3342         if (clearfull) {
 3343                 ZONE_LOCK(zone);
 3344                 zone->uz_flags &= ~UMA_ZFLAG_FULL;
 3345                 wakeup(zone);
 3346                 ZONE_UNLOCK(zone);
 3347         }
 3349 }
 3350 
 3351 /*
 3352  * Frees a single item to any zone.
 3353  *
 3354  * Arguments:
 3355  *      zone   The zone to free to
 3356  *      item   The item we're freeing
 3357  *      udata  User supplied data for the dtor
 3358  *      skip   Skip dtors and finis
 3359  */
 3360 static void
 3361 zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
 3362 {
 3363 #ifdef INVARIANTS
 3364         bool skipdbg;
 3365 
 3366         skipdbg = uma_dbg_zskip(zone, item);
 3367         if (skip == SKIP_NONE && !skipdbg) {
 3368                 if (zone->uz_flags & UMA_ZONE_MALLOC)
 3369                         uma_dbg_free(zone, udata, item);
 3370                 else
 3371                         uma_dbg_free(zone, NULL, item);
 3372         }
 3373 
 3374         if (skip < SKIP_DTOR && zone->uz_dtor != NULL &&
 3375             (!skipdbg || zone->uz_dtor != trash_dtor ||
 3376             zone->uz_ctor != trash_ctor))
 3377 #else
 3378         if (skip < SKIP_DTOR && zone->uz_dtor != NULL)
 3379 #endif
 3380                 zone->uz_dtor(item, zone->uz_size, udata);
 3381 
 3382         if (skip < SKIP_FINI && zone->uz_fini)
 3383                 zone->uz_fini(item, zone->uz_size);
 3384 
 3385         atomic_add_long(&zone->uz_frees, 1);
 3386         zone->uz_release(zone->uz_arg, &item, 1);
 3387 }
 3388 
 3389 /* See uma.h */
 3390 int
 3391 uma_zone_set_max(uma_zone_t zone, int nitems)
 3392 {
 3393         uma_keg_t keg;
 3394 
 3395         keg = zone_first_keg(zone);
 3396         if (keg == NULL)
 3397                 return (0);
 3398         KEG_LOCK(keg);
 3399         keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera;
 3400         if (keg->uk_maxpages * keg->uk_ipers < nitems)
 3401                 keg->uk_maxpages += keg->uk_ppera;
 3402         nitems = (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers;
 3403         KEG_UNLOCK(keg);
 3404 
 3405         return (nitems);
 3406 }
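
/*
 * Worked example (illustrative): for a keg with uk_ipers = 10 and
 * uk_ppera = 1, uma_zone_set_max(zone, 25) computes uk_maxpages =
 * (25 / 10) * 1 = 2, sees 2 * 10 = 20 < 25 and rounds up to 3 pages,
 * then returns the effective limit (3 / 1) * 10 = 30 items.
 */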
 3407 
 3408 /* See uma.h */
 3409 int
 3410 uma_zone_get_max(uma_zone_t zone)
 3411 {
 3412         int nitems;
 3413         uma_keg_t keg;
 3414 
 3415         keg = zone_first_keg(zone);
 3416         if (keg == NULL)
 3417                 return (0);
 3418         KEG_LOCK(keg);
 3419         nitems = (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers;
 3420         KEG_UNLOCK(keg);
 3421 
 3422         return (nitems);
 3423 }
 3424 
 3425 /* See uma.h */
 3426 void
 3427 uma_zone_set_warning(uma_zone_t zone, const char *warning)
 3428 {
 3429 
 3430         ZONE_LOCK(zone);
 3431         zone->uz_warning = warning;
 3432         ZONE_UNLOCK(zone);
 3433 }
 3434 
 3435 /* See uma.h */
 3436 void
 3437 uma_zone_set_maxaction(uma_zone_t zone, uma_maxaction_t maxaction)
 3438 {
 3439 
 3440         ZONE_LOCK(zone);
 3441         TASK_INIT(&zone->uz_maxaction, 0, (task_fn_t *)maxaction, zone);
 3442         ZONE_UNLOCK(zone);
 3443 }
 3444 
 3445 /* See uma.h */
 3446 int
 3447 uma_zone_get_cur(uma_zone_t zone)
 3448 {
 3449         int64_t nitems;
 3450         u_int i;
 3451 
 3452         ZONE_LOCK(zone);
 3453         nitems = zone->uz_allocs - zone->uz_frees;
 3454         CPU_FOREACH(i) {
 3455                 /*
 3456                  * See the comment in sysctl_vm_zone_stats() regarding the
 3457                  * safety of accessing the per-cpu caches. With the zone lock
 3458                  * held, it is safe, but can potentially result in stale data.
 3459                  */
 3460                 nitems += zone->uz_cpu[i].uc_allocs -
 3461                     zone->uz_cpu[i].uc_frees;
 3462         }
 3463         ZONE_UNLOCK(zone);
 3464 
 3465         return (nitems < 0 ? 0 : nitems);
 3466 }
 3467 
 3468 /* See uma.h */
 3469 void
 3470 uma_zone_set_init(uma_zone_t zone, uma_init uminit)
 3471 {
 3472         uma_keg_t keg;
 3473 
 3474         keg = zone_first_keg(zone);
 3475         KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type"));
 3476         KEG_LOCK(keg);
 3477         KASSERT(keg->uk_pages == 0,
 3478             ("uma_zone_set_init on non-empty keg"));
 3479         keg->uk_init = uminit;
 3480         KEG_UNLOCK(keg);
 3481 }
 3482 
 3483 /* See uma.h */
 3484 void
 3485 uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
 3486 {
 3487         uma_keg_t keg;
 3488 
 3489         keg = zone_first_keg(zone);
 3490         KASSERT(keg != NULL, ("uma_zone_set_fini: Invalid zone type"));
 3491         KEG_LOCK(keg);
 3492         KASSERT(keg->uk_pages == 0,
 3493             ("uma_zone_set_fini on non-empty keg"));
 3494         keg->uk_fini = fini;
 3495         KEG_UNLOCK(keg);
 3496 }
 3497 
 3498 /* See uma.h */
 3499 void
 3500 uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
 3501 {
 3502 
 3503         ZONE_LOCK(zone);
 3504         KASSERT(zone_first_keg(zone)->uk_pages == 0,
 3505             ("uma_zone_set_zinit on non-empty keg"));
 3506         zone->uz_init = zinit;
 3507         ZONE_UNLOCK(zone);
 3508 }
 3509 
 3510 /* See uma.h */
 3511 void
 3512 uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
 3513 {
 3514 
 3515         ZONE_LOCK(zone);
 3516         KASSERT(zone_first_keg(zone)->uk_pages == 0,
 3517             ("uma_zone_set_zfini on non-empty keg"));
 3518         zone->uz_fini = zfini;
 3519         ZONE_UNLOCK(zone);
 3520 }
 3521 
 3522 /* See uma.h */
 3523 /* XXX uk_freef is not actually used with the zone locked */
 3524 void
 3525 uma_zone_set_freef(uma_zone_t zone, uma_free freef)
 3526 {
 3527         uma_keg_t keg;
 3528 
 3529         keg = zone_first_keg(zone);
 3530         KASSERT(keg != NULL, ("uma_zone_set_freef: Invalid zone type"));
 3531         KEG_LOCK(keg);
 3532         keg->uk_freef = freef;
 3533         KEG_UNLOCK(keg);
 3534 }
 3535 
 3536 /* See uma.h */
 3537 /* XXX uk_allocf is not actually used with the zone locked */
 3538 void
 3539 uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
 3540 {
 3541         uma_keg_t keg;
 3542 
 3543         keg = zone_first_keg(zone);
 3544         KEG_LOCK(keg);
 3545         keg->uk_allocf = allocf;
 3546         KEG_UNLOCK(keg);
 3547 }
 3548 
 3549 /* See uma.h */
 3550 void
 3551 uma_zone_reserve(uma_zone_t zone, int items)
 3552 {
 3553         uma_keg_t keg;
 3554 
 3555         keg = zone_first_keg(zone);
 3556         if (keg == NULL)
 3557                 return;
 3558         KEG_LOCK(keg);
 3559         keg->uk_reserve = items;
 3560         KEG_UNLOCK(keg);
 3563 }
 3564 
 3565 /* See uma.h */
 3566 int
 3567 uma_zone_reserve_kva(uma_zone_t zone, int count)
 3568 {
 3569         uma_keg_t keg;
 3570         vm_offset_t kva;
 3571         u_int pages;
 3572 
 3573         keg = zone_first_keg(zone);
 3574         if (keg == NULL)
 3575                 return (0);
 3576         pages = count / keg->uk_ipers;
 3577 
 3578         if (pages * keg->uk_ipers < count)
 3579                 pages++;
 3580         pages *= keg->uk_ppera;
 3581 
 3582 #ifdef UMA_MD_SMALL_ALLOC
 3583         if (keg->uk_ppera > 1) {
 3584 #else
 3585         if (1) {
 3586 #endif
 3587                 kva = kva_alloc((vm_size_t)pages * PAGE_SIZE);
 3588                 if (kva == 0)
 3589                         return (0);
 3590         } else
 3591                 kva = 0;
 3592         KEG_LOCK(keg);
 3593         keg->uk_kva = kva;
 3594         keg->uk_offset = 0;
 3595         keg->uk_maxpages = pages;
 3596 #ifdef UMA_MD_SMALL_ALLOC
 3597         keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc;
 3598 #else
 3599         keg->uk_allocf = noobj_alloc;
 3600 #endif
 3601         keg->uk_flags |= UMA_ZONE_NOFREE;
 3602         KEG_UNLOCK(keg);
 3603 
 3604         return (1);
 3605 }
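
/*
 * Worked example (illustrative): uma_zone_reserve_kva(zone, 100) on a keg
 * with uk_ipers = 50 and uk_ppera = 1 computes pages = 2; on
 * UMA_MD_SMALL_ALLOC platforms no KVA is set aside for single-page slabs
 * and uma_small_alloc backs the zone, otherwise 2 pages of KVA are
 * reserved and noobj_alloc is installed.
 */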
 3606 
 3607 /* See uma.h */
 3608 void
 3609 uma_prealloc(uma_zone_t zone, int items)
 3610 {
 3611         struct vm_domainset_iter di;
 3612         uma_domain_t dom;
 3613         uma_slab_t slab;
 3614         uma_keg_t keg;
 3615         int domain, flags, slabs;
 3616 
 3617         keg = zone_first_keg(zone);
 3618         if (keg == NULL)
 3619                 return;
 3620         KEG_LOCK(keg);
 3621         slabs = items / keg->uk_ipers;
 3622         if (slabs * keg->uk_ipers < items)
 3623                 slabs++;
 3624         flags = M_WAITOK;
 3625         vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain, &flags);
 3626         while (slabs-- > 0) {
 3627                 slab = keg_alloc_slab(keg, zone, domain, flags);
 3628                 if (slab == NULL)
 3629                         return;
 3630                 MPASS(slab->us_keg == keg);
 3631                 dom = &keg->uk_domain[slab->us_domain];
 3632                 LIST_INSERT_HEAD(&dom->ud_free_slab, slab, us_link);
 3633                 if (vm_domainset_iter_policy(&di, &domain) != 0)
 3634                         break;
 3635         }
 3636         KEG_UNLOCK(keg);
 3637 }
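
/*
 * Illustrative sketch (disabled; "zone" is assumed): a zone that must
 * never fail a small burst of M_NOWAIT allocations can combine a reserve
 * with a prealloc, then dip into the reserve with M_USE_RESERVE.
 */
#if 0
uma_zone_reserve(zone, 32);	/* keep 32 items back for reserve use */
uma_prealloc(zone, 32);		/* populate slabs up front */
/* ... */
item = uma_zalloc(zone, M_NOWAIT | M_USE_RESERVE);
#endif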
 3638 
 3639 /* See uma.h */
 3640 static void
 3641 uma_reclaim_locked(bool kmem_danger)
 3642 {
 3643 
 3644         CTR0(KTR_UMA, "UMA: vm asked us to release pages!");
 3645         sx_assert(&uma_drain_lock, SA_XLOCKED);
 3646         bucket_enable();
 3647         zone_foreach(zone_drain);
 3648         if (vm_page_count_min() || kmem_danger) {
 3649                 cache_drain_safe(NULL);
 3650                 zone_foreach(zone_drain);
 3651         }
 3652         /*
 3653          * Some slabs may have been freed while this zone was visited early
 3654          * in the pass above, so visit it again here in order to free pages
 3655          * that became empty once the other zones were drained.  We have to
 3656          * do the same for buckets.
 3656          */
 3657         zone_drain(slabzone);
 3658         bucket_zone_drain();
 3659 }
 3660 
 3661 void
 3662 uma_reclaim(void)
 3663 {
 3664 
 3665         sx_xlock(&uma_drain_lock);
 3666         uma_reclaim_locked(false);
 3667         sx_xunlock(&uma_drain_lock);
 3668 }
 3669 
 3670 static volatile int uma_reclaim_needed;
 3671 
 3672 void
 3673 uma_reclaim_wakeup(void)
 3674 {
 3675 
 3676         if (atomic_fetchadd_int(&uma_reclaim_needed, 1) == 0)
 3677                 wakeup(uma_reclaim);
 3678 }
 3679 
 3680 void
 3681 uma_reclaim_worker(void *arg __unused)
 3682 {
 3683 
 3684         for (;;) {
 3685                 sx_xlock(&uma_drain_lock);
 3686                 while (atomic_load_int(&uma_reclaim_needed) == 0)
 3687                         sx_sleep(uma_reclaim, &uma_drain_lock, PVM, "umarcl",
 3688                             hz);
 3689                 sx_xunlock(&uma_drain_lock);
 3690                 EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_KMEM);
 3691                 sx_xlock(&uma_drain_lock);
 3692                 uma_reclaim_locked(true);
 3693                 atomic_store_int(&uma_reclaim_needed, 0);
 3694                 sx_xunlock(&uma_drain_lock);
 3695                 /* Don't fire more than once per second. */
 3696                 pause("umarclslp", hz);
 3697         }
 3698 }
 3699 
 3700 /* See uma.h */
 3701 int
 3702 uma_zone_exhausted(uma_zone_t zone)
 3703 {
 3704         int full;
 3705 
 3706         ZONE_LOCK(zone);
 3707         full = (zone->uz_flags & UMA_ZFLAG_FULL);
 3708         ZONE_UNLOCK(zone);
 3709         return (full);  
 3710 }
 3711 
 3712 int
 3713 uma_zone_exhausted_nolock(uma_zone_t zone)
 3714 {
 3715         return (zone->uz_flags & UMA_ZFLAG_FULL);
 3716 }
 3717 
 3718 void *
 3719 uma_large_malloc_domain(vm_size_t size, int domain, int wait)
 3720 {
 3721         struct domainset *policy;
 3722         vm_offset_t addr;
 3723         uma_slab_t slab;
 3724 
 3725         if (domain != UMA_ANYDOMAIN) {
 3726                 /* avoid allocs targeting empty domains */
 3727                 if (VM_DOMAIN_EMPTY(domain))
 3728                         domain = UMA_ANYDOMAIN;
 3729         }
 3730         slab = zone_alloc_item(slabzone, NULL, domain, wait);
 3731         if (slab == NULL)
 3732                 return (NULL);
 3733         policy = (domain == UMA_ANYDOMAIN) ? DOMAINSET_RR() :
 3734             DOMAINSET_FIXED(domain);
 3735         addr = kmem_malloc_domainset(policy, size, wait);
 3736         if (addr != 0) {
 3737                 vsetslab(addr, slab);
 3738                 slab->us_data = (void *)addr;
 3739                 slab->us_flags = UMA_SLAB_KERNEL | UMA_SLAB_MALLOC;
 3740                 slab->us_size = size;
 3741                 slab->us_domain = vm_phys_domain(PHYS_TO_VM_PAGE(
 3742                     pmap_kextract(addr)));
 3743                 uma_total_inc(size);
 3744         } else {
 3745                 zone_free_item(slabzone, slab, NULL, SKIP_NONE);
 3746         }
 3747 
 3748         return ((void *)addr);
 3749 }
 3750 
 3751 void *
 3752 uma_large_malloc(vm_size_t size, int wait)
 3753 {
 3754 
 3755         return (uma_large_malloc_domain(size, UMA_ANYDOMAIN, wait));
 3756 }
 3757 
 3758 void
 3759 uma_large_free(uma_slab_t slab)
 3760 {
 3761 
 3762         KASSERT((slab->us_flags & UMA_SLAB_KERNEL) != 0,
 3763             ("uma_large_free:  Memory not allocated with uma_large_malloc."));
 3764         kmem_free((vm_offset_t)slab->us_data, slab->us_size);
 3765         uma_total_dec(slab->us_size);
 3766         zone_free_item(slabzone, slab, NULL, SKIP_NONE);
 3767 }
 3768 
 3769 static void
 3770 uma_zero_item(void *item, uma_zone_t zone)
 3771 {
 3772 
 3773         bzero(item, zone->uz_size);
 3774 }
 3775 
 3776 unsigned long
 3777 uma_limit(void)
 3778 {
 3779 
 3780         return (uma_kmem_limit);
 3781 }
 3782 
 3783 void
 3784 uma_set_limit(unsigned long limit)
 3785 {
 3786 
 3787         uma_kmem_limit = limit;
 3788 }
 3789 
 3790 unsigned long
 3791 uma_size(void)
 3792 {
 3793 
 3794         return (uma_kmem_total);
 3795 }
 3796 
 3797 long
 3798 uma_avail(void)
 3799 {
 3800 
 3801         return (uma_kmem_limit - uma_kmem_total);
 3802 }
 3803 
 3804 void
 3805 uma_print_stats(void)
 3806 {
 3807         zone_foreach(uma_print_zone);
 3808 }
 3809 
 3810 static void
 3811 slab_print(uma_slab_t slab)
 3812 {
 3813         printf("slab: keg %p, data %p, freecount %d\n",
 3814                 slab->us_keg, slab->us_data, slab->us_freecount);
 3815 }
 3816 
 3817 static void
 3818 cache_print(uma_cache_t cache)
 3819 {
 3820         printf("alloc: %p(%d), free: %p(%d)\n",
 3821                 cache->uc_allocbucket,
 3822                 cache->uc_allocbucket?cache->uc_allocbucket->ub_cnt:0,
 3823                 cache->uc_freebucket,
 3824                 cache->uc_freebucket?cache->uc_freebucket->ub_cnt:0);
 3825 }
 3826 
 3827 static void
 3828 uma_print_keg(uma_keg_t keg)
 3829 {
 3830         uma_domain_t dom;
 3831         uma_slab_t slab;
 3832         int i;
 3833 
 3834         printf("keg: %s(%p) size %d(%d) flags %#x ipers %d ppera %d "
 3835             "out %d free %d limit %d\n",
 3836             keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags,
 3837             keg->uk_ipers, keg->uk_ppera,
 3838             (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free,
 3839             keg->uk_free, (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers);
 3840         for (i = 0; i < vm_ndomains; i++) {
 3841                 dom = &keg->uk_domain[i];
 3842                 printf("Part slabs:\n");
 3843                 LIST_FOREACH(slab, &dom->ud_part_slab, us_link)
 3844                         slab_print(slab);
 3845                 printf("Free slabs:\n");
 3846                 LIST_FOREACH(slab, &dom->ud_free_slab, us_link)
 3847                         slab_print(slab);
 3848                 printf("Full slabs:\n");
 3849                 LIST_FOREACH(slab, &dom->ud_full_slab, us_link)
 3850                         slab_print(slab);
 3851         }
 3852 }
 3853 
 3854 void
 3855 uma_print_zone(uma_zone_t zone)
 3856 {
 3857         uma_cache_t cache;
 3858         uma_klink_t kl;
 3859         int i;
 3860 
 3861         printf("zone: %s(%p) size %d flags %#x\n",
 3862             zone->uz_name, zone, zone->uz_size, zone->uz_flags);
 3863         LIST_FOREACH(kl, &zone->uz_kegs, kl_link)
 3864                 uma_print_keg(kl->kl_keg);
 3865         CPU_FOREACH(i) {
 3866                 cache = &zone->uz_cpu[i];
 3867                 printf("CPU %d Cache:\n", i);
 3868                 cache_print(cache);
 3869         }
 3870 }
 3871 
 3872 #ifdef DDB
 3873 /*
 3874  * Generate statistics across both the zone and its per-CPU caches.  Return
 3875  * each statistic through its pointer argument when that pointer is non-NULL.
 3876  *
 3877  * Note: does not update the zone statistics, as it can't safely clear the
 3878  * per-CPU cache statistic.
 3879  *
 3880  * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't
 3881  * safe from off-CPU; we should modify the caches to track this information
 3882  * directly so that we don't have to.
 3883  */
 3884 static void
 3885 uma_zone_sumstat(uma_zone_t z, int *cachefreep, uint64_t *allocsp,
 3886     uint64_t *freesp, uint64_t *sleepsp)
 3887 {
 3888         uma_cache_t cache;
 3889         uint64_t allocs, frees, sleeps;
 3890         int cachefree, cpu;
 3891 
 3892         allocs = frees = sleeps = 0;
 3893         cachefree = 0;
 3894         CPU_FOREACH(cpu) {
 3895                 cache = &z->uz_cpu[cpu];
 3896                 if (cache->uc_allocbucket != NULL)
 3897                         cachefree += cache->uc_allocbucket->ub_cnt;
 3898                 if (cache->uc_freebucket != NULL)
 3899                         cachefree += cache->uc_freebucket->ub_cnt;
 3900                 allocs += cache->uc_allocs;
 3901                 frees += cache->uc_frees;
 3902         }
 3903         allocs += z->uz_allocs;
 3904         frees += z->uz_frees;
 3905         sleeps += z->uz_sleeps;
 3906         if (cachefreep != NULL)
 3907                 *cachefreep = cachefree;
 3908         if (allocsp != NULL)
 3909                 *allocsp = allocs;
 3910         if (freesp != NULL)
 3911                 *freesp = frees;
 3912         if (sleepsp != NULL)
 3913                 *sleepsp = sleeps;
 3914 }
 3915 #endif /* DDB */
 3916 
 3917 static int
 3918 sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS)
 3919 {
 3920         uma_keg_t kz;
 3921         uma_zone_t z;
 3922         int count;
 3923 
 3924         count = 0;
 3925         rw_rlock(&uma_rwlock);
 3926         LIST_FOREACH(kz, &uma_kegs, uk_link) {
 3927                 LIST_FOREACH(z, &kz->uk_zones, uz_link)
 3928                         count++;
 3929         }
 3930         rw_runlock(&uma_rwlock);
 3931         return (sysctl_handle_int(oidp, &count, 0, req));
 3932 }
 3933 
 3934 static int
 3935 sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
 3936 {
 3937         struct uma_stream_header ush;
 3938         struct uma_type_header uth;
 3939         struct uma_percpu_stat *ups;
 3940         uma_bucket_t bucket;
 3941         uma_zone_domain_t zdom;
 3942         struct sbuf sbuf;
 3943         uma_cache_t cache;
 3944         uma_klink_t kl;
 3945         uma_keg_t kz;
 3946         uma_zone_t z;
 3947         uma_keg_t k;
 3948         int count, error, i;
 3949 
 3950         error = sysctl_wire_old_buffer(req, 0);
 3951         if (error != 0)
 3952                 return (error);
 3953         sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
 3954         sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
 3955         ups = malloc((mp_maxid + 1) * sizeof(*ups), M_TEMP, M_WAITOK);
 3956 
 3957         count = 0;
 3958         rw_rlock(&uma_rwlock);
 3959         LIST_FOREACH(kz, &uma_kegs, uk_link) {
 3960                 LIST_FOREACH(z, &kz->uk_zones, uz_link)
 3961                         count++;
 3962         }
 3963 
 3964         /*
 3965          * Insert stream header.
 3966          */
 3967         bzero(&ush, sizeof(ush));
 3968         ush.ush_version = UMA_STREAM_VERSION;
 3969         ush.ush_maxcpus = (mp_maxid + 1);
 3970         ush.ush_count = count;
 3971         (void)sbuf_bcat(&sbuf, &ush, sizeof(ush));
 3972 
 3973         LIST_FOREACH(kz, &uma_kegs, uk_link) {
 3974                 LIST_FOREACH(z, &kz->uk_zones, uz_link) {
 3975                         bzero(&uth, sizeof(uth));
 3976                         ZONE_LOCK(z);
 3977                         strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
 3978                         uth.uth_align = kz->uk_align;
 3979                         uth.uth_size = kz->uk_size;
 3980                         uth.uth_rsize = kz->uk_rsize;
 3981                         LIST_FOREACH(kl, &z->uz_kegs, kl_link) {
 3982                                 k = kl->kl_keg;
 3983                                 uth.uth_maxpages += k->uk_maxpages;
 3984                                 uth.uth_pages += k->uk_pages;
 3985                                 uth.uth_keg_free += k->uk_free;
 3986                                 uth.uth_limit = (k->uk_maxpages / k->uk_ppera)
 3987                                     * k->uk_ipers;
 3988                         }
 3989 
 3990                         /*
 3991                          * A zone is secondary if it is not the first entry
 3992                          * on the keg's zone list.
 3993                          */
 3994                         if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
 3995                             (LIST_FIRST(&kz->uk_zones) != z))
 3996                                 uth.uth_zone_flags = UTH_ZONE_SECONDARY;
 3997 
 3998                         for (i = 0; i < vm_ndomains; i++) {
 3999                                 zdom = &z->uz_domain[i];
 4000                                 LIST_FOREACH(bucket, &zdom->uzd_buckets,
 4001                                     ub_link)
 4002                                         uth.uth_zone_free += bucket->ub_cnt;
 4003                         }
 4004                         uth.uth_allocs = z->uz_allocs;
 4005                         uth.uth_frees = z->uz_frees;
 4006                         uth.uth_fails = z->uz_fails;
 4007                         uth.uth_sleeps = z->uz_sleeps;
 4008                         /*
 4009                          * While it is not normally safe to access the cache
 4010                          * bucket pointers while not on the CPU that owns the
 4011                          * cache, we only allow the pointers to be exchanged
 4012                          * without the zone lock held, not invalidated, so
 4013                          * accept the possible race associated with bucket
 4014                          * exchange during monitoring.
 4015                          */
 4016                         for (i = 0; i < mp_maxid + 1; i++) {
 4017                                 bzero(&ups[i], sizeof(*ups));
 4018                                 if (kz->uk_flags & UMA_ZFLAG_INTERNAL ||
 4019                                     CPU_ABSENT(i))
 4020                                         continue;
 4021                                 cache = &z->uz_cpu[i];
 4022                                 if (cache->uc_allocbucket != NULL)
 4023                                         ups[i].ups_cache_free +=
 4024                                             cache->uc_allocbucket->ub_cnt;
 4025                                 if (cache->uc_freebucket != NULL)
 4026                                         ups[i].ups_cache_free +=
 4027                                             cache->uc_freebucket->ub_cnt;
 4028                                 ups[i].ups_allocs = cache->uc_allocs;
 4029                                 ups[i].ups_frees = cache->uc_frees;
 4030                         }
 4031                         ZONE_UNLOCK(z);
 4032                         (void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
 4033                         for (i = 0; i < mp_maxid + 1; i++)
 4034                                 (void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i]));
 4035                 }
 4036         }
 4037         rw_runlock(&uma_rwlock);
 4038         error = sbuf_finish(&sbuf);
 4039         sbuf_delete(&sbuf);
 4040         free(ups, M_TEMP);
 4041         return (error);
 4042 }
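
/*
 * Layout note (illustrative): this stream is what vmstat(8) parses for
 * "vmstat -z": one uma_stream_header, then for each zone one
 * uma_type_header followed by ush_maxcpus uma_percpu_stat records.
 */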
 4043 
 4044 int
 4045 sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS)
 4046 {
 4047         uma_zone_t zone = *(uma_zone_t *)arg1;
 4048         int error, max;
 4049 
 4050         max = uma_zone_get_max(zone);
 4051         error = sysctl_handle_int(oidp, &max, 0, req);
 4052         if (error || !req->newptr)
 4053                 return (error);
 4054 
 4055         uma_zone_set_max(zone, max);
 4056 
 4057         return (0);
 4058 }
 4059 
 4060 int
 4061 sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS)
 4062 {
 4063         uma_zone_t zone = *(uma_zone_t *)arg1;
 4064         int cur;
 4065 
 4066         cur = uma_zone_get_cur(zone);
 4067         return (sysctl_handle_int(oidp, &cur, 0, req));
 4068 }
 4069 
 4070 #ifdef INVARIANTS
 4071 static uma_slab_t
 4072 uma_dbg_getslab(uma_zone_t zone, void *item)
 4073 {
 4074         uma_slab_t slab;
 4075         uma_keg_t keg;
 4076         uint8_t *mem;
 4077 
 4078         mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
 4079         if (zone->uz_flags & UMA_ZONE_VTOSLAB) {
 4080                 slab = vtoslab((vm_offset_t)mem);
 4081         } else {
 4082                 /*
 4083                  * It is safe to return the slab here even though the
 4084                  * zone is unlocked because the item's allocation state
 4085                  * essentially holds a reference.
 4086                  */
 4087                 ZONE_LOCK(zone);
 4088                 keg = LIST_FIRST(&zone->uz_kegs)->kl_keg;
 4089                 if (keg->uk_flags & UMA_ZONE_HASH)
 4090                         slab = hash_sfind(&keg->uk_hash, mem);
 4091                 else
 4092                         slab = (uma_slab_t)(mem + keg->uk_pgoff);
 4093                 ZONE_UNLOCK(zone);
 4094         }
 4095 
 4096         return (slab);
 4097 }
 4098 
 4099 static bool
 4100 uma_dbg_zskip(uma_zone_t zone, void *mem)
 4101 {
 4102         uma_keg_t keg;
 4103 
 4104         if ((keg = zone_first_keg(zone)) == NULL)
 4105                 return (true);
 4106 
 4107         return (uma_dbg_kskip(keg, mem));
 4108 }
 4109 
 4110 static bool
 4111 uma_dbg_kskip(uma_keg_t keg, void *mem)
 4112 {
 4113         uintptr_t idx;
 4114 
 4115         if (dbg_divisor == 0)
 4116                 return (true);
 4117 
 4118         if (dbg_divisor == 1)
 4119                 return (false);
 4120 
 4121         idx = (uintptr_t)mem >> PAGE_SHIFT;
 4122         if (keg->uk_ipers > 1) {
 4123                 idx *= keg->uk_ipers;
 4124                 idx += ((uintptr_t)mem & PAGE_MASK) / keg->uk_rsize;
 4125         }
 4126 
 4127         if ((idx / dbg_divisor) * dbg_divisor != idx) {
 4128                 counter_u64_add(uma_skip_cnt, 1);
 4129                 return (true);
 4130         }
 4131         counter_u64_add(uma_dbg_cnt, 1);
 4132 
 4133         return (false);
 4134 }
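
/*
 * Worked example (illustrative): with dbg_divisor = 3 the test
 * (idx / 3) * 3 != idx is simply idx % 3 != 0, so roughly one item in
 * three gets full trash checking and the rest are skipped, scaling the
 * INVARIANTS overhead down proportionally.
 */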
 4135 
 4136 /*
 4137  * Set up the slab's freei data such that uma_dbg_free can function.
 4138  */
 4140 static void
 4141 uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
 4142 {
 4143         uma_keg_t keg;
 4144         int freei;
 4145 
 4146         if (slab == NULL) {
 4147                 slab = uma_dbg_getslab(zone, item);
 4148                 if (slab == NULL) 
 4149                         panic("uma: item %p did not belong to zone %s\n",
 4150                             item, zone->uz_name);
 4151         }
 4152         keg = slab->us_keg;
 4153         freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
 4154 
 4155         if (BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
 4156                 panic("Duplicate alloc of %p from zone %p(%s) slab %p(%d)\n",
 4157                     item, zone, zone->uz_name, slab, freei);
 4158         BIT_SET_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
 4159 
 4160         return;
 4161 }
 4162 
 4163 /*
 4164  * Verifies freed addresses.  Checks for alignment, valid slab membership,
 4165  * and duplicate frees.
 4166  */
 4168 static void
 4169 uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
 4170 {
 4171         uma_keg_t keg;
 4172         int freei;
 4173 
 4174         if (slab == NULL) {
 4175                 slab = uma_dbg_getslab(zone, item);
 4176                 if (slab == NULL) 
 4177                         panic("uma: Freed item %p did not belong to zone %s\n",
 4178                             item, zone->uz_name);
 4179         }
 4180         keg = slab->us_keg;
 4181         freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
 4182 
 4183         if (freei >= keg->uk_ipers)
 4184                 panic("Invalid free of %p from zone %p(%s) slab %p(%d)\n",
 4185                     item, zone, zone->uz_name, slab, freei);
 4186 
 4187         if (((freei * keg->uk_rsize) + slab->us_data) != item) 
 4188                 panic("Unaligned free of %p from zone %p(%s) slab %p(%d)\n",
 4189                     item, zone, zone->uz_name, slab, freei);
 4190 
 4191         if (!BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
 4192                 panic("Duplicate free of %p from zone %p(%s) slab %p(%d)\n",
 4193                     item, zone, zone->uz_name, slab, freei);
 4194 
 4195         BIT_CLR_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
 4196 }
 4197 #endif /* INVARIANTS */
 4198 
 4199 #ifdef DDB
 4200 DB_SHOW_COMMAND(uma, db_show_uma)
 4201 {
 4202         uma_bucket_t bucket;
 4203         uma_keg_t kz;
 4204         uma_zone_t z;
 4205         uma_zone_domain_t zdom;
 4206         uint64_t allocs, frees, sleeps;
 4207         int cachefree, i;
 4208 
 4209         db_printf("%18s %8s %8s %8s %12s %8s %8s\n", "Zone", "Size", "Used",
 4210             "Free", "Requests", "Sleeps", "Bucket");
 4211         LIST_FOREACH(kz, &uma_kegs, uk_link) {
 4212                 LIST_FOREACH(z, &kz->uk_zones, uz_link) {
 4213                         if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
 4214                                 allocs = z->uz_allocs;
 4215                                 frees = z->uz_frees;
 4216                                 sleeps = z->uz_sleeps;
 4217                                 cachefree = 0;
 4218                         } else
 4219                                 uma_zone_sumstat(z, &cachefree, &allocs,
 4220                                     &frees, &sleeps);
 4221                         if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
 4222                             (LIST_FIRST(&kz->uk_zones) != z)))
 4223                                 cachefree += kz->uk_free;
 4224                         for (i = 0; i < vm_ndomains; i++) {
 4225                                 zdom = &z->uz_domain[i];
 4226                                 LIST_FOREACH(bucket, &zdom->uzd_buckets,
 4227                                     ub_link)
 4228                                         cachefree += bucket->ub_cnt;
 4229                         }
 4230                         db_printf("%18s %8ju %8jd %8d %12ju %8ju %8u\n",
 4231                             z->uz_name, (uintmax_t)kz->uk_size,
 4232                             (intmax_t)(allocs - frees), cachefree,
 4233                             (uintmax_t)allocs, sleeps, z->uz_count);
 4234                         if (db_pager_quit)
 4235                                 return;
 4236                 }
 4237         }
 4238 }
 4239 
 4240 DB_SHOW_COMMAND(umacache, db_show_umacache)
 4241 {
 4242         uma_bucket_t bucket;
 4243         uma_zone_t z;
 4244         uma_zone_domain_t zdom;
 4245         uint64_t allocs, frees;
 4246         int cachefree, i;
 4247 
 4248         db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
 4249             "Requests", "Bucket");
 4250         LIST_FOREACH(z, &uma_cachezones, uz_link) {
 4251                 uma_zone_sumstat(z, &cachefree, &allocs, &frees, NULL);
 4252                 for (i = 0; i < vm_ndomains; i++) {
 4253                         zdom = &z->uz_domain[i];
 4254                         LIST_FOREACH(bucket, &zdom->uzd_buckets, ub_link)
 4255                                 cachefree += bucket->ub_cnt;
 4256                 }
 4257                 db_printf("%18s %8ju %8jd %8d %12ju %8u\n",
 4258                     z->uz_name, (uintmax_t)z->uz_size,
 4259                     (intmax_t)(allocs - frees), cachefree,
 4260                     (uintmax_t)allocs, z->uz_count);
 4261                 if (db_pager_quit)
 4262                         return;
 4263         }
 4264 }
 4265 #endif  /* DDB */
