FreeBSD/Linux Kernel Cross Reference
sys/contrib/openzfs/module/os/freebsd/spl/spl_kmem.c


/*
 * Copyright (c) 2006-2007 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/byteorder.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kmem_cache.h>
#include <sys/debug.h>
#include <sys/mutex.h>
#include <sys/vmmeter.h>


#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#ifdef KMEM_DEBUG
#include <sys/queue.h>
#include <sys/stack.h>
#endif

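/*
 * In-kernel builds route all SPL allocations through a dedicated malloc(9)
 * type, M_SOLARIS, so they are accounted separately (e.g. in vmstat -m).
 * Non-kernel builds simply map these calls to the C library's malloc()
 * and free().
 */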
#ifdef _KERNEL
MALLOC_DEFINE(M_SOLARIS, "solaris", "Solaris");
#else
#define malloc(size, type, flags)       malloc(size)
#define free(addr, type)                free(addr)
#endif

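/*
 * With KMEM_DEBUG, every live allocation is prefixed by a struct kmem_item
 * that records the allocating stack and links the allocation onto the
 * global kmem_items list (protected by kmem_items_mtx), so outstanding
 * buffers can be reported by kmem_show() at the bottom of this file.
 */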
#ifdef KMEM_DEBUG
struct kmem_item {
        struct stack    stack;
        LIST_ENTRY(kmem_item) next;
};
static LIST_HEAD(, kmem_item) kmem_items;
static struct mtx kmem_items_mtx;
MTX_SYSINIT(kmem_items_mtx, &kmem_items_mtx, "kmem_items", MTX_DEF);
#endif  /* KMEM_DEBUG */

#include <sys/vmem.h>

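/*
 * zfs_kmem_alloc() is a thin wrapper around malloc(9) with the M_SOLARIS
 * type.  Requests are rounded up to at least 16 bytes; in KMEM_DEBUG builds
 * the returned pointer is advanced past the tracking header, which is filled
 * in with the caller's stack and linked onto kmem_items.
 */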
void *
zfs_kmem_alloc(size_t size, int kmflags)
{
        void *p;
#ifdef KMEM_DEBUG
        struct kmem_item *i;

        size += sizeof (struct kmem_item);
#endif
        p = malloc(MAX(size, 16), M_SOLARIS, kmflags);
#ifndef _KERNEL
        if (kmflags & KM_SLEEP)
                assert(p != NULL);
#endif
#ifdef KMEM_DEBUG
        if (p != NULL) {
                i = p;
                p = (uint8_t *)p + sizeof (struct kmem_item);
                stack_save(&i->stack);
                mtx_lock(&kmem_items_mtx);
                LIST_INSERT_HEAD(&kmem_items, i, next);
                mtx_unlock(&kmem_items_mtx);
        }
#endif
        return (p);
}

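/*
 * zfs_kmem_free() undoes the above: in KMEM_DEBUG builds it steps back to
 * the tracking header, unlinks it from kmem_items, and poisons the buffer
 * with 0xDC before handing it to free(9).
 */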
void
zfs_kmem_free(void *buf, size_t size __unused)
{
#ifdef KMEM_DEBUG
        if (buf == NULL) {
                printf("%s: attempt to free NULL\n", __func__);
                return;
        }
        struct kmem_item *i;

        buf = (uint8_t *)buf - sizeof (struct kmem_item);
        mtx_lock(&kmem_items_mtx);
        LIST_FOREACH(i, &kmem_items, next) {
                if (i == buf)
                        break;
        }
        ASSERT3P(i, !=, NULL);
        LIST_REMOVE(i, next);
        mtx_unlock(&kmem_items_mtx);
        memset(buf, 0xDC, MAX(size, 16));
#endif
        free(buf, M_SOLARIS);
}

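/*
 * kmem_size() reports the smaller of physical memory
 * (v_page_count * PAGE_SIZE) and the kernel's kmem arena size
 * (vm_kmem_size).  The value is computed once at SI_SUB_KMEM time and
 * cached in kmem_size_val.
 */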
static uint64_t kmem_size_val;

static void
kmem_size_init(void *unused __unused)
{

        kmem_size_val = (uint64_t)vm_cnt.v_page_count * PAGE_SIZE;
        if (kmem_size_val > vm_kmem_size)
                kmem_size_val = vm_kmem_size;
}
SYSINIT(kmem_size_init, SI_SUB_KMEM, SI_ORDER_ANY, kmem_size_init, NULL);

uint64_t
kmem_size(void)
{

        return (kmem_size_val);
}

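/*
 * Adapters that bridge UMA's ctor/dtor signatures to the Solaris-style
 * callbacks stored in the kmem_cache; the cache itself is passed through
 * as the UMA argument so that kc_private can be forwarded.
 */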
static int
kmem_std_constructor(void *mem, int size __unused, void *private, int flags)
{
        struct kmem_cache *cache = private;

        return (cache->kc_constructor(mem, cache->kc_private, flags));
}

static void
kmem_std_destructor(void *mem, int size __unused, void *private)
{
        struct kmem_cache *cache = private;

        cache->kc_destructor(mem, cache->kc_private);
}

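/*
 * The Solaris kmem cache API is layered on UMA zones in kernel builds.
 * The reclaim callback and the vmem source are not used (vmp must be NULL).
 * Userland and KMEM_DEBUG builds skip UMA and only record the buffer size so
 * kmem_cache_alloc()/kmem_cache_free() can emulate the cache with plain
 * kmem_alloc()/kmem_free().
 */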
kmem_cache_t *
kmem_cache_create(const char *name, size_t bufsize, size_t align,
    int (*constructor)(void *, void *, int), void (*destructor)(void *, void *),
    void (*reclaim)(void *) __unused, void *private, vmem_t *vmp, int cflags)
{
        kmem_cache_t *cache;

        ASSERT3P(vmp, ==, NULL);

        cache = kmem_alloc(sizeof (*cache), KM_SLEEP);
        strlcpy(cache->kc_name, name, sizeof (cache->kc_name));
        cache->kc_constructor = constructor;
        cache->kc_destructor = destructor;
        cache->kc_private = private;
#if defined(_KERNEL) && !defined(KMEM_DEBUG)
        cache->kc_zone = uma_zcreate(cache->kc_name, bufsize,
            constructor != NULL ? kmem_std_constructor : NULL,
            destructor != NULL ? kmem_std_destructor : NULL,
            NULL, NULL, align > 0 ? align - 1 : 0, cflags);
#else
        cache->kc_size = bufsize;
#endif

        return (cache);
}

void
kmem_cache_destroy(kmem_cache_t *cache)
{
#if defined(_KERNEL) && !defined(KMEM_DEBUG)
        uma_zdestroy(cache->kc_zone);
#endif
        kmem_free(cache, sizeof (*cache));
}

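/*
 * Allocation and free go straight to the backing UMA zone, passing the
 * cache as the argument consumed by kmem_std_constructor()/destructor().
 * The non-UMA fallback emulates the same behaviour with kmem_alloc() plus
 * explicit constructor/destructor calls.
 */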
void *
kmem_cache_alloc(kmem_cache_t *cache, int flags)
{
#if defined(_KERNEL) && !defined(KMEM_DEBUG)
        return (uma_zalloc_arg(cache->kc_zone, cache, flags));
#else
        void *p;

        p = kmem_alloc(cache->kc_size, flags);
        if (p != NULL && cache->kc_constructor != NULL)
                kmem_std_constructor(p, cache->kc_size, cache, flags);
        return (p);
#endif
}

void
kmem_cache_free(kmem_cache_t *cache, void *buf)
{
#if defined(_KERNEL) && !defined(KMEM_DEBUG)
        uma_zfree_arg(cache->kc_zone, buf, cache);
#else
        if (cache->kc_destructor != NULL)
                kmem_std_destructor(buf, cache->kc_size, cache);
        kmem_free(buf, cache->kc_size);
#endif
}

/*
 * Allow our caller to determine if there are running reaps.
 *
 * This call is very conservative and may return B_TRUE even when
 * reaping activity isn't active. If it returns B_FALSE, then reaping
 * activity is definitely inactive.
 */
boolean_t
kmem_cache_reap_active(void)
{

        return (B_FALSE);
}

/*
 * Reap (almost) everything soon.
 *
 * Note: this does not wait for the reap-tasks to complete. Caller
 * should use kmem_cache_reap_active() (above) and/or moderation to
 * avoid scheduling too many reap-tasks.
 */
#ifdef _KERNEL
void
kmem_cache_reap_soon(kmem_cache_t *cache)
{
#ifndef KMEM_DEBUG
#if __FreeBSD_version >= 1300043
        uma_zone_reclaim(cache->kc_zone, UMA_RECLAIM_DRAIN);
#else
        zone_drain(cache->kc_zone);
#endif
#endif
}

void
kmem_reap(void)
{
#if __FreeBSD_version >= 1300043
        uma_reclaim(UMA_RECLAIM_TRIM);
#else
        uma_reclaim();
#endif
}
#else
void
kmem_cache_reap_soon(kmem_cache_t *cache __unused)
{
}

void
kmem_reap(void)
{
}
#endif

int
kmem_debugging(void)
{
        return (0);
}

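/*
 * Small libc-style helpers used by ZFS: calloc() maps to kmem_zalloc() with
 * KM_NOSLEEP (the n * s product is not overflow-checked here), and
 * kmem_vasprintf() sizes the result with an initial vsnprintf() pass before
 * formatting into the freshly allocated buffer.
 */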
void *
calloc(size_t n, size_t s)
{
        return (kmem_zalloc(n * s, KM_NOSLEEP));
}

char *
kmem_vasprintf(const char *fmt, va_list adx)
{
        char *msg;
        va_list adx2;

        va_copy(adx2, adx);
        msg = kmem_alloc(vsnprintf(NULL, 0, fmt, adx) + 1, KM_SLEEP);
        (void) vsprintf(msg, fmt, adx2);
        va_end(adx2);

        return (msg);
}

#include <vm/uma.h>
#include <vm/uma_int.h>
#ifdef KMEM_DEBUG
#error "KMEM_DEBUG not currently supported"
#endif

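/*
 * Cache statistics are read directly from the backing UMA zone: the current
 * number of allocated items and the zone's per-item size.  These accessors
 * assume the UMA-backed path, which is why KMEM_DEBUG is rejected above.
 */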
uint64_t
spl_kmem_cache_inuse(kmem_cache_t *cache)
{
        return (uma_zone_get_cur(cache->kc_zone));
}

uint64_t
spl_kmem_cache_entry_size(kmem_cache_t *cache)
{
        return (cache->kc_zone->uz_size);
}

/*
 * Register a move callback for cache defragmentation.
 * XXX: Unimplemented but harmless to stub out for now.
 */
void
spl_kmem_cache_set_move(kmem_cache_t *skc,
    kmem_cbrc_t (move)(void *, void *, size_t, void *))
{
        ASSERT3P(move, !=, NULL);
}

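/*
 * Leak report for KMEM_DEBUG builds: run via SYSUNINIT, walk the kmem_items
 * list and print the saved allocation stack for every buffer that was never
 * freed.
 */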
#ifdef KMEM_DEBUG
void kmem_show(void *);
void
kmem_show(void *dummy __unused)
{
        struct kmem_item *i;

        mtx_lock(&kmem_items_mtx);
        if (LIST_EMPTY(&kmem_items))
                printf("KMEM_DEBUG: No leaked elements.\n");
        else {
                printf("KMEM_DEBUG: Leaked elements:\n\n");
                LIST_FOREACH(i, &kmem_items, next) {
                        printf("address=%p\n", i);
                        stack_print_ddb(&i->stack);
                        printf("\n");
                }
        }
        mtx_unlock(&kmem_items_mtx);
}

SYSUNINIT(sol_kmem, SI_SUB_CPU, SI_ORDER_FIRST, kmem_show, NULL);
#endif  /* KMEM_DEBUG */

This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.