The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_malloc_debug.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*      $NetBSD: kern_malloc_debug.c,v 1.20 2008/08/07 01:40:21 matt Exp $      */
    2 
    3 /*
    4  * Copyright (c) 1999, 2000 Artur Grabowski <art@openbsd.org>
    5  * All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  *
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  * 3. The name of the author may not be used to endorse or promote products
   17  *    derived from this software without specific prior written permission.
   18  *
   19  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
   20  * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
   21  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
   22  * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
   23  * EXEMPLARY, OR CONSEQUENTIAL  DAMAGES (INCLUDING, BUT NOT LIMITED TO,
   24  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
   25  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
   26  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
   27  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
   28  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   29  *
   30  * OpenBSD: kern_malloc_debug.c,v 1.10 2001/07/26 13:33:52 art Exp
   31  */
   32 
   33 /*
   34  * This really belongs in kern/kern_malloc.c, but it was too much pollution.
   35  */
   36 
   37 /*
   38  * It's only possible to debug one type/size at a time. The question is
   39  * if this is a limitation or a feature. We never want to run this as the
   40  * default malloc because we'll run out of memory really fast. Adding
   41  * more types will also add to the complexity of the code.
   42  *
   43  * This is really simple. Every malloc() allocates two virtual pages,
   44  * the second page is left unmapped, and the value returned is aligned
   45  * so that it ends at (or very close to) the page boundary to catch overflows.
   46  * Every free() changes the protection of the first page to VM_PROT_NONE so
   47  * that we can catch any dangling writes to it.
   48  * To minimize the risk of writes to recycled chunks we keep an LRU of latest
   49  * freed chunks. The length of it is controlled by MALLOC_DEBUG_CHUNKS.
   50  *
   51  * Don't expect any performance.
   52  *
   53  * TODO:
   54  *  - support for size >= PAGE_SIZE
   55  *  - add support to the fault handler to give better diagnostics if we fail.
   56  */
   57 
   58 #include <sys/cdefs.h>
   59 __KERNEL_RCSID(0, "$NetBSD: kern_malloc_debug.c,v 1.20 2008/08/07 01:40:21 matt Exp $");
   60 
   61 #include <sys/param.h>
   62 #include <sys/proc.h>
   63 #include <sys/kernel.h>
   64 #include <sys/malloc.h>
   65 #include <sys/systm.h>
   66 #include <sys/pool.h>
   67 
   68 #include <uvm/uvm.h>
   69 
   70 /*
   71  * debug_malloc_type and debug_malloc_size define the type and size of
   72  * memory to be debugged. Use 0 for a wildcard. debug_malloc_size_lo
   73  * is the lower limit and debug_malloc_size_hi the upper limit of sizes
   74  * being debugged; 0 will not work as a wildcard for the upper limit.
   75  * For any debugging to take place, type must be != NULL, size must be >= 0,
   76  * and if the limits are being used, size must be set to 0.
   77  * See /usr/src/sys/sys/malloc.h and malloc(9) for a list of types.
   78  *
   79  * Although those are variables, it's a really bad idea to change the type
   80  * if any memory chunks of this type are used. It's ok to change the size
   81  * in runtime.
   82  */
struct malloc_type *debug_malloc_type = NULL;	/* type to match; 0/NULL acts as a wildcard in the checks below */
int debug_malloc_size = -1;		/* exact size to match; 0 acts as a wildcard, -1 matches nothing */
int debug_malloc_size_lo = -1;		/* lower size limit; -1 disables this check */
int debug_malloc_size_hi = -1;		/* upper size limit; -1 disables this check */
   87 
   88 /*
   89  * MALLOC_DEBUG_CHUNKS is the number of memory chunks we require on the
   90  * freelist before we reuse them.
   91  */
#define MALLOC_DEBUG_CHUNKS 16

void debug_malloc_allocate_free(int);

/*
 * One tracked chunk.  Each entry owns two pages of KVA starting at
 * md_va: the first page is mapped to md_pa while the chunk is live
 * (see pmap_kenter_pa() in debug_malloc()) and unmapped while it sits
 * on the freelist (see pmap_kremove() in debug_free()); the second
 * page is never mapped, so overflows past the chunk fault.
 */
struct debug_malloc_entry {
        TAILQ_ENTRY(debug_malloc_entry) md_list;	/* freelist/usedlist linkage */
        vaddr_t md_va;			/* base of the two-page KVA window */
        paddr_t md_pa;			/* physical page backing the first page */
        size_t md_size;			/* size requested by the caller */
        struct malloc_type *md_type;	/* malloc type the chunk was allocated with */
};

/* LRU of freed chunks awaiting reuse, and chunks currently handed out. */
TAILQ_HEAD(,debug_malloc_entry) debug_malloc_freelist =
        TAILQ_HEAD_INITIALIZER(debug_malloc_freelist);
TAILQ_HEAD(,debug_malloc_entry) debug_malloc_usedlist =
        TAILQ_HEAD_INITIALIZER(debug_malloc_usedlist);

/* Statistics reported by debug_malloc_printit(). */
int debug_malloc_allocs;
int debug_malloc_frees;
int debug_malloc_pages;
int debug_malloc_chunks_on_freelist;

/* Pool backing struct debug_malloc_entry allocations. */
POOL_INIT(debug_malloc_pool, sizeof(struct debug_malloc_entry), 0, 0, 0,
    "mdbepl", NULL, IPL_VM);
  116 
/*
 * Intercept a malloc() request.  If the request matches the configured
 * type/size filters, satisfy it from a debug chunk and return 1 with
 * the allocated address stored through "addr"; otherwise return 0 so
 * the normal malloc path handles it.
 */
int
debug_malloc(unsigned long size, struct malloc_type *type, int flags,
    void **addr)
{
        struct debug_malloc_entry *md = NULL;
        int s, wait = !(flags & M_NOWAIT);

        /* Careful not to compare unsigned long to int -1 */
        if ((type != debug_malloc_type && debug_malloc_type != 0) ||
            (size != debug_malloc_size && debug_malloc_size != 0) ||
            (debug_malloc_size_lo != -1 && size < debug_malloc_size_lo) ||
            (debug_malloc_size_hi != -1 && size > debug_malloc_size_hi))
                return (0);

        /* XXX - fix later: sizes over one page are not supported yet */
        if (size > PAGE_SIZE)
                return (0);

        /* Keep the freelist topped up so reuse stays LRU-delayed. */
        s = splvm();
        if (debug_malloc_chunks_on_freelist < MALLOC_DEBUG_CHUNKS)
                debug_malloc_allocate_free(wait);

        md = TAILQ_FIRST(&debug_malloc_freelist);
        if (md == NULL) {
                splx(s);
                return (0);
        }
        TAILQ_REMOVE(&debug_malloc_freelist, md, md_list);
        debug_malloc_chunks_on_freelist--;

        TAILQ_INSERT_HEAD(&debug_malloc_usedlist, md, md_list);
        debug_malloc_allocs++;
        splx(s);

        /* Map the first page of the chunk's KVA window read/write. */
        pmap_kenter_pa(md->md_va, md->md_pa,
            VM_PROT_READ|VM_PROT_WRITE|PMAP_KMPAGE);
        pmap_update(pmap_kernel());

        md->md_size = size;
        md->md_type = type;

        /*
         * Align the returned addr so that it ends where the first page
         * ends. roundup to get decent alignment.
         */
        *addr = (void *)(md->md_va + PAGE_SIZE - roundup(size, sizeof(long)));
        if (*addr != NULL && (flags & M_ZERO))
                memset(*addr, 0, size);
        return (1);
}
  167 
/*
 * Intercept a free() request.  Returns 1 if "addr" belongs to a debug
 * chunk (which is then unmapped and queued on the freelist LRU), 0 if
 * the normal free path should handle it.  Panics on a double free of
 * a chunk we own.
 */
int
debug_free(void *addr, struct malloc_type *type)
{
        struct debug_malloc_entry *md;
        vaddr_t va;
        int s;

        if (type != debug_malloc_type && debug_malloc_type != 0)
                return (0);

        /*
         * trunc_page to get the address of the page.
         */
        va = trunc_page((vaddr_t)addr);

        s = splvm();
        TAILQ_FOREACH(md, &debug_malloc_usedlist, md_list)
                if (md->md_va == va)
                        break;

        /*
         * If we are not responsible for this entry, let the normal free
         * handle it
         */
        if (md == NULL) {
                /*
                 * sanity check. Check for multiple frees.
                 */
                TAILQ_FOREACH(md, &debug_malloc_freelist, md_list)
                        if (md->md_va == va)
                                panic("debug_free: already free");
                splx(s);
                return (0);
        }

        debug_malloc_frees++;
        TAILQ_REMOVE(&debug_malloc_usedlist, md, md_list);

        /* Tail insertion gives LRU reuse order for freed chunks. */
        TAILQ_INSERT_TAIL(&debug_malloc_freelist, md, md_list);
        debug_malloc_chunks_on_freelist++;
        /*
         * unmap the page so any dangling access to the chunk faults.
         */
        pmap_kremove(md->md_va, PAGE_SIZE);
        pmap_update(pmap_kernel());
        splx(s);

        return (1);
}
  217 
  218 /*
  219  * Add one chunk to the freelist.
  220  *
  221  * called at splvm.
  222  */
  223 void
  224 debug_malloc_allocate_free(int wait)
  225 {
  226         vaddr_t va, offset;
  227         struct vm_page *pg;
  228         struct debug_malloc_entry *md;
  229 
  230         md = pool_get(&debug_malloc_pool, wait ? PR_WAITOK : PR_NOWAIT);
  231         if (md == NULL)
  232                 return;
  233 
  234         va = uvm_km_alloc(kmem_map, PAGE_SIZE * 2, 0,
  235             UVM_KMF_VAONLY | (wait ? UVM_KMF_NOWAIT : 0));
  236         if (va == 0) {
  237                 pool_put(&debug_malloc_pool, md);
  238                 return;
  239         }
  240 
  241         offset = va - vm_map_min(kernel_map);
  242         for (;;) {
  243                 pg = uvm_pagealloc(NULL, offset, NULL, 0);
  244                 if (pg) {
  245                         pg->flags &= ~PG_BUSY;  /* new page */
  246                         UVM_PAGE_OWN(pg, NULL);
  247                 }
  248 
  249                 if (pg)
  250                         break;
  251 
  252                 if (wait == 0) {
  253                         uvm_km_free(kmem_map, va, va + PAGE_SIZE * 2,
  254                             UVM_KMF_VAONLY);
  255                         pool_put(&debug_malloc_pool, md);
  256                         return;
  257                 }
  258                 uvm_wait("debug_malloc");
  259         }
  260 
  261         md->md_va = va;
  262         md->md_pa = VM_PAGE_TO_PHYS(pg);
  263 
  264         debug_malloc_pages++;
  265         TAILQ_INSERT_HEAD(&debug_malloc_freelist, md, md_list);
  266         debug_malloc_chunks_on_freelist++;
  267 }
  268 
  269 void
  270 debug_malloc_print(void)
  271 {
  272 
  273         debug_malloc_printit(printf, 0);
  274 }
  275 
  276 void
  277 debug_malloc_printit(void (*pr)(const char *, ...), vaddr_t addr)
  278 {
  279         struct debug_malloc_entry *md;
  280 
  281         if (addr) {
  282                 TAILQ_FOREACH(md, &debug_malloc_freelist, md_list) {
  283                         if (addr >= md->md_va &&
  284                             addr < md->md_va + 2 * PAGE_SIZE) {
  285                                 (*pr)("Memory at address 0x%x is in a freed "
  286                                       "area. type %s, size: %d\n ",
  287                                       addr, md->md_type->ks_shortdesc,
  288                                       md->md_size);
  289                                 return;
  290                         }
  291                 }
  292                 TAILQ_FOREACH(md, &debug_malloc_usedlist, md_list) {
  293                         if (addr >= md->md_va + PAGE_SIZE &&
  294                             addr < md->md_va + 2 * PAGE_SIZE) {
  295                                 (*pr)("Memory at address 0x%x is just outside "
  296                                       "an allocated area. type %s, size: %d\n",
  297                                       addr, md->md_type->ks_shortdesc,
  298                                       md->md_size);
  299                                 return;
  300                         }
  301                 }
  302                 (*pr)("Memory at address 0x%x is outside debugged malloc.\n");
  303                 return;
  304         }
  305 
  306         (*pr)("allocs: %d\n", debug_malloc_allocs);
  307         (*pr)("frees: %d\n", debug_malloc_frees);
  308         (*pr)("pages used: %d\n", debug_malloc_pages);
  309         (*pr)("chunks on freelist: %d\n", debug_malloc_chunks_on_freelist);
  310 
  311         (*pr)("\taddr:\tsize:\n");
  312         (*pr)("free chunks:\n");
  313         TAILQ_FOREACH(md, &debug_malloc_freelist, md_list)
  314                 (*pr)("\t0x%x\t0x%x\t%s\n", md->md_va, md->md_size,
  315                       md->md_type->ks_shortdesc);
  316         (*pr)("used chunks:\n");
  317         TAILQ_FOREACH(md, &debug_malloc_usedlist, md_list)
  318                 (*pr)("\t0x%x\t0x%x\t%s\n", md->md_va, md->md_size,
  319                       md->md_type->ks_shortdesc);
  320 }

Cache object: 9f8ed8b72af373f63c50450c348664eb


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.