FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_malloc_debug.c


/*	$NetBSD: kern_malloc_debug.c,v 1.11 2003/10/24 00:53:43 yamt Exp $	*/

/*
 * Copyright (c) 1999, 2000 Artur Grabowski <art@openbsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * OpenBSD: kern_malloc_debug.c,v 1.10 2001/07/26 13:33:52 art Exp
 */
/*
 * This really belongs in kern/kern_malloc.c, but it was too much pollution.
 */

/*
 * It's only possible to debug one type/size at a time. The question is
 * whether this is a limitation or a feature. We never want to run this as
 * the default malloc because we would run out of memory really fast. Adding
 * more types would also add to the complexity of the code.
 *
 * This is really simple. Every malloc() allocates two virtual pages: the
 * second page is left unmapped, and the value returned is aligned so that
 * it ends at (or very close to) the page boundary, to catch overflows.
 * Every free() unmaps the first page, so that any dangling access to a
 * freed chunk faults immediately.
 * To minimize the risk of dangling writes hitting recycled chunks, we keep
 * an LRU list of the most recently freed chunks; its length is controlled
 * by MALLOC_DEBUG_CHUNKS.
 *
 * Don't expect any performance.
 *
 * TODO:
 *  - support for size >= PAGE_SIZE
 *  - add support to the fault handler to give better diagnostics if we fail.
 */
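
/*
 * The same guard-page idea, sketched for user space with POSIX mmap(2) and
 * mprotect(2) instead of the kernel's UVM/pmap interfaces.  This block is
 * illustration only (hence the #if 0); guard_alloc() and guard_free() are
 * hypothetical names, not part of any kernel or libc API.
 */
#if 0
#include <sys/mman.h>
#include <stdint.h>
#include <unistd.h>

static void *
guard_alloc(size_t size)
{
        size_t pg = (size_t)sysconf(_SC_PAGESIZE);
        char *va;

        if (size == 0 || size > pg)
                return (NULL);          /* same size limit as below */

        /* Two pages: the first is usable, the second stays inaccessible. */
        va = mmap(NULL, 2 * pg, PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANON, -1, 0);
        if (va == MAP_FAILED)
                return (NULL);
        if (mprotect(va + pg, pg, PROT_NONE) == -1) {
                (void)munmap(va, 2 * pg);
                return (NULL);
        }

        /* Align the result so it ends (nearly) at the page boundary. */
        return (va + pg - ((size + sizeof(long) - 1) & ~(sizeof(long) - 1)));
}

static void
guard_free(void *p)
{
        size_t pg = (size_t)sysconf(_SC_PAGESIZE);
        char *va = (char *)((uintptr_t)p & ~(uintptr_t)(pg - 1));

        /* Revoke access rather than unmapping, to catch dangling writes. */
        (void)mprotect(va, pg, PROT_NONE);
}
#endif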

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_malloc_debug.c,v 1.11 2003/10/24 00:53:43 yamt Exp $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/pool.h>

#include <uvm/uvm.h>

/*
 * debug_malloc_type and debug_malloc_size select the type and size of
 * memory to be debugged.  Use 0 as a wildcard.  debug_malloc_size_lo
 * is the lower limit and debug_malloc_size_hi the upper limit of the sizes
 * being debugged; 0 does not work as a wildcard for the upper limit.
 * For any debugging to take place, debug_malloc_type must be non-NULL and
 * debug_malloc_size must be >= 0; if the lo/hi limits are used,
 * debug_malloc_size must be set to 0 (the wildcard).
 * See /usr/src/sys/sys/malloc.h and malloc(9) for the list of types.
 *
 * Although these are plain variables, it is a really bad idea to change the
 * type while any memory chunks of that type are in use.  It is ok to change
 * the size at run time.
 */
struct malloc_type *debug_malloc_type = NULL;
int debug_malloc_size = -1;
int debug_malloc_size_lo = -1;
int debug_malloc_size_hi = -1;

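/*
 * Example (illustrative values only): to debug allocations of a hypothetical
 * type M_EXAMPLE between 16 and 128 bytes, the tunables above could be set
 * as follows, e.g. by patching the initializers or writing the variables
 * from the kernel debugger:
 *
 *      debug_malloc_type    = M_EXAMPLE;   (any type from malloc(9))
 *      debug_malloc_size    = 0;           (wildcard, so lo/hi apply)
 *      debug_malloc_size_lo = 16;
 *      debug_malloc_size_hi = 128;
 */
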
/*
 * MALLOC_DEBUG_CHUNKS is the number of memory chunks we require on the
 * freelist before we reuse them.
 */
#define MALLOC_DEBUG_CHUNKS 16

void debug_malloc_allocate_free(int);

struct debug_malloc_entry {
        TAILQ_ENTRY(debug_malloc_entry) md_list;
        vaddr_t md_va;
        paddr_t md_pa;
        size_t md_size;
        struct malloc_type *md_type;
};

TAILQ_HEAD(,debug_malloc_entry) debug_malloc_freelist;
TAILQ_HEAD(,debug_malloc_entry) debug_malloc_usedlist;

int debug_malloc_allocs;
int debug_malloc_frees;
int debug_malloc_pages;
int debug_malloc_chunks_on_freelist;

struct pool debug_malloc_pool;

/*
 * Try to handle an allocation of the given size and type.  Returns 1 and
 * stores the result in *addr if the debug allocator handled it; returns 0
 * if the caller should fall back to the normal allocator.
 */
int
debug_malloc(unsigned long size, struct malloc_type *type, int flags,
    void **addr)
{
        struct debug_malloc_entry *md = NULL;
        int s, wait = !(flags & M_NOWAIT);

        /*
         * Careful not to compare the unsigned long size directly against
         * the int -1 defaults: the explicit != -1 checks keep -1 from
         * being converted to a huge unsigned value.
         */
        if ((type != debug_malloc_type && debug_malloc_type != 0) ||
            (size != debug_malloc_size && debug_malloc_size != 0) ||
            (debug_malloc_size_lo != -1 && size < debug_malloc_size_lo) ||
            (debug_malloc_size_hi != -1 && size > debug_malloc_size_hi))
                return (0);

        /* XXX - fix later */
        if (size > PAGE_SIZE)
                return (0);

        s = splvm();
        if (debug_malloc_chunks_on_freelist < MALLOC_DEBUG_CHUNKS)
                debug_malloc_allocate_free(wait);

        md = TAILQ_FIRST(&debug_malloc_freelist);
        if (md == NULL) {
                splx(s);
                return (0);
        }
        TAILQ_REMOVE(&debug_malloc_freelist, md, md_list);
        debug_malloc_chunks_on_freelist--;

        TAILQ_INSERT_HEAD(&debug_malloc_usedlist, md, md_list);
        debug_malloc_allocs++;
        splx(s);

        pmap_kenter_pa(md->md_va, md->md_pa, VM_PROT_READ|VM_PROT_WRITE);

        md->md_size = size;
        md->md_type = type;

        /*
         * Align the returned addr so that it ends where the first page
         * ends; roundup to get decent alignment.
         */
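        /*
         * Worked example (illustrative, assuming PAGE_SIZE == 4096 and
         * sizeof(long) == 8): for size == 100, roundup(100, 8) == 104, so
         * *addr == md_va + 4096 - 104.  The allocation then ends 4 bytes
         * short of the page boundary (the roundup slack), and any overflow
         * beyond that slack runs into the unmapped second page and faults
         * immediately.
         */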
        *addr = (void *)(md->md_va + PAGE_SIZE - roundup(size, sizeof(long)));
        if (*addr != NULL && (flags & M_ZERO))
                memset(*addr, 0, size);
        return (1);
}

/*
 * Try to handle a free of the given address.  Returns 1 if the chunk was
 * handled by the debug allocator, 0 if the normal free should handle it.
 */
int
debug_free(void *addr, struct malloc_type *type)
{
        struct debug_malloc_entry *md;
        vaddr_t va;
        int s;

        if (type != debug_malloc_type && debug_malloc_type != 0)
                return (0);

        /*
         * trunc_page to get the address of the page.
         */
        va = trunc_page((vaddr_t)addr);

        s = splvm();
        TAILQ_FOREACH(md, &debug_malloc_usedlist, md_list)
                if (md->md_va == va)
                        break;

        /*
         * If we are not responsible for this entry, let the normal free
         * handle it.
         */
        if (md == NULL) {
                /*
                 * Sanity check: catch multiple frees.
                 */
                TAILQ_FOREACH(md, &debug_malloc_freelist, md_list)
                        if (md->md_va == va)
                                panic("debug_free: already free");
                splx(s);
                return (0);
        }

        debug_malloc_frees++;
        TAILQ_REMOVE(&debug_malloc_usedlist, md, md_list);

        TAILQ_INSERT_TAIL(&debug_malloc_freelist, md, md_list);
        debug_malloc_chunks_on_freelist++;
        /*
         * Unmap the page so that dangling accesses fault.
         */
        pmap_kremove(md->md_va, PAGE_SIZE);
        splx(s);

        return (1);
}

void
debug_malloc_init(void)
{

        TAILQ_INIT(&debug_malloc_freelist);
        TAILQ_INIT(&debug_malloc_usedlist);

        debug_malloc_allocs = 0;
        debug_malloc_frees = 0;
        debug_malloc_pages = 0;
        debug_malloc_chunks_on_freelist = 0;

        pool_init(&debug_malloc_pool, sizeof(struct debug_malloc_entry),
            0, 0, 0, "mdbepl", NULL);
}

/*
 * Add one chunk to the freelist: allocate a two-page virtual address range
 * backed by a single physical page (the mapping itself is entered later,
 * in debug_malloc()).
 *
 * Called at splvm.
 */
void
debug_malloc_allocate_free(int wait)
{
        vaddr_t va, offset;
        struct vm_page *pg;
        struct debug_malloc_entry *md;

        md = pool_get(&debug_malloc_pool, wait ? PR_WAITOK : PR_NOWAIT);
        if (md == NULL)
                return;

        va = uvm_km_kmemalloc(kmem_map, NULL, PAGE_SIZE * 2,
            UVM_KMF_VALLOC | (wait ? UVM_KMF_NOWAIT : 0));
        if (va == 0) {
                pool_put(&debug_malloc_pool, md);
                return;
        }

        offset = va - vm_map_min(kernel_map);
        for (;;) {
                pg = uvm_pagealloc(NULL, offset, NULL, 0);
                if (pg) {
                        pg->flags &= ~PG_BUSY;  /* new page */
                        UVM_PAGE_OWN(pg, NULL);
                }

                if (pg)
                        break;

                if (wait == 0) {
                        uvm_unmap(kmem_map, va, va + PAGE_SIZE * 2);
                        pool_put(&debug_malloc_pool, md);
                        return;
                }
                uvm_wait("debug_malloc");
        }

        md->md_va = va;
        md->md_pa = VM_PAGE_TO_PHYS(pg);

        debug_malloc_pages++;
        TAILQ_INSERT_HEAD(&debug_malloc_freelist, md, md_list);
        debug_malloc_chunks_on_freelist++;
}

  277 
  278 void
  279 debug_malloc_print(void)
  280 {
  281 
  282         debug_malloc_printit(printf, 0);
  283 }
  284 
  285 void
  286 debug_malloc_printit(void (*pr)(const char *, ...), vaddr_t addr)
  287 {
  288         struct debug_malloc_entry *md;
  289 
  290         if (addr) {
  291                 TAILQ_FOREACH(md, &debug_malloc_freelist, md_list) {
  292                         if (addr >= md->md_va &&
  293                             addr < md->md_va + 2 * PAGE_SIZE) {
  294                                 (*pr)("Memory at address 0x%x is in a freed "
  295                                       "area. type %s, size: %d\n ",
  296                                       addr, md->md_type->ks_shortdesc,
  297                                       md->md_size);
  298                                 return;
  299                         }
  300                 }
  301                 TAILQ_FOREACH(md, &debug_malloc_usedlist, md_list) {
  302                         if (addr >= md->md_va + PAGE_SIZE &&
  303                             addr < md->md_va + 2 * PAGE_SIZE) {
  304                                 (*pr)("Memory at address 0x%x is just outside "
  305                                       "an allocated area. type %s, size: %d\n",
  306                                       addr, md->md_type->ks_shortdesc,
  307                                       md->md_size);
  308                                 return;
  309                         }
  310                 }
  311                 (*pr)("Memory at address 0x%x is outside debugged malloc.\n");
  312                 return;
  313         }
  314 
  315         (*pr)("allocs: %d\n", debug_malloc_allocs);
  316         (*pr)("frees: %d\n", debug_malloc_frees);
  317         (*pr)("pages used: %d\n", debug_malloc_pages);
  318         (*pr)("chunks on freelist: %d\n", debug_malloc_chunks_on_freelist);
  319 
  320         (*pr)("\taddr:\tsize:\n");
  321         (*pr)("free chunks:\n");
  322         TAILQ_FOREACH(md, &debug_malloc_freelist, md_list)
  323                 (*pr)("\t0x%x\t0x%x\t%s\n", md->md_va, md->md_size,
  324                       md->md_type->ks_shortdesc);
  325         (*pr)("used chunks:\n");
  326         TAILQ_FOREACH(md, &debug_malloc_usedlist, md_list)
  327                 (*pr)("\t0x%x\t0x%x\t%s\n", md->md_va, md->md_size,
  328                       md->md_type->ks_shortdesc);
  329 }
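
How the regular allocator is expected to consume this interface (a minimal
sketch, assuming a MALLOC_DEBUG kernel option; the wrapper names below are
hypothetical and the real hookup in kern/kern_malloc.c may differ in detail):

/*
 * Try the debug allocator first; fall back to the regular path when
 * debug_malloc()/debug_free() return 0.
 */
void *
example_malloc(unsigned long size, struct malloc_type *type, int flags)
{
        void *p;

#ifdef MALLOC_DEBUG
        if (debug_malloc(size, type, flags, &p))
                return (p);
#endif
        /* ... regular malloc() implementation ... */
        return (NULL);          /* placeholder in this sketch */
}

void
example_free(void *addr, struct malloc_type *type)
{

#ifdef MALLOC_DEBUG
        if (debug_free(addr, type))
                return;
#endif
        /* ... regular free() implementation ... */
}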
