The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_malloc_debug.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*      $NetBSD: kern_malloc_debug.c,v 1.14 2005/02/26 21:34:55 perry Exp $     */
    2 
    3 /*
    4  * Copyright (c) 1999, 2000 Artur Grabowski <art@openbsd.org>
    5  * All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  *
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  * 3. The name of the author may not be used to endorse or promote products
   17  *    derived from this software without specific prior written permission.
   18  *
   19  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
   20  * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
   21  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
   22  * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
   23  * EXEMPLARY, OR CONSEQUENTIAL  DAMAGES (INCLUDING, BUT NOT LIMITED TO,
   24  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
   25  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
   26  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
   27  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
   28  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   29  *
   30  * OpenBSD: kern_malloc_debug.c,v 1.10 2001/07/26 13:33:52 art Exp
   31  */
   32 
   33 /*
   34  * This really belongs in kern/kern_malloc.c, but it was too much pollution.
   35  */
   36 
   37 /*
   38  * It's only possible to debug one type/size at a time. The question is
   39  * if this is a limitation or a feature. We never want to run this as the
   40  * default malloc because we'll run out of memory really fast. Adding
   41  * more types will also add to the complexity of the code.
   42  *
   43  * This is really simple. Every malloc() allocates two virtual pages,
   44  * the second page is left unmapped, and the value returned is aligned
   45  * so that it ends at (or very close to) the page boundary to catch overflows.
   46  * Every free() changes the protection of the first page to VM_PROT_NONE so
   47  * that we can catch any dangling writes to it.
   48  * To minimize the risk of writes to recycled chunks we keep an LRU of latest
   49  * freed chunks. The length of it is controlled by MALLOC_DEBUG_CHUNKS.
   50  *
   51  * Don't expect any performance.
   52  *
   53  * TODO:
   54  *  - support for size >= PAGE_SIZE
   55  *  - add support to the fault handler to give better diagnostics if we fail.
   56  */
   57 
   58 #include <sys/cdefs.h>
   59 __KERNEL_RCSID(0, "$NetBSD: kern_malloc_debug.c,v 1.14 2005/02/26 21:34:55 perry Exp $");
   60 
   61 #include <sys/param.h>
   62 #include <sys/proc.h>
   63 #include <sys/kernel.h>
   64 #include <sys/malloc.h>
   65 #include <sys/systm.h>
   66 #include <sys/pool.h>
   67 
   68 #include <uvm/uvm.h>
   69 
   70 /*
   71  * debug_malloc_type and debug_malloc_size define the type and size of
   72  * memory to be debugged. Use 0 for a wildcard. debug_malloc_size_lo
   73  * is the lower limit and debug_malloc_size_hi the upper limit of sizes
   74  * being debugged; 0 will not work as a wildcard for the upper limit.
   75  * For any debugging to take place, type must be != NULL, size must be >= 0,
   76  * and if the limits are being used, size must be set to 0.
   77  * See /usr/src/sys/sys/malloc.h and malloc(9) for a list of types.
   78  *
   79  * Although those are variables, it's a really bad idea to change the type
   80  * if any memory chunks of this type are used. It's ok to change the size
   81  * in runtime.
   82  */
   83 struct malloc_type *debug_malloc_type = NULL;
   84 int debug_malloc_size = -1;
   85 int debug_malloc_size_lo = -1;
   86 int debug_malloc_size_hi = -1;
   87 
   88 /*
   89  * MALLOC_DEBUG_CHUNKS is the number of memory chunks we require on the
   90  * freelist before we reuse them.
   91  */
   92 #define MALLOC_DEBUG_CHUNKS 16
   93 
   94 void debug_malloc_allocate_free(int);
   95 
   96 struct debug_malloc_entry {
   97         TAILQ_ENTRY(debug_malloc_entry) md_list;
   98         vaddr_t md_va;
   99         paddr_t md_pa;
  100         size_t md_size;
  101         struct malloc_type *md_type;
  102 };
  103 
  104 TAILQ_HEAD(,debug_malloc_entry) debug_malloc_freelist;
  105 TAILQ_HEAD(,debug_malloc_entry) debug_malloc_usedlist;
  106 
  107 int debug_malloc_allocs;
  108 int debug_malloc_frees;
  109 int debug_malloc_pages;
  110 int debug_malloc_chunks_on_freelist;
  111 
  112 POOL_INIT(debug_malloc_pool, sizeof(struct debug_malloc_entry), 0, 0, 0,
  113     "mdbepl", NULL);
  114 
/*
 * Intercept a malloc() request if it matches the configured type/size
 * filters. Returns 1 with *addr set (possibly to NULL on failure with
 * M_NOWAIT) when the request was handled here; returns 0 to tell the
 * caller to fall through to the normal allocator.
 */
int
debug_malloc(unsigned long size, struct malloc_type *type, int flags,
    void **addr)
{
        struct debug_malloc_entry *md = NULL;
        int s, wait = !(flags & M_NOWAIT);

        /* Careful not to compare unsigned long to int -1 */
        if ((type != debug_malloc_type && debug_malloc_type != 0) ||
            (size != debug_malloc_size && debug_malloc_size != 0) ||
            (debug_malloc_size_lo != -1 && size < debug_malloc_size_lo) ||
            (debug_malloc_size_hi != -1 && size > debug_malloc_size_hi))
                return (0);

        /* XXX - fix later */
        if (size > PAGE_SIZE)
                return (0);

        /* Block interrupts while manipulating the shared lists. */
        s = splvm();
        /* Keep the LRU deep enough that freed chunks are not reused at once. */
        if (debug_malloc_chunks_on_freelist < MALLOC_DEBUG_CHUNKS)
                debug_malloc_allocate_free(wait);

        md = TAILQ_FIRST(&debug_malloc_freelist);
        if (md == NULL) {
                /* No chunk available (allocation above failed); punt. */
                splx(s);
                return (0);
        }
        TAILQ_REMOVE(&debug_malloc_freelist, md, md_list);
        debug_malloc_chunks_on_freelist--;

        TAILQ_INSERT_HEAD(&debug_malloc_usedlist, md, md_list);
        debug_malloc_allocs++;
        splx(s);

        /* Re-map the data page (it was unmapped by debug_free()). */
        pmap_kenter_pa(md->md_va, md->md_pa, VM_PROT_READ|VM_PROT_WRITE);

        md->md_size = size;
        md->md_type = type;

        /*
         * Align the returned addr so that it ends where the first page
         * ends. roundup to get decent alignment.
         */
        *addr = (void *)(md->md_va + PAGE_SIZE - roundup(size, sizeof(long)));
        if (*addr != NULL && (flags & M_ZERO))
                memset(*addr, 0, size);
        return (1);
}
  163 
/*
 * Intercept a free() of a chunk we handed out. Returns 1 when the chunk
 * was ours (it is unmapped and queued on the free LRU), 0 when the normal
 * free path should handle it. Panics on a double free of a tracked chunk.
 */
int
debug_free(void *addr, struct malloc_type *type)
{
        struct debug_malloc_entry *md;
        vaddr_t va;
        int s;

        if (type != debug_malloc_type && debug_malloc_type != 0)
                return (0);

        /*
         * trunc_page to get the address of the page.
         */
        va = trunc_page((vaddr_t)addr);

        s = splvm();
        TAILQ_FOREACH(md, &debug_malloc_usedlist, md_list)
                if (md->md_va == va)
                        break;

        /*
         * If we are not responsible for this entry, let the normal free
         * handle it
         */
        if (md == NULL) {
                /*
                 * sanity check. Check for multiple frees.
                 */
                TAILQ_FOREACH(md, &debug_malloc_freelist, md_list)
                        if (md->md_va == va)
                                panic("debug_free: already free");
                splx(s);
                return (0);
        }

        debug_malloc_frees++;
        TAILQ_REMOVE(&debug_malloc_usedlist, md, md_list);

        /* Tail insert: oldest freed chunk is reused first (LRU). */
        TAILQ_INSERT_TAIL(&debug_malloc_freelist, md, md_list);
        debug_malloc_chunks_on_freelist++;
        /*
         * unmap the page so any dangling access faults immediately.
         */
        pmap_kremove(md->md_va, PAGE_SIZE);
        splx(s);

        return (1);
}
  212 
  213 void
  214 debug_malloc_init(void)
  215 {
  216 
  217         TAILQ_INIT(&debug_malloc_freelist);
  218         TAILQ_INIT(&debug_malloc_usedlist);
  219 
  220         debug_malloc_allocs = 0;
  221         debug_malloc_frees = 0;
  222         debug_malloc_pages = 0;
  223         debug_malloc_chunks_on_freelist = 0;
  224 }
  225 
  226 /*
  227  * Add one chunk to the freelist.
  228  *
  229  * called at splvm.
  230  */
  231 void
  232 debug_malloc_allocate_free(int wait)
  233 {
  234         vaddr_t va, offset;
  235         struct vm_page *pg;
  236         struct debug_malloc_entry *md;
  237 
  238         md = pool_get(&debug_malloc_pool, wait ? PR_WAITOK : PR_NOWAIT);
  239         if (md == NULL)
  240                 return;
  241 
  242         va = uvm_km_kmemalloc(kmem_map, NULL, PAGE_SIZE * 2,
  243             UVM_KMF_VALLOC | (wait ? UVM_KMF_NOWAIT : 0));
  244         if (va == 0) {
  245                 pool_put(&debug_malloc_pool, md);
  246                 return;
  247         }
  248 
  249         offset = va - vm_map_min(kernel_map);
  250         for (;;) {
  251                 pg = uvm_pagealloc(NULL, offset, NULL, 0);
  252                 if (pg) {
  253                         pg->flags &= ~PG_BUSY;  /* new page */
  254                         UVM_PAGE_OWN(pg, NULL);
  255                 }
  256 
  257                 if (pg)
  258                         break;
  259 
  260                 if (wait == 0) {
  261                         uvm_unmap(kmem_map, va, va + PAGE_SIZE * 2);
  262                         pool_put(&debug_malloc_pool, md);
  263                         return;
  264                 }
  265                 uvm_wait("debug_malloc");
  266         }
  267 
  268         md->md_va = va;
  269         md->md_pa = VM_PAGE_TO_PHYS(pg);
  270 
  271         debug_malloc_pages++;
  272         TAILQ_INSERT_HEAD(&debug_malloc_freelist, md, md_list);
  273         debug_malloc_chunks_on_freelist++;
  274 }
  275 
  276 void
  277 debug_malloc_print(void)
  278 {
  279 
  280         debug_malloc_printit(printf, 0);
  281 }
  282 
  283 void
  284 debug_malloc_printit(void (*pr)(const char *, ...), vaddr_t addr)
  285 {
  286         struct debug_malloc_entry *md;
  287 
  288         if (addr) {
  289                 TAILQ_FOREACH(md, &debug_malloc_freelist, md_list) {
  290                         if (addr >= md->md_va &&
  291                             addr < md->md_va + 2 * PAGE_SIZE) {
  292                                 (*pr)("Memory at address 0x%x is in a freed "
  293                                       "area. type %s, size: %d\n ",
  294                                       addr, md->md_type->ks_shortdesc,
  295                                       md->md_size);
  296                                 return;
  297                         }
  298                 }
  299                 TAILQ_FOREACH(md, &debug_malloc_usedlist, md_list) {
  300                         if (addr >= md->md_va + PAGE_SIZE &&
  301                             addr < md->md_va + 2 * PAGE_SIZE) {
  302                                 (*pr)("Memory at address 0x%x is just outside "
  303                                       "an allocated area. type %s, size: %d\n",
  304                                       addr, md->md_type->ks_shortdesc,
  305                                       md->md_size);
  306                                 return;
  307                         }
  308                 }
  309                 (*pr)("Memory at address 0x%x is outside debugged malloc.\n");
  310                 return;
  311         }
  312 
  313         (*pr)("allocs: %d\n", debug_malloc_allocs);
  314         (*pr)("frees: %d\n", debug_malloc_frees);
  315         (*pr)("pages used: %d\n", debug_malloc_pages);
  316         (*pr)("chunks on freelist: %d\n", debug_malloc_chunks_on_freelist);
  317 
  318         (*pr)("\taddr:\tsize:\n");
  319         (*pr)("free chunks:\n");
  320         TAILQ_FOREACH(md, &debug_malloc_freelist, md_list)
  321                 (*pr)("\t0x%x\t0x%x\t%s\n", md->md_va, md->md_size,
  322                       md->md_type->ks_shortdesc);
  323         (*pr)("used chunks:\n");
  324         TAILQ_FOREACH(md, &debug_malloc_usedlist, md_list)
  325                 (*pr)("\t0x%x\t0x%x\t%s\n", md->md_va, md->md_size,
  326                       md->md_type->ks_shortdesc);
  327 }

Cache object: e4ec0f5748755d827acdc871c250d668


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.