/*-
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2005 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/6.4/sys/kern/kern_malloc.c 182404 2008-08-28 20:29:33Z emaste $");

#include "opt_ddb.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/vmmeter.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/time.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#ifdef DEBUG_MEMGUARD
#include <vm/memguard.h>
#endif
#ifdef DEBUG_REDZONE
#include <vm/redzone.h>
#endif

#if defined(INVARIANTS) && defined(__i386__)
#include <machine/cpu.h>
#endif

#include <ddb/ddb.h>

/*
 * When realloc() is called, if the new size is sufficiently smaller than
 * the old size, realloc() will allocate a new, smaller block to avoid
 * wasting memory.  'Sufficiently smaller' is defined as: newsize <=
 * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'.
 */
#ifndef REALLOC_FRACTION
#define	REALLOC_FRACTION	1	/* new block if <= half the size */
#endif
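
/*
 * Illustrative example (not compiled): with the default REALLOC_FRACTION
 * of 1, shrinking a 1024-byte block qualifies for reallocation only when
 * the new size is at most 1024 / 2^1 = 512 bytes.  A realloc() from 1024
 * down to 600 bytes therefore reuses the old block, while a realloc()
 * down to 512 bytes or less copies into a freshly allocated, smaller one.
 */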

MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");
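
/*
 * A minimal sketch of how a subsystem defines and uses its own malloc
 * type; the names here (M_MYDRV, sc) are hypothetical:
 *
 *	MALLOC_DECLARE(M_MYDRV);			(in a shared header)
 *	MALLOC_DEFINE(M_MYDRV, "mydrv", "my driver state");
 *
 *	sc = malloc(sizeof(*sc), M_MYDRV, M_WAITOK | M_ZERO);
 *	...
 *	free(sc, M_MYDRV);
 */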

static void kmeminit(void *);
SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL)

static MALLOC_DEFINE(M_FREE, "free", "should be on free list");

static struct malloc_type *kmemstatistics;
static char *kmembase;
static char *kmemlimit;
static int kmemcount;

#define	KMEM_ZSHIFT	4
#define	KMEM_ZBASE	16
#define	KMEM_ZMASK	(KMEM_ZBASE - 1)

#define	KMEM_ZMAX	PAGE_SIZE
#define	KMEM_ZSIZE	(KMEM_ZMAX >> KMEM_ZSHIFT)
static u_int8_t kmemsize[KMEM_ZSIZE + 1];
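
/*
 * Worked example of the size-to-zone lookup (illustrative): a 100-byte
 * request is first rounded up to the next KMEM_ZBASE (16-byte) multiple,
 * (100 & ~KMEM_ZMASK) + KMEM_ZBASE = 112.  kmemsize[112 >> KMEM_ZSHIFT],
 * i.e. kmemsize[7], then yields the index of the smallest zone that fits,
 * here the 128-byte zone, so the caller is handed a 128-byte item.
 */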

/* These won't be powers of two for long */
struct {
	int kz_size;
	char *kz_name;
	uma_zone_t kz_zone;
} kmemzones[] = {
	{16, "16", NULL},
	{32, "32", NULL},
	{64, "64", NULL},
	{128, "128", NULL},
	{256, "256", NULL},
	{512, "512", NULL},
	{1024, "1024", NULL},
	{2048, "2048", NULL},
	{4096, "4096", NULL},
#if PAGE_SIZE > 4096
	{8192, "8192", NULL},
#if PAGE_SIZE > 8192
	{16384, "16384", NULL},
#if PAGE_SIZE > 16384
	{32768, "32768", NULL},
#if PAGE_SIZE > 32768
	{65536, "65536", NULL},
#if PAGE_SIZE > 65536
#error "Unsupported PAGE_SIZE"
#endif	/* 65536 */
#endif	/* 32768 */
#endif	/* 16384 */
#endif	/* 8192 */
#endif	/* 4096 */
	{0, NULL},
};

static uma_zone_t mt_zone;

u_int vm_kmem_size;
SYSCTL_UINT(_vm, OID_AUTO, kmem_size, CTLFLAG_RD, &vm_kmem_size, 0,
    "Size of kernel memory");

u_int vm_kmem_size_max;
SYSCTL_UINT(_vm, OID_AUTO, kmem_size_max, CTLFLAG_RD, &vm_kmem_size_max, 0,
    "Maximum size of kernel memory");

u_int vm_kmem_size_scale;
SYSCTL_UINT(_vm, OID_AUTO, kmem_size_scale, CTLFLAG_RD, &vm_kmem_size_scale, 0,
    "Scale factor for kernel memory size");

/*
 * The malloc_mtx protects the kmemstatistics linked list.
 */
struct mtx malloc_mtx;

#ifdef MALLOC_PROFILE
uint64_t krequests[KMEM_ZSIZE + 1];

static int sysctl_kern_mprof(SYSCTL_HANDLER_ARGS);
#endif

static int sysctl_kern_malloc(SYSCTL_HANDLER_ARGS);
static int sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS);

/* time_uptime of last malloc(9) failure */
static time_t t_malloc_fail;
/*
 * Cause a malloc() failure every (n) M_NOWAIT allocations.  A value of
 * zero disables induced failures.
 */
SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD, 0,
    "Kernel malloc debugging options");

static int malloc_failure_rate;
static int malloc_nowait_count;
static int malloc_failure_count;
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RW,
    &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail");
TUNABLE_INT("debug.malloc.failure_rate", &malloc_failure_rate);
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD,
    &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures");
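
/*
 * For example, on a kernel built with MALLOC_MAKE_FAILURES, setting
 * debug.malloc.failure_rate to 10 (as a loader tunable or with
 * sysctl(8)) makes every 10th M_NOWAIT allocation fail artificially.
 */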
#endif

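/*
 * Return the number of seconds since the last recorded malloc(9)
 * failure; callers can use this to throttle retries or log messages
 * after an allocation shortage.
 */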
int
malloc_last_fail(void)
{

	return (time_uptime - t_malloc_fail);
}

/*
 * Record an allocation in the per-CPU statistics bucket for this
 * malloc_type.  A critical section, rather than a lock, protects the
 * update of the current CPU's slot.
 */
static void
malloc_type_zone_allocated(struct malloc_type *mtp, unsigned long size,
    int zindx)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = mtp->ks_handle;
	mtsp = &mtip->mti_stats[curcpu];
	if (size > 0) {
		mtsp->mts_memalloced += size;
		mtsp->mts_numallocs++;
	}
	if (zindx != -1)
		mtsp->mts_size |= 1 << zindx;
	critical_exit();
}

void
malloc_type_allocated(struct malloc_type *mtp, unsigned long size)
{

	if (size > 0)
		malloc_type_zone_allocated(mtp, size, -1);
}

/*
 * Record a free in the per-CPU statistics bucket for this malloc_type.
 */
void
malloc_type_freed(struct malloc_type *mtp, unsigned long size)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = mtp->ks_handle;
	mtsp = &mtip->mti_stats[curcpu];
	mtsp->mts_memfreed += size;
	mtsp->mts_numfrees++;
	critical_exit();
}

/*
 * malloc:
 *
 *	Allocate a block of memory.
 *
 *	If M_NOWAIT is set, this routine will not block and will return
 *	NULL if the allocation fails.
 */
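/*
 * A minimal usage sketch (buffer name and error handling hypothetical):
 *
 *	char *buf;
 *
 *	buf = malloc(4096, M_TEMP, M_NOWAIT);
 *	if (buf == NULL)
 *		return (ENOMEM);
 *	...
 *	free(buf, M_TEMP);
 *
 * With M_WAITOK the call sleeps until memory is available and never
 * returns NULL, so the NULL check may be omitted in that case.
 */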
void *
malloc(unsigned long size, struct malloc_type *mtp, int flags)
{
	int indx;
	caddr_t va;
	uma_zone_t zone;
	uma_keg_t keg;
#if defined(DIAGNOSTIC) || defined(DEBUG_REDZONE)
	unsigned long osize = size;
#endif

#ifdef INVARIANTS
	/*
	 * Check that exactly one of M_WAITOK or M_NOWAIT is specified.
	 */
	indx = flags & (M_WAITOK | M_NOWAIT);
	if (indx != M_NOWAIT && indx != M_WAITOK) {
		static struct timeval lasterr;
		static int curerr, once;
		if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) {
			printf("Bad malloc flags: %x\n", indx);
			kdb_backtrace();
			flags |= M_WAITOK;
			once++;
		}
	}
#endif
#if 0
	if (size == 0)
		kdb_enter("zero size malloc");
#endif
#ifdef MALLOC_MAKE_FAILURES
	if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) {
		atomic_add_int(&malloc_nowait_count, 1);
		if ((malloc_nowait_count % malloc_failure_rate) == 0) {
			atomic_add_int(&malloc_failure_count, 1);
			t_malloc_fail = time_uptime;
			return (NULL);
		}
	}
#endif
	if (flags & M_WAITOK)
		KASSERT(curthread->td_intr_nesting_level == 0,
		    ("malloc(M_WAITOK) in interrupt context"));

#ifdef DEBUG_MEMGUARD
	if (memguard_cmp(mtp))
		return memguard_alloc(size, flags);
#endif

#ifdef DEBUG_REDZONE
	size = redzone_size_ntor(size);
#endif

	if (size <= KMEM_ZMAX) {
		if (size & KMEM_ZMASK)
			size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
		indx = kmemsize[size >> KMEM_ZSHIFT];
		zone = kmemzones[indx].kz_zone;
		keg = zone->uz_keg;
#ifdef MALLOC_PROFILE
		krequests[size >> KMEM_ZSHIFT]++;
#endif
		va = uma_zalloc(zone, flags);
		if (va != NULL)
			size = keg->uk_size;
		malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
	} else {
		size = roundup(size, PAGE_SIZE);
		zone = NULL;
		keg = NULL;
		va = uma_large_malloc(size, flags);
		malloc_type_allocated(mtp, va == NULL ? 0 : size);
	}
	if (flags & M_WAITOK)
		KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL"));
	else if (va == NULL)
		t_malloc_fail = time_uptime;
#ifdef DIAGNOSTIC
	if (va != NULL && !(flags & M_ZERO)) {
		memset(va, 0x70, osize);
	}
#endif
#ifdef DEBUG_REDZONE
	if (va != NULL)
		va = redzone_setup(va, osize);
#endif
	return ((void *) va);
}

/*
 * free:
 *
 *	Free a block of memory allocated by malloc.
 *
 *	This routine may not block.
 */
void
free(void *addr, struct malloc_type *mtp)
{
	uma_slab_t slab;
	u_long size;

	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return;

#ifdef DEBUG_MEMGUARD
	if (memguard_cmp(mtp)) {
		memguard_free(addr);
		return;
	}
#endif

#ifdef DEBUG_REDZONE
	redzone_check(addr);
	addr = redzone_addr_ntor(addr);
#endif

	size = 0;

	slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK));

	if (slab == NULL)
		panic("free: address %p(%p) has not been allocated.\n",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

	if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
#ifdef INVARIANTS
		struct malloc_type **mtpp = addr;
#endif
		size = slab->us_keg->uk_size;
#ifdef INVARIANTS
		/*
		 * Cache a pointer to the malloc_type that most recently
		 * freed this memory here.  This way we know who is most
		 * likely to have stepped on it later.
		 *
		 * This code assumes that size is a multiple of the pointer
		 * size (8 bytes on 64-bit machines, 4 on 32-bit), so that
		 * the cached pointer lands in the item's last pointer-sized
		 * slot.
		 */
		mtpp = (struct malloc_type **)
		    ((unsigned long)mtpp & ~UMA_ALIGN_PTR);
		mtpp += (size - sizeof(struct malloc_type *)) /
		    sizeof(struct malloc_type *);
		*mtpp = mtp;
#endif
		uma_zfree_arg(LIST_FIRST(&slab->us_keg->uk_zones), addr, slab);
	} else {
		size = slab->us_size;
		uma_large_free(slab);
	}
	malloc_type_freed(mtp, size);
}

/*
 * realloc: change the size of a memory block
 */
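/*
 * realloc() may move the block, so a caller must not overwrite its only
 * copy of the old pointer before checking the result; on failure the
 * original block is left intact.  A sketch of the safe pattern (names
 * hypothetical):
 *
 *	newp = realloc(p, newsize, M_TEMP, M_NOWAIT);
 *	if (newp == NULL)
 *		return (ENOMEM);	(p remains valid and allocated)
 *	p = newp;
 */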
void *
realloc(void *addr, unsigned long size, struct malloc_type *mtp, int flags)
{
	uma_slab_t slab;
	unsigned long alloc;
	void *newaddr;

	/* realloc(NULL, ...) is equivalent to malloc(...) */
	if (addr == NULL)
		return (malloc(size, mtp, flags));

	/*
	 * XXX: Should report free of old memory and alloc of new memory to
	 * per-CPU stats.
	 */

#ifdef DEBUG_MEMGUARD
	if (memguard_cmp(mtp)) {
		slab = NULL;
		alloc = size;
	} else {
#endif

#ifdef DEBUG_REDZONE
	slab = NULL;
	alloc = redzone_get_size(addr);
#else
	slab = vtoslab((vm_offset_t)addr & ~(UMA_SLAB_MASK));

	/* Sanity check */
	KASSERT(slab != NULL,
	    ("realloc: address %p out of range", (void *)addr));

	/* Get the size of the original block */
	if (!(slab->us_flags & UMA_SLAB_MALLOC))
		alloc = slab->us_keg->uk_size;
	else
		alloc = slab->us_size;

	/* Reuse the original block if appropriate */
	if (size <= alloc
	    && (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE))
		return (addr);
#endif /* !DEBUG_REDZONE */

#ifdef DEBUG_MEMGUARD
	}
#endif

	/* Allocate a new, bigger (or smaller) block */
	if ((newaddr = malloc(size, mtp, flags)) == NULL)
		return (NULL);

	/* Copy over original contents */
	bcopy(addr, newaddr, min(size, alloc));
	free(addr, mtp);
	return (newaddr);
}

/*
 * reallocf: same as realloc(), but frees the passed pointer on failure.
 */
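/*
 * This suits the common idiom of resizing a buffer held in a single
 * pointer variable, since the old block is released automatically when
 * the resize fails (sketch, names hypothetical):
 *
 *	if ((p = reallocf(p, newsize, M_TEMP, M_NOWAIT)) == NULL)
 *		return (ENOMEM);	(old block already freed)
 */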
void *
reallocf(void *addr, unsigned long size, struct malloc_type *mtp, int flags)
{
	void *mem;

	if ((mem = realloc(addr, size, mtp, flags)) == NULL)
		free(addr, mtp);
	return (mem);
}

/*
 * Initialize the kernel memory allocator
 */
/* ARGSUSED*/
static void
kmeminit(void *dummy)
{
	u_int8_t indx;
	u_long mem_size;
	int i;

	mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);

	/*
	 * Try to auto-tune the kernel memory size so that it suits a
	 * wider range of machine sizes.  On an x86, a VM_KMEM_SIZE_SCALE
	 * value of 4 works well, while a VM_KMEM_SIZE of 12MB is a fair
	 * compromise.  VM_KMEM_SIZE_MAX depends on the maximum KVA space
	 * available; on an x86 with a total KVA space of 256MB, try to
	 * keep VM_KMEM_SIZE_MAX at 80MB or below.
	 *
	 * Note that the kmem_map is also used by the zone allocator,
	 * so make sure that there is enough space.
	 */
	vm_kmem_size = VM_KMEM_SIZE + nmbclusters * PAGE_SIZE;
	mem_size = cnt.v_page_count;

#if defined(VM_KMEM_SIZE_SCALE)
	vm_kmem_size_scale = VM_KMEM_SIZE_SCALE;
#endif
	TUNABLE_INT_FETCH("vm.kmem_size_scale", &vm_kmem_size_scale);
	if (vm_kmem_size_scale > 0 &&
	    (mem_size / vm_kmem_size_scale) > (vm_kmem_size / PAGE_SIZE))
		vm_kmem_size = (mem_size / vm_kmem_size_scale) * PAGE_SIZE;
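
	/*
	 * Worked example (illustrative): with vm_kmem_size_scale = 3 on a
	 * machine with 1GB of RAM and 4KB pages, mem_size is 262144 pages,
	 * so vm_kmem_size grows to (262144 / 3) * 4096, roughly 341MB,
	 * before the vm_kmem_size_max cap below is applied.
	 */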

#if defined(VM_KMEM_SIZE_MAX)
	vm_kmem_size_max = VM_KMEM_SIZE_MAX;
#endif
	TUNABLE_INT_FETCH("vm.kmem_size_max", &vm_kmem_size_max);
	if (vm_kmem_size_max > 0 && vm_kmem_size >= vm_kmem_size_max)
		vm_kmem_size = vm_kmem_size_max;

	/* Allow final override from the kernel environment */
#ifndef BURN_BRIDGES
	if (TUNABLE_INT_FETCH("kern.vm.kmem.size", &vm_kmem_size) != 0)
		printf("kern.vm.kmem.size is now called vm.kmem_size!\n");
#endif
	TUNABLE_INT_FETCH("vm.kmem_size", &vm_kmem_size);

	/*
	 * Limit kmem virtual size to twice the physical memory.
	 * This allows for kmem map sparseness, but limits the size
	 * to something sane.  Be careful to not overflow the 32-bit
	 * ints while doing the check.
	 */
	if (((vm_kmem_size / 2) / PAGE_SIZE) > cnt.v_page_count)
		vm_kmem_size = 2 * cnt.v_page_count * PAGE_SIZE;

	/*
	 * Tune settings based on the kernel map's size at this time.
	 */
	init_param3(vm_kmem_size / PAGE_SIZE);

	kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase,
	    (vm_offset_t *)&kmemlimit, vm_kmem_size);
	kmem_map->system_map = 1;

#ifdef DEBUG_MEMGUARD
	/*
	 * Initialize MemGuard if support compiled in.  MemGuard is a
	 * replacement allocator used for detecting tamper-after-free
	 * scenarios as they occur.  It is only used for debugging.
	 */
	vm_memguard_divisor = 10;
	TUNABLE_INT_FETCH("vm.memguard.divisor", &vm_memguard_divisor);

	/* Pick a conservative value if provided value sucks. */
	if ((vm_memguard_divisor <= 0) ||
	    ((vm_kmem_size / vm_memguard_divisor) == 0))
		vm_memguard_divisor = 10;
	memguard_init(kmem_map, vm_kmem_size / vm_memguard_divisor);
#endif

	uma_startup2();

	mt_zone = uma_zcreate("mt_zone", sizeof(struct malloc_type_internal),
#ifdef INVARIANTS
	    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
	    NULL, NULL, NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);
	for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
		int size = kmemzones[indx].kz_size;
		char *name = kmemzones[indx].kz_name;

		kmemzones[indx].kz_zone = uma_zcreate(name, size,
#ifdef INVARIANTS
		    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
		    NULL, NULL, NULL, NULL,
#endif
		    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);

		for (; i <= size; i += KMEM_ZBASE)
			kmemsize[i >> KMEM_ZSHIFT] = indx;
	}
}

void
malloc_init(void *data)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;

	KASSERT(cnt.v_page_count != 0, ("malloc_init before vm_init"));

	mtp = data;
	mtip = uma_zalloc(mt_zone, M_WAITOK | M_ZERO);
	mtp->ks_handle = mtip;

	mtx_lock(&malloc_mtx);
	mtp->ks_next = kmemstatistics;
	kmemstatistics = mtp;
	kmemcount++;
	mtx_unlock(&malloc_mtx);
}

void
malloc_uninit(void *data)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;
	struct malloc_type *mtp, *temp;
	uma_slab_t slab;
	long temp_allocs, temp_bytes;
	int i;

	mtp = data;
	KASSERT(mtp->ks_handle != NULL, ("malloc_uninit: cookie NULL"));
	mtx_lock(&malloc_mtx);
	mtip = mtp->ks_handle;
	mtp->ks_handle = NULL;
	if (mtp != kmemstatistics) {
		for (temp = kmemstatistics; temp != NULL;
		    temp = temp->ks_next) {
			if (temp->ks_next == mtp)
				temp->ks_next = mtp->ks_next;
		}
	} else
		kmemstatistics = mtp->ks_next;
	kmemcount--;
	mtx_unlock(&malloc_mtx);

	/*
	 * Look for memory leaks.
	 */
	temp_allocs = temp_bytes = 0;
	for (i = 0; i < MAXCPU; i++) {
		mtsp = &mtip->mti_stats[i];
		temp_allocs += mtsp->mts_numallocs;
		temp_allocs -= mtsp->mts_numfrees;
		temp_bytes += mtsp->mts_memalloced;
		temp_bytes -= mtsp->mts_memfreed;
	}
	if (temp_allocs > 0 || temp_bytes > 0) {
		printf("Warning: memory type %s leaked memory on destroy "
		    "(%ld allocations, %ld bytes leaked).\n", mtp->ks_shortdesc,
		    temp_allocs, temp_bytes);
	}

	slab = vtoslab((vm_offset_t) mtip & (~UMA_SLAB_MASK));
	uma_zfree_arg(mt_zone, mtip, slab);
}

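/*
 * Look up a malloc_type by its short description.  Returns NULL if no
 * type matches; the caller must hold malloc_mtx across the call and any
 * subsequent use of the returned pointer.
 */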
struct malloc_type *
malloc_desc2type(const char *desc)
{
	struct malloc_type *mtp;

	mtx_assert(&malloc_mtx, MA_OWNED);
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		if (strcmp(mtp->ks_shortdesc, desc) == 0)
			return (mtp);
	}
	return (NULL);
}

static int
sysctl_kern_malloc(SYSCTL_HANDLER_ARGS)
{
	struct malloc_type_stats mts_local, *mtsp;
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;
	struct sbuf sbuf;
	long temp_allocs, temp_bytes;
	int linesize = 128;
	int bufsize;
	int first;
	int error;
	char *buf;
	int cnt;
	int i;

	cnt = 0;

	/* Guess at how much room is needed. */
	mtx_lock(&malloc_mtx);
	cnt = kmemcount;
	mtx_unlock(&malloc_mtx);

	bufsize = linesize * (cnt + 1);
	buf = malloc(bufsize, M_TEMP, M_WAITOK|M_ZERO);
	sbuf_new(&sbuf, buf, bufsize, SBUF_FIXEDLEN);

	mtx_lock(&malloc_mtx);
	sbuf_printf(&sbuf,
	    "\n Type InUse MemUse HighUse Requests Size(s)\n");
	for (mtp = kmemstatistics; cnt != 0 && mtp != NULL;
	    mtp = mtp->ks_next, cnt--) {
		mtip = mtp->ks_handle;
		bzero(&mts_local, sizeof(mts_local));
		for (i = 0; i < MAXCPU; i++) {
			mtsp = &mtip->mti_stats[i];
			mts_local.mts_memalloced += mtsp->mts_memalloced;
			mts_local.mts_memfreed += mtsp->mts_memfreed;
			mts_local.mts_numallocs += mtsp->mts_numallocs;
			mts_local.mts_numfrees += mtsp->mts_numfrees;
			mts_local.mts_size |= mtsp->mts_size;
		}
		if (mts_local.mts_numallocs == 0)
			continue;

		/*
		 * Due to races in per-CPU statistics gathering, it's
		 * possible to get a slightly negative number here.  If we
		 * do, approximate with 0.
		 */
		if (mts_local.mts_numallocs > mts_local.mts_numfrees)
			temp_allocs = mts_local.mts_numallocs -
			    mts_local.mts_numfrees;
		else
			temp_allocs = 0;

		/*
		 * Ditto for bytes allocated.
		 */
		if (mts_local.mts_memalloced > mts_local.mts_memfreed)
			temp_bytes = mts_local.mts_memalloced -
			    mts_local.mts_memfreed;
		else
			temp_bytes = 0;

		/*
		 * The high watermark is no longer easily available, so
		 * we just print '-' for that column.
		 */
		sbuf_printf(&sbuf, "%13s%6lu%6luK -%9llu",
		    mtp->ks_shortdesc,
		    temp_allocs,
		    (temp_bytes + 1023) / 1024,
		    (unsigned long long)mts_local.mts_numallocs);

		first = 1;
		for (i = 0; i < sizeof(kmemzones) / sizeof(kmemzones[0]) - 1;
		    i++) {
			if (mts_local.mts_size & (1 << i)) {
				if (first)
					sbuf_printf(&sbuf, " ");
				else
					sbuf_printf(&sbuf, ",");
				sbuf_printf(&sbuf, "%s",
				    kmemzones[i].kz_name);
				first = 0;
			}
		}
		sbuf_printf(&sbuf, "\n");
	}
	sbuf_finish(&sbuf);
	mtx_unlock(&malloc_mtx);

	error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf));

	sbuf_delete(&sbuf);
	free(buf, M_TEMP);
	return (error);
}

SYSCTL_OID(_kern, OID_AUTO, malloc, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, sysctl_kern_malloc, "A", "Malloc Stats");

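/*
 * Export malloc(9) statistics as a binary stream: one
 * malloc_type_stream_header, followed, for each registered type, by a
 * malloc_type_header and MAXCPU malloc_type_stats records.  Userland
 * consumers (e.g., libmemstat) are expected to parse this layout.
 */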
static int
sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS)
{
	struct malloc_type_stream_header mtsh;
	struct malloc_type_internal *mtip;
	struct malloc_type_header mth;
	struct malloc_type *mtp;
	int buflen, count, error, i;
	struct sbuf sbuf;
	char *buffer;

	mtx_lock(&malloc_mtx);
restart:
	mtx_assert(&malloc_mtx, MA_OWNED);
	count = kmemcount;
	mtx_unlock(&malloc_mtx);
	buflen = sizeof(mtsh) + count * (sizeof(mth) +
	    sizeof(struct malloc_type_stats) * MAXCPU) + 1;
	buffer = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
	mtx_lock(&malloc_mtx);
	if (count < kmemcount) {
		free(buffer, M_TEMP);
		goto restart;
	}

	sbuf_new(&sbuf, buffer, buflen, SBUF_FIXEDLEN);

	/*
	 * Insert stream header.
	 */
	bzero(&mtsh, sizeof(mtsh));
	mtsh.mtsh_version = MALLOC_TYPE_STREAM_VERSION;
	mtsh.mtsh_maxcpus = MAXCPU;
	mtsh.mtsh_count = kmemcount;
	if (sbuf_bcat(&sbuf, &mtsh, sizeof(mtsh)) < 0) {
		mtx_unlock(&malloc_mtx);
		error = ENOMEM;
		goto out;
	}

	/*
	 * Insert alternating sequence of type headers and type statistics.
	 */
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = (struct malloc_type_internal *)mtp->ks_handle;

		/*
		 * Insert type header.
		 */
		bzero(&mth, sizeof(mth));
		strlcpy(mth.mth_name, mtp->ks_shortdesc, MALLOC_MAX_NAME);
		if (sbuf_bcat(&sbuf, &mth, sizeof(mth)) < 0) {
			mtx_unlock(&malloc_mtx);
			error = ENOMEM;
			goto out;
		}

		/*
		 * Insert type statistics for each CPU.
		 */
		for (i = 0; i < MAXCPU; i++) {
			if (sbuf_bcat(&sbuf, &mtip->mti_stats[i],
			    sizeof(mtip->mti_stats[i])) < 0) {
				mtx_unlock(&malloc_mtx);
				error = ENOMEM;
				goto out;
			}
		}
	}
	mtx_unlock(&malloc_mtx);
	sbuf_finish(&sbuf);
	error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf));
out:
	sbuf_delete(&sbuf);
	free(buffer, M_TEMP);
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, malloc_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
    0, 0, sysctl_kern_malloc_stats, "s,malloc_type_ustats",
    "Return malloc types");

SYSCTL_INT(_kern, OID_AUTO, malloc_count, CTLFLAG_RD, &kmemcount, 0,
    "Count of kernel malloc types");

#ifdef DDB
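/*
 * Registered as the DDB command "show malloc": print per-type allocation
 * and free counts, summed across the per-CPU statistics.
 */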
DB_SHOW_COMMAND(malloc, db_show_malloc)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;
	u_int64_t allocs, frees;
	int i;

	db_printf("%18s %12s %12s %12s\n", "Type", "Allocs", "Frees",
	    "Used");
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = (struct malloc_type_internal *)mtp->ks_handle;
		allocs = 0;
		frees = 0;
		for (i = 0; i < MAXCPU; i++) {
			allocs += mtip->mti_stats[i].mts_numallocs;
			frees += mtip->mti_stats[i].mts_numfrees;
		}
		db_printf("%18s %12ju %12ju %12ju\n", mtp->ks_shortdesc,
		    allocs, frees, allocs - frees);
	}
}
#endif

#ifdef MALLOC_PROFILE

static int
sysctl_kern_mprof(SYSCTL_HANDLER_ARGS)
{
	int linesize = 64;
	struct sbuf sbuf;
	uint64_t count;
	uint64_t waste;
	uint64_t mem;
	int bufsize;
	int error;
	char *buf;
	int rsize;
	int size;
	int i;

	bufsize = linesize * (KMEM_ZSIZE + 1);
	bufsize += 128;		/* For the stats line */
	bufsize += 128;		/* For the banner line */
	waste = 0;
	mem = 0;

	buf = malloc(bufsize, M_TEMP, M_WAITOK|M_ZERO);
	sbuf_new(&sbuf, buf, bufsize, SBUF_FIXEDLEN);
	sbuf_printf(&sbuf,
	    "\n Size Requests Real Size\n");
	for (i = 0; i < KMEM_ZSIZE; i++) {
		size = i << KMEM_ZSHIFT;
		rsize = kmemzones[kmemsize[i]].kz_size;
		count = (long long unsigned)krequests[i];

		sbuf_printf(&sbuf, "%6d%28llu%11d\n", size,
		    (unsigned long long)count, rsize);

		if ((rsize * count) > (size * count))
			waste += (rsize * count) - (size * count);
		mem += (rsize * count);
	}
	sbuf_printf(&sbuf,
	    "\nTotal memory used:\t%30llu\nTotal Memory wasted:\t%30llu\n",
	    (unsigned long long)mem, (unsigned long long)waste);
	sbuf_finish(&sbuf);

	error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf));

	sbuf_delete(&sbuf);
	free(buf, M_TEMP);
	return (error);
}

SYSCTL_OID(_kern, OID_AUTO, mprof, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, sysctl_kern_mprof, "A", "Malloc Profiling");
#endif /* MALLOC_PROFILE */