/*
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/5.3/sys/kern/kern_malloc.c 136588 2004-10-16 08:43:07Z cvs2svn $");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/vmmeter.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/time.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#if defined(INVARIANTS) && defined(__i386__)
#include <machine/cpu.h>
#endif
/*
 * When realloc() is called, if the new size is sufficiently smaller than
 * the old size, realloc() will allocate a new, smaller block to avoid
 * wasting memory. 'Sufficiently smaller' is defined as: newsize <=
 * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'.
 */
#ifndef REALLOC_FRACTION
#define	REALLOC_FRACTION	1	/* new block if <= half the size */
#endif

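/*
 * For example, with the default REALLOC_FRACTION of 1, realloc()ing a
 * 1024-byte block down to 512 bytes or less allocates a fresh block and
 * copies the data, while shrinking it to, say, 600 bytes just returns
 * the original pointer.  (Illustrative numbers; they follow from the
 * size test in realloc() below.)
 */
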
MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");
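
/*
 * Illustrative usage sketch (M_FOOBUF is a made-up name, not part of
 * this file): a subsystem declares its own malloc type with
 * MALLOC_DECLARE() in a shared header and defines it exactly once with
 * MALLOC_DEFINE() in a source file, e.g.
 *
 *	MALLOC_DECLARE(M_FOOBUF);
 *	MALLOC_DEFINE(M_FOOBUF, "foobuf", "foo driver data buffers");
 */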

static void kmeminit(void *);
SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL)

static MALLOC_DEFINE(M_FREE, "free", "should be on free list");

static struct malloc_type *kmemstatistics;
static char *kmembase;
static char *kmemlimit;

#define	KMEM_ZSHIFT	4
#define	KMEM_ZBASE	16
#define	KMEM_ZMASK	(KMEM_ZBASE - 1)

#define	KMEM_ZMAX	PAGE_SIZE
#define	KMEM_ZSIZE	(KMEM_ZMAX >> KMEM_ZSHIFT)
static u_int8_t kmemsize[KMEM_ZSIZE + 1];
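
/*
 * For example, a 100-byte request is rounded up to 112 (the next
 * multiple of KMEM_ZBASE) and kmemsize[112 >> KMEM_ZSHIFT] yields the
 * index of the 128-byte zone in kmemzones[] below.
 */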

/* These won't be powers of two for long */
struct {
	int kz_size;
	char *kz_name;
	uma_zone_t kz_zone;
} kmemzones[] = {
	{16, "16", NULL},
	{32, "32", NULL},
	{64, "64", NULL},
	{128, "128", NULL},
	{256, "256", NULL},
	{512, "512", NULL},
	{1024, "1024", NULL},
	{2048, "2048", NULL},
	{4096, "4096", NULL},
#if PAGE_SIZE > 4096
	{8192, "8192", NULL},
#if PAGE_SIZE > 8192
	{16384, "16384", NULL},
#if PAGE_SIZE > 16384
	{32768, "32768", NULL},
#if PAGE_SIZE > 32768
	{65536, "65536", NULL},
#if PAGE_SIZE > 65536
#error	"Unsupported PAGE_SIZE"
#endif	/* 65536 */
#endif	/* 32768 */
#endif	/* 16384 */
#endif	/* 8192 */
#endif	/* 4096 */
	{0, NULL},
};

u_int vm_kmem_size;
SYSCTL_UINT(_vm, OID_AUTO, kmem_size, CTLFLAG_RD, &vm_kmem_size, 0,
    "Size of kernel memory");

u_int vm_kmem_size_max;
SYSCTL_UINT(_vm, OID_AUTO, kmem_size_max, CTLFLAG_RD, &vm_kmem_size_max, 0,
    "Maximum size of kernel memory");

u_int vm_kmem_size_scale;
SYSCTL_UINT(_vm, OID_AUTO, kmem_size_scale, CTLFLAG_RD, &vm_kmem_size_scale, 0,
    "Scale factor for kernel memory size");

/*
 * The malloc_mtx protects the kmemstatistics linked list.
 */

struct mtx malloc_mtx;

#ifdef MALLOC_PROFILE
uint64_t krequests[KMEM_ZSIZE + 1];

static int sysctl_kern_mprof(SYSCTL_HANDLER_ARGS);
#endif

static int sysctl_kern_malloc(SYSCTL_HANDLER_ARGS);

/* time_uptime of last malloc(9) failure */
static time_t t_malloc_fail;

#ifdef MALLOC_MAKE_FAILURES
/*
 * Causes malloc failures every (n) mallocs with M_NOWAIT. If set to 0,
 * doesn't cause failures.
 */
SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD, 0,
    "Kernel malloc debugging options");

static int malloc_failure_rate;
static int malloc_nowait_count;
static int malloc_failure_count;
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RW,
    &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail");
TUNABLE_INT("debug.malloc.failure_rate", &malloc_failure_rate);
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD,
    &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures");
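
/*
 * Usage sketch: on a kernel built with "options MALLOC_MAKE_FAILURES",
 * every 100th M_NOWAIT allocation can be made to fail either from the
 * loader ("set debug.malloc.failure_rate=100") or at run time
 * ("sysctl debug.malloc.failure_rate=100"); setting it back to 0 stops
 * the induced failures.
 */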
#endif

int
malloc_last_fail(void)
{

	return (time_uptime - t_malloc_fail);
}

/*
 * Add this to the informational malloc_type bucket.
 */
static void
malloc_type_zone_allocated(struct malloc_type *ksp, unsigned long size,
    int zindx)
{
	mtx_lock(&ksp->ks_mtx);
	ksp->ks_calls++;
	if (zindx != -1)
		ksp->ks_size |= 1 << zindx;
	if (size != 0) {
		ksp->ks_memuse += size;
		ksp->ks_inuse++;
		if (ksp->ks_memuse > ksp->ks_maxused)
			ksp->ks_maxused = ksp->ks_memuse;
	}
	mtx_unlock(&ksp->ks_mtx);
}

void
malloc_type_allocated(struct malloc_type *ksp, unsigned long size)
{
	malloc_type_zone_allocated(ksp, size, -1);
}

/*
 * Remove this allocation from the informational malloc_type bucket.
 */
void
malloc_type_freed(struct malloc_type *ksp, unsigned long size)
{
	mtx_lock(&ksp->ks_mtx);
	KASSERT(size <= ksp->ks_memuse,
		("malloc(9)/free(9) confusion.\n%s",
		"Probably freeing with wrong type, but maybe not here."));
	ksp->ks_memuse -= size;
	ksp->ks_inuse--;
	mtx_unlock(&ksp->ks_mtx);
}

/*
 *	malloc:
 *
 *	Allocate a block of memory.
 *
 *	If M_NOWAIT is set, this routine will not block and return NULL if
 *	the allocation fails.
 */
void *
malloc(size, type, flags)
	unsigned long size;
	struct malloc_type *type;
	int flags;
{
	int indx;
	caddr_t va;
	uma_zone_t zone;
	uma_keg_t keg;
#ifdef DIAGNOSTIC
	unsigned long osize = size;
#endif

#ifdef INVARIANTS
	/*
	 * Make sure that exactly one of M_WAITOK and M_NOWAIT is
	 * specified, and catch the common API botches.
	 */
	indx = flags & (M_WAITOK | M_NOWAIT | M_DONTWAIT | M_TRYWAIT);
	if (indx != M_NOWAIT && indx != M_WAITOK) {
		static struct timeval lasterr;
		static int curerr, once;
		if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) {
			printf("Bad malloc flags: %x\n", indx);
			kdb_backtrace();
			flags |= M_WAITOK;
			once++;
		}
	}
#endif
#if 0
	if (size == 0)
		kdb_enter("zero size malloc");
#endif
#ifdef MALLOC_MAKE_FAILURES
	if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) {
		atomic_add_int(&malloc_nowait_count, 1);
		if ((malloc_nowait_count % malloc_failure_rate) == 0) {
			atomic_add_int(&malloc_failure_count, 1);
			t_malloc_fail = time_uptime;
			return (NULL);
		}
	}
#endif
	if (flags & M_WAITOK)
		KASSERT(curthread->td_intr_nesting_level == 0,
		    ("malloc(M_WAITOK) in interrupt context"));
	if (size <= KMEM_ZMAX) {
		if (size & KMEM_ZMASK)
			size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
		indx = kmemsize[size >> KMEM_ZSHIFT];
		zone = kmemzones[indx].kz_zone;
		keg = zone->uz_keg;
#ifdef MALLOC_PROFILE
		krequests[size >> KMEM_ZSHIFT]++;
#endif
		va = uma_zalloc(zone, flags);
		if (va != NULL)
			size = keg->uk_size;
		malloc_type_zone_allocated(type, va == NULL ? 0 : size, indx);
	} else {
		size = roundup(size, PAGE_SIZE);
		zone = NULL;
		keg = NULL;
		va = uma_large_malloc(size, flags);
		malloc_type_allocated(type, va == NULL ? 0 : size);
	}
	if (flags & M_WAITOK)
		KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL"));
	else if (va == NULL)
		t_malloc_fail = time_uptime;
#ifdef DIAGNOSTIC
	if (va != NULL && !(flags & M_ZERO)) {
		memset(va, 0x70, osize);
	}
#endif
	return ((void *) va);
}
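
/*
 * Illustrative caller (the softc and the M_DEVBUF choice here are just
 * an example, not taken from this file):
 *
 *	struct foo_softc *sc;
 *
 *	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO);
 *	...
 *	free(sc, M_DEVBUF);
 *
 * An M_NOWAIT caller must check for a NULL return instead.
 */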

/*
 *	free:
 *
 *	Free a block of memory allocated by malloc.
 *
 *	This routine may not block.
 */
void
free(addr, type)
	void *addr;
	struct malloc_type *type;
{
	uma_slab_t slab;
	u_long size;

	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return;

	KASSERT(type->ks_memuse > 0,
		("malloc(9)/free(9) confusion.\n%s",
		"Probably freeing with wrong type, but maybe not here."));
	size = 0;

	slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK));

	if (slab == NULL)
		panic("free: address %p(%p) has not been allocated.\n",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

	if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
#ifdef INVARIANTS
		struct malloc_type **mtp = addr;
#endif
		size = slab->us_keg->uk_size;
#ifdef INVARIANTS
		/*
		 * Cache a pointer to the malloc_type that most recently freed
		 * this memory here.  This way we know who is most likely to
		 * have stepped on it later.
		 *
		 * This code assumes that size is a multiple of 8 bytes on
		 * 64-bit machines.
		 */
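		/*
		 * For example, in a 128-byte block on a 64-bit machine the
		 * type pointer is written into the block's last 8 bytes,
		 * i.e. at byte offset 120.
		 */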
		mtp = (struct malloc_type **)
		    ((unsigned long)mtp & ~UMA_ALIGN_PTR);
		mtp += (size - sizeof(struct malloc_type *)) /
		    sizeof(struct malloc_type *);
		*mtp = type;
#endif
		uma_zfree_arg(LIST_FIRST(&slab->us_keg->uk_zones), addr, slab);
	} else {
		size = slab->us_size;
		uma_large_free(slab);
	}
	malloc_type_freed(type, size);
}

/*
 *	realloc: change the size of a memory block
 */
void *
realloc(addr, size, type, flags)
	void *addr;
	unsigned long size;
	struct malloc_type *type;
	int flags;
{
	uma_slab_t slab;
	unsigned long alloc;
	void *newaddr;

	/* realloc(NULL, ...) is equivalent to malloc(...) */
	if (addr == NULL)
		return (malloc(size, type, flags));

	slab = vtoslab((vm_offset_t)addr & ~(UMA_SLAB_MASK));

	/* Sanity check */
	KASSERT(slab != NULL,
	    ("realloc: address %p out of range", (void *)addr));

	/* Get the size of the original block */
	if (slab->us_keg)
		alloc = slab->us_keg->uk_size;
	else
		alloc = slab->us_size;

	/* Reuse the original block if appropriate */
	if (size <= alloc
	    && (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE))
		return (addr);

	/* Allocate a new, bigger (or smaller) block */
	if ((newaddr = malloc(size, type, flags)) == NULL)
		return (NULL);

	/* Copy over original contents */
	bcopy(addr, newaddr, min(size, alloc));
	free(addr, type);
	return (newaddr);
}

/*
 *	reallocf: same as realloc() but free memory on failure.
 */
void *
reallocf(addr, size, type, flags)
	void *addr;
	unsigned long size;
	struct malloc_type *type;
	int flags;
{
	void *mem;

	if ((mem = realloc(addr, size, type, flags)) == NULL)
		free(addr, type);
	return (mem);
}
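
/*
 * Usage note (illustrative): with plain realloc() the caller must keep
 * the old pointer, since it remains valid when realloc() fails:
 *
 *	np = realloc(p, newsize, M_TEMP, M_NOWAIT);
 *	if (np == NULL)
 *		return (ENOMEM);
 *	p = np;
 *
 * reallocf() instead frees the old block on failure, so the common
 * "p = reallocf(p, newsize, M_TEMP, M_NOWAIT)" idiom cannot leak it.
 */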

/*
 * Initialize the kernel memory allocator
 */
/* ARGSUSED*/
static void
kmeminit(dummy)
	void *dummy;
{
	u_int8_t indx;
	u_long mem_size;
	int i;

	mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);

	/*
	 * Try to auto-tune the kernel memory size, so that it is
	 * more applicable for a wider range of machine sizes.
	 * On an X86, a VM_KMEM_SIZE_SCALE value of 4 is good, while
	 * a VM_KMEM_SIZE of 12MB is a fair compromise.  The
	 * VM_KMEM_SIZE_MAX is dependent on the maximum KVA space
	 * available, and on an X86 with a total KVA space of 256MB,
	 * try to keep VM_KMEM_SIZE_MAX at 80MB or below.
	 *
	 * Note that the kmem_map is also used by the zone allocator,
	 * so make sure that there is enough space.
	 */
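	/*
	 * Worked example (illustrative numbers only): with 512MB of RAM
	 * and 4KB pages, mem_size is 131072 pages; a vm_kmem_size_scale
	 * of 4 gives (131072 / 4) * 4096 = 128MB of kmem, which is then
	 * clipped to vm_kmem_size_max if that limit is smaller.
	 */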
	vm_kmem_size = VM_KMEM_SIZE + nmbclusters * PAGE_SIZE;
	mem_size = cnt.v_page_count;

#if defined(VM_KMEM_SIZE_SCALE)
	vm_kmem_size_scale = VM_KMEM_SIZE_SCALE;
#endif
	TUNABLE_INT_FETCH("vm.kmem_size_scale", &vm_kmem_size_scale);
	if (vm_kmem_size_scale > 0 &&
	    (mem_size / vm_kmem_size_scale) > (vm_kmem_size / PAGE_SIZE))
		vm_kmem_size = (mem_size / vm_kmem_size_scale) * PAGE_SIZE;

#if defined(VM_KMEM_SIZE_MAX)
	vm_kmem_size_max = VM_KMEM_SIZE_MAX;
#endif
	TUNABLE_INT_FETCH("vm.kmem_size_max", &vm_kmem_size_max);
	if (vm_kmem_size_max > 0 && vm_kmem_size >= vm_kmem_size_max)
		vm_kmem_size = vm_kmem_size_max;

	/* Allow final override from the kernel environment */
#ifndef BURN_BRIDGES
	if (TUNABLE_INT_FETCH("kern.vm.kmem.size", &vm_kmem_size) != 0)
		printf("kern.vm.kmem.size is now called vm.kmem_size!\n");
#endif
	TUNABLE_INT_FETCH("vm.kmem_size", &vm_kmem_size);

	/*
	 * Limit kmem virtual size to twice the physical memory.
	 * This allows for kmem map sparseness, but limits the size
	 * to something sane.  Be careful to not overflow the 32bit
	 * ints while doing the check.
	 */
	if (((vm_kmem_size / 2) / PAGE_SIZE) > cnt.v_page_count)
		vm_kmem_size = 2 * cnt.v_page_count * PAGE_SIZE;

	/*
	 * Tune settings based on the kernel map's size at this time.
	 */
	init_param3(vm_kmem_size / PAGE_SIZE);

	kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase,
	    (vm_offset_t *)&kmemlimit, vm_kmem_size);
	kmem_map->system_map = 1;

	uma_startup2();

	for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
		int size = kmemzones[indx].kz_size;
		char *name = kmemzones[indx].kz_name;

		kmemzones[indx].kz_zone = uma_zcreate(name, size,
#ifdef INVARIANTS
		    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
		    NULL, NULL, NULL, NULL,
#endif
		    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);

		for (; i <= size; i += KMEM_ZBASE)
			kmemsize[i >> KMEM_ZSHIFT] = indx;

	}
}

void
malloc_init(data)
	void *data;
{
	struct malloc_type *type = (struct malloc_type *)data;

	mtx_lock(&malloc_mtx);
	if (type->ks_magic != M_MAGIC)
		panic("malloc type lacks magic");

	if (cnt.v_page_count == 0)
		panic("malloc_init not allowed before vm init");

	if (type->ks_next != NULL)
		return;

	type->ks_next = kmemstatistics;
	kmemstatistics = type;
	mtx_init(&type->ks_mtx, type->ks_shortdesc, "Malloc Stats", MTX_DEF);
	mtx_unlock(&malloc_mtx);
}

void
malloc_uninit(data)
	void *data;
{
	struct malloc_type *type = (struct malloc_type *)data;
	struct malloc_type *t;

	mtx_lock(&malloc_mtx);
	mtx_lock(&type->ks_mtx);
	if (type->ks_magic != M_MAGIC)
		panic("malloc type lacks magic");

	if (cnt.v_page_count == 0)
		panic("malloc_uninit not allowed before vm init");

	if (type == kmemstatistics)
		kmemstatistics = type->ks_next;
	else {
		for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
			if (t->ks_next == type) {
				t->ks_next = type->ks_next;
				break;
			}
		}
	}
	type->ks_next = NULL;
	mtx_destroy(&type->ks_mtx);
	mtx_unlock(&malloc_mtx);
}

static int
sysctl_kern_malloc(SYSCTL_HANDLER_ARGS)
{
	struct malloc_type *type;
	int linesize = 128;
	int curline;
	int bufsize;
	int first;
	int error;
	char *buf;
	char *p;
	int cnt;
	int len;
	int i;

	cnt = 0;

	mtx_lock(&malloc_mtx);
	for (type = kmemstatistics; type != NULL; type = type->ks_next)
		cnt++;

	mtx_unlock(&malloc_mtx);
	bufsize = linesize * (cnt + 1);
	p = buf = (char *)malloc(bufsize, M_TEMP, M_WAITOK|M_ZERO);
	mtx_lock(&malloc_mtx);

	len = snprintf(p, linesize,
	    "\n        Type  InUse MemUse HighUse Requests  Size(s)\n");
	p += len;

	for (type = kmemstatistics; cnt != 0 && type != NULL;
	    type = type->ks_next, cnt--) {
		if (type->ks_calls == 0)
			continue;

		curline = linesize - 2;	/* Leave room for the \n */
		len = snprintf(p, curline, "%13s%6lu%6luK%7luK%9llu",
			type->ks_shortdesc,
			type->ks_inuse,
			(type->ks_memuse + 1023) / 1024,
			(type->ks_maxused + 1023) / 1024,
			(long long unsigned)type->ks_calls);
		curline -= len;
		p += len;

		first = 1;
		for (i = 0; i < sizeof(kmemzones) / sizeof(kmemzones[0]) - 1;
		    i++) {
			if (type->ks_size & (1 << i)) {
				if (first)
					len = snprintf(p, curline, " ");
				else
					len = snprintf(p, curline, ",");
				curline -= len;
				p += len;

				len = snprintf(p, curline,
				    "%s", kmemzones[i].kz_name);
				curline -= len;
				p += len;

				first = 0;
			}
		}

		len = snprintf(p, 2, "\n");
		p += len;
	}

	mtx_unlock(&malloc_mtx);
	error = SYSCTL_OUT(req, buf, p - buf);

	free(buf, M_TEMP);
	return (error);
}

SYSCTL_OID(_kern, OID_AUTO, malloc, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, sysctl_kern_malloc, "A", "Malloc Stats");
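
/*
 * The table built above is what "sysctl kern.malloc" prints and what
 * vmstat(8) -m presents: one line per malloc type showing its current
 * and peak usage plus the zone sizes it has drawn from.
 */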

#ifdef MALLOC_PROFILE

static int
sysctl_kern_mprof(SYSCTL_HANDLER_ARGS)
{
	int linesize = 64;
	uint64_t count;
	uint64_t waste;
	uint64_t mem;
	int bufsize;
	int error;
	char *buf;
	int rsize;
	int size;
	char *p;
	int len;
	int i;

	bufsize = linesize * (KMEM_ZSIZE + 1);
	bufsize += 128;		/* For the stats line */
	bufsize += 128;		/* For the banner line */
	waste = 0;
	mem = 0;

	p = buf = (char *)malloc(bufsize, M_TEMP, M_WAITOK|M_ZERO);
	len = snprintf(p, bufsize,
	    "\n  Size                    Requests  Real Size\n");
	bufsize -= len;
	p += len;

	for (i = 0; i < KMEM_ZSIZE; i++) {
		size = i << KMEM_ZSHIFT;
		rsize = kmemzones[kmemsize[i]].kz_size;
		count = (long long unsigned)krequests[i];

		len = snprintf(p, bufsize, "%6d%28llu%11d\n",
		    size, (unsigned long long)count, rsize);
		bufsize -= len;
		p += len;

		if ((rsize * count) > (size * count))
			waste += (rsize * count) - (size * count);
		mem += (rsize * count);
	}

	len = snprintf(p, bufsize,
	    "\nTotal memory used:\t%30llu\nTotal Memory wasted:\t%30llu\n",
	    (unsigned long long)mem, (unsigned long long)waste);
	p += len;

	error = SYSCTL_OUT(req, buf, p - buf);

	free(buf, M_TEMP);
	return (error);
}

SYSCTL_OID(_kern, OID_AUTO, mprof, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, sysctl_kern_mprof, "A", "Malloc Profiling");
#endif /* MALLOC_PROFILE */