/*-
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2005-2006 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 */

/*
 * Kernel malloc(9) implementation -- general purpose kernel memory allocator
 * based on memory types.  The back end is implemented using the UMA(9) zone
 * allocator.  A set of fixed-size buckets is used for smaller allocations,
 * and a special UMA allocation interface is used for larger allocations.
 * Callers declare memory types, and statistics are maintained independently
 * for each memory type.  Statistics are maintained per-CPU for performance
 * reasons.  See malloc(9) and the comments in malloc.h for a detailed
 * description.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/vmmeter.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/time.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#ifdef DEBUG_MEMGUARD
#include <vm/memguard.h>
#endif
#ifdef DEBUG_REDZONE
#include <vm/redzone.h>
#endif

#if defined(INVARIANTS) && defined(__i386__)
#include <machine/cpu.h>
#endif

#include <ddb/ddb.h>

/*
 * When realloc() is called, if the new size is sufficiently smaller than
 * the old size, realloc() will allocate a new, smaller block to avoid
 * wasting memory.  'Sufficiently smaller' is defined as: newsize <=
 * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'.
 */
#ifndef REALLOC_FRACTION
#define	REALLOC_FRACTION	1	/* new block if <= half the size */
#endif
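
/*
 * Worked example: with the default REALLOC_FRACTION of 1, shrinking a
 * 1024-byte block with realloc() reuses the block for any new size
 * greater than 512 bytes; a request for 512 bytes or less (newsize <=
 * 1024 / 2^1) copies the data into a newly allocated, smaller block.
 */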

/*
 * Centrally define some common malloc types.
 */
MALLOC_DEFINE(M_CACHE, "cache", "Various dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");

static void kmeminit(void *);
SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL)

static MALLOC_DEFINE(M_FREE, "free", "should be on free list");

static struct malloc_type *kmemstatistics;
static vm_offset_t kmembase;
static vm_offset_t kmemlimit;
static int kmemcount;

#define	KMEM_ZSHIFT	4
#define	KMEM_ZBASE	16
#define	KMEM_ZMASK	(KMEM_ZBASE - 1)

#define	KMEM_ZMAX	PAGE_SIZE
#define	KMEM_ZSIZE	(KMEM_ZMAX >> KMEM_ZSHIFT)
static u_int8_t kmemsize[KMEM_ZSIZE + 1];

/*
 * Small malloc(9) memory allocations are allocated from a set of UMA buckets
 * of various sizes.
 *
 * XXX: The comment here used to read "These won't be powers of two for
 * long."  It's possible that a significant amount of wasted memory could be
 * recovered by tuning the sizes of these buckets.
 */
struct {
	int kz_size;
	char *kz_name;
	uma_zone_t kz_zone;
} kmemzones[] = {
	{16, "16", NULL},
	{32, "32", NULL},
	{64, "64", NULL},
	{128, "128", NULL},
	{256, "256", NULL},
	{512, "512", NULL},
	{1024, "1024", NULL},
	{2048, "2048", NULL},
	{4096, "4096", NULL},
#if PAGE_SIZE > 4096
	{8192, "8192", NULL},
#if PAGE_SIZE > 8192
	{16384, "16384", NULL},
#if PAGE_SIZE > 16384
	{32768, "32768", NULL},
#if PAGE_SIZE > 32768
	{65536, "65536", NULL},
#if PAGE_SIZE > 65536
#error	"Unsupported PAGE_SIZE"
#endif	/* 65536 */
#endif	/* 32768 */
#endif	/* 16384 */
#endif	/* 8192 */
#endif	/* 4096 */
	{0, NULL},
};
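
/*
 * Worked example of the size-to-zone mapping set up in kmeminit(): a
 * 100-byte malloc(9) request is rounded up to the next multiple of
 * KMEM_ZBASE (112 bytes), and kmemsize[112 >> KMEM_ZSHIFT] selects the
 * "128" zone, so the caller actually receives a 128-byte block.
 */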

/*
 * Zone to allocate malloc type descriptions from.  For ABI reasons, memory
 * types are described by a data structure passed by the declaring code, but
 * the malloc(9) implementation has its own data structure describing the
 * type and statistics.  This permits the malloc(9)-internal data structures
 * to be modified without breaking binary-compiled kernel modules that
 * declare malloc types.
 */
static uma_zone_t mt_zone;
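
/*
 * A kernel module declares its own memory type with MALLOC_DEFINE()
 * (and MALLOC_DECLARE() in a shared header), then passes it to
 * malloc(9) and free(9) so its allocations are accounted separately.
 * A sketch, using a hypothetical type name:
 *
 *	MALLOC_DEFINE(M_FOODRV, "foodrv", "foo driver state");
 */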

u_int vm_kmem_size;
SYSCTL_UINT(_vm, OID_AUTO, kmem_size, CTLFLAG_RD, &vm_kmem_size, 0,
    "Size of kernel memory");

u_int vm_kmem_size_min;
SYSCTL_UINT(_vm, OID_AUTO, kmem_size_min, CTLFLAG_RD, &vm_kmem_size_min, 0,
    "Minimum size of kernel memory");

u_int vm_kmem_size_max;
SYSCTL_UINT(_vm, OID_AUTO, kmem_size_max, CTLFLAG_RD, &vm_kmem_size_max, 0,
    "Maximum size of kernel memory");

u_int vm_kmem_size_scale;
SYSCTL_UINT(_vm, OID_AUTO, kmem_size_scale, CTLFLAG_RD, &vm_kmem_size_scale, 0,
    "Scale factor for kernel memory size");

/*
 * The malloc_mtx protects the kmemstatistics linked list.
 */
struct mtx malloc_mtx;

#ifdef MALLOC_PROFILE
uint64_t krequests[KMEM_ZSIZE + 1];

static int sysctl_kern_mprof(SYSCTL_HANDLER_ARGS);
#endif

static int sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS);

/*
 * time_uptime of the last malloc(9) failure (induced or real).
 */
static time_t t_malloc_fail;

/*
 * malloc(9) fault injection -- cause malloc failures every (n) mallocs when
 * the caller specifies M_NOWAIT.  If set to 0, no failures are caused.
 */
#ifdef MALLOC_MAKE_FAILURES
SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD, 0,
    "Kernel malloc debugging options");

static int malloc_failure_rate;
static int malloc_nowait_count;
static int malloc_failure_count;
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RW,
    &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail");
TUNABLE_INT("debug.malloc.failure_rate", &malloc_failure_rate);
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD,
    &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures");
#endif
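
/*
 * Example (hypothetical rate): on a kernel built with
 * "options MALLOC_MAKE_FAILURES", setting the debug.malloc.failure_rate
 * tunable or sysctl to 10 makes every tenth M_NOWAIT allocation fail,
 * exercising callers' error-handling paths.
 */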

int
malloc_last_fail(void)
{

	return (time_uptime - t_malloc_fail);
}

/*
 * An allocation has succeeded -- update malloc type statistics for the
 * size of the bucket used.  Occurs within a critical section so that the
 * thread isn't preempted and doesn't migrate while updating per-CPU
 * statistics.
 */
static void
malloc_type_zone_allocated(struct malloc_type *mtp, unsigned long size,
    int zindx)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = mtp->ks_handle;
	mtsp = &mtip->mti_stats[curcpu];
	if (size > 0) {
		mtsp->mts_memalloced += size;
		mtsp->mts_numallocs++;
	}
	if (zindx != -1)
		mtsp->mts_size |= 1 << zindx;
	critical_exit();
}

void
malloc_type_allocated(struct malloc_type *mtp, unsigned long size)
{

	if (size > 0)
		malloc_type_zone_allocated(mtp, size, -1);
}

/*
 * A free operation has occurred -- update malloc type statistics for the
 * size of the bucket used.  Occurs within a critical section so that the
 * thread isn't preempted and doesn't migrate while updating per-CPU
 * statistics.
 */
void
malloc_type_freed(struct malloc_type *mtp, unsigned long size)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = mtp->ks_handle;
	mtsp = &mtip->mti_stats[curcpu];
	mtsp->mts_memfreed += size;
	mtsp->mts_numfrees++;
	critical_exit();
}

/*
 * malloc:
 *
 *	Allocate a block of memory.
 *
 *	If M_NOWAIT is set, this routine will not block; it returns NULL
 *	if the allocation fails.
 */
void *
malloc(unsigned long size, struct malloc_type *mtp, int flags)
{
	int indx;
	caddr_t va;
	uma_zone_t zone;
	uma_keg_t keg;
#if defined(DIAGNOSTIC) || defined(DEBUG_REDZONE)
	unsigned long osize = size;
#endif

#ifdef INVARIANTS
	/*
	 * Check that exactly one of M_WAITOK or M_NOWAIT is specified.
	 */
	indx = flags & (M_WAITOK | M_NOWAIT);
	if (indx != M_NOWAIT && indx != M_WAITOK) {
		static struct timeval lasterr;
		static int curerr, once;
		if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) {
			printf("Bad malloc flags: %x\n", indx);
			kdb_backtrace();
			flags |= M_WAITOK;
			once++;
		}
	}
#endif
#ifdef MALLOC_MAKE_FAILURES
	if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) {
		atomic_add_int(&malloc_nowait_count, 1);
		if ((malloc_nowait_count % malloc_failure_rate) == 0) {
			atomic_add_int(&malloc_failure_count, 1);
			t_malloc_fail = time_uptime;
			return (NULL);
		}
	}
#endif
	if (flags & M_WAITOK)
		KASSERT(curthread->td_intr_nesting_level == 0,
		    ("malloc(M_WAITOK) in interrupt context"));

#ifdef DEBUG_MEMGUARD
	if (memguard_cmp(mtp))
		return (memguard_alloc(size, flags));
#endif

#ifdef DEBUG_REDZONE
	size = redzone_size_ntor(size);
#endif

	if (size <= KMEM_ZMAX) {
		if (size & KMEM_ZMASK)
			size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
		indx = kmemsize[size >> KMEM_ZSHIFT];
		zone = kmemzones[indx].kz_zone;
		keg = zone->uz_keg;
#ifdef MALLOC_PROFILE
		krequests[size >> KMEM_ZSHIFT]++;
#endif
		va = uma_zalloc(zone, flags);
		if (va != NULL)
			size = keg->uk_size;
		malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
	} else {
		size = roundup(size, PAGE_SIZE);
		zone = NULL;
		keg = NULL;
		va = uma_large_malloc(size, flags);
		malloc_type_allocated(mtp, va == NULL ? 0 : size);
	}
	if (flags & M_WAITOK)
		KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL"));
	else if (va == NULL)
		t_malloc_fail = time_uptime;
#ifdef DIAGNOSTIC
	if (va != NULL && !(flags & M_ZERO)) {
		memset(va, 0x70, osize);
	}
#endif
#ifdef DEBUG_REDZONE
	if (va != NULL)
		va = redzone_setup(va, osize);
#endif
	return ((void *) va);
}
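
/*
 * Typical malloc(9)/free(9) usage, sketched for illustration (struct
 * foo stands in for a caller's own structure; M_TEMP for its memory
 * type):
 *
 *	struct foo *fp;
 *
 *	fp = malloc(sizeof(*fp), M_TEMP, M_WAITOK | M_ZERO);
 *	...
 *	free(fp, M_TEMP);
 *
 * With M_WAITOK the call cannot fail (it may sleep), so no NULL check
 * is needed; with M_NOWAIT the caller must handle a NULL return.
 */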

/*
 * free:
 *
 *	Free a block of memory allocated by malloc.
 *
 *	This routine may not block.
 */
void
free(void *addr, struct malloc_type *mtp)
{
	uma_slab_t slab;
	u_long size;

	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return;

#ifdef DEBUG_MEMGUARD
	if (memguard_cmp(mtp)) {
		memguard_free(addr);
		return;
	}
#endif

#ifdef DEBUG_REDZONE
	redzone_check(addr);
	addr = redzone_addr_ntor(addr);
#endif

	size = 0;

	slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK));

	if (slab == NULL)
		panic("free: address %p(%p) has not been allocated.\n",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

	if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
#ifdef INVARIANTS
		struct malloc_type **mtpp = addr;
#endif
		size = slab->us_keg->uk_size;
#ifdef INVARIANTS
		/*
		 * Cache a pointer to the malloc_type that most recently freed
		 * this memory here.  This way we know who is most likely to
		 * have stepped on it later.
		 *
		 * This code assumes that size is a multiple of 8 bytes on
		 * 64-bit machines.
		 */
		mtpp = (struct malloc_type **)
		    ((unsigned long)mtpp & ~UMA_ALIGN_PTR);
		mtpp += (size - sizeof(struct malloc_type *)) /
		    sizeof(struct malloc_type *);
		*mtpp = mtp;
#endif
		uma_zfree_arg(LIST_FIRST(&slab->us_keg->uk_zones), addr, slab);
	} else {
		size = slab->us_size;
		uma_large_free(slab);
	}
	malloc_type_freed(mtp, size);
}

/*
 * realloc: change the size of a memory block
 */
void *
realloc(void *addr, unsigned long size, struct malloc_type *mtp, int flags)
{
	uma_slab_t slab;
	unsigned long alloc;
	void *newaddr;

	/* realloc(NULL, ...) is equivalent to malloc(...) */
	if (addr == NULL)
		return (malloc(size, mtp, flags));

	/*
	 * XXX: Should report free of old memory and alloc of new memory to
	 * per-CPU stats.
	 */

#ifdef DEBUG_MEMGUARD
	if (memguard_cmp(mtp)) {
		slab = NULL;
		alloc = size;
	} else {
#endif

#ifdef DEBUG_REDZONE
	slab = NULL;
	alloc = redzone_get_size(addr);
#else
	slab = vtoslab((vm_offset_t)addr & ~(UMA_SLAB_MASK));

	/* Sanity check */
	KASSERT(slab != NULL,
	    ("realloc: address %p out of range", (void *)addr));

	/* Get the size of the original block */
	if (!(slab->us_flags & UMA_SLAB_MALLOC))
		alloc = slab->us_keg->uk_size;
	else
		alloc = slab->us_size;

	/* Reuse the original block if appropriate */
	if (size <= alloc &&
	    (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE))
		return (addr);
#endif /* !DEBUG_REDZONE */

#ifdef DEBUG_MEMGUARD
	}
#endif

	/* Allocate a new, bigger (or smaller) block */
	if ((newaddr = malloc(size, mtp, flags)) == NULL)
		return (NULL);

	/* Copy over original contents */
	bcopy(addr, newaddr, min(size, alloc));
	free(addr, mtp);
	return (newaddr);
}

/*
 * reallocf: same as realloc() but free memory on failure.
 */
void *
reallocf(void *addr, unsigned long size, struct malloc_type *mtp, int flags)
{
	void *mem;

	if ((mem = realloc(addr, size, mtp, flags)) == NULL)
		free(addr, mtp);
	return (mem);
}
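
/*
 * reallocf() suits the common idiom where the old block is useless if
 * the resize fails, avoiding a leak in the error path; a sketch with
 * illustrative names:
 *
 *	buf = reallocf(buf, newsize, M_TEMP, M_NOWAIT);
 *	if (buf == NULL)
 *		return (ENOMEM);
 *
 * Had plain realloc() been used, the original buffer would still need
 * to be freed on failure.
 */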

/*
 * Initialize the kernel memory allocator
 */
/* ARGSUSED*/
static void
kmeminit(void *dummy)
{
	u_int8_t indx;
	u_long mem_size;
	int i;

	mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);

	/*
	 * Try to auto-tune the kernel memory size, so that it is
	 * more applicable for a wider range of machine sizes.  On
	 * x86, a VM_KMEM_SIZE_SCALE value of 4 is good, while a
	 * VM_KMEM_SIZE of 12MB is a fair compromise.  VM_KMEM_SIZE_MAX
	 * depends on the maximum KVA space available; on x86 with a
	 * total KVA space of 256MB, try to keep VM_KMEM_SIZE_MAX at
	 * 80MB or below.
	 *
	 * Note that the kmem_map is also used by the zone allocator,
	 * so make sure that there is enough space.
	 */
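	/*
	 * Worked example (illustrative numbers, not a guarantee): with
	 * 4KB pages, a machine with 65536 pages (256MB) of RAM and a
	 * vm_kmem_size_scale of 4 gets 65536 / 4 = 16384 pages, i.e. a
	 * 64MB kmem map, before the _min/_max bounds and the
	 * vm.kmem_size tunables below are applied.
	 */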
	vm_kmem_size = VM_KMEM_SIZE + nmbclusters * PAGE_SIZE;
	mem_size = cnt.v_page_count;

#if defined(VM_KMEM_SIZE_SCALE)
	vm_kmem_size_scale = VM_KMEM_SIZE_SCALE;
#endif
	TUNABLE_INT_FETCH("vm.kmem_size_scale", &vm_kmem_size_scale);
	if (vm_kmem_size_scale > 0 &&
	    (mem_size / vm_kmem_size_scale) > (vm_kmem_size / PAGE_SIZE))
		vm_kmem_size = (mem_size / vm_kmem_size_scale) * PAGE_SIZE;

#if defined(VM_KMEM_SIZE_MIN)
	vm_kmem_size_min = VM_KMEM_SIZE_MIN;
#endif
	TUNABLE_INT_FETCH("vm.kmem_size_min", &vm_kmem_size_min);
	if (vm_kmem_size_min > 0 && vm_kmem_size < vm_kmem_size_min)
		vm_kmem_size = vm_kmem_size_min;

#if defined(VM_KMEM_SIZE_MAX)
	vm_kmem_size_max = VM_KMEM_SIZE_MAX;
#endif
	TUNABLE_INT_FETCH("vm.kmem_size_max", &vm_kmem_size_max);
	if (vm_kmem_size_max > 0 && vm_kmem_size >= vm_kmem_size_max)
		vm_kmem_size = vm_kmem_size_max;

	/* Allow final override from the kernel environment */
#ifndef BURN_BRIDGES
	if (TUNABLE_INT_FETCH("kern.vm.kmem.size", &vm_kmem_size) != 0)
		printf("kern.vm.kmem.size is now called vm.kmem_size!\n");
#endif
	TUNABLE_INT_FETCH("vm.kmem_size", &vm_kmem_size);

	/*
	 * Limit kmem virtual size to twice the physical memory.
	 * This allows for kmem map sparseness, but limits the size
	 * to something sane.  Be careful to not overflow the 32bit
	 * ints while doing the check.
	 */
	if (((vm_kmem_size / 2) / PAGE_SIZE) > cnt.v_page_count)
		vm_kmem_size = 2 * cnt.v_page_count * PAGE_SIZE;

	/*
	 * Tune settings based on the kernel map's size at this time.
	 */
	init_param3(vm_kmem_size / PAGE_SIZE);

	kmem_map = kmem_suballoc(kernel_map, &kmembase, &kmemlimit,
	    vm_kmem_size);
	kmem_map->system_map = 1;

#ifdef DEBUG_MEMGUARD
	/*
	 * Initialize MemGuard if support compiled in.  MemGuard is a
	 * replacement allocator used for detecting tamper-after-free
	 * scenarios as they occur.  It is only used for debugging.
	 */
	vm_memguard_divisor = 10;
	TUNABLE_INT_FETCH("vm.memguard.divisor", &vm_memguard_divisor);

	/* Pick a conservative value if provided value sucks. */
	if ((vm_memguard_divisor <= 0) ||
	    ((vm_kmem_size / vm_memguard_divisor) == 0))
		vm_memguard_divisor = 10;
	memguard_init(kmem_map, vm_kmem_size / vm_memguard_divisor);
#endif

	uma_startup2();

	mt_zone = uma_zcreate("mt_zone", sizeof(struct malloc_type_internal),
#ifdef INVARIANTS
	    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
	    NULL, NULL, NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);
	for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
		int size = kmemzones[indx].kz_size;
		char *name = kmemzones[indx].kz_name;

		kmemzones[indx].kz_zone = uma_zcreate(name, size,
#ifdef INVARIANTS
		    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
		    NULL, NULL, NULL, NULL,
#endif
		    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);

		for (; i <= size; i += KMEM_ZBASE)
			kmemsize[i >> KMEM_ZSHIFT] = indx;
	}
}

void
malloc_init(void *data)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;

	KASSERT(cnt.v_page_count != 0, ("malloc_register before vm_init"));

	mtp = data;
	mtip = uma_zalloc(mt_zone, M_WAITOK | M_ZERO);
	mtp->ks_handle = mtip;

	mtx_lock(&malloc_mtx);
	mtp->ks_next = kmemstatistics;
	kmemstatistics = mtp;
	kmemcount++;
	mtx_unlock(&malloc_mtx);
}

void
malloc_uninit(void *data)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;
	struct malloc_type *mtp, *temp;
	uma_slab_t slab;
	long temp_allocs, temp_bytes;
	int i;

	mtp = data;
	KASSERT(mtp->ks_handle != NULL, ("malloc_deregister: cookie NULL"));
	mtx_lock(&malloc_mtx);
	mtip = mtp->ks_handle;
	mtp->ks_handle = NULL;
	if (mtp != kmemstatistics) {
		for (temp = kmemstatistics; temp != NULL;
		    temp = temp->ks_next) {
			if (temp->ks_next == mtp)
				temp->ks_next = mtp->ks_next;
		}
	} else
		kmemstatistics = mtp->ks_next;
	kmemcount--;
	mtx_unlock(&malloc_mtx);

	/*
	 * Look for memory leaks.
	 */
	temp_allocs = temp_bytes = 0;
	for (i = 0; i < MAXCPU; i++) {
		mtsp = &mtip->mti_stats[i];
		temp_allocs += mtsp->mts_numallocs;
		temp_allocs -= mtsp->mts_numfrees;
		temp_bytes += mtsp->mts_memalloced;
		temp_bytes -= mtsp->mts_memfreed;
	}
	if (temp_allocs > 0 || temp_bytes > 0) {
		printf("Warning: memory type %s leaked memory on destroy "
		    "(%ld allocations, %ld bytes leaked).\n", mtp->ks_shortdesc,
		    temp_allocs, temp_bytes);
	}

	slab = vtoslab((vm_offset_t) mtip & (~UMA_SLAB_MASK));
	uma_zfree_arg(mt_zone, mtip, slab);
}

struct malloc_type *
malloc_desc2type(const char *desc)
{
	struct malloc_type *mtp;

	mtx_assert(&malloc_mtx, MA_OWNED);
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		if (strcmp(mtp->ks_shortdesc, desc) == 0)
			return (mtp);
	}
	return (NULL);
}
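
/*
 * Lookups race with type registration, so callers hold malloc_mtx
 * across the lookup and any use of the result; a sketch using the
 * "temp" type defined above:
 *
 *	mtx_lock(&malloc_mtx);
 *	mtp = malloc_desc2type("temp");
 *	if (mtp != NULL)
 *		... use mtp ...
 *	mtx_unlock(&malloc_mtx);
 */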

static int
sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS)
{
	struct malloc_type_stream_header mtsh;
	struct malloc_type_internal *mtip;
	struct malloc_type_header mth;
	struct malloc_type *mtp;
	int buflen, count, error, i;
	struct sbuf sbuf;
	char *buffer;

	mtx_lock(&malloc_mtx);
restart:
	mtx_assert(&malloc_mtx, MA_OWNED);
	count = kmemcount;
	mtx_unlock(&malloc_mtx);
	buflen = sizeof(mtsh) + count * (sizeof(mth) +
	    sizeof(struct malloc_type_stats) * MAXCPU) + 1;
	buffer = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
	mtx_lock(&malloc_mtx);
	if (count < kmemcount) {
		free(buffer, M_TEMP);
		goto restart;
	}

	sbuf_new(&sbuf, buffer, buflen, SBUF_FIXEDLEN);

	/*
	 * Insert stream header.
	 */
	bzero(&mtsh, sizeof(mtsh));
	mtsh.mtsh_version = MALLOC_TYPE_STREAM_VERSION;
	mtsh.mtsh_maxcpus = MAXCPU;
	mtsh.mtsh_count = kmemcount;
	if (sbuf_bcat(&sbuf, &mtsh, sizeof(mtsh)) < 0) {
		mtx_unlock(&malloc_mtx);
		error = ENOMEM;
		goto out;
	}

	/*
	 * Insert alternating sequence of type headers and type statistics.
	 */
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = (struct malloc_type_internal *)mtp->ks_handle;

		/*
		 * Insert type header.
		 */
		bzero(&mth, sizeof(mth));
		strlcpy(mth.mth_name, mtp->ks_shortdesc, MALLOC_MAX_NAME);
		if (sbuf_bcat(&sbuf, &mth, sizeof(mth)) < 0) {
			mtx_unlock(&malloc_mtx);
			error = ENOMEM;
			goto out;
		}

		/*
		 * Insert type statistics for each CPU.
		 */
		for (i = 0; i < MAXCPU; i++) {
			if (sbuf_bcat(&sbuf, &mtip->mti_stats[i],
			    sizeof(mtip->mti_stats[i])) < 0) {
				mtx_unlock(&malloc_mtx);
				error = ENOMEM;
				goto out;
			}
		}
	}
	mtx_unlock(&malloc_mtx);
	sbuf_finish(&sbuf);
	error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf));
out:
	sbuf_delete(&sbuf);
	free(buffer, M_TEMP);
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, malloc_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
    0, 0, sysctl_kern_malloc_stats, "s,malloc_type_ustats",
    "Return malloc types");

SYSCTL_INT(_kern, OID_AUTO, malloc_count, CTLFLAG_RD, &kmemcount, 0,
    "Count of kernel malloc types");

#ifdef DDB
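/*
 * "show malloc" from the in-kernel debugger summarizes each type's
 * statistics; output looks roughly like (numbers illustrative):
 *
 *	              Type        InUse       MemUse     Requests
 *	            devbuf          512        1024K         8192
 */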
DB_SHOW_COMMAND(malloc, db_show_malloc)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;
	u_int64_t allocs, frees;
	u_int64_t alloced, freed;
	int i;

	db_printf("%18s %12s %12s %12s\n", "Type", "InUse", "MemUse",
	    "Requests");
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = (struct malloc_type_internal *)mtp->ks_handle;
		allocs = 0;
		frees = 0;
		alloced = 0;
		freed = 0;
		for (i = 0; i < MAXCPU; i++) {
			allocs += mtip->mti_stats[i].mts_numallocs;
			frees += mtip->mti_stats[i].mts_numfrees;
			alloced += mtip->mti_stats[i].mts_memalloced;
			freed += mtip->mti_stats[i].mts_memfreed;
		}
		db_printf("%18s %12ju %12juK %12ju\n",
		    mtp->ks_shortdesc, allocs - frees,
		    (alloced - freed + 1023) / 1024, allocs);
	}
}
#endif

#ifdef MALLOC_PROFILE

static int
sysctl_kern_mprof(SYSCTL_HANDLER_ARGS)
{
	int linesize = 64;
	struct sbuf sbuf;
	uint64_t count;
	uint64_t waste;
	uint64_t mem;
	int bufsize;
	int error;
	char *buf;
	int rsize;
	int size;
	int i;

	bufsize = linesize * (KMEM_ZSIZE + 1);
	bufsize += 128;		/* For the stats line */
	bufsize += 128;		/* For the banner line */
	waste = 0;
	mem = 0;

	buf = malloc(bufsize, M_TEMP, M_WAITOK|M_ZERO);
	sbuf_new(&sbuf, buf, bufsize, SBUF_FIXEDLEN);
	sbuf_printf(&sbuf,
	    "\n  Size                    Requests  Real Size\n");
	for (i = 0; i < KMEM_ZSIZE; i++) {
		size = i << KMEM_ZSHIFT;
		rsize = kmemzones[kmemsize[i]].kz_size;
		count = krequests[i];

		sbuf_printf(&sbuf, "%6d%28llu%11d\n", size,
		    (unsigned long long)count, rsize);

		if ((rsize * count) > (size * count))
			waste += (rsize * count) - (size * count);
		mem += (rsize * count);
	}
	sbuf_printf(&sbuf,
	    "\nTotal memory used:\t%30llu\nTotal Memory wasted:\t%30llu\n",
	    (unsigned long long)mem, (unsigned long long)waste);
	sbuf_finish(&sbuf);

	error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf));

	sbuf_delete(&sbuf);
	free(buf, M_TEMP);
	return (error);
}

SYSCTL_OID(_kern, OID_AUTO, mprof, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, sysctl_kern_mprof, "A", "Malloc Profiling");
#endif /* MALLOC_PROFILE */