/*-
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2005-2006 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 */

/*
 * Kernel malloc(9) implementation -- general purpose kernel memory allocator
 * based on memory types.  The back end is implemented using the UMA(9) zone
 * allocator.  A set of fixed-size buckets is used for smaller allocations,
 * and a special UMA allocation interface is used for larger allocations.
 * Callers declare memory types, and statistics are maintained independently
 * for each memory type.  Statistics are maintained per-CPU for performance
 * reasons.  See malloc(9) and the comments in malloc.h for a detailed
 * description.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/7.4/sys/kern/kern_malloc.c 213923 2010-10-16 11:44:58Z avg $");

#include "opt_ddb.h"
#include "opt_kdtrace.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/vmmeter.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/time.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#ifdef DEBUG_MEMGUARD
#include <vm/memguard.h>
#endif
#ifdef DEBUG_REDZONE
#include <vm/redzone.h>
#endif

#if defined(INVARIANTS) && defined(__i386__)
#include <machine/cpu.h>
#endif

#include <ddb/ddb.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>

dtrace_malloc_probe_func_t	dtrace_malloc_probe;
#endif
/*
 * When realloc() is called, if the new size is sufficiently smaller than
 * the old size, realloc() will allocate a new, smaller block to avoid
 * wasting memory.  'Sufficiently smaller' is defined as: newsize <=
 * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'.
 */
#ifndef REALLOC_FRACTION
#define	REALLOC_FRACTION	1	/* new block if <= half the size */
#endif
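
/*
 * Worked example with the default REALLOC_FRACTION of 1: shrinking a
 * 1024-byte allocation to 512 bytes or less (newsize <= 1024 / 2)
 * allocates a fresh, smaller block and copies the data over, while
 * shrinking it to 513..1024 bytes simply reuses the existing block.
 * See the reuse test in realloc() below.
 */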

/*
 * Centrally define some common malloc types.
 */
MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");
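
/*
 * Illustrative sketch (M_EXAMPLE is hypothetical, not defined anywhere in
 * the tree): a subsystem declares its own type with MALLOC_DECLARE() in a
 * shared header, defines it once with MALLOC_DEFINE(), and then passes it
 * to malloc(9) and free(9) so its usage is accounted for separately:
 *
 *	MALLOC_DEFINE(M_EXAMPLE, "example", "example subsystem buffers");
 *
 *	p = malloc(sizeof(*p), M_EXAMPLE, M_WAITOK | M_ZERO);
 *	...
 *	free(p, M_EXAMPLE);
 */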

static void kmeminit(void *);
SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL);

static MALLOC_DEFINE(M_FREE, "free", "should be on free list");

static struct malloc_type *kmemstatistics;
static vm_offset_t kmembase;
static vm_offset_t kmemlimit;
static int kmemcount;

#define	KMEM_ZSHIFT	4
#define	KMEM_ZBASE	16
#define	KMEM_ZMASK	(KMEM_ZBASE - 1)

#define	KMEM_ZMAX	PAGE_SIZE
#define	KMEM_ZSIZE	(KMEM_ZMAX >> KMEM_ZSHIFT)
static u_int8_t kmemsize[KMEM_ZSIZE + 1];
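
/*
 * The kmemsize[] table maps a request size, rounded up to a multiple of
 * KMEM_ZBASE and divided by 2^KMEM_ZSHIFT, to an index into kmemzones[].
 * For example, a 100-byte request is rounded up to 112 bytes, and
 * kmemsize[112 >> 4] selects the 128-byte zone; see the rounding logic
 * in malloc() below.
 */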

/*
 * Small malloc(9) memory allocations are satisfied from a set of UMA buckets
 * of various sizes.
 *
 * XXX: The comment here used to read "These won't be powers of two for
 * long."  It's possible that a significant amount of wasted memory could be
 * recovered by tuning the sizes of these buckets.
 */
struct {
	int kz_size;
	char *kz_name;
	uma_zone_t kz_zone;
} kmemzones[] = {
	{16, "16", NULL},
	{32, "32", NULL},
	{64, "64", NULL},
	{128, "128", NULL},
	{256, "256", NULL},
	{512, "512", NULL},
	{1024, "1024", NULL},
	{2048, "2048", NULL},
	{4096, "4096", NULL},
#if PAGE_SIZE > 4096
	{8192, "8192", NULL},
#if PAGE_SIZE > 8192
	{16384, "16384", NULL},
#if PAGE_SIZE > 16384
	{32768, "32768", NULL},
#if PAGE_SIZE > 32768
	{65536, "65536", NULL},
#if PAGE_SIZE > 65536
#error "Unsupported PAGE_SIZE"
#endif	/* 65536 */
#endif	/* 32768 */
#endif	/* 16384 */
#endif	/* 8192 */
#endif	/* 4096 */
	{0, NULL},
};

/*
 * Zone to allocate malloc type descriptions from.  For ABI reasons, memory
 * types are described by a data structure passed by the declaring code, but
 * the malloc(9) implementation has its own data structure describing the
 * type and statistics.  This permits the malloc(9)-internal data structures
 * to be modified without breaking binary-compiled kernel modules that
 * declare malloc types.
 */
static uma_zone_t mt_zone;

u_long vm_kmem_size;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size, CTLFLAG_RDTUN, &vm_kmem_size, 0,
    "Size of kernel memory");

static u_long vm_kmem_size_min;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_min, CTLFLAG_RDTUN, &vm_kmem_size_min, 0,
    "Minimum size of kernel memory");

static u_long vm_kmem_size_max;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_max, CTLFLAG_RDTUN, &vm_kmem_size_max, 0,
    "Maximum size of kernel memory");

static u_int vm_kmem_size_scale;
SYSCTL_UINT(_vm, OID_AUTO, kmem_size_scale, CTLFLAG_RDTUN, &vm_kmem_size_scale, 0,
    "Scale factor for kernel memory size");

static int sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, kmem_map_size,
    CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_kmem_map_size, "LU", "Current kmem_map allocation size");

static int sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, kmem_map_free,
    CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_kmem_map_free, "LU", "Largest contiguous free range in kmem_map");

/*
 * The malloc_mtx protects the kmemstatistics linked list.
 */
struct mtx malloc_mtx;

#ifdef MALLOC_PROFILE
uint64_t krequests[KMEM_ZSIZE + 1];

static int sysctl_kern_mprof(SYSCTL_HANDLER_ARGS);
#endif

static int sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS);

/*
 * time_uptime of the last malloc(9) failure (induced or real).
 */
static time_t t_malloc_fail;

/*
 * malloc(9) fault injection -- cause malloc failures every (n) mallocs when
 * the caller specifies M_NOWAIT.  If set to 0, no failures are caused.
 */
#ifdef MALLOC_MAKE_FAILURES
SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD, 0,
    "Kernel malloc debugging options");

static int malloc_failure_rate;
static int malloc_nowait_count;
static int malloc_failure_count;
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RW,
    &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail");
TUNABLE_INT("debug.malloc.failure_rate", &malloc_failure_rate);
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD,
    &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures");
#endif
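
/*
 * Usage sketch (requires a kernel built with "options MALLOC_MAKE_FAILURES"):
 * setting the tunable/sysctl, e.g.
 *
 *	sysctl debug.malloc.failure_rate=100
 *
 * makes every 100th M_NOWAIT allocation fail, which is a convenient way to
 * exercise error-handling paths; debug.malloc.failure_count reports how
 * many failures have been imposed so far.
 */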

static int
sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS)
{
	u_long size;

	size = kmem_map->size;
	return (sysctl_handle_long(oidp, &size, 0, req));
}

static int
sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS)
{
	u_long size;

	vm_map_lock_read(kmem_map);
	size = kmem_map->root != NULL ?
	    kmem_map->root->max_free : kmem_map->size;
	vm_map_unlock_read(kmem_map);
	return (sysctl_handle_long(oidp, &size, 0, req));
}
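
/*
 * Return the number of seconds since the last recorded malloc(9) failure
 * (induced or real).
 */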
int
malloc_last_fail(void)
{

	return (time_uptime - t_malloc_fail);
}
273
274 /*
275 * An allocation has succeeded -- update malloc type statistics for the
276 * amount of bucket size. Occurs within a critical section so that the
277 * thread isn't preempted and doesn't migrate while updating per-PCU
278 * statistics.
279 */
static void
malloc_type_zone_allocated(struct malloc_type *mtp, unsigned long size,
    int zindx)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = mtp->ks_handle;
	mtsp = &mtip->mti_stats[curcpu];
	if (size > 0) {
		mtsp->mts_memalloced += size;
		mtsp->mts_numallocs++;
	}
	if (zindx != -1)
		mtsp->mts_size |= 1 << zindx;

#ifdef KDTRACE_HOOKS
	if (dtrace_malloc_probe != NULL) {
		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_MALLOC];
		if (probe_id != 0)
			(dtrace_malloc_probe)(probe_id,
			    (uintptr_t) mtp, (uintptr_t) mtip,
			    (uintptr_t) mtsp, size, zindx);
	}
#endif

	critical_exit();
}

void
malloc_type_allocated(struct malloc_type *mtp, unsigned long size)
{

	if (size > 0)
		malloc_type_zone_allocated(mtp, size, -1);
}

/*
 * A free operation has occurred -- update malloc type statistics for the
 * size of the bucket being freed.  Occurs within a critical section so
 * that the thread isn't preempted and doesn't migrate while updating
 * per-CPU statistics.
 */
void
malloc_type_freed(struct malloc_type *mtp, unsigned long size)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = mtp->ks_handle;
	mtsp = &mtip->mti_stats[curcpu];
	mtsp->mts_memfreed += size;
	mtsp->mts_numfrees++;

#ifdef KDTRACE_HOOKS
	if (dtrace_malloc_probe != NULL) {
		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_FREE];
		if (probe_id != 0)
			(dtrace_malloc_probe)(probe_id,
			    (uintptr_t) mtp, (uintptr_t) mtip,
			    (uintptr_t) mtsp, size, 0);
	}
#endif

	critical_exit();
}

/*
 *	malloc:
 *
 *	Allocate a block of memory.
 *
 *	If M_NOWAIT is set, this routine will not block and will return NULL
 *	if the allocation fails.
 */
void *
malloc(unsigned long size, struct malloc_type *mtp, int flags)
{
	int indx;
	caddr_t va;
	uma_zone_t zone;
	uma_keg_t keg;
#if defined(DIAGNOSTIC) || defined(DEBUG_REDZONE)
	unsigned long osize = size;
#endif

#ifdef INVARIANTS
	/*
	 * Check that exactly one of M_WAITOK or M_NOWAIT is specified.
	 */
	indx = flags & (M_WAITOK | M_NOWAIT);
	if (indx != M_NOWAIT && indx != M_WAITOK) {
		static struct timeval lasterr;
		static int curerr, once;
		if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) {
			printf("Bad malloc flags: %x\n", indx);
			kdb_backtrace();
			flags |= M_WAITOK;
			once++;
		}
	}
#endif
#ifdef MALLOC_MAKE_FAILURES
	if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) {
		atomic_add_int(&malloc_nowait_count, 1);
		if ((malloc_nowait_count % malloc_failure_rate) == 0) {
			atomic_add_int(&malloc_failure_count, 1);
			t_malloc_fail = time_uptime;
			return (NULL);
		}
	}
#endif
	if (flags & M_WAITOK)
		KASSERT(curthread->td_intr_nesting_level == 0,
		    ("malloc(M_WAITOK) in interrupt context"));

#ifdef DEBUG_MEMGUARD
	if (memguard_cmp(mtp, size)) {
		va = memguard_alloc(size, flags);
		if (va != NULL)
			return (va);
		/* This is unfortunate but should not be fatal. */
	}
#endif

#ifdef DEBUG_REDZONE
	size = redzone_size_ntor(size);
#endif

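	/*
	 * Requests up to KMEM_ZMAX bytes are served from the fixed-size
	 * UMA zones in kmemzones[]; anything larger is rounded up to a
	 * multiple of PAGE_SIZE and handed to the large-allocation path.
	 */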
	if (size <= KMEM_ZMAX) {
		if (size & KMEM_ZMASK)
			size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
		indx = kmemsize[size >> KMEM_ZSHIFT];
		zone = kmemzones[indx].kz_zone;
		keg = zone->uz_keg;
#ifdef MALLOC_PROFILE
		krequests[size >> KMEM_ZSHIFT]++;
#endif
		va = uma_zalloc(zone, flags);
		if (va != NULL)
			size = keg->uk_size;
		malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
	} else {
		size = roundup(size, PAGE_SIZE);
		zone = NULL;
		keg = NULL;
		va = uma_large_malloc(size, flags);
		malloc_type_allocated(mtp, va == NULL ? 0 : size);
	}
	if (flags & M_WAITOK)
		KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL"));
	else if (va == NULL)
		t_malloc_fail = time_uptime;
#ifdef DIAGNOSTIC
	if (va != NULL && !(flags & M_ZERO)) {
		memset(va, 0x70, osize);
	}
#endif
#ifdef DEBUG_REDZONE
	if (va != NULL)
		va = redzone_setup(va, osize);
#endif
	return ((void *) va);
}

/*
 *	free:
 *
 *	Free a block of memory allocated by malloc.
 *
 *	This routine may not block.
 */
void
free(void *addr, struct malloc_type *mtp)
{
	uma_slab_t slab;
	u_long size;

	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return;

#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(addr)) {
		memguard_free(addr);
		return;
	}
#endif

#ifdef DEBUG_REDZONE
	redzone_check(addr);
	addr = redzone_addr_ntor(addr);
#endif

	size = 0;

	slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK));

	if (slab == NULL)
		panic("free: address %p(%p) has not been allocated.\n",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

	if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
#ifdef INVARIANTS
		struct malloc_type **mtpp = addr;
#endif
		size = slab->us_keg->uk_size;
#ifdef INVARIANTS
		/*
		 * Cache a pointer to the malloc_type that most recently freed
		 * this memory here.  This way we know who is most likely to
		 * have stepped on it later.
		 *
		 * This code assumes that the size is a multiple of 8 bytes
		 * on 64-bit machines.
		 */
		mtpp = (struct malloc_type **)
		    ((unsigned long)mtpp & ~UMA_ALIGN_PTR);
		mtpp += (size - sizeof(struct malloc_type *)) /
		    sizeof(struct malloc_type *);
		*mtpp = mtp;
#endif
		uma_zfree_arg(LIST_FIRST(&slab->us_keg->uk_zones), addr, slab);
	} else {
		size = slab->us_size;
		uma_large_free(slab);
	}
	malloc_type_freed(mtp, size);
}

/*
 *	realloc: change the size of a memory block
 */
void *
realloc(void *addr, unsigned long size, struct malloc_type *mtp, int flags)
{
	uma_slab_t slab;
	unsigned long alloc;
	void *newaddr;

	/* realloc(NULL, ...) is equivalent to malloc(...) */
	if (addr == NULL)
		return (malloc(size, mtp, flags));

	/*
	 * XXX: Should report free of old memory and alloc of new memory to
	 * per-CPU stats.
	 */

#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(addr))
		return (memguard_realloc(addr, size, mtp, flags));
#endif

#ifdef DEBUG_REDZONE
	slab = NULL;
	alloc = redzone_get_size(addr);
#else
	slab = vtoslab((vm_offset_t)addr & ~(UMA_SLAB_MASK));

	/* Sanity check */
	KASSERT(slab != NULL,
	    ("realloc: address %p out of range", (void *)addr));

	/* Get the size of the original block */
	if (!(slab->us_flags & UMA_SLAB_MALLOC))
		alloc = slab->us_keg->uk_size;
	else
		alloc = slab->us_size;

	/* Reuse the original block if appropriate */
	if (size <= alloc
	    && (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE))
		return (addr);
#endif /* !DEBUG_REDZONE */

	/* Allocate a new, bigger (or smaller) block */
	if ((newaddr = malloc(size, mtp, flags)) == NULL)
		return (NULL);

	/* Copy over original contents */
	bcopy(addr, newaddr, min(size, alloc));
	free(addr, mtp);
	return (newaddr);
}

/*
 *	reallocf: same as realloc() but frees memory on failure.
 */
void *
reallocf(void *addr, unsigned long size, struct malloc_type *mtp, int flags)
{
	void *mem;

	if ((mem = realloc(addr, size, mtp, flags)) == NULL)
		free(addr, mtp);
	return (mem);
}
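
/*
 * Illustrative sketch: reallocf() avoids the classic leak where the only
 * pointer to the old block is overwritten before the failure check.  With
 * plain realloc(), "buf = realloc(buf, n, ...)" loses the old buffer on
 * failure; with reallocf() the old buffer has already been freed:
 *
 *	buf = reallocf(buf, newsize, M_TEMP, M_NOWAIT);
 *	if (buf == NULL)
 *		return (ENOMEM);
 */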

/*
 * Initialize the kernel memory allocator
 */
/* ARGSUSED*/
static void
kmeminit(void *dummy)
{
	u_int8_t indx;
	u_long mem_size, tmp;
	int i;

	mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);

	/*
	 * Try to auto-tune the kernel memory size, so that it is
	 * more applicable for a wider range of machine sizes.
	 * On an X86, a VM_KMEM_SIZE_SCALE value of 4 is good, while
	 * a VM_KMEM_SIZE of 12MB is a fair compromise.  The
	 * VM_KMEM_SIZE_MAX is dependent on the maximum KVA space
	 * available, and on an X86 with a total KVA space of 256MB,
	 * try to keep VM_KMEM_SIZE_MAX at 80MB or below.
	 *
	 * Note that the kmem_map is also used by the zone allocator,
	 * so make sure that there is enough space.
	 */
	vm_kmem_size = VM_KMEM_SIZE + nmbclusters * PAGE_SIZE;
	mem_size = cnt.v_page_count;

#if defined(VM_KMEM_SIZE_SCALE)
	vm_kmem_size_scale = VM_KMEM_SIZE_SCALE;
#endif
	TUNABLE_INT_FETCH("vm.kmem_size_scale", &vm_kmem_size_scale);
	if (vm_kmem_size_scale > 0 &&
	    (mem_size / vm_kmem_size_scale) > (vm_kmem_size / PAGE_SIZE))
		vm_kmem_size = (mem_size / vm_kmem_size_scale) * PAGE_SIZE;

#if defined(VM_KMEM_SIZE_MIN)
	vm_kmem_size_min = VM_KMEM_SIZE_MIN;
#endif
	TUNABLE_ULONG_FETCH("vm.kmem_size_min", &vm_kmem_size_min);
	if (vm_kmem_size_min > 0 && vm_kmem_size < vm_kmem_size_min) {
		vm_kmem_size = vm_kmem_size_min;
	}

#if defined(VM_KMEM_SIZE_MAX)
	vm_kmem_size_max = VM_KMEM_SIZE_MAX;
#endif
	TUNABLE_ULONG_FETCH("vm.kmem_size_max", &vm_kmem_size_max);
	if (vm_kmem_size_max > 0 && vm_kmem_size >= vm_kmem_size_max)
		vm_kmem_size = vm_kmem_size_max;

	/* Allow final override from the kernel environment */
#ifndef BURN_BRIDGES
	if (TUNABLE_ULONG_FETCH("kern.vm.kmem.size", &vm_kmem_size) != 0)
		printf("kern.vm.kmem.size is now called vm.kmem_size!\n");
#endif
	TUNABLE_ULONG_FETCH("vm.kmem_size", &vm_kmem_size);
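
	/*
	 * For example, these tunables can be set from loader.conf(5),
	 * e.g. vm.kmem_size="512M", since they are fetched here before
	 * the kmem submap is created; the corresponding vm.kmem_size*
	 * sysctls above are read-only at run time (CTLFLAG_RDTUN).
	 */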

	/*
	 * Limit kmem virtual size to twice the physical memory.
	 * This allows for kmem map sparseness, but limits the size
	 * to something sane.  Be careful not to overflow 32-bit
	 * ints while doing the check.
	 */
	if (((vm_kmem_size / 2) / PAGE_SIZE) > cnt.v_page_count)
		vm_kmem_size = 2 * cnt.v_page_count * PAGE_SIZE;

	/*
	 * Tune settings based on the kmem map's size at this time.
	 */
	init_param3(vm_kmem_size / PAGE_SIZE);

#ifdef DEBUG_MEMGUARD
	tmp = memguard_fudge(vm_kmem_size, vm_kmem_size_max);
#else
	tmp = vm_kmem_size;
#endif
	kmem_map = kmem_suballoc(kernel_map, &kmembase, &kmemlimit,
	    tmp, TRUE);
	kmem_map->system_map = 1;

#ifdef DEBUG_MEMGUARD
	/*
	 * Initialize MemGuard if support compiled in.  MemGuard is a
	 * replacement allocator used for detecting tamper-after-free
	 * scenarios as they occur.  It is only used for debugging.
	 */
	memguard_init(kmem_map);
#endif

	uma_startup2();

	mt_zone = uma_zcreate("mt_zone", sizeof(struct malloc_type_internal),
#ifdef INVARIANTS
	    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
	    NULL, NULL, NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);
	for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
		int size = kmemzones[indx].kz_size;
		char *name = kmemzones[indx].kz_name;

		kmemzones[indx].kz_zone = uma_zcreate(name, size,
#ifdef INVARIANTS
		    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
		    NULL, NULL, NULL, NULL,
#endif
		    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);

		for (; i <= size; i += KMEM_ZBASE)
			kmemsize[i >> KMEM_ZSHIFT] = indx;
	}
}
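
/*
 * malloc_init() and malloc_uninit() are invoked for each memory type via
 * the SYSINIT()/SYSUNINIT() pairs that MALLOC_DEFINE() generates, both at
 * boot and at kernel module load/unload time.
 */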
void
malloc_init(void *data)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;

	KASSERT(cnt.v_page_count != 0, ("malloc_register before vm_init"));

	mtp = data;
	mtip = uma_zalloc(mt_zone, M_WAITOK | M_ZERO);
	mtp->ks_handle = mtip;

	mtx_lock(&malloc_mtx);
	mtp->ks_next = kmemstatistics;
	kmemstatistics = mtp;
	kmemcount++;
	mtx_unlock(&malloc_mtx);
}

void
malloc_uninit(void *data)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;
	struct malloc_type *mtp, *temp;
	uma_slab_t slab;
	long temp_allocs, temp_bytes;
	int i;

	mtp = data;
	KASSERT(mtp->ks_handle != NULL, ("malloc_deregister: cookie NULL"));
	mtx_lock(&malloc_mtx);
	mtip = mtp->ks_handle;
	mtp->ks_handle = NULL;
	if (mtp != kmemstatistics) {
		for (temp = kmemstatistics; temp != NULL;
		    temp = temp->ks_next) {
			if (temp->ks_next == mtp)
				temp->ks_next = mtp->ks_next;
		}
	} else
		kmemstatistics = mtp->ks_next;
	kmemcount--;
	mtx_unlock(&malloc_mtx);

	/*
	 * Look for memory leaks.
	 */
	temp_allocs = temp_bytes = 0;
	for (i = 0; i < MAXCPU; i++) {
		mtsp = &mtip->mti_stats[i];
		temp_allocs += mtsp->mts_numallocs;
		temp_allocs -= mtsp->mts_numfrees;
		temp_bytes += mtsp->mts_memalloced;
		temp_bytes -= mtsp->mts_memfreed;
	}
	if (temp_allocs > 0 || temp_bytes > 0) {
		printf("Warning: memory type %s leaked memory on destroy "
		    "(%ld allocations, %ld bytes leaked).\n", mtp->ks_shortdesc,
		    temp_allocs, temp_bytes);
	}

	slab = vtoslab((vm_offset_t) mtip & (~UMA_SLAB_MASK));
	uma_zfree_arg(mt_zone, mtip, slab);
}

struct malloc_type *
malloc_desc2type(const char *desc)
{
	struct malloc_type *mtp;

	mtx_assert(&malloc_mtx, MA_OWNED);
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		if (strcmp(mtp->ks_shortdesc, desc) == 0)
			return (mtp);
	}
	return (NULL);
}
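
/*
 * Export a machine-readable stream of per-type, per-CPU statistics via
 * the kern.malloc_stats sysctl; this is the interface consumed by
 * memstat(3) and hence by "vmstat -m".
 */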
static int
sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS)
{
	struct malloc_type_stream_header mtsh;
	struct malloc_type_internal *mtip;
	struct malloc_type_header mth;
	struct malloc_type *mtp;
	int buflen, count, error, i;
	struct sbuf sbuf;
	char *buffer;

	mtx_lock(&malloc_mtx);
restart:
	mtx_assert(&malloc_mtx, MA_OWNED);
	count = kmemcount;
	mtx_unlock(&malloc_mtx);
	buflen = sizeof(mtsh) + count * (sizeof(mth) +
	    sizeof(struct malloc_type_stats) * MAXCPU) + 1;
	buffer = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
	mtx_lock(&malloc_mtx);
	if (count < kmemcount) {
		free(buffer, M_TEMP);
		goto restart;
	}

	sbuf_new(&sbuf, buffer, buflen, SBUF_FIXEDLEN);

	/*
	 * Insert stream header.
	 */
	bzero(&mtsh, sizeof(mtsh));
	mtsh.mtsh_version = MALLOC_TYPE_STREAM_VERSION;
	mtsh.mtsh_maxcpus = MAXCPU;
	mtsh.mtsh_count = kmemcount;
	if (sbuf_bcat(&sbuf, &mtsh, sizeof(mtsh)) < 0) {
		mtx_unlock(&malloc_mtx);
		error = ENOMEM;
		goto out;
	}

	/*
	 * Insert alternating sequence of type headers and type statistics.
	 */
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = (struct malloc_type_internal *)mtp->ks_handle;

		/*
		 * Insert type header.
		 */
		bzero(&mth, sizeof(mth));
		strlcpy(mth.mth_name, mtp->ks_shortdesc, MALLOC_MAX_NAME);
		if (sbuf_bcat(&sbuf, &mth, sizeof(mth)) < 0) {
			mtx_unlock(&malloc_mtx);
			error = ENOMEM;
			goto out;
		}

		/*
		 * Insert type statistics for each CPU.
		 */
		for (i = 0; i < MAXCPU; i++) {
			if (sbuf_bcat(&sbuf, &mtip->mti_stats[i],
			    sizeof(mtip->mti_stats[i])) < 0) {
				mtx_unlock(&malloc_mtx);
				error = ENOMEM;
				goto out;
			}
		}
	}
	mtx_unlock(&malloc_mtx);
	sbuf_finish(&sbuf);
	error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf));
out:
	sbuf_delete(&sbuf);
	free(buffer, M_TEMP);
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, malloc_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
    0, 0, sysctl_kern_malloc_stats, "s,malloc_type_ustats",
    "Return malloc types");

SYSCTL_INT(_kern, OID_AUTO, malloc_count, CTLFLAG_RD, &kmemcount, 0,
    "Count of kernel malloc types");
void
malloc_type_list(malloc_type_list_func_t *func, void *arg)
{
	struct malloc_type *mtp, **bufmtp;
	int count, i;
	size_t buflen;

	mtx_lock(&malloc_mtx);
restart:
	mtx_assert(&malloc_mtx, MA_OWNED);
	count = kmemcount;
	mtx_unlock(&malloc_mtx);

	buflen = sizeof(struct malloc_type *) * count;
	bufmtp = malloc(buflen, M_TEMP, M_WAITOK);

	mtx_lock(&malloc_mtx);

	if (count < kmemcount) {
		free(bufmtp, M_TEMP);
		goto restart;
	}

	for (mtp = kmemstatistics, i = 0; mtp != NULL; mtp = mtp->ks_next, i++)
		bufmtp[i] = mtp;

	mtx_unlock(&malloc_mtx);

	for (i = 0; i < count; i++)
		(func)(bufmtp[i], arg);

	free(bufmtp, M_TEMP);
}

#ifdef DDB
DB_SHOW_COMMAND(malloc, db_show_malloc)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;
	u_int64_t allocs, frees;
	u_int64_t alloced, freed;
	int i;

	db_printf("%18s %12s  %12s %12s\n", "Type", "InUse", "MemUse",
	    "Requests");
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = (struct malloc_type_internal *)mtp->ks_handle;
		allocs = 0;
		frees = 0;
		alloced = 0;
		freed = 0;
		for (i = 0; i < MAXCPU; i++) {
			allocs += mtip->mti_stats[i].mts_numallocs;
			frees += mtip->mti_stats[i].mts_numfrees;
			alloced += mtip->mti_stats[i].mts_memalloced;
			freed += mtip->mti_stats[i].mts_memfreed;
		}
		db_printf("%18s %12ju %12juK %12ju\n",
		    mtp->ks_shortdesc, allocs - frees,
		    (alloced - freed + 1023) / 1024, allocs);
	}
}
#endif

#ifdef MALLOC_PROFILE

static int
sysctl_kern_mprof(SYSCTL_HANDLER_ARGS)
{
	int linesize = 64;
	struct sbuf sbuf;
	uint64_t count;
	uint64_t waste;
	uint64_t mem;
	int bufsize;
	int error;
	char *buf;
	int rsize;
	int size;
	int i;

	bufsize = linesize * (KMEM_ZSIZE + 1);
	bufsize += 128;		/* For the stats line */
	bufsize += 128;		/* For the banner line */
	waste = 0;
	mem = 0;

	buf = malloc(bufsize, M_TEMP, M_WAITOK|M_ZERO);
	sbuf_new(&sbuf, buf, bufsize, SBUF_FIXEDLEN);
	sbuf_printf(&sbuf,
	    "\n  Size                    Requests  Real Size\n");
	for (i = 0; i < KMEM_ZSIZE; i++) {
		size = i << KMEM_ZSHIFT;
		rsize = kmemzones[kmemsize[i]].kz_size;
		count = krequests[i];

		sbuf_printf(&sbuf, "%6d%28llu%11d\n", size,
		    (unsigned long long)count, rsize);

		if ((rsize * count) > (size * count))
			waste += (rsize * count) - (size * count);
		mem += (rsize * count);
	}
	sbuf_printf(&sbuf,
	    "\nTotal memory used:\t%30llu\nTotal Memory wasted:\t%30llu\n",
	    (unsigned long long)mem, (unsigned long long)waste);
	sbuf_finish(&sbuf);

	error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf));

	sbuf_delete(&sbuf);
	free(buf, M_TEMP);
	return (error);
}

SYSCTL_OID(_kern, OID_AUTO, mprof, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, sysctl_kern_mprof, "A", "Malloc Profiling");
#endif /* MALLOC_PROFILE */