/*-
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2005-2009 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 */

/*
 * Kernel malloc(9) implementation -- general purpose kernel memory allocator
 * based on memory types.  Back end is implemented using the UMA(9) zone
 * allocator.  A set of fixed-size buckets is used for smaller allocations,
 * and a special UMA allocation interface is used for larger allocations.
 * Callers declare memory types, and statistics are maintained independently
 * for each memory type.  Statistics are maintained per-CPU for performance
 * reasons.  See malloc(9) and comments in malloc.h for a detailed
 * description.
 */
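
/*
 * A minimal usage sketch, for reference only; the type M_EXAMPLEDATA,
 * struct foo and the function around them are hypothetical:
 *
 *	MALLOC_DEFINE(M_EXAMPLEDATA, "exampledata", "example buffers");
 *
 *	struct foo *
 *	example_alloc(void)
 *	{
 *		struct foo *fp;
 *
 *		fp = malloc(sizeof(*fp), M_EXAMPLEDATA, M_WAITOK | M_ZERO);
 *		return (fp);
 *	}
 *
 * M_WAITOK allocations never return NULL.  The memory is later released
 * with free(fp, M_EXAMPLEDATA), which credits the same type's statistics.
 */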

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/9.1/sys/kern/kern_malloc.c 230418 2012-01-21 05:03:10Z alc $");

#include "opt_ddb.h"
#include "opt_kdtrace.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/vmmeter.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/time.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#ifdef DEBUG_MEMGUARD
#include <vm/memguard.h>
#endif
#ifdef DEBUG_REDZONE
#include <vm/redzone.h>
#endif

#if defined(INVARIANTS) && defined(__i386__)
#include <machine/cpu.h>
#endif

#include <ddb/ddb.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>

dtrace_malloc_probe_func_t	dtrace_malloc_probe;
#endif

/*
 * When realloc() is called, if the new size is sufficiently smaller than
 * the old size, realloc() will allocate a new, smaller block to avoid
 * wasting memory.  'Sufficiently smaller' is defined as: newsize <=
 * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'.
 */
#ifndef REALLOC_FRACTION
#define	REALLOC_FRACTION	1	/* new block if <= half the size */
#endif
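
/*
 * Worked example, assuming the default REALLOC_FRACTION of 1: shrinking
 * a 1024-byte allocation with realloc() to 512 bytes or less allocates a
 * fresh block and copies the data, while a shrink to anywhere between
 * 513 and 1024 bytes reuses the original block (blocks already at
 * MINALLOCSIZE are always reused).
 */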

/*
 * Centrally define some common malloc types.
 */
MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");

static void kmeminit(void *);
SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL);

static MALLOC_DEFINE(M_FREE, "free", "should be on free list");

static struct malloc_type *kmemstatistics;
static vm_offset_t kmembase;
static vm_offset_t kmemlimit;
static int kmemcount;

#define	KMEM_ZSHIFT	4
#define	KMEM_ZBASE	16
#define	KMEM_ZMASK	(KMEM_ZBASE - 1)

#define	KMEM_ZMAX	PAGE_SIZE
#define	KMEM_ZSIZE	(KMEM_ZMAX >> KMEM_ZSHIFT)
static uint8_t kmemsize[KMEM_ZSIZE + 1];
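
/*
 * Worked example of the size-class lookup: a 100-byte request is first
 * rounded up to the next multiple of KMEM_ZBASE (112), and
 * kmemsize[112 >> KMEM_ZSHIFT] then yields the index of the "128" entry
 * in kmemzones[] below, so the request is served from the 128-byte zone.
 * kmemsize[] is filled in by kmeminit().
 */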

#ifndef MALLOC_DEBUG_MAXZONES
#define	MALLOC_DEBUG_MAXZONES	1
#endif
static int numzones = MALLOC_DEBUG_MAXZONES;

/*
 * Small malloc(9) memory allocations are allocated from a set of UMA buckets
 * of various sizes.
 *
 * XXX: The comment here used to read "These won't be powers of two for
 * long."  It's possible that a significant amount of wasted memory could be
 * recovered by tuning the sizes of these buckets.
 */
struct {
	int kz_size;
	char *kz_name;
	uma_zone_t kz_zone[MALLOC_DEBUG_MAXZONES];
} kmemzones[] = {
	{16, "16", },
	{32, "32", },
	{64, "64", },
	{128, "128", },
	{256, "256", },
	{512, "512", },
	{1024, "1024", },
	{2048, "2048", },
	{4096, "4096", },
#if PAGE_SIZE > 4096
	{8192, "8192", },
#if PAGE_SIZE > 8192
	{16384, "16384", },
#if PAGE_SIZE > 16384
	{32768, "32768", },
#if PAGE_SIZE > 32768
	{65536, "65536", },
#if PAGE_SIZE > 65536
#error	"Unsupported PAGE_SIZE"
#endif	/* 65536 */
#endif	/* 32768 */
#endif	/* 16384 */
#endif	/* 8192 */
#endif	/* 4096 */
	{0, NULL},
};

/*
 * Zone to allocate malloc type descriptions from.  For ABI reasons, memory
 * types are described by a data structure passed by the declaring code, but
 * the malloc(9) implementation has its own data structure describing the
 * type and statistics.  This permits the malloc(9)-internal data structures
 * to be modified without breaking binary-compiled kernel modules that
 * declare malloc types.
 */
static uma_zone_t mt_zone;

u_long vm_kmem_size;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size, CTLFLAG_RDTUN, &vm_kmem_size, 0,
    "Size of kernel memory");

static u_long vm_kmem_size_min;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_min, CTLFLAG_RDTUN, &vm_kmem_size_min, 0,
    "Minimum size of kernel memory");

static u_long vm_kmem_size_max;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_max, CTLFLAG_RDTUN, &vm_kmem_size_max, 0,
    "Maximum size of kernel memory");

static u_int vm_kmem_size_scale;
SYSCTL_UINT(_vm, OID_AUTO, kmem_size_scale, CTLFLAG_RDTUN, &vm_kmem_size_scale, 0,
    "Scale factor for kernel memory size");

static int sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, kmem_map_size,
    CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_kmem_map_size, "LU", "Current kmem_map allocation size");

static int sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, kmem_map_free,
    CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_kmem_map_free, "LU", "Largest contiguous free range in kmem_map");

/*
 * The malloc_mtx protects the kmemstatistics linked list.
 */
struct mtx malloc_mtx;

#ifdef MALLOC_PROFILE
uint64_t krequests[KMEM_ZSIZE + 1];

static int sysctl_kern_mprof(SYSCTL_HANDLER_ARGS);
#endif

static int sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS);

/*
 * time_uptime of the last malloc(9) failure (induced or real).
 */
static time_t t_malloc_fail;

#if defined(MALLOC_MAKE_FAILURES) || (MALLOC_DEBUG_MAXZONES > 1)
SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD, 0,
    "Kernel malloc debugging options");
#endif

/*
 * malloc(9) fault injection -- cause malloc failures every (n) mallocs when
 * the caller specifies M_NOWAIT.  If set to 0, no failures are caused.
 */
#ifdef MALLOC_MAKE_FAILURES
static int malloc_failure_rate;
static int malloc_nowait_count;
static int malloc_failure_count;
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RW,
    &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail");
TUNABLE_INT("debug.malloc.failure_rate", &malloc_failure_rate);
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD,
    &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures");
#endif

static int
sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS)
{
	u_long size;

	size = kmem_map->size;
	return (sysctl_handle_long(oidp, &size, 0, req));
}

static int
sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS)
{
	u_long size;

	vm_map_lock_read(kmem_map);
	size = kmem_map->root != NULL ? kmem_map->root->max_free :
	    kmem_map->max_offset - kmem_map->min_offset;
	vm_map_unlock_read(kmem_map);
	return (sysctl_handle_long(oidp, &size, 0, req));
}

/*
 * malloc(9) uma zone separation -- sub-page buffer overruns in one
 * malloc type will affect only a subset of other malloc types.
 */
#if MALLOC_DEBUG_MAXZONES > 1
static void
tunable_set_numzones(void)
{

	TUNABLE_INT_FETCH("debug.malloc.numzones",
	    &numzones);

	/* Sanity check the number of malloc uma zones. */
	if (numzones <= 0)
		numzones = 1;
	if (numzones > MALLOC_DEBUG_MAXZONES)
		numzones = MALLOC_DEBUG_MAXZONES;
}
SYSINIT(numzones, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_set_numzones, NULL);
SYSCTL_INT(_debug_malloc, OID_AUTO, numzones, CTLFLAG_RDTUN,
    &numzones, 0, "Number of malloc uma subzones");

/*
 * Any number that changes regularly is an okay choice for the
 * offset.  Build numbers are pretty good if you have them.
 */
static u_int zone_offset = __FreeBSD_version;
TUNABLE_INT("debug.malloc.zone_offset", &zone_offset);
SYSCTL_UINT(_debug_malloc, OID_AUTO, zone_offset, CTLFLAG_RDTUN,
    &zone_offset, 0, "Separate malloc types by examining the "
    "Nth character in the malloc type short description.");

static u_int
mtp_get_subzone(const char *desc)
{
	size_t len;
	u_int val;

	if (desc == NULL || (len = strlen(desc)) == 0)
		return (0);
	val = desc[zone_offset % len];
	return (val % numzones);
}
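
/*
 * Worked example: with a zone_offset of, say, 901000 (the 9.1 value of
 * __FreeBSD_version) and the type short description "devbuf" (length 6),
 * mtp_get_subzone() picks desc[901000 % 6] == desc[4] == 'u', so the
 * type is assigned subzone ('u' % numzones).  Types whose descriptions
 * share that character land in the same subzone.
 */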
#elif MALLOC_DEBUG_MAXZONES == 0
#error "MALLOC_DEBUG_MAXZONES must be positive."
#else
static inline u_int
mtp_get_subzone(const char *desc)
{

	return (0);
}
#endif /* MALLOC_DEBUG_MAXZONES > 1 */

int
malloc_last_fail(void)
{

	return (time_uptime - t_malloc_fail);
}
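
/*
 * malloc_last_fail() returns the number of seconds since the most recent
 * malloc(9) failure.  An illustrative (hypothetical) use is throttling
 * optional M_NOWAIT allocations while the system is under memory
 * pressure:
 *
 *	if (malloc_last_fail() < 60)
 *		return;		(skip the optional cache growth)
 */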

/*
 * An allocation has succeeded -- update malloc type statistics for the
 * size of the bucket used.  Occurs within a critical section so that the
 * thread isn't preempted and doesn't migrate while updating per-CPU
 * statistics.
 */
static void
malloc_type_zone_allocated(struct malloc_type *mtp, unsigned long size,
    int zindx)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = mtp->ks_handle;
	mtsp = &mtip->mti_stats[curcpu];
	if (size > 0) {
		mtsp->mts_memalloced += size;
		mtsp->mts_numallocs++;
	}
	if (zindx != -1)
		mtsp->mts_size |= 1 << zindx;

#ifdef KDTRACE_HOOKS
	if (dtrace_malloc_probe != NULL) {
		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_MALLOC];
		if (probe_id != 0)
			(dtrace_malloc_probe)(probe_id,
			    (uintptr_t) mtp, (uintptr_t) mtip,
			    (uintptr_t) mtsp, size, zindx);
	}
#endif

	critical_exit();
}

void
malloc_type_allocated(struct malloc_type *mtp, unsigned long size)
{

	if (size > 0)
		malloc_type_zone_allocated(mtp, size, -1);
}

/*
 * A free operation has occurred -- update malloc type statistics for the
 * size of the bucket used.  Occurs within a critical section so that the
 * thread isn't preempted and doesn't migrate while updating per-CPU
 * statistics.
 */
void
malloc_type_freed(struct malloc_type *mtp, unsigned long size)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = mtp->ks_handle;
	mtsp = &mtip->mti_stats[curcpu];
	mtsp->mts_memfreed += size;
	mtsp->mts_numfrees++;

#ifdef KDTRACE_HOOKS
	if (dtrace_malloc_probe != NULL) {
		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_FREE];
		if (probe_id != 0)
			(dtrace_malloc_probe)(probe_id,
			    (uintptr_t) mtp, (uintptr_t) mtip,
			    (uintptr_t) mtsp, size, 0);
	}
#endif

	critical_exit();
}

/*
 * malloc:
 *
 *	Allocate a block of memory.
 *
 *	If M_NOWAIT is set, this routine will not block and will return
 *	NULL if the allocation fails.
 */
void *
malloc(unsigned long size, struct malloc_type *mtp, int flags)
{
	int indx;
	struct malloc_type_internal *mtip;
	caddr_t va;
	uma_zone_t zone;
#if defined(DIAGNOSTIC) || defined(DEBUG_REDZONE)
	unsigned long osize = size;
#endif

#ifdef INVARIANTS
	KASSERT(mtp->ks_magic == M_MAGIC, ("malloc: bad malloc type magic"));
	/*
	 * Check that exactly one of M_WAITOK or M_NOWAIT is specified.
	 */
	indx = flags & (M_WAITOK | M_NOWAIT);
	if (indx != M_NOWAIT && indx != M_WAITOK) {
		static	struct timeval lasterr;
		static	int curerr, once;
		if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) {
			printf("Bad malloc flags: %x\n", indx);
			kdb_backtrace();
			flags |= M_WAITOK;
			once++;
		}
	}
#endif
#ifdef MALLOC_MAKE_FAILURES
	if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) {
		atomic_add_int(&malloc_nowait_count, 1);
		if ((malloc_nowait_count % malloc_failure_rate) == 0) {
			atomic_add_int(&malloc_failure_count, 1);
			t_malloc_fail = time_uptime;
			return (NULL);
		}
	}
#endif
	if (flags & M_WAITOK)
		KASSERT(curthread->td_intr_nesting_level == 0,
		    ("malloc(M_WAITOK) in interrupt context"));

#ifdef DEBUG_MEMGUARD
	if (memguard_cmp(mtp, size)) {
		va = memguard_alloc(size, flags);
		if (va != NULL)
			return (va);
		/* This is unfortunate but should not be fatal. */
	}
#endif

#ifdef DEBUG_REDZONE
	size = redzone_size_ntor(size);
#endif

	if (size <= KMEM_ZMAX) {
		mtip = mtp->ks_handle;
		if (size & KMEM_ZMASK)
			size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
		indx = kmemsize[size >> KMEM_ZSHIFT];
		KASSERT(mtip->mti_zone < numzones,
		    ("mti_zone %u out of range %d",
		    mtip->mti_zone, numzones));
		zone = kmemzones[indx].kz_zone[mtip->mti_zone];
#ifdef MALLOC_PROFILE
		krequests[size >> KMEM_ZSHIFT]++;
#endif
		va = uma_zalloc(zone, flags);
		if (va != NULL)
			size = zone->uz_size;
		malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
	} else {
		size = roundup(size, PAGE_SIZE);
		zone = NULL;
		va = uma_large_malloc(size, flags);
		malloc_type_allocated(mtp, va == NULL ? 0 : size);
	}
	if (flags & M_WAITOK)
		KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL"));
	else if (va == NULL)
		t_malloc_fail = time_uptime;
#ifdef DIAGNOSTIC
	if (va != NULL && !(flags & M_ZERO)) {
		memset(va, 0x70, osize);
	}
#endif
#ifdef DEBUG_REDZONE
	if (va != NULL)
		va = redzone_setup(va, osize);
#endif
	return ((void *) va);
}

/*
 * free:
 *
 *	Free a block of memory allocated by malloc.
 *
 *	This routine may not block.
 */
void
free(void *addr, struct malloc_type *mtp)
{
	uma_slab_t slab;
	u_long size;

	KASSERT(mtp->ks_magic == M_MAGIC, ("free: bad malloc type magic"));

	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return;

#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(addr)) {
		memguard_free(addr);
		return;
	}
#endif

#ifdef DEBUG_REDZONE
	redzone_check(addr);
	addr = redzone_addr_ntor(addr);
#endif

	slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK));

	if (slab == NULL)
		panic("free: address %p(%p) has not been allocated.\n",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

	if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
#ifdef INVARIANTS
		struct malloc_type **mtpp = addr;
#endif
		size = slab->us_keg->uk_size;
#ifdef INVARIANTS
		/*
		 * Cache a pointer to the malloc_type that most recently freed
		 * this memory here.  This way we know who is most likely to
		 * have stepped on it later.
		 *
		 * This code assumes that size is a multiple of 8 bytes on
		 * 64-bit machines.
		 */
		mtpp = (struct malloc_type **)
		    ((unsigned long)mtpp & ~UMA_ALIGN_PTR);
		mtpp += (size - sizeof(struct malloc_type *)) /
		    sizeof(struct malloc_type *);
		*mtpp = mtp;
#endif
		uma_zfree_arg(LIST_FIRST(&slab->us_keg->uk_zones), addr, slab);
	} else {
		size = slab->us_size;
		uma_large_free(slab);
	}
	malloc_type_freed(mtp, size);
}

/*
 * realloc: change the size of a memory block
 */
void *
realloc(void *addr, unsigned long size, struct malloc_type *mtp, int flags)
{
	uma_slab_t slab;
	unsigned long alloc;
	void *newaddr;

	KASSERT(mtp->ks_magic == M_MAGIC,
	    ("realloc: bad malloc type magic"));

	/* realloc(NULL, ...) is equivalent to malloc(...) */
	if (addr == NULL)
		return (malloc(size, mtp, flags));

	/*
	 * XXX: Should report free of old memory and alloc of new memory to
	 * per-CPU stats.
	 */

#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(addr))
		return (memguard_realloc(addr, size, mtp, flags));
#endif

#ifdef DEBUG_REDZONE
	slab = NULL;
	alloc = redzone_get_size(addr);
#else
	slab = vtoslab((vm_offset_t)addr & ~(UMA_SLAB_MASK));

	/* Sanity check */
	KASSERT(slab != NULL,
	    ("realloc: address %p out of range", (void *)addr));

	/* Get the size of the original block */
	if (!(slab->us_flags & UMA_SLAB_MALLOC))
		alloc = slab->us_keg->uk_size;
	else
		alloc = slab->us_size;

	/* Reuse the original block if appropriate */
	if (size <= alloc
	    && (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE))
		return (addr);
#endif /* !DEBUG_REDZONE */

	/* Allocate a new, bigger (or smaller) block */
	if ((newaddr = malloc(size, mtp, flags)) == NULL)
		return (NULL);

	/* Copy over original contents */
	bcopy(addr, newaddr, min(size, alloc));
	free(addr, mtp);
	return (newaddr);
}

/*
 * reallocf: same as realloc() but free memory on failure.
 */
void *
reallocf(void *addr, unsigned long size, struct malloc_type *mtp, int flags)
{
	void *mem;

	if ((mem = realloc(addr, size, mtp, flags)) == NULL)
		free(addr, mtp);
	return (mem);
}
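
/*
 * reallocf() avoids the classic leak where the only pointer to a buffer
 * is overwritten before the failure check.  An illustrative sketch, with
 * hypothetical variables:
 *
 *	buf = reallocf(buf, newsize, M_TEMP, M_NOWAIT);
 *	if (buf == NULL)
 *		return (ENOMEM);	(old buffer was already freed)
 *
 * The equivalent realloc() pattern needs a temporary pointer so that the
 * original buffer can still be freed if the resize fails.
 */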

/*
 * Initialize the kernel memory allocator
 */
/* ARGSUSED*/
static void
kmeminit(void *dummy)
{
	uint8_t indx;
	u_long mem_size, tmp;
	int i;

	mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);

	/*
	 * Try to auto-tune the kernel memory size, so that it is
	 * more applicable for a wider range of machine sizes.  The
	 * VM_KMEM_SIZE_MAX is dependent on the maximum KVA space
	 * available.
	 *
	 * Note that the kmem_map is also used by the zone allocator,
	 * so make sure that there is enough space.
	 */
	vm_kmem_size = VM_KMEM_SIZE + nmbclusters * PAGE_SIZE;
	mem_size = cnt.v_page_count;

#if defined(VM_KMEM_SIZE_SCALE)
	vm_kmem_size_scale = VM_KMEM_SIZE_SCALE;
#endif
	TUNABLE_INT_FETCH("vm.kmem_size_scale", &vm_kmem_size_scale);
	if (vm_kmem_size_scale > 0 &&
	    (mem_size / vm_kmem_size_scale) > (vm_kmem_size / PAGE_SIZE))
		vm_kmem_size = (mem_size / vm_kmem_size_scale) * PAGE_SIZE;

#if defined(VM_KMEM_SIZE_MIN)
	vm_kmem_size_min = VM_KMEM_SIZE_MIN;
#endif
	TUNABLE_ULONG_FETCH("vm.kmem_size_min", &vm_kmem_size_min);
	if (vm_kmem_size_min > 0 && vm_kmem_size < vm_kmem_size_min) {
		vm_kmem_size = vm_kmem_size_min;
	}

#if defined(VM_KMEM_SIZE_MAX)
	vm_kmem_size_max = VM_KMEM_SIZE_MAX;
#endif
	TUNABLE_ULONG_FETCH("vm.kmem_size_max", &vm_kmem_size_max);
	if (vm_kmem_size_max > 0 && vm_kmem_size >= vm_kmem_size_max)
		vm_kmem_size = vm_kmem_size_max;

	/* Allow final override from the kernel environment */
	TUNABLE_ULONG_FETCH("vm.kmem_size", &vm_kmem_size);

	/*
	 * Limit kmem virtual size to twice the physical memory.
	 * This allows for kmem map sparseness, but limits the size
	 * to something sane.  Be careful not to overflow the 32-bit
	 * ints while doing the check or the adjustment.
	 */
	if (vm_kmem_size / 2 / PAGE_SIZE > mem_size)
		vm_kmem_size = 2 * mem_size * PAGE_SIZE;
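
	/*
	 * Worked example of the sizing above, assuming a platform where
	 * VM_KMEM_SIZE_SCALE is 3 (as on i386) and 1GB of RAM, i.e.
	 * 262144 4KB pages: the scaled size is 262144 / 3 pages, roughly
	 * 341MB, which then remains subject to the min/max clamps, the
	 * "vm.kmem_size" override, and the 2x-physical-memory cap.
	 */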

#ifdef DEBUG_MEMGUARD
	tmp = memguard_fudge(vm_kmem_size, vm_kmem_size_max);
#else
	tmp = vm_kmem_size;
#endif
	kmem_map = kmem_suballoc(kernel_map, &kmembase, &kmemlimit,
	    tmp, TRUE);
	kmem_map->system_map = 1;

#ifdef DEBUG_MEMGUARD
	/*
	 * Initialize MemGuard if support compiled in.  MemGuard is a
	 * replacement allocator used for detecting tamper-after-free
	 * scenarios as they occur.  It is only used for debugging.
	 */
	memguard_init(kmem_map);
#endif

	uma_startup2();

	mt_zone = uma_zcreate("mt_zone", sizeof(struct malloc_type_internal),
#ifdef INVARIANTS
	    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
	    NULL, NULL, NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);
	for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
		int size = kmemzones[indx].kz_size;
		char *name = kmemzones[indx].kz_name;
		int subzone;

		for (subzone = 0; subzone < numzones; subzone++) {
			kmemzones[indx].kz_zone[subzone] =
			    uma_zcreate(name, size,
#ifdef INVARIANTS
			    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
			    NULL, NULL, NULL, NULL,
#endif
			    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);
		}
		for (; i <= size; i += KMEM_ZBASE)
			kmemsize[i >> KMEM_ZSHIFT] = indx;
	}
}

void
malloc_init(void *data)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;

	KASSERT(cnt.v_page_count != 0, ("malloc_register before vm_init"));

	mtp = data;
	if (mtp->ks_magic != M_MAGIC)
		panic("malloc_init: bad malloc type magic");

	mtip = uma_zalloc(mt_zone, M_WAITOK | M_ZERO);
	mtp->ks_handle = mtip;
	mtip->mti_zone = mtp_get_subzone(mtp->ks_shortdesc);

	mtx_lock(&malloc_mtx);
	mtp->ks_next = kmemstatistics;
	kmemstatistics = mtp;
	kmemcount++;
	mtx_unlock(&malloc_mtx);
}
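
/*
 * malloc_init() is not normally called directly: MALLOC_DEFINE()
 * declares the struct malloc_type and registers it through SYSINIT()
 * (and arranges malloc_uninit() through SYSUNINIT()), so types declared
 * by kernel modules are registered and torn down automatically at
 * module load and unload time.
 */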

void
malloc_uninit(void *data)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;
	struct malloc_type *mtp, *temp;
	uma_slab_t slab;
	long temp_allocs, temp_bytes;
	int i;

	mtp = data;
	KASSERT(mtp->ks_magic == M_MAGIC,
	    ("malloc_uninit: bad malloc type magic"));
	KASSERT(mtp->ks_handle != NULL, ("malloc_uninit: cookie NULL"));

	mtx_lock(&malloc_mtx);
	mtip = mtp->ks_handle;
	mtp->ks_handle = NULL;
	if (mtp != kmemstatistics) {
		for (temp = kmemstatistics; temp != NULL;
		    temp = temp->ks_next) {
			if (temp->ks_next == mtp) {
				temp->ks_next = mtp->ks_next;
				break;
			}
		}
		KASSERT(temp,
		    ("malloc_uninit: type '%s' not found", mtp->ks_shortdesc));
	} else
		kmemstatistics = mtp->ks_next;
	kmemcount--;
	mtx_unlock(&malloc_mtx);

	/*
	 * Look for memory leaks.
	 */
	temp_allocs = temp_bytes = 0;
	for (i = 0; i < MAXCPU; i++) {
		mtsp = &mtip->mti_stats[i];
		temp_allocs += mtsp->mts_numallocs;
		temp_allocs -= mtsp->mts_numfrees;
		temp_bytes += mtsp->mts_memalloced;
		temp_bytes -= mtsp->mts_memfreed;
	}
	if (temp_allocs > 0 || temp_bytes > 0) {
		printf("Warning: memory type %s leaked memory on destroy "
		    "(%ld allocations, %ld bytes leaked).\n", mtp->ks_shortdesc,
		    temp_allocs, temp_bytes);
	}

	slab = vtoslab((vm_offset_t) mtip & (~UMA_SLAB_MASK));
	uma_zfree_arg(mt_zone, mtip, slab);
}

struct malloc_type *
malloc_desc2type(const char *desc)
{
	struct malloc_type *mtp;

	mtx_assert(&malloc_mtx, MA_OWNED);
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		if (strcmp(mtp->ks_shortdesc, desc) == 0)
			return (mtp);
	}
	return (NULL);
}
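
/*
 * malloc_desc2type() requires malloc_mtx so that the returned type
 * cannot be unloaded mid-lookup.  An illustrative caller:
 *
 *	mtx_lock(&malloc_mtx);
 *	mtp = malloc_desc2type("devbuf");
 *	(... inspect mtp, if non-NULL, while the lock is held ...)
 *	mtx_unlock(&malloc_mtx);
 */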

static int
sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS)
{
	struct malloc_type_stream_header mtsh;
	struct malloc_type_internal *mtip;
	struct malloc_type_header mth;
	struct malloc_type *mtp;
	int error, i;
	struct sbuf sbuf;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	mtx_lock(&malloc_mtx);

	/*
	 * Insert stream header.
	 */
	bzero(&mtsh, sizeof(mtsh));
	mtsh.mtsh_version = MALLOC_TYPE_STREAM_VERSION;
	mtsh.mtsh_maxcpus = MAXCPU;
	mtsh.mtsh_count = kmemcount;
	(void)sbuf_bcat(&sbuf, &mtsh, sizeof(mtsh));

	/*
	 * Insert alternating sequence of type headers and type statistics.
	 */
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = (struct malloc_type_internal *)mtp->ks_handle;

		/*
		 * Insert type header.
		 */
		bzero(&mth, sizeof(mth));
		strlcpy(mth.mth_name, mtp->ks_shortdesc, MALLOC_MAX_NAME);
		(void)sbuf_bcat(&sbuf, &mth, sizeof(mth));

		/*
		 * Insert type statistics for each CPU.
		 */
		for (i = 0; i < MAXCPU; i++) {
			(void)sbuf_bcat(&sbuf, &mtip->mti_stats[i],
			    sizeof(mtip->mti_stats[i]));
		}
	}
	mtx_unlock(&malloc_mtx);
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, malloc_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
    0, 0, sysctl_kern_malloc_stats, "s,malloc_type_ustats",
    "Return malloc types");

SYSCTL_INT(_kern, OID_AUTO, malloc_count, CTLFLAG_RD, &kmemcount, 0,
    "Count of kernel malloc types");

void
malloc_type_list(malloc_type_list_func_t *func, void *arg)
{
	struct malloc_type *mtp, **bufmtp;
	int count, i;
	size_t buflen;

	mtx_lock(&malloc_mtx);
restart:
	mtx_assert(&malloc_mtx, MA_OWNED);
	count = kmemcount;
	mtx_unlock(&malloc_mtx);

	buflen = sizeof(struct malloc_type *) * count;
	bufmtp = malloc(buflen, M_TEMP, M_WAITOK);

	mtx_lock(&malloc_mtx);

	if (count < kmemcount) {
		free(bufmtp, M_TEMP);
		goto restart;
	}

	for (mtp = kmemstatistics, i = 0; mtp != NULL; mtp = mtp->ks_next, i++)
		bufmtp[i] = mtp;

	mtx_unlock(&malloc_mtx);

	for (i = 0; i < count; i++)
		(func)(bufmtp[i], arg);

	free(bufmtp, M_TEMP);
}
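
/*
 * An illustrative malloc_type_list() callback, counting registered
 * types; the names here are hypothetical:
 *
 *	static void
 *	count_type(struct malloc_type *mtp, void *arg)
 *	{
 *
 *		(*(int *)arg)++;
 *	}
 *
 *	int n = 0;
 *	malloc_type_list(count_type, &n);
 *
 * Note that the callback runs without malloc_mtx held; the snapshot in
 * bufmtp[] keeps the traversal safe against concurrent registration.
 */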

#ifdef DDB
DB_SHOW_COMMAND(malloc, db_show_malloc)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;
	uint64_t allocs, frees;
	uint64_t alloced, freed;
	int i;

	db_printf("%18s %12s %12s %12s\n", "Type", "InUse", "MemUse",
	    "Requests");
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = (struct malloc_type_internal *)mtp->ks_handle;
		allocs = 0;
		frees = 0;
		alloced = 0;
		freed = 0;
		for (i = 0; i < MAXCPU; i++) {
			allocs += mtip->mti_stats[i].mts_numallocs;
			frees += mtip->mti_stats[i].mts_numfrees;
			alloced += mtip->mti_stats[i].mts_memalloced;
			freed += mtip->mti_stats[i].mts_memfreed;
		}
		db_printf("%18s %12ju %12juK %12ju\n",
		    mtp->ks_shortdesc, allocs - frees,
		    (alloced - freed + 1023) / 1024, allocs);
	}
}

#if MALLOC_DEBUG_MAXZONES > 1
DB_SHOW_COMMAND(multizone_matches, db_show_multizone_matches)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;
	u_int subzone;

	if (!have_addr) {
		db_printf("Usage: show multizone_matches <malloc type/addr>\n");
		return;
	}
	mtp = (void *)addr;
	if (mtp->ks_magic != M_MAGIC) {
		db_printf("Magic %lx does not match expected %x\n",
		    mtp->ks_magic, M_MAGIC);
		return;
	}

	mtip = mtp->ks_handle;
	subzone = mtip->mti_zone;

	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = mtp->ks_handle;
		if (mtip->mti_zone != subzone)
			continue;
		db_printf("%s\n", mtp->ks_shortdesc);
	}
}
#endif /* MALLOC_DEBUG_MAXZONES > 1 */
#endif /* DDB */

#ifdef MALLOC_PROFILE

static int
sysctl_kern_mprof(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	uint64_t count;
	uint64_t waste;
	uint64_t mem;
	int error;
	int rsize;
	int size;
	int i;

	waste = 0;
	mem = 0;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	sbuf_printf(&sbuf,
	    "\n  Size                    Requests  Real Size\n");
	for (i = 0; i < KMEM_ZSIZE; i++) {
		size = i << KMEM_ZSHIFT;
		rsize = kmemzones[kmemsize[i]].kz_size;
		count = krequests[i];

		sbuf_printf(&sbuf, "%6d%28llu%11d\n", size,
		    (unsigned long long)count, rsize);

		if ((rsize * count) > (size * count))
			waste += (rsize * count) - (size * count);
		mem += (rsize * count);
	}
	sbuf_printf(&sbuf,
	    "\nTotal memory used:\t%30llu\nTotal Memory wasted:\t%30llu\n",
	    (unsigned long long)mem, (unsigned long long)waste);
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

SYSCTL_OID(_kern, OID_AUTO, mprof, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, sysctl_kern_mprof, "A", "Malloc Profiling");
#endif /* MALLOC_PROFILE */