/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2005-2009 Robert N. M. Watson
 * Copyright (c) 2008 Otto Moerbeek <otto@drijf.net> (mallocarray)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 */

/*
 * Kernel malloc(9) implementation -- general purpose kernel memory allocator
 * based on memory types. The back end is implemented using the UMA(9) zone
 * allocator. A set of fixed-size buckets is used for smaller allocations,
 * and a special UMA allocation interface is used for larger allocations.
 * Callers declare memory types, and statistics are maintained independently
 * for each memory type. Statistics are maintained per-CPU for performance
 * reasons. See malloc(9) and comments in malloc.h for a detailed
 * description.
 */
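
/*
 * Illustrative usage sketch (not compiled here; M_EXAMPLE and
 * struct example_softc are hypothetical names): a consumer declares a
 * malloc type, then allocates and frees against it so that the
 * per-type statistics maintained below are attributed correctly.
 *
 *	MALLOC_DEFINE(M_EXAMPLE, "example", "example consumer buffers");
 *
 *	struct example_softc *sc;
 *
 *	sc = malloc(sizeof(*sc), M_EXAMPLE, M_WAITOK | M_ZERO);
 *	...
 *	free(sc, M_EXAMPLE);
 */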

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/vmmeter.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/vmem.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_domainset.h>
#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#ifdef DEBUG_MEMGUARD
#include <vm/memguard.h>
#endif
#ifdef DEBUG_REDZONE
#include <vm/redzone.h>
#endif

#if defined(INVARIANTS) && defined(__i386__)
#include <machine/cpu.h>
#endif

#include <ddb/ddb.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>

bool __read_frequently dtrace_malloc_enabled;
dtrace_malloc_probe_func_t __read_mostly dtrace_malloc_probe;
#endif

#if defined(INVARIANTS) || defined(MALLOC_MAKE_FAILURES) || \
    defined(DEBUG_MEMGUARD) || defined(DEBUG_REDZONE)
#define	MALLOC_DEBUG	1
#endif

/*
 * When realloc() is called, if the new size is sufficiently smaller than
 * the old size, realloc() will allocate a new, smaller block to avoid
 * wasting memory. 'Sufficiently smaller' is defined as: newsize <=
 * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'.
 */
#ifndef REALLOC_FRACTION
#define	REALLOC_FRACTION	1	/* new block if <= half the size */
#endif
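
/*
 * Arithmetic sketch of the rule above, as realloc() below implements
 * it: with the default n = 1, shrinking a 1024-byte allocation to 512
 * bytes or less allocates a new block, while a new size in the range
 * 513..1024 bytes reuses the existing block.
 */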

/*
 * Centrally define some common malloc types.
 */
MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

static struct malloc_type *kmemstatistics;
static int kmemcount;

#define	KMEM_ZSHIFT	4
#define	KMEM_ZBASE	16
#define	KMEM_ZMASK	(KMEM_ZBASE - 1)

#define	KMEM_ZMAX	65536
#define	KMEM_ZSIZE	(KMEM_ZMAX >> KMEM_ZSHIFT)
static uint8_t kmemsize[KMEM_ZSIZE + 1];
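
/*
 * Illustrative mapping (a sketch of the arithmetic used in malloc()
 * below): a 100-byte request is first rounded up to 112, the next
 * multiple of KMEM_ZBASE, and kmemsize[112 >> KMEM_ZSHIFT] then
 * selects the index of the 128-byte bucket.
 */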

#ifndef MALLOC_DEBUG_MAXZONES
#define	MALLOC_DEBUG_MAXZONES	1
#endif
static int numzones = MALLOC_DEBUG_MAXZONES;

/*
 * Small malloc(9) memory allocations are allocated from a set of UMA buckets
 * of various sizes.
 *
 * XXX: The comment here used to read "These won't be powers of two for
 * long." It's possible that a significant amount of wasted memory could be
 * recovered by tuning the sizes of these buckets.
 */
struct {
	int kz_size;
	char *kz_name;
	uma_zone_t kz_zone[MALLOC_DEBUG_MAXZONES];
} kmemzones[] = {
	{16, "16", },
	{32, "32", },
	{64, "64", },
	{128, "128", },
	{256, "256", },
	{512, "512", },
	{1024, "1024", },
	{2048, "2048", },
	{4096, "4096", },
	{8192, "8192", },
	{16384, "16384", },
	{32768, "32768", },
	{65536, "65536", },
	{0, NULL},
};

/*
 * Zone to allocate malloc type descriptions from. For ABI reasons, memory
 * types are described by a data structure passed by the declaring code, but
 * the malloc(9) implementation has its own data structure describing the
 * type and statistics. This permits the malloc(9)-internal data structures
 * to be modified without breaking binary-compiled kernel modules that
 * declare malloc types.
 */
static uma_zone_t mt_zone;
static uma_zone_t mt_stats_zone;

u_long vm_kmem_size;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size, CTLFLAG_RDTUN, &vm_kmem_size, 0,
    "Size of kernel memory");

static u_long kmem_zmax = KMEM_ZMAX;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_zmax, CTLFLAG_RDTUN, &kmem_zmax, 0,
    "Maximum allocation size for which malloc(9) uses UMA as the backend");

static u_long vm_kmem_size_min;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_min, CTLFLAG_RDTUN, &vm_kmem_size_min, 0,
    "Minimum size of kernel memory");

static u_long vm_kmem_size_max;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_max, CTLFLAG_RDTUN, &vm_kmem_size_max, 0,
    "Maximum size of kernel memory");

static u_int vm_kmem_size_scale;
SYSCTL_UINT(_vm, OID_AUTO, kmem_size_scale, CTLFLAG_RDTUN, &vm_kmem_size_scale, 0,
    "Scale factor for kernel memory size");

static int sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, kmem_map_size,
    CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_kmem_map_size, "LU", "Current kmem allocation size");

static int sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, kmem_map_free,
    CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_kmem_map_free, "LU", "Free space in kmem");

/*
 * The malloc_mtx protects the kmemstatistics linked list.
 */
struct mtx malloc_mtx;

#ifdef MALLOC_PROFILE
uint64_t krequests[KMEM_ZSIZE + 1];

static int sysctl_kern_mprof(SYSCTL_HANDLER_ARGS);
#endif

static int sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS);

/*
 * time_uptime of the last malloc(9) failure (induced or real).
 */
static time_t t_malloc_fail;

#if defined(MALLOC_MAKE_FAILURES) || (MALLOC_DEBUG_MAXZONES > 1)
static SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD, 0,
    "Kernel malloc debugging options");
#endif

/*
 * malloc(9) fault injection -- cause malloc failures every (n) mallocs when
 * the caller specifies M_NOWAIT. If set to 0, no failures are caused.
 */
#ifdef MALLOC_MAKE_FAILURES
static int malloc_failure_rate;
static int malloc_nowait_count;
static int malloc_failure_count;
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RWTUN,
    &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail");
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD,
    &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures");
#endif
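
/*
 * Illustrative use of the knob above (a sketch; requires a kernel
 * built with MALLOC_MAKE_FAILURES): setting the tunable or sysctl
 * debug.malloc.failure_rate=100 makes every 100th M_NOWAIT allocation
 * fail, exercising callers' error-handling paths.
 */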

static int
sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS)
{
	u_long size;

	size = uma_size();
	return (sysctl_handle_long(oidp, &size, 0, req));
}

static int
sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS)
{
	u_long size, limit;

	/* The sysctl is unsigned, implement as a saturation value. */
	size = uma_size();
	limit = uma_limit();
	if (size > limit)
		size = 0;
	else
		size = limit - size;
	return (sysctl_handle_long(oidp, &size, 0, req));
}

/*
 * malloc(9) uma zone separation -- sub-page buffer overruns in one
 * malloc type will affect only a subset of other malloc types.
 */
#if MALLOC_DEBUG_MAXZONES > 1
static void
tunable_set_numzones(void)
{

	TUNABLE_INT_FETCH("debug.malloc.numzones",
	    &numzones);

	/* Sanity check the number of malloc uma zones. */
	if (numzones <= 0)
		numzones = 1;
	if (numzones > MALLOC_DEBUG_MAXZONES)
		numzones = MALLOC_DEBUG_MAXZONES;
}
SYSINIT(numzones, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_set_numzones, NULL);
SYSCTL_INT(_debug_malloc, OID_AUTO, numzones, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &numzones, 0, "Number of malloc uma subzones");

/*
 * Any number that changes regularly is an okay choice for the
 * offset. Build numbers are pretty good if you have them.
 */
static u_int zone_offset = __FreeBSD_version;
TUNABLE_INT("debug.malloc.zone_offset", &zone_offset);
SYSCTL_UINT(_debug_malloc, OID_AUTO, zone_offset, CTLFLAG_RDTUN,
    &zone_offset, 0, "Separate malloc types by examining the "
    "Nth character in the malloc type short description.");
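
/*
 * Worked example of the subzone selection below (a sketch with a
 * hypothetical zone_offset of 1200086 and numzones of 8): a type whose
 * short description is "devbuf" (length 6) is keyed on
 * desc[1200086 % 6] = desc[2] = 'v', so its subzone is 'v' % 8 = 6.
 */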

static void
mtp_set_subzone(struct malloc_type *mtp)
{
	struct malloc_type_internal *mtip;
	const char *desc;
	size_t len;
	u_int val;

	mtip = mtp->ks_handle;
	desc = mtp->ks_shortdesc;
	if (desc == NULL || (len = strlen(desc)) == 0)
		val = 0;
	else
		val = desc[zone_offset % len];
	mtip->mti_zone = (val % numzones);
}

static inline u_int
mtp_get_subzone(struct malloc_type *mtp)
{
	struct malloc_type_internal *mtip;

	mtip = mtp->ks_handle;

	KASSERT(mtip->mti_zone < numzones,
	    ("mti_zone %u out of range %d",
	    mtip->mti_zone, numzones));
	return (mtip->mti_zone);
}
#elif MALLOC_DEBUG_MAXZONES == 0
#error "MALLOC_DEBUG_MAXZONES must be positive."
#else
static void
mtp_set_subzone(struct malloc_type *mtp)
{
	struct malloc_type_internal *mtip;

	mtip = mtp->ks_handle;
	mtip->mti_zone = 0;
}

static inline u_int
mtp_get_subzone(struct malloc_type *mtp)
{

	return (0);
}
#endif /* MALLOC_DEBUG_MAXZONES > 1 */

int
malloc_last_fail(void)
{

	return (time_uptime - t_malloc_fail);
}

/*
 * An allocation has succeeded -- update malloc type statistics for the
 * size of the bucket used. Occurs within a critical section so that the
 * thread isn't preempted and doesn't migrate while updating per-CPU
 * statistics.
 */
static void
malloc_type_zone_allocated(struct malloc_type *mtp, unsigned long size,
    int zindx)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = mtp->ks_handle;
	mtsp = zpcpu_get(mtip->mti_stats);
	if (size > 0) {
		mtsp->mts_memalloced += size;
		mtsp->mts_numallocs++;
	}
	if (zindx != -1)
		mtsp->mts_size |= 1 << zindx;

#ifdef KDTRACE_HOOKS
	if (__predict_false(dtrace_malloc_enabled)) {
		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_MALLOC];
		if (probe_id != 0)
			(dtrace_malloc_probe)(probe_id,
			    (uintptr_t) mtp, (uintptr_t) mtip,
			    (uintptr_t) mtsp, size, zindx);
	}
#endif

	critical_exit();
}

void
malloc_type_allocated(struct malloc_type *mtp, unsigned long size)
{

	if (size > 0)
		malloc_type_zone_allocated(mtp, size, -1);
}

/*
 * A free operation has occurred -- update malloc type statistics for the
 * size of the bucket being freed. Occurs within a critical section so
 * that the thread isn't preempted and doesn't migrate while updating
 * per-CPU statistics.
 */
void
malloc_type_freed(struct malloc_type *mtp, unsigned long size)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = mtp->ks_handle;
	mtsp = zpcpu_get(mtip->mti_stats);
	mtsp->mts_memfreed += size;
	mtsp->mts_numfrees++;

#ifdef KDTRACE_HOOKS
	if (__predict_false(dtrace_malloc_enabled)) {
		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_FREE];
		if (probe_id != 0)
			(dtrace_malloc_probe)(probe_id,
			    (uintptr_t) mtp, (uintptr_t) mtip,
			    (uintptr_t) mtsp, size, 0);
	}
#endif

	critical_exit();
}

/*
 * contigmalloc:
 *
 *	Allocate a block of physically contiguous memory.
 *
 *	If M_NOWAIT is set, this routine will not block and will return
 *	NULL if the allocation fails.
 */
void *
contigmalloc(unsigned long size, struct malloc_type *type, int flags,
    vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
    vm_paddr_t boundary)
{
	void *ret;

	ret = (void *)kmem_alloc_contig(size, flags, low, high, alignment,
	    boundary, VM_MEMATTR_DEFAULT);
	if (ret != NULL)
		malloc_type_allocated(type, round_page(size));
	return (ret);
}

void *
contigmalloc_domainset(unsigned long size, struct malloc_type *type,
    struct domainset *ds, int flags, vm_paddr_t low, vm_paddr_t high,
    unsigned long alignment, vm_paddr_t boundary)
{
	void *ret;

	ret = (void *)kmem_alloc_contig_domainset(ds, size, flags, low, high,
	    alignment, boundary, VM_MEMATTR_DEFAULT);
	if (ret != NULL)
		malloc_type_allocated(type, round_page(size));
	return (ret);
}

/*
 * contigfree:
 *
 *	Free a block of memory allocated by contigmalloc.
 *
 *	This routine may not block.
 */
void
contigfree(void *addr, unsigned long size, struct malloc_type *type)
{

	kmem_free((vm_offset_t)addr, size);
	malloc_type_freed(type, round_page(size));
}
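
/*
 * Illustrative contigmalloc(9) usage (a sketch, reusing the M_DEVBUF
 * type defined above): allocate 64KB of physically contiguous memory
 * below 4GB, aligned to 4KB and not crossing a 1MB boundary:
 *
 *	void *buf;
 *
 *	buf = contigmalloc(65536, M_DEVBUF, M_WAITOK, 0, 0xffffffffUL,
 *	    4096, 1024 * 1024);
 *	...
 *	contigfree(buf, 65536, M_DEVBUF);
 */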

#ifdef MALLOC_DEBUG
static int
malloc_dbg(caddr_t *vap, size_t *sizep, struct malloc_type *mtp,
    int flags)
{
#ifdef INVARIANTS
	int indx;

	KASSERT(mtp->ks_magic == M_MAGIC, ("malloc: bad malloc type magic"));
	/*
	 * Check that exactly one of M_WAITOK or M_NOWAIT is specified.
	 */
	indx = flags & (M_WAITOK | M_NOWAIT);
	if (indx != M_NOWAIT && indx != M_WAITOK) {
		static struct timeval lasterr;
		static int curerr, once;
		if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) {
			printf("Bad malloc flags: %x\n", indx);
			kdb_backtrace();
			flags |= M_WAITOK;
			once++;
		}
	}
#endif
#ifdef MALLOC_MAKE_FAILURES
	if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) {
		atomic_add_int(&malloc_nowait_count, 1);
		if ((malloc_nowait_count % malloc_failure_rate) == 0) {
			atomic_add_int(&malloc_failure_count, 1);
			t_malloc_fail = time_uptime;
			*vap = NULL;
			return (EJUSTRETURN);
		}
	}
#endif
	if (flags & M_WAITOK) {
		KASSERT(curthread->td_intr_nesting_level == 0,
		    ("malloc(M_WAITOK) in interrupt context"));
		KASSERT(curthread->td_epochnest == 0,
		    ("malloc(M_WAITOK) in epoch context"));
	}
	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("malloc: called with spinlock or critical section held"));

#ifdef DEBUG_MEMGUARD
	if (memguard_cmp_mtp(mtp, *sizep)) {
		*vap = memguard_alloc(*sizep, flags);
		if (*vap != NULL)
			return (EJUSTRETURN);
		/* This is unfortunate but should not be fatal. */
	}
#endif

#ifdef DEBUG_REDZONE
	*sizep = redzone_size_ntor(*sizep);
#endif

	return (0);
}
#endif

/*
 * malloc:
 *
 *	Allocate a block of memory.
 *
 *	If M_NOWAIT is set, this routine will not block and will return
 *	NULL if the allocation fails.
 */
void *
(malloc)(size_t size, struct malloc_type *mtp, int flags)
{
	int indx;
	caddr_t va;
	uma_zone_t zone;
#if defined(DEBUG_REDZONE)
	unsigned long osize = size;
#endif

#ifdef MALLOC_DEBUG
	va = NULL;
	if (malloc_dbg(&va, &size, mtp, flags) != 0)
		return (va);
#endif

	if (size <= kmem_zmax && (flags & M_EXEC) == 0) {
		if (size & KMEM_ZMASK)
			size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
		indx = kmemsize[size >> KMEM_ZSHIFT];
		zone = kmemzones[indx].kz_zone[mtp_get_subzone(mtp)];
#ifdef MALLOC_PROFILE
		krequests[size >> KMEM_ZSHIFT]++;
#endif
		va = uma_zalloc(zone, flags);
		if (va != NULL)
			size = zone->uz_size;
		malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
	} else {
		size = roundup(size, PAGE_SIZE);
		zone = NULL;
		va = uma_large_malloc(size, flags);
		malloc_type_allocated(mtp, va == NULL ? 0 : size);
	}
	if (flags & M_WAITOK)
		KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL"));
	else if (va == NULL)
		t_malloc_fail = time_uptime;
#ifdef DEBUG_REDZONE
	if (va != NULL)
		va = redzone_setup(va, osize);
#endif
	return ((void *) va);
}

static void *
malloc_domain(size_t size, struct malloc_type *mtp, int domain, int flags)
{
	int indx;
	caddr_t va;
	uma_zone_t zone;
#if defined(DEBUG_REDZONE)
	unsigned long osize = size;
#endif

#ifdef MALLOC_DEBUG
	va = NULL;
	if (malloc_dbg(&va, &size, mtp, flags) != 0)
		return (va);
#endif
	if (size <= kmem_zmax && (flags & M_EXEC) == 0) {
		if (size & KMEM_ZMASK)
			size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
		indx = kmemsize[size >> KMEM_ZSHIFT];
		zone = kmemzones[indx].kz_zone[mtp_get_subzone(mtp)];
#ifdef MALLOC_PROFILE
		krequests[size >> KMEM_ZSHIFT]++;
#endif
		va = uma_zalloc_domain(zone, NULL, domain, flags);
		if (va != NULL)
			size = zone->uz_size;
		malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
	} else {
		size = roundup(size, PAGE_SIZE);
		zone = NULL;
		va = uma_large_malloc_domain(size, domain, flags);
		malloc_type_allocated(mtp, va == NULL ? 0 : size);
	}
	if (flags & M_WAITOK)
		KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL"));
	else if (va == NULL)
		t_malloc_fail = time_uptime;
#ifdef DEBUG_REDZONE
	if (va != NULL)
		va = redzone_setup(va, osize);
#endif
	return ((void *) va);
}

void *
malloc_domainset(size_t size, struct malloc_type *mtp, struct domainset *ds,
    int flags)
{
	struct vm_domainset_iter di;
	void *ret;
	int domain;

	vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
	do {
		ret = malloc_domain(size, mtp, domain, flags);
		if (ret != NULL)
			break;
	} while (vm_domainset_iter_policy(&di, &domain) == 0);

	return (ret);
}

void *
malloc_aligned(size_t size, size_t align, struct malloc_type *type, int flags)
{
	return (malloc_domainset_aligned(size, align, type, DOMAINSET_RR(),
	    flags));
}

void *
malloc_domainset_aligned(size_t size, size_t align,
    struct malloc_type *mtp, struct domainset *ds, int flags)
{
	void *res;
	size_t asize;

	KASSERT(align != 0 && powerof2(align),
	    ("malloc_domainset_aligned: wrong align %#zx size %#zx",
	    align, size));
	KASSERT(align <= PAGE_SIZE,
	    ("malloc_domainset_aligned: align %#zx (size %#zx) too large",
	    align, size));

	/*
	 * Round the allocation size up to the next power of 2,
	 * because we can only guarantee alignment for
	 * power-of-2-sized allocations. Further increase the
	 * allocation size to align if the rounded size is less than
	 * align, since malloc zones provide alignment equal to their
	 * size.
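	 *
	 * For example (arithmetic sketch): size = 3000 with align = 64
	 * rounds to asize = 1UL << flsl(2999) = 4096, while size = 40
	 * with align = 64 yields asize = 64 directly.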
	 */
	asize = size <= align ? align : 1UL << flsl(size - 1);

	res = malloc_domainset(asize, mtp, ds, flags);
	KASSERT(res == NULL || ((uintptr_t)res & (align - 1)) == 0,
	    ("malloc_domainset_aligned: result not aligned %p size %#zx "
	    "allocsize %#zx align %#zx", res, size, asize, align));
	return (res);
}

void *
mallocarray(size_t nmemb, size_t size, struct malloc_type *type, int flags)
{

	if (WOULD_OVERFLOW(nmemb, size))
		panic("mallocarray: %zu * %zu overflowed", nmemb, size);

	return (malloc(size * nmemb, type, flags));
}
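
/*
 * Illustrative mallocarray(9) usage (a sketch; n and struct foo are
 * hypothetical): size an array of n elements so that an n * size
 * multiplication overflow panics instead of silently wrapping:
 *
 *	struct foo *v;
 *
 *	v = mallocarray(n, sizeof(*v), M_TEMP, M_WAITOK | M_ZERO);
 */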

void *
mallocarray_domainset(size_t nmemb, size_t size, struct malloc_type *type,
    struct domainset *ds, int flags)
{

	if (WOULD_OVERFLOW(nmemb, size))
		panic("mallocarray_domainset: %zu * %zu overflowed", nmemb, size);

	return (malloc_domainset(size * nmemb, type, ds, flags));
}

#ifdef INVARIANTS
static void
free_save_type(void *addr, struct malloc_type *mtp, u_long size)
{
	struct malloc_type **mtpp = addr;

	/*
	 * Cache a pointer to the malloc_type that most recently freed
	 * this memory here. This way we know who is most likely to
	 * have stepped on it later.
	 *
	 * This code assumes that size is a multiple of 8 bytes on
	 * 64-bit machines.
	 */
	mtpp = (struct malloc_type **) ((unsigned long)mtpp & ~UMA_ALIGN_PTR);
	mtpp += (size - sizeof(struct malloc_type *)) /
	    sizeof(struct malloc_type *);
	*mtpp = mtp;
}
#endif

#ifdef MALLOC_DEBUG
static int
free_dbg(void **addrp, struct malloc_type *mtp)
{
	void *addr;

	addr = *addrp;
	KASSERT(mtp->ks_magic == M_MAGIC, ("free: bad malloc type magic"));
	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("free: called with spinlock or critical section held"));

	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return (EJUSTRETURN);

#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(addr)) {
		memguard_free(addr);
		return (EJUSTRETURN);
	}
#endif

#ifdef DEBUG_REDZONE
	redzone_check(addr);
	*addrp = redzone_addr_ntor(addr);
#endif

	return (0);
}
#endif

/*
 * free:
 *
 *	Free a block of memory allocated by malloc.
 *
 *	This routine may not block.
 */
void
free(void *addr, struct malloc_type *mtp)
{
	uma_slab_t slab;
	u_long size;

#ifdef MALLOC_DEBUG
	if (free_dbg(&addr, mtp) != 0)
		return;
#endif
	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return;

	slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK));
	if (slab == NULL)
		panic("free: address %p(%p) has not been allocated.\n",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

	if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
		size = slab->us_keg->uk_size;
#ifdef INVARIANTS
		free_save_type(addr, mtp, size);
#endif
		uma_zfree_arg(LIST_FIRST(&slab->us_keg->uk_zones), addr, slab);
	} else {
		size = slab->us_size;
		uma_large_free(slab);
	}
	malloc_type_freed(mtp, size);
}

void
free_domain(void *addr, struct malloc_type *mtp)
{
	uma_slab_t slab;
	u_long size;

#ifdef MALLOC_DEBUG
	if (free_dbg(&addr, mtp) != 0)
		return;
#endif

	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return;

	slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK));
	if (slab == NULL)
		panic("free_domain: address %p(%p) has not been allocated.\n",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

	if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
		size = slab->us_keg->uk_size;
#ifdef INVARIANTS
		free_save_type(addr, mtp, size);
#endif
		uma_zfree_domain(LIST_FIRST(&slab->us_keg->uk_zones),
		    addr, slab);
	} else {
		size = slab->us_size;
		uma_large_free(slab);
	}
	malloc_type_freed(mtp, size);
}

/*
 * realloc: change the size of a memory block
 */
void *
realloc(void *addr, size_t size, struct malloc_type *mtp, int flags)
{
	uma_slab_t slab;
	unsigned long alloc;
	void *newaddr;

	KASSERT(mtp->ks_magic == M_MAGIC,
	    ("realloc: bad malloc type magic"));
	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("realloc: called with spinlock or critical section held"));

	/* realloc(NULL, ...) is equivalent to malloc(...) */
	if (addr == NULL)
		return (malloc(size, mtp, flags));

	/*
	 * XXX: Should report free of old memory and alloc of new memory to
	 * per-CPU stats.
	 */

#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(addr))
		return (memguard_realloc(addr, size, mtp, flags));
#endif

#ifdef DEBUG_REDZONE
	slab = NULL;
	alloc = redzone_get_size(addr);
#else
	slab = vtoslab((vm_offset_t)addr & ~(UMA_SLAB_MASK));

	/* Sanity check */
	KASSERT(slab != NULL,
	    ("realloc: address %p out of range", (void *)addr));

	/* Get the size of the original block */
	if (!(slab->us_flags & UMA_SLAB_MALLOC))
		alloc = slab->us_keg->uk_size;
	else
		alloc = slab->us_size;

	/* Reuse the original block if appropriate */
	if (size <= alloc
	    && (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE))
		return (addr);
#endif /* !DEBUG_REDZONE */

	/* Allocate a new, bigger (or smaller) block */
	if ((newaddr = malloc(size, mtp, flags)) == NULL)
		return (NULL);

	/* Copy over original contents */
	bcopy(addr, newaddr, min(size, alloc));
	free(addr, mtp);
	return (newaddr);
}

/*
 * reallocf: same as realloc() but frees memory on failure.
 */
void *
reallocf(void *addr, size_t size, struct malloc_type *mtp, int flags)
{
	void *mem;

	if ((mem = realloc(addr, size, mtp, flags)) == NULL)
		free(addr, mtp);
	return (mem);
}
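
/*
 * Illustrative reallocf(9) usage (a sketch; buf and newsize are
 * hypothetical): grow a buffer without leaking the old block when the
 * allocation fails:
 *
 *	buf = reallocf(buf, newsize, M_TEMP, M_NOWAIT);
 *	if (buf == NULL)
 *		return (ENOMEM);
 */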

/*
 * malloc_usable_size: returns the usable size of the allocation.
 */
size_t
malloc_usable_size(const void *addr)
{
#ifndef DEBUG_REDZONE
	uma_slab_t slab;
#endif
	u_long size;

	if (addr == NULL)
		return (0);

#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(__DECONST(void *, addr)))
		return (memguard_get_req_size(addr));
#endif

#ifdef DEBUG_REDZONE
	size = redzone_get_size(__DECONST(void *, addr));
#else
	slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK));
	if (slab == NULL)
		panic("malloc_usable_size: address %p(%p) is not allocated.\n",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

	if (!(slab->us_flags & UMA_SLAB_MALLOC))
		size = slab->us_keg->uk_size;
	else
		size = slab->us_size;
#endif
	return (size);
}

#ifndef __sparc64__
CTASSERT(VM_KMEM_SIZE_SCALE >= 1);
#endif

/*
 * Initialize the kernel memory (kmem) arena.
 */
void
kmeminit(void)
{
	u_long mem_size;
	u_long tmp;

#ifdef VM_KMEM_SIZE
	if (vm_kmem_size == 0)
		vm_kmem_size = VM_KMEM_SIZE;
#endif
#ifdef VM_KMEM_SIZE_MIN
	if (vm_kmem_size_min == 0)
		vm_kmem_size_min = VM_KMEM_SIZE_MIN;
#endif
#ifdef VM_KMEM_SIZE_MAX
	if (vm_kmem_size_max == 0)
		vm_kmem_size_max = VM_KMEM_SIZE_MAX;
#endif
	/*
	 * Calculate the amount of kernel virtual address (KVA) space that is
	 * preallocated to the kmem arena. In order to support a wide range
	 * of machines, it is a function of the physical memory size,
	 * specifically,
	 *
	 *	min(max(physical memory size / VM_KMEM_SIZE_SCALE,
	 *	    VM_KMEM_SIZE_MIN), VM_KMEM_SIZE_MAX)
	 *
	 * Every architecture must define an integral value for
	 * VM_KMEM_SIZE_SCALE. However, the definitions of VM_KMEM_SIZE_MIN
	 * and VM_KMEM_SIZE_MAX, which represent respectively the floor and
	 * ceiling on this preallocation, are optional. Typically,
	 * VM_KMEM_SIZE_MAX is itself a function of the available KVA space on
	 * a given architecture.
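	 *
	 * As an illustrative data point (values vary by platform): with
	 * 4GB of physical memory, 4KB pages, and a hypothetical scale
	 * factor of 3, the starting point is 1048576 pages / 3, roughly
	 * 1.33GB of KVA, which is then clamped to the optional bounds.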
	 */
	mem_size = vm_cnt.v_page_count;
	if (mem_size <= 32768) /* delphij XXX 128MB */
		kmem_zmax = PAGE_SIZE;

	if (vm_kmem_size_scale < 1)
		vm_kmem_size_scale = VM_KMEM_SIZE_SCALE;

	/*
	 * Check if we should use defaults for the "vm_kmem_size"
	 * variable:
	 */
	if (vm_kmem_size == 0) {
		vm_kmem_size = mem_size / vm_kmem_size_scale;
		vm_kmem_size = vm_kmem_size * PAGE_SIZE < vm_kmem_size ?
		    vm_kmem_size_max : vm_kmem_size * PAGE_SIZE;
		if (vm_kmem_size_min > 0 && vm_kmem_size < vm_kmem_size_min)
			vm_kmem_size = vm_kmem_size_min;
		if (vm_kmem_size_max > 0 && vm_kmem_size >= vm_kmem_size_max)
			vm_kmem_size = vm_kmem_size_max;
	}
	if (vm_kmem_size == 0)
		panic("Tune VM_KMEM_SIZE_* for the platform");

	/*
	 * The amount of KVA space that is preallocated to the
	 * kmem arena can be set statically at compile-time or manually
	 * through the kernel environment. However, it is still limited to
	 * twice the physical memory size, which has been sufficient to handle
	 * the most severe cases of external fragmentation in the kmem arena.
	 */
	if (vm_kmem_size / 2 / PAGE_SIZE > mem_size)
		vm_kmem_size = 2 * mem_size * PAGE_SIZE;

	vm_kmem_size = round_page(vm_kmem_size);
#ifdef DEBUG_MEMGUARD
	tmp = memguard_fudge(vm_kmem_size, kernel_map);
#else
	tmp = vm_kmem_size;
#endif
	uma_set_limit(tmp);

#ifdef DEBUG_MEMGUARD
	/*
	 * Initialize MemGuard if support compiled in. MemGuard is a
	 * replacement allocator used for detecting tamper-after-free
	 * scenarios as they occur. It is only used for debugging.
	 */
	memguard_init(kernel_arena);
#endif
}

/*
 * Initialize the kernel memory allocator
 */
/* ARGSUSED*/
static void
mallocinit(void *dummy)
{
	int i;
	uint8_t indx;

	mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);

	kmeminit();

	if (kmem_zmax < PAGE_SIZE || kmem_zmax > KMEM_ZMAX)
		kmem_zmax = KMEM_ZMAX;

	mt_stats_zone = uma_zcreate("mt_stats_zone",
	    sizeof(struct malloc_type_stats), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_PCPU);
	mt_zone = uma_zcreate("mt_zone", sizeof(struct malloc_type_internal),
#ifdef INVARIANTS
	    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
	    NULL, NULL, NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);
	for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
		int size = kmemzones[indx].kz_size;
		char *name = kmemzones[indx].kz_name;
		size_t align;
		int subzone;

		align = UMA_ALIGN_PTR;
		if (powerof2(size) && size > sizeof(void *))
			align = MIN(size, PAGE_SIZE) - 1;
		for (subzone = 0; subzone < numzones; subzone++) {
			kmemzones[indx].kz_zone[subzone] =
			    uma_zcreate(name, size,
#ifdef INVARIANTS
			    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
			    NULL, NULL, NULL, NULL,
#endif
			    align, UMA_ZONE_MALLOC);
		}
		for (; i <= size; i += KMEM_ZBASE)
			kmemsize[i >> KMEM_ZSHIFT] = indx;

	}
}
SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_SECOND, mallocinit, NULL);

void
malloc_init(void *data)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;

	KASSERT(vm_cnt.v_page_count != 0, ("malloc_register before vm_init"));

	mtp = data;
	if (mtp->ks_magic != M_MAGIC)
		panic("malloc_init: bad malloc type magic");

	mtip = uma_zalloc(mt_zone, M_WAITOK | M_ZERO);
	mtip->mti_stats = uma_zalloc_pcpu(mt_stats_zone, M_WAITOK | M_ZERO);
	mtp->ks_handle = mtip;
	mtp_set_subzone(mtp);

	mtx_lock(&malloc_mtx);
	mtp->ks_next = kmemstatistics;
	kmemstatistics = mtp;
	kmemcount++;
	mtx_unlock(&malloc_mtx);
}

void
malloc_uninit(void *data)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;
	struct malloc_type *mtp, *temp;
	uma_slab_t slab;
	long temp_allocs, temp_bytes;
	int i;

	mtp = data;
	KASSERT(mtp->ks_magic == M_MAGIC,
	    ("malloc_uninit: bad malloc type magic"));
	KASSERT(mtp->ks_handle != NULL, ("malloc_deregister: cookie NULL"));

	mtx_lock(&malloc_mtx);
	mtip = mtp->ks_handle;
	mtp->ks_handle = NULL;
	if (mtp != kmemstatistics) {
		for (temp = kmemstatistics; temp != NULL;
		    temp = temp->ks_next) {
			if (temp->ks_next == mtp) {
				temp->ks_next = mtp->ks_next;
				break;
			}
		}
		KASSERT(temp,
		    ("malloc_uninit: type '%s' not found", mtp->ks_shortdesc));
	} else
		kmemstatistics = mtp->ks_next;
	kmemcount--;
	mtx_unlock(&malloc_mtx);

	/*
	 * Look for memory leaks.
	 */
	temp_allocs = temp_bytes = 0;
	for (i = 0; i <= mp_maxid; i++) {
		mtsp = zpcpu_get_cpu(mtip->mti_stats, i);
		temp_allocs += mtsp->mts_numallocs;
		temp_allocs -= mtsp->mts_numfrees;
		temp_bytes += mtsp->mts_memalloced;
		temp_bytes -= mtsp->mts_memfreed;
	}
	if (temp_allocs > 0 || temp_bytes > 0) {
		printf("Warning: memory type %s leaked memory on destroy "
		    "(%ld allocations, %ld bytes leaked).\n", mtp->ks_shortdesc,
		    temp_allocs, temp_bytes);
	}

	slab = vtoslab((vm_offset_t) mtip & (~UMA_SLAB_MASK));
	uma_zfree_pcpu(mt_stats_zone, mtip->mti_stats);
	uma_zfree_arg(mt_zone, mtip, slab);
}

struct malloc_type *
malloc_desc2type(const char *desc)
{
	struct malloc_type *mtp;

	mtx_assert(&malloc_mtx, MA_OWNED);
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		if (strcmp(mtp->ks_shortdesc, desc) == 0)
			return (mtp);
	}
	return (NULL);
}

static int
sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS)
{
	struct malloc_type_stream_header mtsh;
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp, zeromts;
	struct malloc_type_header mth;
	struct malloc_type *mtp;
	int error, i;
	struct sbuf sbuf;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
	mtx_lock(&malloc_mtx);

	bzero(&zeromts, sizeof(zeromts));

	/*
	 * Insert stream header.
	 */
	bzero(&mtsh, sizeof(mtsh));
	mtsh.mtsh_version = MALLOC_TYPE_STREAM_VERSION;
	mtsh.mtsh_maxcpus = MAXCPU;
	mtsh.mtsh_count = kmemcount;
	(void)sbuf_bcat(&sbuf, &mtsh, sizeof(mtsh));

	/*
	 * Insert alternating sequence of type headers and type statistics.
	 */
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = (struct malloc_type_internal *)mtp->ks_handle;

		/*
		 * Insert type header.
		 */
		bzero(&mth, sizeof(mth));
		strlcpy(mth.mth_name, mtp->ks_shortdesc, MALLOC_MAX_NAME);
		(void)sbuf_bcat(&sbuf, &mth, sizeof(mth));

		/*
		 * Insert type statistics for each CPU.
		 */
		for (i = 0; i <= mp_maxid; i++) {
			mtsp = zpcpu_get_cpu(mtip->mti_stats, i);
			(void)sbuf_bcat(&sbuf, mtsp, sizeof(*mtsp));
		}
		/*
		 * Fill in the missing CPUs.
		 */
		for (; i < MAXCPU; i++) {
			(void)sbuf_bcat(&sbuf, &zeromts, sizeof(zeromts));
		}

	}
	mtx_unlock(&malloc_mtx);
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, malloc_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
    0, 0, sysctl_kern_malloc_stats, "s,malloc_type_ustats",
    "Return malloc types");

SYSCTL_INT(_kern, OID_AUTO, malloc_count, CTLFLAG_RD, &kmemcount, 0,
    "Count of kernel malloc types");

void
malloc_type_list(malloc_type_list_func_t *func, void *arg)
{
	struct malloc_type *mtp, **bufmtp;
	int count, i;
	size_t buflen;

	mtx_lock(&malloc_mtx);
restart:
	mtx_assert(&malloc_mtx, MA_OWNED);
	count = kmemcount;
	mtx_unlock(&malloc_mtx);

	buflen = sizeof(struct malloc_type *) * count;
	bufmtp = malloc(buflen, M_TEMP, M_WAITOK);

	mtx_lock(&malloc_mtx);

	if (count < kmemcount) {
		free(bufmtp, M_TEMP);
		goto restart;
	}

	for (mtp = kmemstatistics, i = 0; mtp != NULL; mtp = mtp->ks_next, i++)
		bufmtp[i] = mtp;

	mtx_unlock(&malloc_mtx);

	for (i = 0; i < count; i++)
		(func)(bufmtp[i], arg);

	free(bufmtp, M_TEMP);
}

#ifdef DDB
DB_SHOW_COMMAND(malloc, db_show_malloc)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;
	struct malloc_type *mtp;
	uint64_t allocs, frees;
	uint64_t alloced, freed;
	int i;

	db_printf("%18s %12s %12s %12s\n", "Type", "InUse", "MemUse",
	    "Requests");
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = (struct malloc_type_internal *)mtp->ks_handle;
		allocs = 0;
		frees = 0;
		alloced = 0;
		freed = 0;
		for (i = 0; i <= mp_maxid; i++) {
			mtsp = zpcpu_get_cpu(mtip->mti_stats, i);
			allocs += mtsp->mts_numallocs;
			frees += mtsp->mts_numfrees;
			alloced += mtsp->mts_memalloced;
			freed += mtsp->mts_memfreed;
		}
		db_printf("%18s %12ju %12juK %12ju\n",
		    mtp->ks_shortdesc, allocs - frees,
		    (alloced - freed + 1023) / 1024, allocs);
		if (db_pager_quit)
			break;
	}
}

#if MALLOC_DEBUG_MAXZONES > 1
DB_SHOW_COMMAND(multizone_matches, db_show_multizone_matches)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;
	u_int subzone;

	if (!have_addr) {
		db_printf("Usage: show multizone_matches <malloc type/addr>\n");
		return;
	}
	mtp = (void *)addr;
	if (mtp->ks_magic != M_MAGIC) {
		db_printf("Magic %lx does not match expected %x\n",
		    mtp->ks_magic, M_MAGIC);
		return;
	}

	mtip = mtp->ks_handle;
	subzone = mtip->mti_zone;

	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = mtp->ks_handle;
		if (mtip->mti_zone != subzone)
			continue;
		db_printf("%s\n", mtp->ks_shortdesc);
		if (db_pager_quit)
			break;
	}
}
#endif /* MALLOC_DEBUG_MAXZONES > 1 */
#endif /* DDB */

#ifdef MALLOC_PROFILE

static int
sysctl_kern_mprof(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	uint64_t count;
	uint64_t waste;
	uint64_t mem;
	int error;
	int rsize;
	int size;
	int i;

	waste = 0;
	mem = 0;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	sbuf_printf(&sbuf,
	    "\n  Size                    Requests  Real Size\n");
	for (i = 0; i < KMEM_ZSIZE; i++) {
		size = i << KMEM_ZSHIFT;
		rsize = kmemzones[kmemsize[i]].kz_size;
		count = (long long unsigned)krequests[i];

		sbuf_printf(&sbuf, "%6d%28llu%11d\n", size,
		    (unsigned long long)count, rsize);

		if ((rsize * count) > (size * count))
			waste += (rsize * count) - (size * count);
		mem += (rsize * count);
	}
	sbuf_printf(&sbuf,
	    "\nTotal memory used:\t%30llu\nTotal Memory wasted:\t%30llu\n",
	    (unsigned long long)mem, (unsigned long long)waste);
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

SYSCTL_OID(_kern, OID_AUTO, mprof, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, sysctl_kern_mprof, "A", "Malloc Profiling");
#endif /* MALLOC_PROFILE */