1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1987, 1991, 1993
5 * The Regents of the University of California.
6 * Copyright (c) 2005-2009 Robert N. M. Watson
7 * Copyright (c) 2008 Otto Moerbeek <otto@drijf.net> (mallocarray)
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * @(#)kern_malloc.c 8.3 (Berkeley) 1/4/94
35 */
36
37 /*
38 * Kernel malloc(9) implementation -- general purpose kernel memory allocator
39 * based on memory types. Back end is implemented using the UMA(9) zone
40 * allocator. A set of fixed-size buckets are used for smaller allocations,
41 * and a special UMA allocation interface is used for larger allocations.
42 * Callers declare memory types, and statistics are maintained independently
43 * for each memory type. Statistics are maintained per-CPU for performance
44 * reasons. See malloc(9) and comments in malloc.h for a detailed
45 * description.
46 */
47
48 #include <sys/cdefs.h>
49 __FBSDID("$FreeBSD$");
50
51 #include "opt_ddb.h"
52 #include "opt_vm.h"
53
54 #include <sys/param.h>
55 #include <sys/systm.h>
56 #include <sys/asan.h>
57 #include <sys/kdb.h>
58 #include <sys/kernel.h>
59 #include <sys/lock.h>
60 #include <sys/malloc.h>
61 #include <sys/mutex.h>
62 #include <sys/vmmeter.h>
63 #include <sys/proc.h>
64 #include <sys/queue.h>
65 #include <sys/sbuf.h>
66 #include <sys/smp.h>
67 #include <sys/sysctl.h>
68 #include <sys/time.h>
69 #include <sys/vmem.h>
70 #ifdef EPOCH_TRACE
71 #include <sys/epoch.h>
72 #endif
73
74 #include <vm/vm.h>
75 #include <vm/pmap.h>
76 #include <vm/vm_domainset.h>
77 #include <vm/vm_pageout.h>
78 #include <vm/vm_param.h>
79 #include <vm/vm_kern.h>
80 #include <vm/vm_extern.h>
81 #include <vm/vm_map.h>
82 #include <vm/vm_page.h>
83 #include <vm/vm_phys.h>
84 #include <vm/vm_pagequeue.h>
85 #include <vm/uma.h>
86 #include <vm/uma_int.h>
87 #include <vm/uma_dbg.h>
88
89 #ifdef DEBUG_MEMGUARD
90 #include <vm/memguard.h>
91 #endif
92 #ifdef DEBUG_REDZONE
93 #include <vm/redzone.h>
94 #endif
95
96 #if defined(INVARIANTS) && defined(__i386__)
97 #include <machine/cpu.h>
98 #endif
99
100 #include <ddb/ddb.h>
101
102 #ifdef KDTRACE_HOOKS
103 #include <sys/dtrace_bsd.h>
104
105 bool __read_frequently dtrace_malloc_enabled;
106 dtrace_malloc_probe_func_t __read_mostly dtrace_malloc_probe;
107 #endif
108
109 #if defined(INVARIANTS) || defined(MALLOC_MAKE_FAILURES) || \
110 defined(DEBUG_MEMGUARD) || defined(DEBUG_REDZONE)
111 #define MALLOC_DEBUG 1
112 #endif
113
114 #if defined(KASAN) || defined(DEBUG_REDZONE)
115 #define DEBUG_REDZONE_ARG_DEF , unsigned long osize
116 #define DEBUG_REDZONE_ARG , osize
117 #else
118 #define DEBUG_REDZONE_ARG_DEF
119 #define DEBUG_REDZONE_ARG
120 #endif
121
122 /*
123 * When realloc() is called, if the new size is sufficiently smaller than
124 * the old size, realloc() will allocate a new, smaller block to avoid
125 * wasting memory. 'Sufficiently smaller' is defined as: newsize <=
126 * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'.
127 */
128 #ifndef REALLOC_FRACTION
129 #define REALLOC_FRACTION 1 /* new block if <= half the size */
130 #endif
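
/*
* Illustrative example (not a rule enforced elsewhere): with the default
* REALLOC_FRACTION of 1, a block backed by the 1024-byte bucket is only
* replaced by a new, smaller block when realloc() is asked for 512 bytes
* or less; shrinking it to 600 bytes keeps the original block.
*/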
131
132 /*
133 * Centrally define some common malloc types.
134 */
135 MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
136 MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
137 MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");
138
139 static struct malloc_type *kmemstatistics;
140 static int kmemcount;
141
142 #define KMEM_ZSHIFT 4
143 #define KMEM_ZBASE 16
144 #define KMEM_ZMASK (KMEM_ZBASE - 1)
145
146 #define KMEM_ZMAX 65536
147 #define KMEM_ZSIZE (KMEM_ZMAX >> KMEM_ZSHIFT)
148 static uint8_t kmemsize[KMEM_ZSIZE + 1];
149
150 #ifndef MALLOC_DEBUG_MAXZONES
151 #define MALLOC_DEBUG_MAXZONES 1
152 #endif
153 static int numzones = MALLOC_DEBUG_MAXZONES;
154
155 /*
156 * Small malloc(9) memory allocations are allocated from a set of UMA buckets
157 * of various sizes.
158 *
159 * Warning: the layout of the struct is duplicated in libmemstat for KVM support.
160 *
161 * XXX: The comment here used to read "These won't be powers of two for
162 * long." It's possible that a significant amount of wasted memory could be
163 * recovered by tuning the sizes of these buckets.
164 */
165 struct {
166 int kz_size;
167 const char *kz_name;
168 uma_zone_t kz_zone[MALLOC_DEBUG_MAXZONES];
169 } kmemzones[] = {
170 {16, "malloc-16", },
171 {32, "malloc-32", },
172 {64, "malloc-64", },
173 {128, "malloc-128", },
174 {256, "malloc-256", },
175 {384, "malloc-384", },
176 {512, "malloc-512", },
177 {1024, "malloc-1024", },
178 {2048, "malloc-2048", },
179 {4096, "malloc-4096", },
180 {8192, "malloc-8192", },
181 {16384, "malloc-16384", },
182 {32768, "malloc-32768", },
183 {65536, "malloc-65536", },
184 {0, NULL},
185 };
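
/*
* Worked example of bucket selection (sizes are illustrative): a 100-byte
* request is first rounded up to the next multiple of KMEM_ZBASE,
* (100 & ~KMEM_ZMASK) + KMEM_ZBASE == 112, and 112 >> KMEM_ZSHIFT == 7
* then indexes kmemsize[], which mallocinit() fills so that every request
* between 65 and 128 bytes lands in the "malloc-128" zone above.
*/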
186
187 u_long vm_kmem_size;
188 SYSCTL_ULONG(_vm, OID_AUTO, kmem_size, CTLFLAG_RDTUN, &vm_kmem_size, 0,
189 "Size of kernel memory");
190
191 static u_long kmem_zmax = KMEM_ZMAX;
192 SYSCTL_ULONG(_vm, OID_AUTO, kmem_zmax, CTLFLAG_RDTUN, &kmem_zmax, 0,
193 "Maximum allocation size that malloc(9) would use UMA as backend");
194
195 static u_long vm_kmem_size_min;
196 SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_min, CTLFLAG_RDTUN, &vm_kmem_size_min, 0,
197 "Minimum size of kernel memory");
198
199 static u_long vm_kmem_size_max;
200 SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_max, CTLFLAG_RDTUN, &vm_kmem_size_max, 0,
201 "Maximum size of kernel memory");
202
203 static u_int vm_kmem_size_scale;
204 SYSCTL_UINT(_vm, OID_AUTO, kmem_size_scale, CTLFLAG_RDTUN, &vm_kmem_size_scale, 0,
205 "Scale factor for kernel memory size");
206
207 static int sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS);
208 SYSCTL_PROC(_vm, OID_AUTO, kmem_map_size,
209 CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
210 sysctl_kmem_map_size, "LU", "Current kmem allocation size");
211
212 static int sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS);
213 SYSCTL_PROC(_vm, OID_AUTO, kmem_map_free,
214 CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
215 sysctl_kmem_map_free, "LU", "Free space in kmem");
216
217 static SYSCTL_NODE(_vm, OID_AUTO, malloc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
218 "Malloc information");
219
220 static u_int vm_malloc_zone_count = nitems(kmemzones);
221 SYSCTL_UINT(_vm_malloc, OID_AUTO, zone_count,
222 CTLFLAG_RD, &vm_malloc_zone_count, 0,
223 "Number of malloc zones");
224
225 static int sysctl_vm_malloc_zone_sizes(SYSCTL_HANDLER_ARGS);
226 SYSCTL_PROC(_vm_malloc, OID_AUTO, zone_sizes,
227 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, NULL, 0,
228 sysctl_vm_malloc_zone_sizes, "S", "Zone sizes used by malloc");
229
230 /*
231 * The malloc_mtx protects the kmemstatistics linked list.
232 */
233 struct mtx malloc_mtx;
234
235 static int sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS);
236
237 #if defined(MALLOC_MAKE_FAILURES) || (MALLOC_DEBUG_MAXZONES > 1)
238 static SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
239 "Kernel malloc debugging options");
240 #endif
241
242 /*
243 * malloc(9) fault injection -- cause malloc failures every (n) mallocs when
244 * the caller specifies M_NOWAIT. If set to 0, no failures are caused.
245 */
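/*
* For example, setting the debug.malloc.failure_rate tunable or sysctl to
* 100 makes every 100th M_NOWAIT allocation fail, which helps exercise
* error-handling paths; debug.malloc.failure_count reports how many
* failures have been injected so far.
*/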
246 #ifdef MALLOC_MAKE_FAILURES
247 static int malloc_failure_rate;
248 static int malloc_nowait_count;
249 static int malloc_failure_count;
250 SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RWTUN,
251 &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail");
252 SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD,
253 &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures");
254 #endif
255
256 static int
257 sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS)
258 {
259 u_long size;
260
261 size = uma_size();
262 return (sysctl_handle_long(oidp, &size, 0, req));
263 }
264
265 static int
266 sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS)
267 {
268 u_long size, limit;
269
270 /* The sysctl is unsigned, implement as a saturation value. */
271 size = uma_size();
272 limit = uma_limit();
273 if (size > limit)
274 size = 0;
275 else
276 size = limit - size;
277 return (sysctl_handle_long(oidp, &size, 0, req));
278 }
279
280 static int
281 sysctl_vm_malloc_zone_sizes(SYSCTL_HANDLER_ARGS)
282 {
283 int sizes[nitems(kmemzones)];
284 int i;
285
286 for (i = 0; i < nitems(kmemzones); i++) {
287 sizes[i] = kmemzones[i].kz_size;
288 }
289
290 return (SYSCTL_OUT(req, &sizes, sizeof(sizes)));
291 }
292
293 /*
294 * malloc(9) uma zone separation -- sub-page buffer overruns in one
295 * malloc type will affect only a subset of other malloc types.
296 */
297 #if MALLOC_DEBUG_MAXZONES > 1
298 static void
299 tunable_set_numzones(void)
300 {
301
302 TUNABLE_INT_FETCH("debug.malloc.numzones",
303 &numzones);
304
305 /* Sanity check the number of malloc uma zones. */
306 if (numzones <= 0)
307 numzones = 1;
308 if (numzones > MALLOC_DEBUG_MAXZONES)
309 numzones = MALLOC_DEBUG_MAXZONES;
310 }
311 SYSINIT(numzones, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_set_numzones, NULL);
312 SYSCTL_INT(_debug_malloc, OID_AUTO, numzones, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
313 &numzones, 0, "Number of malloc uma subzones");
314
315 /*
316 * Any number that changes regularly is an okay choice for the
317 * offset. Build numbers are pretty good if you have them.
318 */
319 static u_int zone_offset = __FreeBSD_version;
320 TUNABLE_INT("debug.malloc.zone_offset", &zone_offset);
321 SYSCTL_UINT(_debug_malloc, OID_AUTO, zone_offset, CTLFLAG_RDTUN,
322 &zone_offset, 0, "Separate malloc types by examining the "
323 "Nth character in the malloc type short description.");
324
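/*
* Illustrative example: with eight subzones compiled in, a type whose
* short description is "devbuf" (length 6) is placed in subzone
* desc[zone_offset % 6] % numzones, so distinct malloc types tend to
* spread across subzones and an overrun in one type can only corrupt the
* subset of types sharing its subzone.
*/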
325 static void
326 mtp_set_subzone(struct malloc_type *mtp)
327 {
328 struct malloc_type_internal *mtip;
329 const char *desc;
330 size_t len;
331 u_int val;
332
333 mtip = &mtp->ks_mti;
334 desc = mtp->ks_shortdesc;
335 if (desc == NULL || (len = strlen(desc)) == 0)
336 val = 0;
337 else
338 val = desc[zone_offset % len];
339 mtip->mti_zone = (val % numzones);
340 }
341
342 static inline u_int
343 mtp_get_subzone(struct malloc_type *mtp)
344 {
345 struct malloc_type_internal *mtip;
346
347 mtip = &mtp->ks_mti;
348
349 KASSERT(mtip->mti_zone < numzones,
350 ("mti_zone %u out of range %d",
351 mtip->mti_zone, numzones));
352 return (mtip->mti_zone);
353 }
354 #elif MALLOC_DEBUG_MAXZONES == 0
355 #error "MALLOC_DEBUG_MAXZONES must be positive."
356 #else
357 static void
358 mtp_set_subzone(struct malloc_type *mtp)
359 {
360 struct malloc_type_internal *mtip;
361
362 mtip = &mtp->ks_mti;
363 mtip->mti_zone = 0;
364 }
365
366 static inline u_int
367 mtp_get_subzone(struct malloc_type *mtp)
368 {
369
370 return (0);
371 }
372 #endif /* MALLOC_DEBUG_MAXZONES > 1 */
373
374 /*
375 * An allocation has succeeded -- update malloc type statistics for the
376 * size of the bucket used. Occurs within a critical section so that the
377 * thread isn't preempted and doesn't migrate while updating per-CPU
378 * statistics.
379 */
380 static void
381 malloc_type_zone_allocated(struct malloc_type *mtp, unsigned long size,
382 int zindx)
383 {
384 struct malloc_type_internal *mtip;
385 struct malloc_type_stats *mtsp;
386
387 critical_enter();
388 mtip = &mtp->ks_mti;
389 mtsp = zpcpu_get(mtip->mti_stats);
390 if (size > 0) {
391 mtsp->mts_memalloced += size;
392 mtsp->mts_numallocs++;
393 }
394 if (zindx != -1)
395 mtsp->mts_size |= 1 << zindx;
396
397 #ifdef KDTRACE_HOOKS
398 if (__predict_false(dtrace_malloc_enabled)) {
399 uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_MALLOC];
400 if (probe_id != 0)
401 (dtrace_malloc_probe)(probe_id,
402 (uintptr_t) mtp, (uintptr_t) mtip,
403 (uintptr_t) mtsp, size, zindx);
404 }
405 #endif
406
407 critical_exit();
408 }
409
410 void
411 malloc_type_allocated(struct malloc_type *mtp, unsigned long size)
412 {
413
414 if (size > 0)
415 malloc_type_zone_allocated(mtp, size, -1);
416 }
417
418 /*
419 * A free operation has occurred -- update malloc type statistics for the
420 * size of the bucket freed. Occurs within a critical section so that the
421 * thread isn't preempted and doesn't migrate while updating per-CPU
422 * statistics.
423 */
424 void
425 malloc_type_freed(struct malloc_type *mtp, unsigned long size)
426 {
427 struct malloc_type_internal *mtip;
428 struct malloc_type_stats *mtsp;
429
430 critical_enter();
431 mtip = &mtp->ks_mti;
432 mtsp = zpcpu_get(mtip->mti_stats);
433 mtsp->mts_memfreed += size;
434 mtsp->mts_numfrees++;
435
436 #ifdef KDTRACE_HOOKS
437 if (__predict_false(dtrace_malloc_enabled)) {
438 uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_FREE];
439 if (probe_id != 0)
440 (dtrace_malloc_probe)(probe_id,
441 (uintptr_t) mtp, (uintptr_t) mtip,
442 (uintptr_t) mtsp, size, 0);
443 }
444 #endif
445
446 critical_exit();
447 }
448
449 /*
450 * contigmalloc:
451 *
452 * Allocate a block of physically contiguous memory.
453 *
454 * If M_NOWAIT is set, this routine will not block and return NULL if
455 * the allocation fails.
456 */
457 void *
458 contigmalloc(unsigned long size, struct malloc_type *type, int flags,
459 vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
460 vm_paddr_t boundary)
461 {
462 void *ret;
463
464 ret = (void *)kmem_alloc_contig(size, flags, low, high, alignment,
465 boundary, VM_MEMATTR_DEFAULT);
466 if (ret != NULL)
467 malloc_type_allocated(type, round_page(size));
468 return (ret);
469 }
470
471 void *
472 contigmalloc_domainset(unsigned long size, struct malloc_type *type,
473 struct domainset *ds, int flags, vm_paddr_t low, vm_paddr_t high,
474 unsigned long alignment, vm_paddr_t boundary)
475 {
476 void *ret;
477
478 ret = (void *)kmem_alloc_contig_domainset(ds, size, flags, low, high,
479 alignment, boundary, VM_MEMATTR_DEFAULT);
480 if (ret != NULL)
481 malloc_type_allocated(type, round_page(size));
482 return (ret);
483 }
484
485 /*
486 * contigfree:
487 *
488 * Free a block of memory allocated by contigmalloc.
489 *
490 * This routine may not block.
491 */
492 void
493 contigfree(void *addr, unsigned long size, struct malloc_type *type)
494 {
495
496 kmem_free((vm_offset_t)addr, size);
497 malloc_type_freed(type, round_page(size));
498 }
499
500 #ifdef MALLOC_DEBUG
501 static int
502 malloc_dbg(caddr_t *vap, size_t *sizep, struct malloc_type *mtp,
503 int flags)
504 {
505 #ifdef INVARIANTS
506 int indx;
507
508 KASSERT(mtp->ks_version == M_VERSION, ("malloc: bad malloc type version"));
509 /*
510 * Check that exactly one of M_WAITOK or M_NOWAIT is specified.
511 */
512 indx = flags & (M_WAITOK | M_NOWAIT);
513 if (indx != M_NOWAIT && indx != M_WAITOK) {
514 static struct timeval lasterr;
515 static int curerr, once;
516 if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) {
517 printf("Bad malloc flags: %x\n", indx);
518 kdb_backtrace();
519 flags |= M_WAITOK;
520 once++;
521 }
522 }
523 #endif
524 #ifdef MALLOC_MAKE_FAILURES
525 if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) {
526 atomic_add_int(&malloc_nowait_count, 1);
527 if ((malloc_nowait_count % malloc_failure_rate) == 0) {
528 atomic_add_int(&malloc_failure_count, 1);
529 *vap = NULL;
530 return (EJUSTRETURN);
531 }
532 }
533 #endif
534 if (flags & M_WAITOK) {
535 KASSERT(curthread->td_intr_nesting_level == 0,
536 ("malloc(M_WAITOK) in interrupt context"));
537 if (__predict_false(!THREAD_CAN_SLEEP())) {
538 #ifdef EPOCH_TRACE
539 epoch_trace_list(curthread);
540 #endif
541 KASSERT(0,
542 ("malloc(M_WAITOK) with sleeping prohibited"));
543 }
544 }
545 KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
546 ("malloc: called with spinlock or critical section held"));
547
548 #ifdef DEBUG_MEMGUARD
549 if (memguard_cmp_mtp(mtp, *sizep)) {
550 *vap = memguard_alloc(*sizep, flags);
551 if (*vap != NULL)
552 return (EJUSTRETURN);
553 /* This is unfortunate but should not be fatal. */
554 }
555 #endif
556
557 #ifdef DEBUG_REDZONE
558 *sizep = redzone_size_ntor(*sizep);
559 #endif
560
561 return (0);
562 }
563 #endif
564
565 /*
566 * Handle large allocations and frees by using kmem_malloc directly.
567 */
568 static inline bool
569 malloc_large_slab(uma_slab_t slab)
570 {
571 uintptr_t va;
572
573 va = (uintptr_t)slab;
574 return ((va & 1) != 0);
575 }
576
577 static inline size_t
578 malloc_large_size(uma_slab_t slab)
579 {
580 uintptr_t va;
581
582 va = (uintptr_t)slab;
583 return (va >> 1);
584 }
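
/*
* Large allocations are not backed by a UMA zone, so there is no real
* slab to record. malloc_large() instead stores the rounded-up size,
* shifted left by one with the low bit set, in the spot where the slab
* pointer would normally go. Genuine slab pointers are aligned and so
* have a clear low bit, which is how the two helpers above distinguish
* the cases.
*/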
585
586 static caddr_t __noinline
587 malloc_large(size_t *size, struct malloc_type *mtp, struct domainset *policy,
588 int flags DEBUG_REDZONE_ARG_DEF)
589 {
590 vm_offset_t kva;
591 caddr_t va;
592 size_t sz;
593
594 sz = roundup(*size, PAGE_SIZE);
595 kva = kmem_malloc_domainset(policy, sz, flags);
596 if (kva != 0) {
597 /* The low bit is unused for slab pointers. */
598 vsetzoneslab(kva, NULL, (void *)((sz << 1) | 1));
599 uma_total_inc(sz);
600 *size = sz;
601 }
602 va = (caddr_t)kva;
603 malloc_type_allocated(mtp, va == NULL ? 0 : sz);
604 if (__predict_false(va == NULL)) {
605 KASSERT((flags & M_WAITOK) == 0,
606 ("malloc(M_WAITOK) returned NULL"));
607 } else {
608 #ifdef DEBUG_REDZONE
609 va = redzone_setup(va, osize);
610 #endif
611 kasan_mark((void *)va, osize, sz, KASAN_MALLOC_REDZONE);
612 }
613 return (va);
614 }
615
616 static void
617 free_large(void *addr, size_t size)
618 {
619
620 kmem_free((vm_offset_t)addr, size);
621 uma_total_dec(size);
622 }
623
624 /*
625 * malloc:
626 *
627 * Allocate a block of memory.
628 *
629 * If M_NOWAIT is set, this routine will not block and return NULL if
630 * the allocation fails.
631 */
632 void *
633 (malloc)(size_t size, struct malloc_type *mtp, int flags)
634 {
635 int indx;
636 caddr_t va;
637 uma_zone_t zone;
638 #if defined(DEBUG_REDZONE) || defined(KASAN)
639 unsigned long osize = size;
640 #endif
641
642 MPASS((flags & M_EXEC) == 0);
643
644 #ifdef MALLOC_DEBUG
645 va = NULL;
646 if (malloc_dbg(&va, &size, mtp, flags) != 0)
647 return (va);
648 #endif
649
650 if (__predict_false(size > kmem_zmax))
651 return (malloc_large(&size, mtp, DOMAINSET_RR(), flags
652 DEBUG_REDZONE_ARG));
653
654 if (size & KMEM_ZMASK)
655 size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
656 indx = kmemsize[size >> KMEM_ZSHIFT];
657 zone = kmemzones[indx].kz_zone[mtp_get_subzone(mtp)];
658 va = uma_zalloc(zone, flags);
659 if (va != NULL)
660 size = zone->uz_size;
661 malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
662 if (__predict_false(va == NULL)) {
663 KASSERT((flags & M_WAITOK) == 0,
664 ("malloc(M_WAITOK) returned NULL"));
665 }
666 #ifdef DEBUG_REDZONE
667 if (va != NULL)
668 va = redzone_setup(va, osize);
669 #endif
670 #ifdef KASAN
671 if (va != NULL)
672 kasan_mark((void *)va, osize, size, KASAN_MALLOC_REDZONE);
673 #endif
674 return ((void *) va);
675 }
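
/*
* A minimal usage sketch (illustrative only; not code that belongs in this
* file): callers pair every allocation with a malloc type and pass the
* same type to free(), e.g.
*
*	struct foo *p;
*
*	p = malloc(sizeof(*p), M_TEMP, M_WAITOK | M_ZERO);
*	...
*	free(p, M_TEMP);
*
* M_WAITOK requests may sleep and do not return NULL; M_NOWAIT requests
* never sleep and must be checked for NULL.
*/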
676
677 static void *
678 malloc_domain(size_t *sizep, int *indxp, struct malloc_type *mtp, int domain,
679 int flags)
680 {
681 uma_zone_t zone;
682 caddr_t va;
683 size_t size;
684 int indx;
685
686 size = *sizep;
687 KASSERT(size <= kmem_zmax && (flags & M_EXEC) == 0,
688 ("malloc_domain: Called with bad flag / size combination."));
689 if (size & KMEM_ZMASK)
690 size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
691 indx = kmemsize[size >> KMEM_ZSHIFT];
692 zone = kmemzones[indx].kz_zone[mtp_get_subzone(mtp)];
693 va = uma_zalloc_domain(zone, NULL, domain, flags);
694 if (va != NULL)
695 *sizep = zone->uz_size;
696 *indxp = indx;
697 return ((void *)va);
698 }
699
700 void *
701 malloc_domainset(size_t size, struct malloc_type *mtp, struct domainset *ds,
702 int flags)
703 {
704 struct vm_domainset_iter di;
705 caddr_t va;
706 int domain;
707 int indx;
708 #if defined(KASAN) || defined(DEBUG_REDZONE)
709 unsigned long osize = size;
710 #endif
711
712 MPASS((flags & M_EXEC) == 0);
713
714 #ifdef MALLOC_DEBUG
715 va = NULL;
716 if (malloc_dbg(&va, &size, mtp, flags) != 0)
717 return (va);
718 #endif
719
720 if (__predict_false(size > kmem_zmax))
721 return (malloc_large(&size, mtp, DOMAINSET_RR(), flags
722 DEBUG_REDZONE_ARG));
723
724 vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
725 do {
726 va = malloc_domain(&size, &indx, mtp, domain, flags);
727 } while (va == NULL && vm_domainset_iter_policy(&di, &domain) == 0);
728 malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
729 if (__predict_false(va == NULL)) {
730 KASSERT((flags & M_WAITOK) == 0,
731 ("malloc(M_WAITOK) returned NULL"));
732 }
733 #ifdef DEBUG_REDZONE
734 if (va != NULL)
735 va = redzone_setup(va, osize);
736 #endif
737 #ifdef KASAN
738 if (va != NULL)
739 kasan_mark((void *)va, osize, size, KASAN_MALLOC_REDZONE);
740 #endif
741 return (va);
742 }
743
744 /*
745 * Allocate an executable area.
746 */
747 void *
748 malloc_exec(size_t size, struct malloc_type *mtp, int flags)
749 {
750
751 return (malloc_domainset_exec(size, mtp, DOMAINSET_RR(), flags));
752 }
753
754 void *
755 malloc_domainset_exec(size_t size, struct malloc_type *mtp, struct domainset *ds,
756 int flags)
757 {
758 #if defined(DEBUG_REDZONE) || defined(KASAN)
759 unsigned long osize = size;
760 #endif
761 #ifdef MALLOC_DEBUG
762 caddr_t va;
763 #endif
764
765 flags |= M_EXEC;
766
767 #ifdef MALLOC_DEBUG
768 va = NULL;
769 if (malloc_dbg(&va, &size, mtp, flags) != 0)
770 return (va);
771 #endif
772
773 return (malloc_large(&size, mtp, ds, flags DEBUG_REDZONE_ARG));
774 }
775
776 void *
777 malloc_aligned(size_t size, size_t align, struct malloc_type *type, int flags)
778 {
779 return (malloc_domainset_aligned(size, align, type, DOMAINSET_RR(),
780 flags));
781 }
782
783 void *
784 malloc_domainset_aligned(size_t size, size_t align,
785 struct malloc_type *mtp, struct domainset *ds, int flags)
786 {
787 void *res;
788 size_t asize;
789
790 KASSERT(powerof2(align),
791 ("malloc_domainset_aligned: wrong align %#zx size %#zx",
792 align, size));
793 KASSERT(align <= PAGE_SIZE,
794 ("malloc_domainset_aligned: align %#zx (size %#zx) too large",
795 align, size));
796
797 /*
798 * Round the allocation size up to the next power of 2,
799 * because we can only guarantee alignment for
800 * power-of-2-sized allocations. Further increase the
801 * allocation size to align if the rounded size is less than
802 * align, since malloc zones provide alignment equal to their
803 * size.
804 */
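/*
* For example (illustrative sizes): a request for 100 bytes with 64-byte
* alignment rounds up to asize = 128, the next power of two, while a
* request for 40 bytes with the same alignment uses asize = 64, since the
* zone's natural alignment must be at least the requested alignment.
*/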
805 if (size == 0)
806 size = 1;
807 asize = size <= align ? align : 1UL << flsl(size - 1);
808
809 res = malloc_domainset(asize, mtp, ds, flags);
810 KASSERT(res == NULL || ((uintptr_t)res & (align - 1)) == 0,
811 ("malloc_domainset_aligned: result not aligned %p size %#zx "
812 "allocsize %#zx align %#zx", res, size, asize, align));
813 return (res);
814 }
815
816 void *
817 mallocarray(size_t nmemb, size_t size, struct malloc_type *type, int flags)
818 {
819
820 if (WOULD_OVERFLOW(nmemb, size))
821 panic("mallocarray: %zu * %zu overflowed", nmemb, size);
822
823 return (malloc(size * nmemb, type, flags));
824 }
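
/*
* For example (illustrative): on a 64-bit platform a call such as
* mallocarray(SIZE_MAX / 4, 8, M_TEMP, M_NOWAIT) would overflow size_t,
* so it panics rather than silently allocating a short buffer.
*/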
825
826 void *
827 mallocarray_domainset(size_t nmemb, size_t size, struct malloc_type *type,
828 struct domainset *ds, int flags)
829 {
830
831 if (WOULD_OVERFLOW(nmemb, size))
832 panic("mallocarray_domainset: %zu * %zu overflowed", nmemb, size);
833
834 return (malloc_domainset(size * nmemb, type, ds, flags));
835 }
836
837 #if defined(INVARIANTS) && !defined(KASAN)
838 static void
839 free_save_type(void *addr, struct malloc_type *mtp, u_long size)
840 {
841 struct malloc_type **mtpp = addr;
842
843 /*
844 * Cache a pointer to the malloc_type that most recently freed
845 * this memory here. This way we know who is most likely to
846 * have stepped on it later.
847 *
848 * This code assumes that size is a multiple of 8 bytes on
849 * 64-bit machines.
850 */
851 mtpp = (struct malloc_type **) ((unsigned long)mtpp & ~UMA_ALIGN_PTR);
852 mtpp += (size - sizeof(struct malloc_type *)) /
853 sizeof(struct malloc_type *);
854 *mtpp = mtp;
855 }
856 #endif
857
858 #ifdef MALLOC_DEBUG
859 static int
860 free_dbg(void **addrp, struct malloc_type *mtp)
861 {
862 void *addr;
863
864 addr = *addrp;
865 KASSERT(mtp->ks_version == M_VERSION, ("free: bad malloc type version"));
866 KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
867 ("free: called with spinlock or critical section held"));
868
869 /* free(NULL, ...) does nothing */
870 if (addr == NULL)
871 return (EJUSTRETURN);
872
873 #ifdef DEBUG_MEMGUARD
874 if (is_memguard_addr(addr)) {
875 memguard_free(addr);
876 return (EJUSTRETURN);
877 }
878 #endif
879
880 #ifdef DEBUG_REDZONE
881 redzone_check(addr);
882 *addrp = redzone_addr_ntor(addr);
883 #endif
884
885 return (0);
886 }
887 #endif
888
889 /*
890 * free:
891 *
892 * Free a block of memory allocated by malloc.
893 *
894 * This routine may not block.
895 */
896 void
897 free(void *addr, struct malloc_type *mtp)
898 {
899 uma_zone_t zone;
900 uma_slab_t slab;
901 u_long size;
902
903 #ifdef MALLOC_DEBUG
904 if (free_dbg(&addr, mtp) != 0)
905 return;
906 #endif
907 /* free(NULL, ...) does nothing */
908 if (addr == NULL)
909 return;
910
911 vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
912 if (slab == NULL)
913 panic("free: address %p(%p) has not been allocated.\n",
914 addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));
915
916 if (__predict_true(!malloc_large_slab(slab))) {
917 size = zone->uz_size;
918 #if defined(INVARIANTS) && !defined(KASAN)
919 free_save_type(addr, mtp, size);
920 #endif
921 uma_zfree_arg(zone, addr, slab);
922 } else {
923 size = malloc_large_size(slab);
924 free_large(addr, size);
925 }
926 malloc_type_freed(mtp, size);
927 }
928
929 /*
930 * zfree:
931 *
932 * Zero then free a block of memory allocated by malloc.
933 *
934 * This routine may not block.
935 */
936 void
937 zfree(void *addr, struct malloc_type *mtp)
938 {
939 uma_zone_t zone;
940 uma_slab_t slab;
941 u_long size;
942
943 #ifdef MALLOC_DEBUG
944 if (free_dbg(&addr, mtp) != 0)
945 return;
946 #endif
947 /* free(NULL, ...) does nothing */
948 if (addr == NULL)
949 return;
950
951 vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
952 if (slab == NULL)
953 panic("free: address %p(%p) has not been allocated.\n",
954 addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));
955
956 if (__predict_true(!malloc_large_slab(slab))) {
957 size = zone->uz_size;
958 #if defined(INVARIANTS) && !defined(KASAN)
959 free_save_type(addr, mtp, size);
960 #endif
961 kasan_mark(addr, size, size, 0);
962 explicit_bzero(addr, size);
963 uma_zfree_arg(zone, addr, slab);
964 } else {
965 size = malloc_large_size(slab);
966 kasan_mark(addr, size, size, 0);
967 explicit_bzero(addr, size);
968 free_large(addr, size);
969 }
970 malloc_type_freed(mtp, size);
971 }
972
973 /*
974 * realloc: change the size of a memory block
975 */
976 void *
977 realloc(void *addr, size_t size, struct malloc_type *mtp, int flags)
978 {
979 uma_zone_t zone;
980 uma_slab_t slab;
981 unsigned long alloc;
982 void *newaddr;
983
984 KASSERT(mtp->ks_version == M_VERSION,
985 ("realloc: bad malloc type version"));
986 KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
987 ("realloc: called with spinlock or critical section held"));
988
989 /* realloc(NULL, ...) is equivalent to malloc(...) */
990 if (addr == NULL)
991 return (malloc(size, mtp, flags));
992
993 /*
994 * XXX: Should report free of old memory and alloc of new memory to
995 * per-CPU stats.
996 */
997
998 #ifdef DEBUG_MEMGUARD
999 if (is_memguard_addr(addr))
1000 return (memguard_realloc(addr, size, mtp, flags));
1001 #endif
1002
1003 #ifdef DEBUG_REDZONE
1004 slab = NULL;
1005 zone = NULL;
1006 alloc = redzone_get_size(addr);
1007 #else
1008 vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
1009
1010 /* Sanity check */
1011 KASSERT(slab != NULL,
1012 ("realloc: address %p out of range", (void *)addr));
1013
1014 /* Get the size of the original block */
1015 if (!malloc_large_slab(slab))
1016 alloc = zone->uz_size;
1017 else
1018 alloc = malloc_large_size(slab);
1019
1020 /* Reuse the original block if appropriate */
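/*
* For instance (illustrative sizes): a 128-byte bucket allocation being
* resized to 70 bytes still fits and is more than half used, so the
* original block is returned; a resize to 40 bytes falls to half or less
* and a new, smaller block is allocated, unless the old block is already
* of the minimum allocation size.
*/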
1021 if (size <= alloc &&
1022 (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE)) {
1023 kasan_mark((void *)addr, size, alloc, KASAN_MALLOC_REDZONE);
1024 return (addr);
1025 }
1026 #endif /* !DEBUG_REDZONE */
1027
1028 /* Allocate a new, bigger (or smaller) block */
1029 if ((newaddr = malloc(size, mtp, flags)) == NULL)
1030 return (NULL);
1031
1032 /*
1033 * Copy over original contents. For KASAN, the redzone must be marked
1034 * valid before performing the copy.
1035 */
1036 kasan_mark(addr, alloc, alloc, 0);
1037 bcopy(addr, newaddr, min(size, alloc));
1038 free(addr, mtp);
1039 return (newaddr);
1040 }
1041
1042 /*
1043 * reallocf: same as realloc() but free memory on failure.
1044 */
1045 void *
1046 reallocf(void *addr, size_t size, struct malloc_type *mtp, int flags)
1047 {
1048 void *mem;
1049
1050 if ((mem = realloc(addr, size, mtp, flags)) == NULL)
1051 free(addr, mtp);
1052 return (mem);
1053 }
1054
1055 /*
1056 * malloc_size: returns the number of bytes allocated for a request of the
1057 * specified size
1058 */
1059 size_t
1060 malloc_size(size_t size)
1061 {
1062 int indx;
1063
1064 if (size > kmem_zmax)
1065 return (0);
1066 if (size & KMEM_ZMASK)
1067 size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
1068 indx = kmemsize[size >> KMEM_ZSHIFT];
1069 return (kmemzones[indx].kz_size);
1070 }
1071
1072 /*
1073 * malloc_usable_size: returns the usable size of the allocation.
1074 */
1075 size_t
1076 malloc_usable_size(const void *addr)
1077 {
1078 #ifndef DEBUG_REDZONE
1079 uma_zone_t zone;
1080 uma_slab_t slab;
1081 #endif
1082 u_long size;
1083
1084 if (addr == NULL)
1085 return (0);
1086
1087 #ifdef DEBUG_MEMGUARD
1088 if (is_memguard_addr(__DECONST(void *, addr)))
1089 return (memguard_get_req_size(addr));
1090 #endif
1091
1092 #ifdef DEBUG_REDZONE
1093 size = redzone_get_size(__DECONST(void *, addr));
1094 #else
1095 vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
1096 if (slab == NULL)
1097 panic("malloc_usable_size: address %p(%p) is not allocated.\n",
1098 addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));
1099
1100 if (!malloc_large_slab(slab))
1101 size = zone->uz_size;
1102 else
1103 size = malloc_large_size(slab);
1104 #endif
1105 return (size);
1106 }
1107
1108 CTASSERT(VM_KMEM_SIZE_SCALE >= 1);
1109
1110 /*
1111 * Initialize the kernel memory (kmem) arena.
1112 */
1113 void
1114 kmeminit(void)
1115 {
1116 u_long mem_size;
1117 u_long tmp;
1118
1119 #ifdef VM_KMEM_SIZE
1120 if (vm_kmem_size == 0)
1121 vm_kmem_size = VM_KMEM_SIZE;
1122 #endif
1123 #ifdef VM_KMEM_SIZE_MIN
1124 if (vm_kmem_size_min == 0)
1125 vm_kmem_size_min = VM_KMEM_SIZE_MIN;
1126 #endif
1127 #ifdef VM_KMEM_SIZE_MAX
1128 if (vm_kmem_size_max == 0)
1129 vm_kmem_size_max = VM_KMEM_SIZE_MAX;
1130 #endif
1131 /*
1132 * Calculate the amount of kernel virtual address (KVA) space that is
1133 * preallocated to the kmem arena. In order to support a wide range
1134 * of machines, it is a function of the physical memory size,
1135 * specifically,
1136 *
1137 * min(max(physical memory size / VM_KMEM_SIZE_SCALE,
1138 * VM_KMEM_SIZE_MIN), VM_KMEM_SIZE_MAX)
1139 *
1140 * Every architecture must define an integral value for
1141 * VM_KMEM_SIZE_SCALE. However, the definitions of VM_KMEM_SIZE_MIN
1142 * and VM_KMEM_SIZE_MAX, which represent respectively the floor and
1143 * ceiling on this preallocation, are optional. Typically,
1144 * VM_KMEM_SIZE_MAX is itself a function of the available KVA space on
1145 * a given architecture.
1146 */
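/*
* Illustrative example (numbers are hypothetical): on a machine with 8 GB
* of RAM, 4 KB pages and VM_KMEM_SIZE_SCALE defined as 1, the starting
* point is 8 GB of KVA, which is then clamped by VM_KMEM_SIZE_MIN and
* VM_KMEM_SIZE_MAX where the architecture defines them, and can be
* overridden outright by the vm.kmem_size loader tunable.
*/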
1147 mem_size = vm_cnt.v_page_count;
1148 if (mem_size <= 32768) /* delphij XXX 128MB */
1149 kmem_zmax = PAGE_SIZE;
1150
1151 if (vm_kmem_size_scale < 1)
1152 vm_kmem_size_scale = VM_KMEM_SIZE_SCALE;
1153
1154 /*
1155 * Check if we should use defaults for the "vm_kmem_size"
1156 * variable:
1157 */
1158 if (vm_kmem_size == 0) {
1159 vm_kmem_size = mem_size / vm_kmem_size_scale;
1160 vm_kmem_size = vm_kmem_size * PAGE_SIZE < vm_kmem_size ?
1161 vm_kmem_size_max : vm_kmem_size * PAGE_SIZE;
1162 if (vm_kmem_size_min > 0 && vm_kmem_size < vm_kmem_size_min)
1163 vm_kmem_size = vm_kmem_size_min;
1164 if (vm_kmem_size_max > 0 && vm_kmem_size >= vm_kmem_size_max)
1165 vm_kmem_size = vm_kmem_size_max;
1166 }
1167 if (vm_kmem_size == 0)
1168 panic("Tune VM_KMEM_SIZE_* for the platform");
1169
1170 /*
1171 * The amount of KVA space that is preallocated to the
1172 * kmem arena can be set statically at compile-time or manually
1173 * through the kernel environment. However, it is still limited to
1174 * twice the physical memory size, which has been sufficient to handle
1175 * the most severe cases of external fragmentation in the kmem arena.
1176 */
1177 if (vm_kmem_size / 2 / PAGE_SIZE > mem_size)
1178 vm_kmem_size = 2 * mem_size * PAGE_SIZE;
1179
1180 vm_kmem_size = round_page(vm_kmem_size);
1181
1182 #ifdef KASAN
1183 /*
1184 * With KASAN enabled, dynamically allocated kernel memory is shadowed.
1185 * Account for this when setting the UMA limit.
1186 */
1187 vm_kmem_size = (vm_kmem_size * KASAN_SHADOW_SCALE) /
1188 (KASAN_SHADOW_SCALE + 1);
1189 #endif
1190
1191 #ifdef DEBUG_MEMGUARD
1192 tmp = memguard_fudge(vm_kmem_size, kernel_map);
1193 #else
1194 tmp = vm_kmem_size;
1195 #endif
1196 uma_set_limit(tmp);
1197
1198 #ifdef DEBUG_MEMGUARD
1199 /*
1200 * Initialize MemGuard if support compiled in. MemGuard is a
1201 * replacement allocator used for detecting tamper-after-free
1202 * scenarios as they occur. It is only used for debugging.
1203 */
1204 memguard_init(kernel_arena);
1205 #endif
1206 }
1207
1208 /*
1209 * Initialize the kernel memory allocator
1210 */
1211 /* ARGSUSED*/
1212 static void
1213 mallocinit(void *dummy)
1214 {
1215 int i;
1216 uint8_t indx;
1217
1218 mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);
1219
1220 kmeminit();
1221
1222 if (kmem_zmax < PAGE_SIZE || kmem_zmax > KMEM_ZMAX)
1223 kmem_zmax = KMEM_ZMAX;
1224
1225 for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
1226 int size = kmemzones[indx].kz_size;
1227 const char *name = kmemzones[indx].kz_name;
1228 size_t align;
1229 int subzone;
1230
1231 align = UMA_ALIGN_PTR;
1232 if (powerof2(size) && size > sizeof(void *))
1233 align = MIN(size, PAGE_SIZE) - 1;
1234 for (subzone = 0; subzone < numzones; subzone++) {
1235 kmemzones[indx].kz_zone[subzone] =
1236 uma_zcreate(name, size,
1237 #if defined(INVARIANTS) && !defined(KASAN)
1238 mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
1239 #else
1240 NULL, NULL, NULL, NULL,
1241 #endif
1242 align, UMA_ZONE_MALLOC);
1243 }
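/*
* Map every KMEM_ZBASE-aligned size not covered by a smaller zone, up to
* and including this zone's size, to this zone's index; malloc() consults
* this table to turn a rounded request size into a bucket.
*/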
1244 for (;i <= size; i+= KMEM_ZBASE)
1245 kmemsize[i >> KMEM_ZSHIFT] = indx;
1246 }
1247 }
1248 SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_SECOND, mallocinit, NULL);
1249
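/*
* Register a malloc type with the allocator. Most code does not call this
* directly; the MALLOC_DEFINE() macro (used above for M_CACHE, M_DEVBUF
* and M_TEMP) declares the type and arranges, via SYSINIT/SYSUNINIT, for
* malloc_init() and malloc_uninit() to run automatically when the defining
* module is loaded and unloaded.
*/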
1250 void
1251 malloc_init(void *data)
1252 {
1253 struct malloc_type_internal *mtip;
1254 struct malloc_type *mtp;
1255
1256 KASSERT(vm_cnt.v_page_count != 0, ("malloc_register before vm_init"));
1257
1258 mtp = data;
1259 if (mtp->ks_version != M_VERSION)
1260 panic("malloc_init: type %s with unsupported version %lu",
1261 mtp->ks_shortdesc, mtp->ks_version);
1262
1263 mtip = &mtp->ks_mti;
1264 mtip->mti_stats = uma_zalloc_pcpu(pcpu_zone_64, M_WAITOK | M_ZERO);
1265 mtp_set_subzone(mtp);
1266
1267 mtx_lock(&malloc_mtx);
1268 mtp->ks_next = kmemstatistics;
1269 kmemstatistics = mtp;
1270 kmemcount++;
1271 mtx_unlock(&malloc_mtx);
1272 }
1273
1274 void
1275 malloc_uninit(void *data)
1276 {
1277 struct malloc_type_internal *mtip;
1278 struct malloc_type_stats *mtsp;
1279 struct malloc_type *mtp, *temp;
1280 long temp_allocs, temp_bytes;
1281 int i;
1282
1283 mtp = data;
1284 KASSERT(mtp->ks_version == M_VERSION,
1285 ("malloc_uninit: bad malloc type version"));
1286
1287 mtx_lock(&malloc_mtx);
1288 mtip = &mtp->ks_mti;
1289 if (mtp != kmemstatistics) {
1290 for (temp = kmemstatistics; temp != NULL;
1291 temp = temp->ks_next) {
1292 if (temp->ks_next == mtp) {
1293 temp->ks_next = mtp->ks_next;
1294 break;
1295 }
1296 }
1297 KASSERT(temp,
1298 ("malloc_uninit: type '%s' not found", mtp->ks_shortdesc));
1299 } else
1300 kmemstatistics = mtp->ks_next;
1301 kmemcount--;
1302 mtx_unlock(&malloc_mtx);
1303
1304 /*
1305 * Look for memory leaks.
1306 */
1307 temp_allocs = temp_bytes = 0;
1308 for (i = 0; i <= mp_maxid; i++) {
1309 mtsp = zpcpu_get_cpu(mtip->mti_stats, i);
1310 temp_allocs += mtsp->mts_numallocs;
1311 temp_allocs -= mtsp->mts_numfrees;
1312 temp_bytes += mtsp->mts_memalloced;
1313 temp_bytes -= mtsp->mts_memfreed;
1314 }
1315 if (temp_allocs > 0 || temp_bytes > 0) {
1316 printf("Warning: memory type %s leaked memory on destroy "
1317 "(%ld allocations, %ld bytes leaked).\n", mtp->ks_shortdesc,
1318 temp_allocs, temp_bytes);
1319 }
1320
1321 uma_zfree_pcpu(pcpu_zone_64, mtip->mti_stats);
1322 }
1323
1324 struct malloc_type *
1325 malloc_desc2type(const char *desc)
1326 {
1327 struct malloc_type *mtp;
1328
1329 mtx_assert(&malloc_mtx, MA_OWNED);
1330 for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
1331 if (strcmp(mtp->ks_shortdesc, desc) == 0)
1332 return (mtp);
1333 }
1334 return (NULL);
1335 }
1336
1337 static int
1338 sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS)
1339 {
1340 struct malloc_type_stream_header mtsh;
1341 struct malloc_type_internal *mtip;
1342 struct malloc_type_stats *mtsp, zeromts;
1343 struct malloc_type_header mth;
1344 struct malloc_type *mtp;
1345 int error, i;
1346 struct sbuf sbuf;
1347
1348 error = sysctl_wire_old_buffer(req, 0);
1349 if (error != 0)
1350 return (error);
1351 sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
1352 sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
1353 mtx_lock(&malloc_mtx);
1354
1355 bzero(&zeromts, sizeof(zeromts));
1356
1357 /*
1358 * Insert stream header.
1359 */
1360 bzero(&mtsh, sizeof(mtsh));
1361 mtsh.mtsh_version = MALLOC_TYPE_STREAM_VERSION;
1362 mtsh.mtsh_maxcpus = MAXCPU;
1363 mtsh.mtsh_count = kmemcount;
1364 (void)sbuf_bcat(&sbuf, &mtsh, sizeof(mtsh));
1365
1366 /*
1367 * Insert alternating sequence of type headers and type statistics.
1368 */
1369 for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
1370 mtip = &mtp->ks_mti;
1371
1372 /*
1373 * Insert type header.
1374 */
1375 bzero(&mth, sizeof(mth));
1376 strlcpy(mth.mth_name, mtp->ks_shortdesc, MALLOC_MAX_NAME);
1377 (void)sbuf_bcat(&sbuf, &mth, sizeof(mth));
1378
1379 /*
1380 * Insert type statistics for each CPU.
1381 */
1382 for (i = 0; i <= mp_maxid; i++) {
1383 mtsp = zpcpu_get_cpu(mtip->mti_stats, i);
1384 (void)sbuf_bcat(&sbuf, mtsp, sizeof(*mtsp));
1385 }
1386 /*
1387 * Fill in the missing CPUs.
1388 */
1389 for (; i < MAXCPU; i++) {
1390 (void)sbuf_bcat(&sbuf, &zeromts, sizeof(zeromts));
1391 }
1392 }
1393 mtx_unlock(&malloc_mtx);
1394 error = sbuf_finish(&sbuf);
1395 sbuf_delete(&sbuf);
1396 return (error);
1397 }
1398
1399 SYSCTL_PROC(_kern, OID_AUTO, malloc_stats,
1400 CTLFLAG_RD | CTLTYPE_STRUCT | CTLFLAG_MPSAFE, 0, 0,
1401 sysctl_kern_malloc_stats, "s,malloc_type_ustats",
1402 "Return malloc types");
1403
1404 SYSCTL_INT(_kern, OID_AUTO, malloc_count, CTLFLAG_RD, &kmemcount, 0,
1405 "Count of kernel malloc types");
1406
1407 void
1408 malloc_type_list(malloc_type_list_func_t *func, void *arg)
1409 {
1410 struct malloc_type *mtp, **bufmtp;
1411 int count, i;
1412 size_t buflen;
1413
1414 mtx_lock(&malloc_mtx);
1415 restart:
1416 mtx_assert(&malloc_mtx, MA_OWNED);
1417 count = kmemcount;
1418 mtx_unlock(&malloc_mtx);
1419
1420 buflen = sizeof(struct malloc_type *) * count;
1421 bufmtp = malloc(buflen, M_TEMP, M_WAITOK);
1422
1423 mtx_lock(&malloc_mtx);
1424
1425 if (count < kmemcount) {
1426 free(bufmtp, M_TEMP);
1427 goto restart;
1428 }
1429
1430 for (mtp = kmemstatistics, i = 0; mtp != NULL; mtp = mtp->ks_next, i++)
1431 bufmtp[i] = mtp;
1432
1433 mtx_unlock(&malloc_mtx);
1434
1435 for (i = 0; i < count; i++)
1436 (func)(bufmtp[i], arg);
1437
1438 free(bufmtp, M_TEMP);
1439 }
1440
1441 #ifdef DDB
1442 static int64_t
1443 get_malloc_stats(const struct malloc_type_internal *mtip, uint64_t *allocs,
1444 uint64_t *inuse)
1445 {
1446 const struct malloc_type_stats *mtsp;
1447 uint64_t frees, alloced, freed;
1448 int i;
1449
1450 *allocs = 0;
1451 frees = 0;
1452 alloced = 0;
1453 freed = 0;
1454 for (i = 0; i <= mp_maxid; i++) {
1455 mtsp = zpcpu_get_cpu(mtip->mti_stats, i);
1456
1457 *allocs += mtsp->mts_numallocs;
1458 frees += mtsp->mts_numfrees;
1459 alloced += mtsp->mts_memalloced;
1460 freed += mtsp->mts_memfreed;
1461 }
1462 *inuse = *allocs - frees;
1463 return (alloced - freed);
1464 }
1465
1466 DB_SHOW_COMMAND(malloc, db_show_malloc)
1467 {
1468 const char *fmt_hdr, *fmt_entry;
1469 struct malloc_type *mtp;
1470 uint64_t allocs, inuse;
1471 int64_t size;
1472 /* variables for sorting */
1473 struct malloc_type *last_mtype, *cur_mtype;
1474 int64_t cur_size, last_size;
1475 int ties;
1476
1477 if (modif[0] == 'i') {
1478 fmt_hdr = "%s,%s,%s,%s\n";
1479 fmt_entry = "\"%s\",%ju,%jdK,%ju\n";
1480 } else {
1481 fmt_hdr = "%18s %12s %12s %12s\n";
1482 fmt_entry = "%18s %12ju %12jdK %12ju\n";
1483 }
1484
1485 db_printf(fmt_hdr, "Type", "InUse", "MemUse", "Requests");
1486
1487 /* Select sort, largest size first. */
1488 last_mtype = NULL;
1489 last_size = INT64_MAX;
1490 for (;;) {
1491 cur_mtype = NULL;
1492 cur_size = -1;
1493 ties = 0;
1494
1495 for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
1496 /*
1497 * In the case of size ties, print out mtypes
1498 * in the order they are encountered. That is,
1499 * when we encounter the most recently output
1500 * mtype, we have already printed all preceding
1501 * ties, and we must print all following ties.
1502 */
1503 if (mtp == last_mtype) {
1504 ties = 1;
1505 continue;
1506 }
1507 size = get_malloc_stats(&mtp->ks_mti, &allocs,
1508 &inuse);
1509 if (size > cur_size && size < last_size + ties) {
1510 cur_size = size;
1511 cur_mtype = mtp;
1512 }
1513 }
1514 if (cur_mtype == NULL)
1515 break;
1516
1517 size = get_malloc_stats(&cur_mtype->ks_mti, &allocs, &inuse);
1518 db_printf(fmt_entry, cur_mtype->ks_shortdesc, inuse,
1519 howmany(size, 1024), allocs);
1520
1521 if (db_pager_quit)
1522 break;
1523
1524 last_mtype = cur_mtype;
1525 last_size = cur_size;
1526 }
1527 }
1528
1529 #if MALLOC_DEBUG_MAXZONES > 1
1530 DB_SHOW_COMMAND(multizone_matches, db_show_multizone_matches)
1531 {
1532 struct malloc_type_internal *mtip;
1533 struct malloc_type *mtp;
1534 u_int subzone;
1535
1536 if (!have_addr) {
1537 db_printf("Usage: show multizone_matches <malloc type/addr>\n");
1538 return;
1539 }
1540 mtp = (void *)addr;
1541 if (mtp->ks_version != M_VERSION) {
1542 db_printf("Version %lx does not match expected %x\n",
1543 mtp->ks_version, M_VERSION);
1544 return;
1545 }
1546
1547 mtip = &mtp->ks_mti;
1548 subzone = mtip->mti_zone;
1549
1550 for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
1551 mtip = &mtp->ks_mti;
1552 if (mtip->mti_zone != subzone)
1553 continue;
1554 db_printf("%s\n", mtp->ks_shortdesc);
1555 if (db_pager_quit)
1556 break;
1557 }
1558 }
1559 #endif /* MALLOC_DEBUG_MAXZONES > 1 */
1560 #endif /* DDB */