1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1987, 1991, 1993
5 * The Regents of the University of California.
6 * Copyright (c) 2005-2009 Robert N. M. Watson
7 * Copyright (c) 2008 Otto Moerbeek <otto@drijf.net> (mallocarray)
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * @(#)kern_malloc.c 8.3 (Berkeley) 1/4/94
35 */
36
37 /*
38 * Kernel malloc(9) implementation -- general purpose kernel memory allocator
39 * based on memory types. Back end is implemented using the UMA(9) zone
40 * allocator. A set of fixed-size buckets are used for smaller allocations,
41 * and a special UMA allocation interface is used for larger allocations.
42 * Callers declare memory types, and statistics are maintained independently
43 * for each memory type. Statistics are maintained per-CPU for performance
44 * reasons. See malloc(9) and comments in malloc.h for a detailed
45 * description.
46 */
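
/*
 * A minimal usage sketch (M_FOOBUF is a hypothetical type, not one
 * defined in the tree): consumers declare a memory type once and then
 * pass it to every allocation and free so the per-type statistics
 * stay balanced.
 *
 *	MALLOC_DEFINE(M_FOOBUF, "foobuf", "example foo buffers");
 *
 *	p = malloc(len, M_FOOBUF, M_WAITOK | M_ZERO);
 *	...
 *	free(p, M_FOOBUF);
 */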
47
48 #include <sys/cdefs.h>
49 __FBSDID("$FreeBSD$");
50
51 #include "opt_ddb.h"
52 #include "opt_vm.h"
53
54 #include <sys/param.h>
55 #include <sys/systm.h>
56 #include <sys/kdb.h>
57 #include <sys/kernel.h>
58 #include <sys/lock.h>
59 #include <sys/malloc.h>
60 #include <sys/mutex.h>
61 #include <sys/vmmeter.h>
62 #include <sys/proc.h>
63 #include <sys/queue.h>
64 #include <sys/sbuf.h>
65 #include <sys/smp.h>
66 #include <sys/sysctl.h>
67 #include <sys/time.h>
68 #include <sys/vmem.h>
69 #ifdef EPOCH_TRACE
70 #include <sys/epoch.h>
71 #endif
72
73 #include <vm/vm.h>
74 #include <vm/pmap.h>
75 #include <vm/vm_domainset.h>
76 #include <vm/vm_pageout.h>
77 #include <vm/vm_param.h>
78 #include <vm/vm_kern.h>
79 #include <vm/vm_extern.h>
80 #include <vm/vm_map.h>
81 #include <vm/vm_page.h>
82 #include <vm/vm_phys.h>
83 #include <vm/vm_pagequeue.h>
84 #include <vm/uma.h>
85 #include <vm/uma_int.h>
86 #include <vm/uma_dbg.h>
87
88 #ifdef DEBUG_MEMGUARD
89 #include <vm/memguard.h>
90 #endif
91 #ifdef DEBUG_REDZONE
92 #include <vm/redzone.h>
93 #endif
94
95 #if defined(INVARIANTS) && defined(__i386__)
96 #include <machine/cpu.h>
97 #endif
98
99 #include <ddb/ddb.h>
100
101 #ifdef KDTRACE_HOOKS
102 #include <sys/dtrace_bsd.h>
103
104 bool __read_frequently dtrace_malloc_enabled;
105 dtrace_malloc_probe_func_t __read_mostly dtrace_malloc_probe;
106 #endif
107
108 #if defined(INVARIANTS) || defined(MALLOC_MAKE_FAILURES) || \
109 defined(DEBUG_MEMGUARD) || defined(DEBUG_REDZONE)
110 #define MALLOC_DEBUG 1
111 #endif
112
113 #ifdef DEBUG_REDZONE
114 #define DEBUG_REDZONE_ARG_DEF , unsigned long osize
115 #define DEBUG_REDZONE_ARG , osize
116 #else
117 #define DEBUG_REDZONE_ARG_DEF
118 #define DEBUG_REDZONE_ARG
119 #endif
120
121 /*
122 * When realloc() is called, if the new size is sufficiently smaller than
123 * the old size, realloc() will allocate a new, smaller block to avoid
124 * wasting memory. 'Sufficiently smaller' is defined as: newsize <=
125 * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'.
126 */
127 #ifndef REALLOC_FRACTION
128 #define REALLOC_FRACTION 1 /* new block if <= half the size */
129 #endif
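
/*
 * Worked example: with the default REALLOC_FRACTION of 1, shrinking a
 * 1024-byte allocation to 512 bytes or fewer allocates a fresh, smaller
 * block, while shrinking it to anywhere in the 513..1024 range reuses
 * the existing block (see the size check in realloc() below).
 */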
130
131 /*
132 * Centrally define some common malloc types.
133 */
134 MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
135 MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
136 MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");
137
138 static struct malloc_type *kmemstatistics;
139 static int kmemcount;
140
141 #define KMEM_ZSHIFT 4
142 #define KMEM_ZBASE 16
143 #define KMEM_ZMASK (KMEM_ZBASE - 1)
144
145 #define KMEM_ZMAX 65536
146 #define KMEM_ZSIZE (KMEM_ZMAX >> KMEM_ZSHIFT)
147 static uint8_t kmemsize[KMEM_ZSIZE + 1];
148
149 #ifndef MALLOC_DEBUG_MAXZONES
150 #define MALLOC_DEBUG_MAXZONES 1
151 #endif
152 static int numzones = MALLOC_DEBUG_MAXZONES;
153
154 /*
155 * Small malloc(9) memory allocations are allocated from a set of UMA buckets
156 * of various sizes.
157 *
158 * Warning: the layout of the struct is duplicated in libmemstat for KVM support.
159 *
160 * XXX: The comment here used to read "These won't be powers of two for
161 * long." It's possible that a significant amount of wasted memory could be
162 * recovered by tuning the sizes of these buckets.
163 */
164 struct {
165 int kz_size;
166 const char *kz_name;
167 uma_zone_t kz_zone[MALLOC_DEBUG_MAXZONES];
168 } kmemzones[] = {
169 {16, "malloc-16", },
170 {32, "malloc-32", },
171 {64, "malloc-64", },
172 {128, "malloc-128", },
173 {256, "malloc-256", },
174 {384, "malloc-384", },
175 {512, "malloc-512", },
176 {1024, "malloc-1024", },
177 {2048, "malloc-2048", },
178 {4096, "malloc-4096", },
179 {8192, "malloc-8192", },
180 {16384, "malloc-16384", },
181 {32768, "malloc-32768", },
182 {65536, "malloc-65536", },
183 {0, NULL},
184 };
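
/*
 * Mapping example: a 100-byte request is rounded up to the next multiple
 * of KMEM_ZBASE (112 bytes) in malloc(), and kmemsize[112 >> KMEM_ZSHIFT]
 * then selects the "malloc-128" zone, so the caller is handed (and the
 * type is charged for) a 128-byte item.
 */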
185
186 u_long vm_kmem_size;
187 SYSCTL_ULONG(_vm, OID_AUTO, kmem_size, CTLFLAG_RDTUN, &vm_kmem_size, 0,
188 "Size of kernel memory");
189
190 static u_long kmem_zmax = KMEM_ZMAX;
191 SYSCTL_ULONG(_vm, OID_AUTO, kmem_zmax, CTLFLAG_RDTUN, &kmem_zmax, 0,
192 "Maximum allocation size that malloc(9) would use UMA as backend");
193
194 static u_long vm_kmem_size_min;
195 SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_min, CTLFLAG_RDTUN, &vm_kmem_size_min, 0,
196 "Minimum size of kernel memory");
197
198 static u_long vm_kmem_size_max;
199 SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_max, CTLFLAG_RDTUN, &vm_kmem_size_max, 0,
200 "Maximum size of kernel memory");
201
202 static u_int vm_kmem_size_scale;
203 SYSCTL_UINT(_vm, OID_AUTO, kmem_size_scale, CTLFLAG_RDTUN, &vm_kmem_size_scale, 0,
204 "Scale factor for kernel memory size");
205
206 static int sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS);
207 SYSCTL_PROC(_vm, OID_AUTO, kmem_map_size,
208 CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
209 sysctl_kmem_map_size, "LU", "Current kmem allocation size");
210
211 static int sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS);
212 SYSCTL_PROC(_vm, OID_AUTO, kmem_map_free,
213 CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
214 sysctl_kmem_map_free, "LU", "Free space in kmem");
215
216 static SYSCTL_NODE(_vm, OID_AUTO, malloc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
217 "Malloc information");
218
219 static u_int vm_malloc_zone_count = nitems(kmemzones);
220 SYSCTL_UINT(_vm_malloc, OID_AUTO, zone_count,
221 CTLFLAG_RD, &vm_malloc_zone_count, 0,
222 "Number of malloc zones");
223
224 static int sysctl_vm_malloc_zone_sizes(SYSCTL_HANDLER_ARGS);
225 SYSCTL_PROC(_vm_malloc, OID_AUTO, zone_sizes,
226 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, NULL, 0,
227 sysctl_vm_malloc_zone_sizes, "S", "Zone sizes used by malloc");
228
229 /*
230 * The malloc_mtx protects the kmemstatistics linked list.
231 */
232 struct mtx malloc_mtx;
233
234 static int sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS);
235
236 #if defined(MALLOC_MAKE_FAILURES) || (MALLOC_DEBUG_MAXZONES > 1)
237 static SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
238 "Kernel malloc debugging options");
239 #endif
240
241 /*
242 * malloc(9) fault injection -- cause malloc failures every (n) mallocs when
243 * the caller specifies M_NOWAIT. If set to 0, no failures are caused.
244 */
245 #ifdef MALLOC_MAKE_FAILURES
246 static int malloc_failure_rate;
247 static int malloc_nowait_count;
248 static int malloc_failure_count;
249 SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RWTUN,
250 &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail");
251 SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD,
252 &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures");
253 #endif
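
/*
 * Usage sketch: on a kernel built with "options MALLOC_MAKE_FAILURES",
 * setting
 *
 *	sysctl debug.malloc.failure_rate=100
 *
 * makes every 100th M_NOWAIT allocation return NULL, which helps
 * exercise error handling paths in callers.
 */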
254
255 static int
256 sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS)
257 {
258 u_long size;
259
260 size = uma_size();
261 return (sysctl_handle_long(oidp, &size, 0, req));
262 }
263
264 static int
265 sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS)
266 {
267 u_long size, limit;
268
269 /* The sysctl is unsigned, implement as a saturation value. */
270 size = uma_size();
271 limit = uma_limit();
272 if (size > limit)
273 size = 0;
274 else
275 size = limit - size;
276 return (sysctl_handle_long(oidp, &size, 0, req));
277 }
278
279 static int
280 sysctl_vm_malloc_zone_sizes(SYSCTL_HANDLER_ARGS)
281 {
282 int sizes[nitems(kmemzones)];
283 int i;
284
285 for (i = 0; i < nitems(kmemzones); i++) {
286 sizes[i] = kmemzones[i].kz_size;
287 }
288
289 return (SYSCTL_OUT(req, &sizes, sizeof(sizes)));
290 }
291
292 /*
293 * malloc(9) uma zone separation -- sub-page buffer overruns in one
294 * malloc type will affect only a subset of other malloc types.
295 */
296 #if MALLOC_DEBUG_MAXZONES > 1
297 static void
298 tunable_set_numzones(void)
299 {
300
301 TUNABLE_INT_FETCH("debug.malloc.numzones",
302 &numzones);
303
304 /* Sanity check the number of malloc uma zones. */
305 if (numzones <= 0)
306 numzones = 1;
307 if (numzones > MALLOC_DEBUG_MAXZONES)
308 numzones = MALLOC_DEBUG_MAXZONES;
309 }
310 SYSINIT(numzones, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_set_numzones, NULL);
311 SYSCTL_INT(_debug_malloc, OID_AUTO, numzones, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
312 &numzones, 0, "Number of malloc uma subzones");
313
314 /*
315 * Any number that changes regularly is an okay choice for the
316 * offset. Build numbers are pretty good if you have them.
317 */
318 static u_int zone_offset = __FreeBSD_version;
319 TUNABLE_INT("debug.malloc.zone_offset", &zone_offset);
320 SYSCTL_UINT(_debug_malloc, OID_AUTO, zone_offset, CTLFLAG_RDTUN,
321 &zone_offset, 0, "Separate malloc types by examining the "
322 "Nth character in the malloc type short description.");
323
324 static void
325 mtp_set_subzone(struct malloc_type *mtp)
326 {
327 struct malloc_type_internal *mtip;
328 const char *desc;
329 size_t len;
330 u_int val;
331
332 mtip = &mtp->ks_mti;
333 desc = mtp->ks_shortdesc;
334 if (desc == NULL || (len = strlen(desc)) == 0)
335 val = 0;
336 else
337 val = desc[zone_offset % len];
338 mtip->mti_zone = (val % numzones);
339 }
340
341 static inline u_int
342 mtp_get_subzone(struct malloc_type *mtp)
343 {
344 struct malloc_type_internal *mtip;
345
346 mtip = &mtp->ks_mti;
347
348 KASSERT(mtip->mti_zone < numzones,
349 ("mti_zone %u out of range %d",
350 mtip->mti_zone, numzones));
351 return (mtip->mti_zone);
352 }
353 #elif MALLOC_DEBUG_MAXZONES == 0
354 #error "MALLOC_DEBUG_MAXZONES must be positive."
355 #else
356 static void
357 mtp_set_subzone(struct malloc_type *mtp)
358 {
359 struct malloc_type_internal *mtip;
360
361 mtip = &mtp->ks_mti;
362 mtip->mti_zone = 0;
363 }
364
365 static inline u_int
366 mtp_get_subzone(struct malloc_type *mtp)
367 {
368
369 return (0);
370 }
371 #endif /* MALLOC_DEBUG_MAXZONES > 1 */
372
373 /*
374 * An allocation has succeeded -- update malloc type statistics for the
375 * amount of bucket size. Occurs within a critical section so that the
376 * thread isn't preempted and doesn't migrate while updating per-CPU
377 * statistics.
378 */
379 static void
380 malloc_type_zone_allocated(struct malloc_type *mtp, unsigned long size,
381 int zindx)
382 {
383 struct malloc_type_internal *mtip;
384 struct malloc_type_stats *mtsp;
385
386 critical_enter();
387 mtip = &mtp->ks_mti;
388 mtsp = zpcpu_get(mtip->mti_stats);
389 if (size > 0) {
390 mtsp->mts_memalloced += size;
391 mtsp->mts_numallocs++;
392 }
393 if (zindx != -1)
394 mtsp->mts_size |= 1 << zindx;
395
396 #ifdef KDTRACE_HOOKS
397 if (__predict_false(dtrace_malloc_enabled)) {
398 uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_MALLOC];
399 if (probe_id != 0)
400 (dtrace_malloc_probe)(probe_id,
401 (uintptr_t) mtp, (uintptr_t) mtip,
402 (uintptr_t) mtsp, size, zindx);
403 }
404 #endif
405
406 critical_exit();
407 }
408
409 void
410 malloc_type_allocated(struct malloc_type *mtp, unsigned long size)
411 {
412
413 if (size > 0)
414 malloc_type_zone_allocated(mtp, size, -1);
415 }
416
417 /*
418 * A free operation has occurred -- update malloc type statistics for the
419 * amount of the bucket size. Occurs within a critical section so that the
420 * thread isn't preempted and doesn't migrate while updating per-CPU
421 * statistics.
422 */
423 void
424 malloc_type_freed(struct malloc_type *mtp, unsigned long size)
425 {
426 struct malloc_type_internal *mtip;
427 struct malloc_type_stats *mtsp;
428
429 critical_enter();
430 mtip = &mtp->ks_mti;
431 mtsp = zpcpu_get(mtip->mti_stats);
432 mtsp->mts_memfreed += size;
433 mtsp->mts_numfrees++;
434
435 #ifdef KDTRACE_HOOKS
436 if (__predict_false(dtrace_malloc_enabled)) {
437 uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_FREE];
438 if (probe_id != 0)
439 (dtrace_malloc_probe)(probe_id,
440 (uintptr_t) mtp, (uintptr_t) mtip,
441 (uintptr_t) mtsp, size, 0);
442 }
443 #endif
444
445 critical_exit();
446 }
447
448 /*
449 * contigmalloc:
450 *
451 * Allocate a block of physically contiguous memory.
452 *
453 * If M_NOWAIT is set, this routine will not block and return NULL if
454 * the allocation fails.
455 */
456 void *
457 contigmalloc(unsigned long size, struct malloc_type *type, int flags,
458 vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
459 vm_paddr_t boundary)
460 {
461 void *ret;
462
463 ret = (void *)kmem_alloc_contig(size, flags, low, high, alignment,
464 boundary, VM_MEMATTR_DEFAULT);
465 if (ret != NULL)
466 malloc_type_allocated(type, round_page(size));
467 return (ret);
468 }
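
/*
 * Usage sketch: a driver needing a physically contiguous 64KB DMA buffer
 * below 4GB, page aligned and with no boundary-crossing restriction,
 * might do:
 *
 *	buf = contigmalloc(64 * 1024, M_DEVBUF, M_WAITOK, 0, 0xffffffff,
 *	    PAGE_SIZE, 0);
 *
 * and later release it with contigfree(buf, 64 * 1024, M_DEVBUF).
 */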
469
470 void *
471 contigmalloc_domainset(unsigned long size, struct malloc_type *type,
472 struct domainset *ds, int flags, vm_paddr_t low, vm_paddr_t high,
473 unsigned long alignment, vm_paddr_t boundary)
474 {
475 void *ret;
476
477 ret = (void *)kmem_alloc_contig_domainset(ds, size, flags, low, high,
478 alignment, boundary, VM_MEMATTR_DEFAULT);
479 if (ret != NULL)
480 malloc_type_allocated(type, round_page(size));
481 return (ret);
482 }
483
484 /*
485 * contigfree:
486 *
487 * Free a block of memory allocated by contigmalloc.
488 *
489 * This routine may not block.
490 */
491 void
492 contigfree(void *addr, unsigned long size, struct malloc_type *type)
493 {
494
495 kmem_free((vm_offset_t)addr, size);
496 malloc_type_freed(type, round_page(size));
497 }
498
499 #ifdef MALLOC_DEBUG
500 static int
501 malloc_dbg(caddr_t *vap, size_t *sizep, struct malloc_type *mtp,
502 int flags)
503 {
504 #ifdef INVARIANTS
505 int indx;
506
507 KASSERT(mtp->ks_version == M_VERSION, ("malloc: bad malloc type version"));
508 /*
509 * Check that exactly one of M_WAITOK or M_NOWAIT is specified.
510 */
511 indx = flags & (M_WAITOK | M_NOWAIT);
512 if (indx != M_NOWAIT && indx != M_WAITOK) {
513 static struct timeval lasterr;
514 static int curerr, once;
515 if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) {
516 printf("Bad malloc flags: %x\n", indx);
517 kdb_backtrace();
518 flags |= M_WAITOK;
519 once++;
520 }
521 }
522 #endif
523 #ifdef MALLOC_MAKE_FAILURES
524 if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) {
525 atomic_add_int(&malloc_nowait_count, 1);
526 if ((malloc_nowait_count % malloc_failure_rate) == 0) {
527 atomic_add_int(&malloc_failure_count, 1);
528 *vap = NULL;
529 return (EJUSTRETURN);
530 }
531 }
532 #endif
533 if (flags & M_WAITOK) {
534 KASSERT(curthread->td_intr_nesting_level == 0,
535 ("malloc(M_WAITOK) in interrupt context"));
536 if (__predict_false(!THREAD_CAN_SLEEP())) {
537 #ifdef EPOCH_TRACE
538 epoch_trace_list(curthread);
539 #endif
540 KASSERT(0,
541 ("malloc(M_WAITOK) with sleeping prohibited"));
542 }
543 }
544 KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
545 ("malloc: called with spinlock or critical section held"));
546
547 #ifdef DEBUG_MEMGUARD
548 if (memguard_cmp_mtp(mtp, *sizep)) {
549 *vap = memguard_alloc(*sizep, flags);
550 if (*vap != NULL)
551 return (EJUSTRETURN);
552 /* This is unfortunate but should not be fatal. */
553 }
554 #endif
555
556 #ifdef DEBUG_REDZONE
557 *sizep = redzone_size_ntor(*sizep);
558 #endif
559
560 return (0);
561 }
562 #endif
563
564 /*
565 * Handle large allocations and frees by using kmem_malloc directly.
566 */
567 static inline bool
568 malloc_large_slab(uma_slab_t slab)
569 {
570 uintptr_t va;
571
572 va = (uintptr_t)slab;
573 return ((va & 1) != 0);
574 }
575
576 static inline size_t
577 malloc_large_size(uma_slab_t slab)
578 {
579 uintptr_t va;
580
581 va = (uintptr_t)slab;
582 return (va >> 1);
583 }
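
/*
 * Tagging example: for a large allocation of two 4KB pages (8192 bytes),
 * malloc_large() below stores (8192 << 1) | 1 in place of the slab
 * pointer.  The set low bit marks the address as a large allocation, and
 * malloc_large_size() recovers the 8192-byte size by shifting it back.
 */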
584
585 static caddr_t __noinline
586 malloc_large(size_t *size, struct malloc_type *mtp, struct domainset *policy,
587 int flags DEBUG_REDZONE_ARG_DEF)
588 {
589 vm_offset_t kva;
590 caddr_t va;
591 size_t sz;
592
593 sz = roundup(*size, PAGE_SIZE);
594 kva = kmem_malloc_domainset(policy, sz, flags);
595 if (kva != 0) {
596 /* The low bit is unused for slab pointers. */
597 vsetzoneslab(kva, NULL, (void *)((sz << 1) | 1));
598 uma_total_inc(sz);
599 *size = sz;
600 }
601 va = (caddr_t)kva;
602 malloc_type_allocated(mtp, va == NULL ? 0 : sz);
603 if (__predict_false(va == NULL)) {
604 KASSERT((flags & M_WAITOK) == 0,
605 ("malloc(M_WAITOK) returned NULL"));
606 }
607 #ifdef DEBUG_REDZONE
608 if (va != NULL)
609 va = redzone_setup(va, osize);
610 #endif
611 return (va);
612 }
613
614 static void
615 free_large(void *addr, size_t size)
616 {
617
618 kmem_free((vm_offset_t)addr, size);
619 uma_total_dec(size);
620 }
621
622 /*
623 * malloc:
624 *
625 * Allocate a block of memory.
626 *
627 * If M_NOWAIT is set, this routine will not block and return NULL if
628 * the allocation fails.
629 */
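
/*
 * For example, an M_NOWAIT caller must be prepared for failure, whereas
 * an M_WAITOK caller may assume the return value is never NULL:
 *
 *	p = malloc(sizeof(*p), M_TEMP, M_NOWAIT | M_ZERO);
 *	if (p == NULL)
 *		return (ENOMEM);
 */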
630 void *
631 (malloc)(size_t size, struct malloc_type *mtp, int flags)
632 {
633 int indx;
634 caddr_t va;
635 uma_zone_t zone;
636 #ifdef DEBUG_REDZONE
637 unsigned long osize = size;
638 #endif
639
640 MPASS((flags & M_EXEC) == 0);
641
642 #ifdef MALLOC_DEBUG
643 va = NULL;
644 if (malloc_dbg(&va, &size, mtp, flags) != 0)
645 return (va);
646 #endif
647
648 if (__predict_false(size > kmem_zmax))
649 return (malloc_large(&size, mtp, DOMAINSET_RR(), flags
650 DEBUG_REDZONE_ARG));
651
652 if (size & KMEM_ZMASK)
653 size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
654 indx = kmemsize[size >> KMEM_ZSHIFT];
655 zone = kmemzones[indx].kz_zone[mtp_get_subzone(mtp)];
656 va = uma_zalloc(zone, flags);
657 if (va != NULL)
658 size = zone->uz_size;
659 malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
660 if (__predict_false(va == NULL)) {
661 KASSERT((flags & M_WAITOK) == 0,
662 ("malloc(M_WAITOK) returned NULL"));
663 }
664 #ifdef DEBUG_REDZONE
665 if (va != NULL)
666 va = redzone_setup(va, osize);
667 #endif
668 return ((void *) va);
669 }
670
671 static void *
672 malloc_domain(size_t *sizep, int *indxp, struct malloc_type *mtp, int domain,
673 int flags)
674 {
675 uma_zone_t zone;
676 caddr_t va;
677 size_t size;
678 int indx;
679
680 size = *sizep;
681 KASSERT(size <= kmem_zmax && (flags & M_EXEC) == 0,
682 ("malloc_domain: Called with bad flag / size combination."));
683 if (size & KMEM_ZMASK)
684 size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
685 indx = kmemsize[size >> KMEM_ZSHIFT];
686 zone = kmemzones[indx].kz_zone[mtp_get_subzone(mtp)];
687 va = uma_zalloc_domain(zone, NULL, domain, flags);
688 if (va != NULL)
689 *sizep = zone->uz_size;
690 *indxp = indx;
691 return ((void *)va);
692 }
693
694 void *
695 malloc_domainset(size_t size, struct malloc_type *mtp, struct domainset *ds,
696 int flags)
697 {
698 struct vm_domainset_iter di;
699 caddr_t va;
700 int domain;
701 int indx;
702 #ifdef DEBUG_REDZONE
703 unsigned long osize = size;
704 #endif
705
706 MPASS((flags & M_EXEC) == 0);
707
708 #ifdef MALLOC_DEBUG
709 va = NULL;
710 if (malloc_dbg(&va, &size, mtp, flags) != 0)
711 return (va);
712 #endif
713
714 if (__predict_false(size > kmem_zmax))
715 return (malloc_large(&size, mtp, DOMAINSET_RR(), flags
716 DEBUG_REDZONE_ARG));
717
718 vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
719 do {
720 va = malloc_domain(&size, &indx, mtp, domain, flags);
721 } while (va == NULL && vm_domainset_iter_policy(&di, &domain) == 0);
722 malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
723 if (__predict_false(va == NULL)) {
724 KASSERT((flags & M_WAITOK) == 0,
725 ("malloc(M_WAITOK) returned NULL"));
726 }
727 #ifdef DEBUG_REDZONE
728 if (va != NULL)
729 va = redzone_setup(va, osize);
730 #endif
731 return (va);
732 }
733
734 /*
735 * Allocate an executable area.
736 */
737 void *
738 malloc_exec(size_t size, struct malloc_type *mtp, int flags)
739 {
740
741 return (malloc_domainset_exec(size, mtp, DOMAINSET_RR(), flags));
742 }
743
744 void *
745 malloc_domainset_exec(size_t size, struct malloc_type *mtp, struct domainset *ds,
746 int flags)
747 {
748 #ifdef DEBUG_REDZONE
749 unsigned long osize = size;
750 #endif
751 #ifdef MALLOC_DEBUG
752 caddr_t va;
753 #endif
754
755 flags |= M_EXEC;
756
757 #ifdef MALLOC_DEBUG
758 va = NULL;
759 if (malloc_dbg(&va, &size, mtp, flags) != 0)
760 return (va);
761 #endif
762
763 return (malloc_large(&size, mtp, ds, flags DEBUG_REDZONE_ARG));
764 }
765
766 void *
767 malloc_domainset_aligned(size_t size, size_t align,
768 struct malloc_type *mtp, struct domainset *ds, int flags)
769 {
770 void *res;
771 size_t asize;
772
773 KASSERT(align != 0 && powerof2(align),
774 ("malloc_domainset_aligned: wrong align %#zx size %#zx",
775 align, size));
776 KASSERT(align <= PAGE_SIZE,
777 ("malloc_domainset_aligned: align %#zx (size %#zx) too large",
778 align, size));
779
780 /*
781 * Round the allocation size up to the next power of 2,
782 * because we can only guarantee alignment for
783 * power-of-2-sized allocations. Further increase the
784 * allocation size to align if the rounded size is less than
785 * align, since malloc zones provide alignment equal to their
786 * size.
787 */
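/*
 * Worked examples: size 100 with align 64 rounds up to asize 128 (the
 * next power of 2), while size 48 with align 64 uses asize 64, because
 * the 64-byte malloc zone already provides 64-byte alignment.
 */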
788 asize = size <= align ? align : 1UL << flsl(size - 1);
789
790 res = malloc_domainset(asize, mtp, ds, flags);
791 KASSERT(res == NULL || ((uintptr_t)res & (align - 1)) == 0,
792 ("malloc_domainset_aligned: result not aligned %p size %#zx "
793 "allocsize %#zx align %#zx", res, size, asize, align));
794 return (res);
795 }
796
797 void *
798 mallocarray(size_t nmemb, size_t size, struct malloc_type *type, int flags)
799 {
800
801 if (WOULD_OVERFLOW(nmemb, size))
802 panic("mallocarray: %zu * %zu overflowed", nmemb, size);
803
804 return (malloc(size * nmemb, type, flags));
805 }
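
/*
 * Usage sketch: mallocarray() is the overflow-checked way to size an
 * array allocation, e.g.
 *
 *	tbl = mallocarray(nentries, sizeof(*tbl), M_TEMP, M_WAITOK | M_ZERO);
 *
 * A multiplication that would overflow panics instead of silently
 * allocating a short buffer.
 */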
806
807 #ifdef INVARIANTS
808 static void
809 free_save_type(void *addr, struct malloc_type *mtp, u_long size)
810 {
811 struct malloc_type **mtpp = addr;
812
813 /*
814 * Cache a pointer to the malloc_type that most recently freed
815 * this memory here. This way we know who is most likely to
816 * have stepped on it later.
817 *
818 * This code assumes that size is a multiple of 8 bytes for
819 * 64 bit machines
820 */
821 mtpp = (struct malloc_type **) ((unsigned long)mtpp & ~UMA_ALIGN_PTR);
822 mtpp += (size - sizeof(struct malloc_type *)) /
823 sizeof(struct malloc_type *);
824 *mtpp = mtp;
825 }
826 #endif
827
828 #ifdef MALLOC_DEBUG
829 static int
830 free_dbg(void **addrp, struct malloc_type *mtp)
831 {
832 void *addr;
833
834 addr = *addrp;
835 KASSERT(mtp->ks_version == M_VERSION, ("free: bad malloc type version"));
836 KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
837 ("free: called with spinlock or critical section held"));
838
839 /* free(NULL, ...) does nothing */
840 if (addr == NULL)
841 return (EJUSTRETURN);
842
843 #ifdef DEBUG_MEMGUARD
844 if (is_memguard_addr(addr)) {
845 memguard_free(addr);
846 return (EJUSTRETURN);
847 }
848 #endif
849
850 #ifdef DEBUG_REDZONE
851 redzone_check(addr);
852 *addrp = redzone_addr_ntor(addr);
853 #endif
854
855 return (0);
856 }
857 #endif
858
859 /*
860 * free:
861 *
862 * Free a block of memory allocated by malloc.
863 *
864 * This routine may not block.
865 */
866 void
867 free(void *addr, struct malloc_type *mtp)
868 {
869 uma_zone_t zone;
870 uma_slab_t slab;
871 u_long size;
872
873 #ifdef MALLOC_DEBUG
874 if (free_dbg(&addr, mtp) != 0)
875 return;
876 #endif
877 /* free(NULL, ...) does nothing */
878 if (addr == NULL)
879 return;
880
881 vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
882 if (slab == NULL)
883 panic("free: address %p(%p) has not been allocated.\n",
884 addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));
885
886 if (__predict_true(!malloc_large_slab(slab))) {
887 size = zone->uz_size;
888 #ifdef INVARIANTS
889 free_save_type(addr, mtp, size);
890 #endif
891 uma_zfree_arg(zone, addr, slab);
892 } else {
893 size = malloc_large_size(slab);
894 free_large(addr, size);
895 }
896 malloc_type_freed(mtp, size);
897 }
898
899 /*
900 * zfree:
901 *
902 * Zero then free a block of memory allocated by malloc.
903 *
904 * This routine may not block.
905 */
906 void
907 zfree(void *addr, struct malloc_type *mtp)
908 {
909 uma_zone_t zone;
910 uma_slab_t slab;
911 u_long size;
912
913 #ifdef MALLOC_DEBUG
914 if (free_dbg(&addr, mtp) != 0)
915 return;
916 #endif
917 /* free(NULL, ...) does nothing */
918 if (addr == NULL)
919 return;
920
921 vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
922 if (slab == NULL)
923 panic("free: address %p(%p) has not been allocated.\n",
924 addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));
925
926 if (__predict_true(!malloc_large_slab(slab))) {
927 size = zone->uz_size;
928 #ifdef INVARIANTS
929 free_save_type(addr, mtp, size);
930 #endif
931 explicit_bzero(addr, size);
932 uma_zfree_arg(zone, addr, slab);
933 } else {
934 size = malloc_large_size(slab);
935 explicit_bzero(addr, size);
936 free_large(addr, size);
937 }
938 malloc_type_freed(mtp, size);
939 }
940
941 /*
942 * realloc: change the size of a memory block
943 */
944 void *
945 realloc(void *addr, size_t size, struct malloc_type *mtp, int flags)
946 {
947 uma_zone_t zone;
948 uma_slab_t slab;
949 unsigned long alloc;
950 void *newaddr;
951
952 KASSERT(mtp->ks_version == M_VERSION,
953 ("realloc: bad malloc type version"));
954 KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
955 ("realloc: called with spinlock or critical section held"));
956
957 /* realloc(NULL, ...) is equivalent to malloc(...) */
958 if (addr == NULL)
959 return (malloc(size, mtp, flags));
960
961 /*
962 * XXX: Should report free of old memory and alloc of new memory to
963 * per-CPU stats.
964 */
965
966 #ifdef DEBUG_MEMGUARD
967 if (is_memguard_addr(addr))
968 return (memguard_realloc(addr, size, mtp, flags));
969 #endif
970
971 #ifdef DEBUG_REDZONE
972 slab = NULL;
973 zone = NULL;
974 alloc = redzone_get_size(addr);
975 #else
976 vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
977
978 /* Sanity check */
979 KASSERT(slab != NULL,
980 ("realloc: address %p out of range", (void *)addr));
981
982 /* Get the size of the original block */
983 if (!malloc_large_slab(slab))
984 alloc = zone->uz_size;
985 else
986 alloc = malloc_large_size(slab);
987
988 /* Reuse the original block if appropriate */
989 if (size <= alloc
990 && (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE))
991 return (addr);
992 #endif /* !DEBUG_REDZONE */
993
994 /* Allocate a new, bigger (or smaller) block */
995 if ((newaddr = malloc(size, mtp, flags)) == NULL)
996 return (NULL);
997
998 /* Copy over original contents */
999 bcopy(addr, newaddr, min(size, alloc));
1000 free(addr, mtp);
1001 return (newaddr);
1002 }
1003
1004 /*
1005 * reallocf: same as realloc() but free memory on failure.
1006 */
1007 void *
1008 reallocf(void *addr, size_t size, struct malloc_type *mtp, int flags)
1009 {
1010 void *mem;
1011
1012 if ((mem = realloc(addr, size, mtp, flags)) == NULL)
1013 free(addr, mtp);
1014 return (mem);
1015 }
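
/*
 * Usage sketch: reallocf() avoids the classic leak where a failed
 * realloc() overwrites the only pointer to the old buffer:
 *
 *	if ((buf = reallocf(buf, newsize, M_TEMP, M_NOWAIT)) == NULL)
 *		return (ENOMEM);
 *
 * On failure the old buffer has already been freed.
 */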
1016
1017 /*
1018 * malloc_size: returns the number of bytes allocated for a request of the
1019 * specified size
1020 */
1021 size_t
1022 malloc_size(size_t size)
1023 {
1024 int indx;
1025
1026 if (size > kmem_zmax)
1027 return (0);
1028 if (size & KMEM_ZMASK)
1029 size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
1030 indx = kmemsize[size >> KMEM_ZSHIFT];
1031 return (kmemzones[indx].kz_size);
1032 }
1033
1034 /*
1035 * malloc_usable_size: returns the usable size of the allocation.
1036 */
1037 size_t
1038 malloc_usable_size(const void *addr)
1039 {
1040 #ifndef DEBUG_REDZONE
1041 uma_zone_t zone;
1042 uma_slab_t slab;
1043 #endif
1044 u_long size;
1045
1046 if (addr == NULL)
1047 return (0);
1048
1049 #ifdef DEBUG_MEMGUARD
1050 if (is_memguard_addr(__DECONST(void *, addr)))
1051 return (memguard_get_req_size(addr));
1052 #endif
1053
1054 #ifdef DEBUG_REDZONE
1055 size = redzone_get_size(__DECONST(void *, addr));
1056 #else
1057 vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
1058 if (slab == NULL)
1059 panic("malloc_usable_size: address %p(%p) is not allocated.\n",
1060 addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));
1061
1062 if (!malloc_large_slab(slab))
1063 size = zone->uz_size;
1064 else
1065 size = malloc_large_size(slab);
1066 #endif
1067 return (size);
1068 }
1069
1070 CTASSERT(VM_KMEM_SIZE_SCALE >= 1);
1071
1072 /*
1073 * Initialize the kernel memory (kmem) arena.
1074 */
1075 void
1076 kmeminit(void)
1077 {
1078 u_long mem_size;
1079 u_long tmp;
1080
1081 #ifdef VM_KMEM_SIZE
1082 if (vm_kmem_size == 0)
1083 vm_kmem_size = VM_KMEM_SIZE;
1084 #endif
1085 #ifdef VM_KMEM_SIZE_MIN
1086 if (vm_kmem_size_min == 0)
1087 vm_kmem_size_min = VM_KMEM_SIZE_MIN;
1088 #endif
1089 #ifdef VM_KMEM_SIZE_MAX
1090 if (vm_kmem_size_max == 0)
1091 vm_kmem_size_max = VM_KMEM_SIZE_MAX;
1092 #endif
1093 /*
1094 * Calculate the amount of kernel virtual address (KVA) space that is
1095 * preallocated to the kmem arena. In order to support a wide range
1096 * of machines, it is a function of the physical memory size,
1097 * specifically,
1098 *
1099 * min(max(physical memory size / VM_KMEM_SIZE_SCALE,
1100 * VM_KMEM_SIZE_MIN), VM_KMEM_SIZE_MAX)
1101 *
1102 * Every architecture must define an integral value for
1103 * VM_KMEM_SIZE_SCALE. However, the definitions of VM_KMEM_SIZE_MIN
1104 * and VM_KMEM_SIZE_MAX, which represent respectively the floor and
1105 * ceiling on this preallocation, are optional. Typically,
1106 * VM_KMEM_SIZE_MAX is itself a function of the available KVA space on
1107 * a given architecture.
1108 */
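/*
 * Illustration (assuming 4KB pages and, purely for the sake of the
 * example, VM_KMEM_SIZE_SCALE == 3): a machine with 4GB of RAM has
 * roughly one million pages, so about 1.3GB of KVA is preallocated
 * unless clamped by VM_KMEM_SIZE_MIN/MAX or overridden by the
 * vm.kmem_size tunables.
 */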
1109 mem_size = vm_cnt.v_page_count;
1110 if (mem_size <= 32768) /* delphij XXX 128MB */
1111 kmem_zmax = PAGE_SIZE;
1112
1113 if (vm_kmem_size_scale < 1)
1114 vm_kmem_size_scale = VM_KMEM_SIZE_SCALE;
1115
1116 /*
1117 * Check if we should use defaults for the "vm_kmem_size"
1118 * variable:
1119 */
1120 if (vm_kmem_size == 0) {
1121 vm_kmem_size = mem_size / vm_kmem_size_scale;
1122 vm_kmem_size = vm_kmem_size * PAGE_SIZE < vm_kmem_size ?
1123 vm_kmem_size_max : vm_kmem_size * PAGE_SIZE;
1124 if (vm_kmem_size_min > 0 && vm_kmem_size < vm_kmem_size_min)
1125 vm_kmem_size = vm_kmem_size_min;
1126 if (vm_kmem_size_max > 0 && vm_kmem_size >= vm_kmem_size_max)
1127 vm_kmem_size = vm_kmem_size_max;
1128 }
1129 if (vm_kmem_size == 0)
1130 panic("Tune VM_KMEM_SIZE_* for the platform");
1131
1132 /*
1133 * The amount of KVA space that is preallocated to the
1134 * kmem arena can be set statically at compile-time or manually
1135 * through the kernel environment. However, it is still limited to
1136 * twice the physical memory size, which has been sufficient to handle
1137 * the most severe cases of external fragmentation in the kmem arena.
1138 */
1139 if (vm_kmem_size / 2 / PAGE_SIZE > mem_size)
1140 vm_kmem_size = 2 * mem_size * PAGE_SIZE;
1141
1142 vm_kmem_size = round_page(vm_kmem_size);
1143 #ifdef DEBUG_MEMGUARD
1144 tmp = memguard_fudge(vm_kmem_size, kernel_map);
1145 #else
1146 tmp = vm_kmem_size;
1147 #endif
1148 uma_set_limit(tmp);
1149
1150 #ifdef DEBUG_MEMGUARD
1151 /*
1152 * Initialize MemGuard if support compiled in. MemGuard is a
1153 * replacement allocator used for detecting tamper-after-free
1154 * scenarios as they occur. It is only used for debugging.
1155 */
1156 memguard_init(kernel_arena);
1157 #endif
1158 }
1159
1160 /*
1161 * Initialize the kernel memory allocator
1162 */
1163 /* ARGSUSED*/
1164 static void
1165 mallocinit(void *dummy)
1166 {
1167 int i;
1168 uint8_t indx;
1169
1170 mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);
1171
1172 kmeminit();
1173
1174 if (kmem_zmax < PAGE_SIZE || kmem_zmax > KMEM_ZMAX)
1175 kmem_zmax = KMEM_ZMAX;
1176
1177 for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
1178 int size = kmemzones[indx].kz_size;
1179 const char *name = kmemzones[indx].kz_name;
1180 size_t align;
1181 int subzone;
1182
1183 align = UMA_ALIGN_PTR;
1184 if (powerof2(size) && size > sizeof(void *))
1185 align = MIN(size, PAGE_SIZE) - 1;
1186 for (subzone = 0; subzone < numzones; subzone++) {
1187 kmemzones[indx].kz_zone[subzone] =
1188 uma_zcreate(name, size,
1189 #ifdef INVARIANTS
1190 mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
1191 #else
1192 NULL, NULL, NULL, NULL,
1193 #endif
1194 align, UMA_ZONE_MALLOC);
1195 }
1196 for (;i <= size; i+= KMEM_ZBASE)
1197 kmemsize[i >> KMEM_ZSHIFT] = indx;
1198 }
1199 }
1200 SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_SECOND, mallocinit, NULL);
1201
1202 void
1203 malloc_init(void *data)
1204 {
1205 struct malloc_type_internal *mtip;
1206 struct malloc_type *mtp;
1207
1208 KASSERT(vm_cnt.v_page_count != 0, ("malloc_register before vm_init"));
1209
1210 mtp = data;
1211 if (mtp->ks_version != M_VERSION)
1212 panic("malloc_init: type %s with unsupported version %lu",
1213 mtp->ks_shortdesc, mtp->ks_version);
1214
1215 mtip = &mtp->ks_mti;
1216 mtip->mti_stats = uma_zalloc_pcpu(pcpu_zone_64, M_WAITOK | M_ZERO);
1217 mtp_set_subzone(mtp);
1218
1219 mtx_lock(&malloc_mtx);
1220 mtp->ks_next = kmemstatistics;
1221 kmemstatistics = mtp;
1222 kmemcount++;
1223 mtx_unlock(&malloc_mtx);
1224 }
1225
1226 void
1227 malloc_uninit(void *data)
1228 {
1229 struct malloc_type_internal *mtip;
1230 struct malloc_type_stats *mtsp;
1231 struct malloc_type *mtp, *temp;
1232 long temp_allocs, temp_bytes;
1233 int i;
1234
1235 mtp = data;
1236 KASSERT(mtp->ks_version == M_VERSION,
1237 ("malloc_uninit: bad malloc type version"));
1238
1239 mtx_lock(&malloc_mtx);
1240 mtip = &mtp->ks_mti;
1241 if (mtp != kmemstatistics) {
1242 for (temp = kmemstatistics; temp != NULL;
1243 temp = temp->ks_next) {
1244 if (temp->ks_next == mtp) {
1245 temp->ks_next = mtp->ks_next;
1246 break;
1247 }
1248 }
1249 KASSERT(temp,
1250 ("malloc_uninit: type '%s' not found", mtp->ks_shortdesc));
1251 } else
1252 kmemstatistics = mtp->ks_next;
1253 kmemcount--;
1254 mtx_unlock(&malloc_mtx);
1255
1256 /*
1257 * Look for memory leaks.
1258 */
1259 temp_allocs = temp_bytes = 0;
1260 for (i = 0; i <= mp_maxid; i++) {
1261 mtsp = zpcpu_get_cpu(mtip->mti_stats, i);
1262 temp_allocs += mtsp->mts_numallocs;
1263 temp_allocs -= mtsp->mts_numfrees;
1264 temp_bytes += mtsp->mts_memalloced;
1265 temp_bytes -= mtsp->mts_memfreed;
1266 }
1267 if (temp_allocs > 0 || temp_bytes > 0) {
1268 printf("Warning: memory type %s leaked memory on destroy "
1269 "(%ld allocations, %ld bytes leaked).\n", mtp->ks_shortdesc,
1270 temp_allocs, temp_bytes);
1271 }
1272
1273 uma_zfree_pcpu(pcpu_zone_64, mtip->mti_stats);
1274 }
1275
1276 struct malloc_type *
1277 malloc_desc2type(const char *desc)
1278 {
1279 struct malloc_type *mtp;
1280
1281 mtx_assert(&malloc_mtx, MA_OWNED);
1282 for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
1283 if (strcmp(mtp->ks_shortdesc, desc) == 0)
1284 return (mtp);
1285 }
1286 return (NULL);
1287 }
1288
1289 static int
1290 sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS)
1291 {
1292 struct malloc_type_stream_header mtsh;
1293 struct malloc_type_internal *mtip;
1294 struct malloc_type_stats *mtsp, zeromts;
1295 struct malloc_type_header mth;
1296 struct malloc_type *mtp;
1297 int error, i;
1298 struct sbuf sbuf;
1299
1300 error = sysctl_wire_old_buffer(req, 0);
1301 if (error != 0)
1302 return (error);
1303 sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
1304 sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
1305 mtx_lock(&malloc_mtx);
1306
1307 bzero(&zeromts, sizeof(zeromts));
1308
1309 /*
1310 * Insert stream header.
1311 */
1312 bzero(&mtsh, sizeof(mtsh));
1313 mtsh.mtsh_version = MALLOC_TYPE_STREAM_VERSION;
1314 mtsh.mtsh_maxcpus = MAXCPU;
1315 mtsh.mtsh_count = kmemcount;
1316 (void)sbuf_bcat(&sbuf, &mtsh, sizeof(mtsh));
1317
1318 /*
1319 * Insert alternating sequence of type headers and type statistics.
1320 */
1321 for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
1322 mtip = &mtp->ks_mti;
1323
1324 /*
1325 * Insert type header.
1326 */
1327 bzero(&mth, sizeof(mth));
1328 strlcpy(mth.mth_name, mtp->ks_shortdesc, MALLOC_MAX_NAME);
1329 (void)sbuf_bcat(&sbuf, &mth, sizeof(mth));
1330
1331 /*
1332 * Insert type statistics for each CPU.
1333 */
1334 for (i = 0; i <= mp_maxid; i++) {
1335 mtsp = zpcpu_get_cpu(mtip->mti_stats, i);
1336 (void)sbuf_bcat(&sbuf, mtsp, sizeof(*mtsp));
1337 }
1338 /*
1339 * Fill in the missing CPUs.
1340 */
1341 for (; i < MAXCPU; i++) {
1342 (void)sbuf_bcat(&sbuf, &zeromts, sizeof(zeromts));
1343 }
1344 }
1345 mtx_unlock(&malloc_mtx);
1346 error = sbuf_finish(&sbuf);
1347 sbuf_delete(&sbuf);
1348 return (error);
1349 }
1350
1351 SYSCTL_PROC(_kern, OID_AUTO, malloc_stats,
1352 CTLFLAG_RD | CTLTYPE_STRUCT | CTLFLAG_MPSAFE, 0, 0,
1353 sysctl_kern_malloc_stats, "s,malloc_type_ustats",
1354 "Return malloc types");
1355
1356 SYSCTL_INT(_kern, OID_AUTO, malloc_count, CTLFLAG_RD, &kmemcount, 0,
1357 "Count of kernel malloc types");
1358
1359 void
1360 malloc_type_list(malloc_type_list_func_t *func, void *arg)
1361 {
1362 struct malloc_type *mtp, **bufmtp;
1363 int count, i;
1364 size_t buflen;
1365
1366 mtx_lock(&malloc_mtx);
1367 restart:
1368 mtx_assert(&malloc_mtx, MA_OWNED);
1369 count = kmemcount;
1370 mtx_unlock(&malloc_mtx);
1371
1372 buflen = sizeof(struct malloc_type *) * count;
1373 bufmtp = malloc(buflen, M_TEMP, M_WAITOK);
1374
1375 mtx_lock(&malloc_mtx);
1376
1377 if (count < kmemcount) {
1378 free(bufmtp, M_TEMP);
1379 goto restart;
1380 }
1381
1382 for (mtp = kmemstatistics, i = 0; mtp != NULL; mtp = mtp->ks_next, i++)
1383 bufmtp[i] = mtp;
1384
1385 mtx_unlock(&malloc_mtx);
1386
1387 for (i = 0; i < count; i++)
1388 (func)(bufmtp[i], arg);
1389
1390 free(bufmtp, M_TEMP);
1391 }
1392
1393 #ifdef DDB
1394 static int64_t
1395 get_malloc_stats(const struct malloc_type_internal *mtip, uint64_t *allocs,
1396 uint64_t *inuse)
1397 {
1398 const struct malloc_type_stats *mtsp;
1399 uint64_t frees, alloced, freed;
1400 int i;
1401
1402 *allocs = 0;
1403 frees = 0;
1404 alloced = 0;
1405 freed = 0;
1406 for (i = 0; i <= mp_maxid; i++) {
1407 mtsp = zpcpu_get_cpu(mtip->mti_stats, i);
1408
1409 *allocs += mtsp->mts_numallocs;
1410 frees += mtsp->mts_numfrees;
1411 alloced += mtsp->mts_memalloced;
1412 freed += mtsp->mts_memfreed;
1413 }
1414 *inuse = *allocs - frees;
1415 return (alloced - freed);
1416 }
1417
1418 DB_SHOW_COMMAND(malloc, db_show_malloc)
1419 {
1420 const char *fmt_hdr, *fmt_entry;
1421 struct malloc_type *mtp;
1422 uint64_t allocs, inuse;
1423 int64_t size;
1424 /* variables for sorting */
1425 struct malloc_type *last_mtype, *cur_mtype;
1426 int64_t cur_size, last_size;
1427 int ties;
1428
1429 if (modif[0] == 'i') {
1430 fmt_hdr = "%s,%s,%s,%s\n";
1431 fmt_entry = "\"%s\",%ju,%jdK,%ju\n";
1432 } else {
1433 fmt_hdr = "%18s %12s %12s %12s\n";
1434 fmt_entry = "%18s %12ju %12jdK %12ju\n";
1435 }
1436
1437 db_printf(fmt_hdr, "Type", "InUse", "MemUse", "Requests");
1438
1439 /* Select sort, largest size first. */
1440 last_mtype = NULL;
1441 last_size = INT64_MAX;
1442 for (;;) {
1443 cur_mtype = NULL;
1444 cur_size = -1;
1445 ties = 0;
1446
1447 for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
1448 /*
1449 * In the case of size ties, print out mtypes
1450 * in the order they are encountered. That is,
1451 * when we encounter the most recently output
1452 * mtype, we have already printed all preceding
1453 * ties, and we must print all following ties.
1454 */
1455 if (mtp == last_mtype) {
1456 ties = 1;
1457 continue;
1458 }
1459 size = get_malloc_stats(&mtp->ks_mti, &allocs,
1460 &inuse);
1461 if (size > cur_size && size < last_size + ties) {
1462 cur_size = size;
1463 cur_mtype = mtp;
1464 }
1465 }
1466 if (cur_mtype == NULL)
1467 break;
1468
1469 size = get_malloc_stats(&cur_mtype->ks_mti, &allocs, &inuse);
1470 db_printf(fmt_entry, cur_mtype->ks_shortdesc, inuse,
1471 howmany(size, 1024), allocs);
1472
1473 if (db_pager_quit)
1474 break;
1475
1476 last_mtype = cur_mtype;
1477 last_size = cur_size;
1478 }
1479 }
1480
1481 #if MALLOC_DEBUG_MAXZONES > 1
1482 DB_SHOW_COMMAND(multizone_matches, db_show_multizone_matches)
1483 {
1484 struct malloc_type_internal *mtip;
1485 struct malloc_type *mtp;
1486 u_int subzone;
1487
1488 if (!have_addr) {
1489 db_printf("Usage: show multizone_matches <malloc type/addr>\n");
1490 return;
1491 }
1492 mtp = (void *)addr;
1493 if (mtp->ks_version != M_VERSION) {
1494 db_printf("Version %lx does not match expected %x\n",
1495 mtp->ks_version, M_VERSION);
1496 return;
1497 }
1498
1499 mtip = &mtp->ks_mti;
1500 subzone = mtip->mti_zone;
1501
1502 for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
1503 mtip = &mtp->ks_mti;
1504 if (mtip->mti_zone != subzone)
1505 continue;
1506 db_printf("%s\n", mtp->ks_shortdesc);
1507 if (db_pager_quit)
1508 break;
1509 }
1510 }
1511 #endif /* MALLOC_DEBUG_MAXZONES > 1 */
1512 #endif /* DDB */