/*-
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2005-2009 Robert N. M. Watson
 * Copyright (c) 2008 Otto Moerbeek <otto@drijf.net> (mallocarray)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 */

/*
 * Kernel malloc(9) implementation -- general purpose kernel memory allocator
 * based on memory types.  Back end is implemented using the UMA(9) zone
 * allocator.  A set of fixed-size buckets is used for smaller allocations,
 * and a special UMA allocation interface is used for larger allocations.
 * Callers declare memory types, and statistics are maintained independently
 * for each memory type.  Statistics are maintained per-CPU for performance
 * reasons.  See malloc(9) and the comments in malloc.h for a detailed
 * description.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/vmmeter.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/vmem.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#ifdef DEBUG_MEMGUARD
#include <vm/memguard.h>
#endif
#ifdef DEBUG_REDZONE
#include <vm/redzone.h>
#endif

#if defined(INVARIANTS) && defined(__i386__)
#include <machine/cpu.h>
#endif

#include <ddb/ddb.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>

dtrace_malloc_probe_func_t	dtrace_malloc_probe;
#endif

/*
 * When realloc() is called, if the new size is sufficiently smaller than
 * the old size, realloc() will allocate a new, smaller block to avoid
 * wasting memory.  'Sufficiently smaller' is defined as: newsize <=
 * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'.
 */
#ifndef REALLOC_FRACTION
#define	REALLOC_FRACTION	1	/* new block if <= half the size */
#endif
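
/*
 * Worked example (illustrative only, not compiled): with the default
 * REALLOC_FRACTION of 1, a block is replaced by a smaller one only when
 * the new size is at most half the old size:
 *
 *	p = malloc(1024, M_TEMP, M_WAITOK);
 *	p = realloc(p, 600, M_TEMP, M_WAITOK);	-- 600 > 512, block reused
 *	p = realloc(p, 512, M_TEMP, M_WAITOK);	-- 512 <= 512, new block
 */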

/*
 * Centrally define some common malloc types.
 */
MALLOC_DEFINE(M_CACHE, "cache", "Various dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

static struct malloc_type *kmemstatistics;
static int kmemcount;

#define	KMEM_ZSHIFT	4
#define	KMEM_ZBASE	16
#define	KMEM_ZMASK	(KMEM_ZBASE - 1)

#define	KMEM_ZMAX	65536
#define	KMEM_ZSIZE	(KMEM_ZMAX >> KMEM_ZSHIFT)
static uint8_t kmemsize[KMEM_ZSIZE + 1];

#ifndef MALLOC_DEBUG_MAXZONES
#define	MALLOC_DEBUG_MAXZONES	1
#endif
static int numzones = MALLOC_DEBUG_MAXZONES;

/*
 * Small malloc(9) memory allocations are allocated from a set of UMA buckets
 * of various sizes.
 *
 * XXX: The comment here used to read "These won't be powers of two for
 * long."  It's possible that a significant amount of wasted memory could be
 * recovered by tuning the sizes of these buckets.
 */
struct {
	int kz_size;
	char *kz_name;
	uma_zone_t kz_zone[MALLOC_DEBUG_MAXZONES];
} kmemzones[] = {
	{16, "16", },
	{32, "32", },
	{64, "64", },
	{128, "128", },
	{256, "256", },
	{512, "512", },
	{1024, "1024", },
	{2048, "2048", },
	{4096, "4096", },
	{8192, "8192", },
	{16384, "16384", },
	{32768, "32768", },
	{65536, "65536", },
	{0, NULL},
};
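
/*
 * Worked example (illustrative only): a 100-byte request is first rounded
 * up to the next KMEM_ZBASE (16-byte) boundary:
 *
 *	(100 & ~KMEM_ZMASK) + KMEM_ZBASE = 96 + 16 = 112
 *
 * and kmemsize[112 >> KMEM_ZSHIFT] then selects the 128-byte zone, the
 * smallest bucket that can hold it.  The 28 bytes of slack in this case is
 * the kind of waste the XXX comment above refers to.
 */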

/*
 * Zone to allocate malloc type descriptions from.  For ABI reasons, memory
 * types are described by a data structure passed by the declaring code, but
 * the malloc(9) implementation has its own data structure describing the
 * type and statistics.  This permits the malloc(9)-internal data structures
 * to be modified without breaking binary-compiled kernel modules that
 * declare malloc types.
 */
static uma_zone_t mt_zone;

u_long vm_kmem_size;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size, CTLFLAG_RDTUN, &vm_kmem_size, 0,
    "Size of kernel memory");

static u_long kmem_zmax = KMEM_ZMAX;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_zmax, CTLFLAG_RDTUN, &kmem_zmax, 0,
    "Maximum allocation size for which malloc(9) uses UMA as the backend");

static u_long vm_kmem_size_min;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_min, CTLFLAG_RDTUN, &vm_kmem_size_min, 0,
    "Minimum size of kernel memory");

static u_long vm_kmem_size_max;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_max, CTLFLAG_RDTUN, &vm_kmem_size_max, 0,
    "Maximum size of kernel memory");

static u_int vm_kmem_size_scale;
SYSCTL_UINT(_vm, OID_AUTO, kmem_size_scale, CTLFLAG_RDTUN, &vm_kmem_size_scale, 0,
    "Scale factor for kernel memory size");

static int sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, kmem_map_size,
    CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_kmem_map_size, "LU", "Current kmem allocation size");

static int sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, kmem_map_free,
    CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_kmem_map_free, "LU", "Free space in kmem");

/*
 * The malloc_mtx protects the kmemstatistics linked list.
 */
struct mtx malloc_mtx;

#ifdef MALLOC_PROFILE
uint64_t krequests[KMEM_ZSIZE + 1];

static int sysctl_kern_mprof(SYSCTL_HANDLER_ARGS);
#endif

static int sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS);

/*
 * time_uptime of the last malloc(9) failure (induced or real).
 */
static time_t t_malloc_fail;

#if defined(MALLOC_MAKE_FAILURES) || (MALLOC_DEBUG_MAXZONES > 1)
static SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD, 0,
    "Kernel malloc debugging options");
#endif

/*
 * malloc(9) fault injection -- cause malloc failures every (n) mallocs when
 * the caller specifies M_NOWAIT.  If set to 0, no failures are caused.
 */
#ifdef MALLOC_MAKE_FAILURES
static int malloc_failure_rate;
static int malloc_nowait_count;
static int malloc_failure_count;
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RWTUN,
    &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail");
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD,
    &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures");
#endif

static int
sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS)
{
	u_long size;

	size = vmem_size(kmem_arena, VMEM_ALLOC);
	return (sysctl_handle_long(oidp, &size, 0, req));
}

static int
sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS)
{
	u_long size;

	size = vmem_size(kmem_arena, VMEM_FREE);
	return (sysctl_handle_long(oidp, &size, 0, req));
}

/*
 * malloc(9) uma zone separation -- sub-page buffer overruns in one
 * malloc type will affect only a subset of other malloc types.
 */
#if MALLOC_DEBUG_MAXZONES > 1
static void
tunable_set_numzones(void)
{

	TUNABLE_INT_FETCH("debug.malloc.numzones", &numzones);

	/* Sanity check the number of malloc uma zones. */
	if (numzones <= 0)
		numzones = 1;
	if (numzones > MALLOC_DEBUG_MAXZONES)
		numzones = MALLOC_DEBUG_MAXZONES;
}
SYSINIT(numzones, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_set_numzones, NULL);
SYSCTL_INT(_debug_malloc, OID_AUTO, numzones, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &numzones, 0, "Number of malloc uma subzones");

/*
 * Any number that changes regularly is an okay choice for the
 * offset.  Build numbers are pretty good if you have them.
 */
static u_int zone_offset = __FreeBSD_version;
TUNABLE_INT("debug.malloc.zone_offset", &zone_offset);
SYSCTL_UINT(_debug_malloc, OID_AUTO, zone_offset, CTLFLAG_RDTUN,
    &zone_offset, 0, "Separate malloc types by examining the "
    "Nth character in the malloc type short description.");

static u_int
mtp_get_subzone(const char *desc)
{
	size_t len;
	u_int val;

	if (desc == NULL || (len = strlen(desc)) == 0)
		return (0);
	val = desc[zone_offset % len];
	return (val % numzones);
}
#elif MALLOC_DEBUG_MAXZONES == 0
#error "MALLOC_DEBUG_MAXZONES must be positive."
#else
static inline u_int
mtp_get_subzone(const char *desc)
{

	return (0);
}
#endif /* MALLOC_DEBUG_MAXZONES > 1 */
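
/*
 * Worked example (illustrative only, with hypothetical tuning values):
 * built with MALLOC_DEBUG_MAXZONES == 8 and zone_offset == 3, the type
 * "devbuf" is hashed on desc[3 % 6], i.e. 'b' (0x62), giving subzone
 * 0x62 % 8 == 2.  Changing zone_offset between builds reshuffles which
 * types share a subzone, so an overrun that corrupts one neighbouring
 * type in this build will likely hit a different neighbour in the next.
 */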

int
malloc_last_fail(void)
{

	return (time_uptime - t_malloc_fail);
}

/*
 * An allocation has succeeded -- update malloc type statistics for the
 * amount of the bucket size.  Occurs within a critical section so that the
 * thread isn't preempted and doesn't migrate while updating per-CPU
 * statistics.
 */
static void
malloc_type_zone_allocated(struct malloc_type *mtp, unsigned long size,
    int zindx)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = mtp->ks_handle;
	mtsp = &mtip->mti_stats[curcpu];
	if (size > 0) {
		mtsp->mts_memalloced += size;
		mtsp->mts_numallocs++;
	}
	if (zindx != -1)
		mtsp->mts_size |= 1 << zindx;

#ifdef KDTRACE_HOOKS
	if (dtrace_malloc_probe != NULL) {
		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_MALLOC];
		if (probe_id != 0)
			(dtrace_malloc_probe)(probe_id,
			    (uintptr_t) mtp, (uintptr_t) mtip,
			    (uintptr_t) mtsp, size, zindx);
	}
#endif

	critical_exit();
}

void
malloc_type_allocated(struct malloc_type *mtp, unsigned long size)
{

	if (size > 0)
		malloc_type_zone_allocated(mtp, size, -1);
}

/*
 * A free operation has occurred -- update malloc type statistics for the
 * amount of the bucket size.  Occurs within a critical section so that the
 * thread isn't preempted and doesn't migrate while updating per-CPU
 * statistics.
 */
void
malloc_type_freed(struct malloc_type *mtp, unsigned long size)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = mtp->ks_handle;
	mtsp = &mtip->mti_stats[curcpu];
	mtsp->mts_memfreed += size;
	mtsp->mts_numfrees++;

#ifdef KDTRACE_HOOKS
	if (dtrace_malloc_probe != NULL) {
		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_FREE];
		if (probe_id != 0)
			(dtrace_malloc_probe)(probe_id,
			    (uintptr_t) mtp, (uintptr_t) mtip,
			    (uintptr_t) mtsp, size, 0);
	}
#endif

	critical_exit();
}

/*
 * contigmalloc:
 *
 *	Allocate a block of physically contiguous memory.
 *
 *	If M_NOWAIT is set, this routine will not block; it returns NULL
 *	if the allocation fails.
 */
void *
contigmalloc(unsigned long size, struct malloc_type *type, int flags,
    vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
    vm_paddr_t boundary)
{
	void *ret;

	ret = (void *)kmem_alloc_contig(kernel_arena, size, flags, low, high,
	    alignment, boundary, VM_MEMATTR_DEFAULT);
	if (ret != NULL)
		malloc_type_allocated(type, round_page(size));
	return (ret);
}

/*
 * contigfree:
 *
 *	Free a block of memory allocated by contigmalloc.
 *
 *	This routine may not block.
 */
void
contigfree(void *addr, unsigned long size, struct malloc_type *type)
{

	kmem_free(kernel_arena, (vm_offset_t)addr, size);
	malloc_type_freed(type, round_page(size));
}
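
/*
 * Usage sketch (illustrative only): a driver needing a 64KB DMA buffer
 * that is physically contiguous, below 4GB, and aligned to its own size
 * might call:
 *
 *	buf = contigmalloc(65536, M_DEVBUF, M_WAITOK, 0, 0xffffffff,
 *	    65536, 0);
 *	...
 *	contigfree(buf, 65536, M_DEVBUF);
 *
 * Note that contigfree() requires the caller to remember the size; unlike
 * free(), no slab lookup recovers it.
 */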

/*
 * malloc:
 *
 *	Allocate a block of memory.
 *
 *	If M_NOWAIT is set, this routine will not block; it returns NULL
 *	if the allocation fails.
 */
void *
malloc(unsigned long size, struct malloc_type *mtp, int flags)
{
	int indx;
	struct malloc_type_internal *mtip;
	caddr_t va;
	uma_zone_t zone;
#if defined(DIAGNOSTIC) || defined(DEBUG_REDZONE)
	unsigned long osize = size;
#endif

#ifdef INVARIANTS
	KASSERT(mtp->ks_magic == M_MAGIC, ("malloc: bad malloc type magic"));
	/*
	 * Check that exactly one of M_WAITOK or M_NOWAIT is specified.
	 */
	indx = flags & (M_WAITOK | M_NOWAIT);
	if (indx != M_NOWAIT && indx != M_WAITOK) {
		static struct timeval lasterr;
		static int curerr, once;
		if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) {
			printf("Bad malloc flags: %x\n", indx);
			kdb_backtrace();
			flags |= M_WAITOK;
			once++;
		}
	}
#endif
#ifdef MALLOC_MAKE_FAILURES
	if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) {
		atomic_add_int(&malloc_nowait_count, 1);
		if ((malloc_nowait_count % malloc_failure_rate) == 0) {
			atomic_add_int(&malloc_failure_count, 1);
			t_malloc_fail = time_uptime;
			return (NULL);
		}
	}
#endif
	if (flags & M_WAITOK)
		KASSERT(curthread->td_intr_nesting_level == 0,
		    ("malloc(M_WAITOK) in interrupt context"));
	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("malloc: called with spinlock or critical section held"));

#ifdef DEBUG_MEMGUARD
	if (memguard_cmp_mtp(mtp, size)) {
		va = memguard_alloc(size, flags);
		if (va != NULL)
			return (va);
		/* This is unfortunate but should not be fatal. */
	}
#endif

#ifdef DEBUG_REDZONE
	size = redzone_size_ntor(size);
#endif

	if (size <= kmem_zmax) {
		mtip = mtp->ks_handle;
		if (size & KMEM_ZMASK)
			size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
		indx = kmemsize[size >> KMEM_ZSHIFT];
		KASSERT(mtip->mti_zone < numzones,
		    ("mti_zone %u out of range %d",
		    mtip->mti_zone, numzones));
		zone = kmemzones[indx].kz_zone[mtip->mti_zone];
#ifdef MALLOC_PROFILE
		krequests[size >> KMEM_ZSHIFT]++;
#endif
		va = uma_zalloc(zone, flags);
		if (va != NULL)
			size = zone->uz_size;
		malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
	} else {
		size = roundup(size, PAGE_SIZE);
		zone = NULL;
		va = uma_large_malloc(size, flags);
		malloc_type_allocated(mtp, va == NULL ? 0 : size);
	}
	if (flags & M_WAITOK)
		KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL"));
	else if (va == NULL)
		t_malloc_fail = time_uptime;
#ifdef DIAGNOSTIC
	if (va != NULL && !(flags & M_ZERO)) {
		memset(va, 0x70, osize);
	}
#endif
#ifdef DEBUG_REDZONE
	if (va != NULL)
		va = redzone_setup(va, osize);
#endif
	return ((void *) va);
}
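
/*
 * Usage sketch (illustrative only; "sc", "tmp", and "len" are
 * hypothetical): with M_WAITOK the return value may be used
 * unconditionally, while with M_NOWAIT it must be checked:
 *
 *	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO);
 *
 *	tmp = malloc(len, M_TEMP, M_NOWAIT);
 *	if (tmp == NULL)
 *		return (ENOMEM);
 */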

void *
mallocarray(size_t nmemb, size_t size, struct malloc_type *type, int flags)
{

	if (WOULD_OVERFLOW(nmemb, size))
		panic("mallocarray: %zu * %zu overflowed", nmemb, size);

	return (malloc(size * nmemb, type, flags));
}
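
/*
 * Usage sketch (illustrative only; "table", "nitems", and "struct entry"
 * are hypothetical): mallocarray() is preferred over an open-coded
 * multiplication when either operand is caller-controlled, since
 * nmemb * size can silently wrap:
 *
 *	table = mallocarray(nitems, sizeof(struct entry), M_TEMP,
 *	    M_WAITOK | M_ZERO);
 *
 * On overflow the kernel panics rather than returning a too-small buffer.
 */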

/*
 * free:
 *
 *	Free a block of memory allocated by malloc.
 *
 *	This routine may not block.
 */
void
free(void *addr, struct malloc_type *mtp)
{
	uma_slab_t slab;
	u_long size;

	KASSERT(mtp->ks_magic == M_MAGIC, ("free: bad malloc type magic"));
	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("free: called with spinlock or critical section held"));

	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return;

#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(addr)) {
		memguard_free(addr);
		return;
	}
#endif

#ifdef DEBUG_REDZONE
	redzone_check(addr);
	addr = redzone_addr_ntor(addr);
#endif

	slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK));

	if (slab == NULL)
		panic("free: address %p(%p) has not been allocated.\n",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

	if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
#ifdef INVARIANTS
		struct malloc_type **mtpp = addr;
#endif
		size = slab->us_keg->uk_size;
#ifdef INVARIANTS
		/*
		 * Cache a pointer to the malloc_type that most recently freed
		 * this memory here.  This way we know who is most likely to
		 * have stepped on it later.
		 *
		 * This code assumes that size is a multiple of 8 bytes on
		 * 64-bit machines.
		 */
		mtpp = (struct malloc_type **)
		    ((unsigned long)mtpp & ~UMA_ALIGN_PTR);
		mtpp += (size - sizeof(struct malloc_type *)) /
		    sizeof(struct malloc_type *);
		*mtpp = mtp;
#endif
		uma_zfree_arg(LIST_FIRST(&slab->us_keg->uk_zones), addr, slab);
	} else {
		size = slab->us_size;
		uma_large_free(slab);
	}
	malloc_type_freed(mtp, size);
}

/*
 * realloc: change the size of a memory block
 */
void *
realloc(void *addr, unsigned long size, struct malloc_type *mtp, int flags)
{
	uma_slab_t slab;
	unsigned long alloc;
	void *newaddr;

	KASSERT(mtp->ks_magic == M_MAGIC,
	    ("realloc: bad malloc type magic"));
	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("realloc: called with spinlock or critical section held"));

	/* realloc(NULL, ...) is equivalent to malloc(...) */
	if (addr == NULL)
		return (malloc(size, mtp, flags));

	/*
	 * XXX: Should report free of old memory and alloc of new memory to
	 * per-CPU stats.
	 */

#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(addr))
		return (memguard_realloc(addr, size, mtp, flags));
#endif

#ifdef DEBUG_REDZONE
	slab = NULL;
	alloc = redzone_get_size(addr);
#else
	slab = vtoslab((vm_offset_t)addr & ~(UMA_SLAB_MASK));

	/* Sanity check */
	KASSERT(slab != NULL,
	    ("realloc: address %p out of range", (void *)addr));

	/* Get the size of the original block */
	if (!(slab->us_flags & UMA_SLAB_MALLOC))
		alloc = slab->us_keg->uk_size;
	else
		alloc = slab->us_size;

	/* Reuse the original block if appropriate */
	if (size <= alloc
	    && (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE))
		return (addr);
#endif /* !DEBUG_REDZONE */

	/* Allocate a new, bigger (or smaller) block */
	if ((newaddr = malloc(size, mtp, flags)) == NULL)
		return (NULL);

	/* Copy over original contents */
	bcopy(addr, newaddr, min(size, alloc));
	free(addr, mtp);
	return (newaddr);
}

/*
 * reallocf: same as realloc() but free memory on failure.
 */
void *
reallocf(void *addr, unsigned long size, struct malloc_type *mtp, int flags)
{
	void *mem;

	if ((mem = realloc(addr, size, mtp, flags)) == NULL)
		free(addr, mtp);
	return (mem);
}
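
/*
 * Usage sketch (illustrative only; "buf" and "newlen" are hypothetical):
 * reallocf() avoids the classic leak where the only pointer to the old
 * block is overwritten by a failed realloc():
 *
 *	buf = reallocf(buf, newlen, M_TEMP, M_NOWAIT);
 *	if (buf == NULL)
 *		return (ENOMEM);	-- old block already freed
 */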

/*
 * Wake the uma reclamation pagedaemon thread when we exhaust KVA.  It
 * will call the lowmem handler and uma_reclaim() callbacks in a safe
 * context.
 */
static void
kmem_reclaim(vmem_t *vm, int flags)
{

	uma_reclaim_wakeup();
	pagedaemon_wakeup();
}

#ifndef __sparc64__
CTASSERT(VM_KMEM_SIZE_SCALE >= 1);
#endif

/*
 * Initialize the kernel memory (kmem) arena.
 */
void
kmeminit(void)
{
	u_long mem_size;
	u_long tmp;

#ifdef VM_KMEM_SIZE
	if (vm_kmem_size == 0)
		vm_kmem_size = VM_KMEM_SIZE;
#endif
#ifdef VM_KMEM_SIZE_MIN
	if (vm_kmem_size_min == 0)
		vm_kmem_size_min = VM_KMEM_SIZE_MIN;
#endif
#ifdef VM_KMEM_SIZE_MAX
	if (vm_kmem_size_max == 0)
		vm_kmem_size_max = VM_KMEM_SIZE_MAX;
#endif
	/*
	 * Calculate the amount of kernel virtual address (KVA) space that is
	 * preallocated to the kmem arena.  In order to support a wide range
	 * of machines, it is a function of the physical memory size,
	 * specifically,
	 *
	 *	min(max(physical memory size / VM_KMEM_SIZE_SCALE,
	 *	    VM_KMEM_SIZE_MIN), VM_KMEM_SIZE_MAX)
	 *
	 * Every architecture must define an integral value for
	 * VM_KMEM_SIZE_SCALE.  However, the definitions of VM_KMEM_SIZE_MIN
	 * and VM_KMEM_SIZE_MAX, which represent respectively the floor and
	 * ceiling on this preallocation, are optional.  Typically,
	 * VM_KMEM_SIZE_MAX is itself a function of the available KVA space on
	 * a given architecture.
	 */
	mem_size = vm_cnt.v_page_count;
	if (mem_size <= 32768) /* delphij XXX 128MB */
		kmem_zmax = PAGE_SIZE;

	if (vm_kmem_size_scale < 1)
		vm_kmem_size_scale = VM_KMEM_SIZE_SCALE;

	/*
	 * Check if we should use defaults for the "vm_kmem_size"
	 * variable:
	 */
	if (vm_kmem_size == 0) {
		vm_kmem_size = (mem_size / vm_kmem_size_scale) * PAGE_SIZE;

		if (vm_kmem_size_min > 0 && vm_kmem_size < vm_kmem_size_min)
			vm_kmem_size = vm_kmem_size_min;
		if (vm_kmem_size_max > 0 && vm_kmem_size >= vm_kmem_size_max)
			vm_kmem_size = vm_kmem_size_max;
	}
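
	/*
	 * Worked example (illustrative only): on a hypothetical machine
	 * with 4GB of RAM, 4KB pages, and VM_KMEM_SIZE_SCALE == 3, the
	 * default preallocation would be (1048576 / 3) * 4096 bytes,
	 * roughly 1.33GB, subject to the VM_KMEM_SIZE_MIN/MAX clamps
	 * above when those are defined.
	 */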

	/*
	 * The amount of KVA space that is preallocated to the
	 * kmem arena can be set statically at compile-time or manually
	 * through the kernel environment.  However, it is still limited to
	 * twice the physical memory size, which has been sufficient to handle
	 * the most severe cases of external fragmentation in the kmem arena.
	 */
	if (vm_kmem_size / 2 / PAGE_SIZE > mem_size)
		vm_kmem_size = 2 * mem_size * PAGE_SIZE;

	vm_kmem_size = round_page(vm_kmem_size);
#ifdef DEBUG_MEMGUARD
	tmp = memguard_fudge(vm_kmem_size, kernel_map);
#else
	tmp = vm_kmem_size;
#endif
	vmem_init(kmem_arena, "kmem arena", kva_alloc(tmp), tmp, PAGE_SIZE,
	    0, 0);
	vmem_set_reclaim(kmem_arena, kmem_reclaim);

#ifdef DEBUG_MEMGUARD
	/*
	 * Initialize MemGuard if support compiled in.  MemGuard is a
	 * replacement allocator used for detecting tamper-after-free
	 * scenarios as they occur.  It is only used for debugging.
	 */
	memguard_init(kmem_arena);
#endif
}

/*
 * Initialize the kernel memory allocator
 */
/* ARGSUSED*/
static void
mallocinit(void *dummy)
{
	int i;
	uint8_t indx;

	mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);

	kmeminit();

	uma_startup2();

	if (kmem_zmax < PAGE_SIZE || kmem_zmax > KMEM_ZMAX)
		kmem_zmax = KMEM_ZMAX;

	mt_zone = uma_zcreate("mt_zone", sizeof(struct malloc_type_internal),
#ifdef INVARIANTS
	    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
	    NULL, NULL, NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);
	for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
		int size = kmemzones[indx].kz_size;
		char *name = kmemzones[indx].kz_name;
		int subzone;

		for (subzone = 0; subzone < numzones; subzone++) {
			kmemzones[indx].kz_zone[subzone] =
			    uma_zcreate(name, size,
#ifdef INVARIANTS
			    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
			    NULL, NULL, NULL, NULL,
#endif
			    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);
		}
		for (; i <= size; i += KMEM_ZBASE)
			kmemsize[i >> KMEM_ZSHIFT] = indx;
	}
}
SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_SECOND, mallocinit, NULL);

void
malloc_init(void *data)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;

	KASSERT(vm_cnt.v_page_count != 0, ("malloc_register before vm_init"));

	mtp = data;
	if (mtp->ks_magic != M_MAGIC)
		panic("malloc_init: bad malloc type magic");

	mtip = uma_zalloc(mt_zone, M_WAITOK | M_ZERO);
	mtp->ks_handle = mtip;
	mtip->mti_zone = mtp_get_subzone(mtp->ks_shortdesc);

	mtx_lock(&malloc_mtx);
	mtp->ks_next = kmemstatistics;
	kmemstatistics = mtp;
	kmemcount++;
	mtx_unlock(&malloc_mtx);
}
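
/*
 * Usage sketch (illustrative only; M_FOODEV is hypothetical): a kernel
 * module declares its own type, which reaches malloc_init() and
 * malloc_uninit() via the SYSINIT/SYSUNINIT hooks generated by the macro:
 *
 *	MALLOC_DEFINE(M_FOODEV, "foodev", "foo driver state");
 *	...
 *	sc = malloc(sizeof(*sc), M_FOODEV, M_WAITOK | M_ZERO);
 *
 * Statistics for the type then appear under vmstat -m and the
 * kern.malloc_stats sysctl.
 */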

void
malloc_uninit(void *data)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;
	struct malloc_type *mtp, *temp;
	uma_slab_t slab;
	long temp_allocs, temp_bytes;
	int i;

	mtp = data;
	KASSERT(mtp->ks_magic == M_MAGIC,
	    ("malloc_uninit: bad malloc type magic"));
	KASSERT(mtp->ks_handle != NULL, ("malloc_deregister: cookie NULL"));

	mtx_lock(&malloc_mtx);
	mtip = mtp->ks_handle;
	mtp->ks_handle = NULL;
	if (mtp != kmemstatistics) {
		for (temp = kmemstatistics; temp != NULL;
		    temp = temp->ks_next) {
			if (temp->ks_next == mtp) {
				temp->ks_next = mtp->ks_next;
				break;
			}
		}
		KASSERT(temp,
		    ("malloc_uninit: type '%s' not found", mtp->ks_shortdesc));
	} else
		kmemstatistics = mtp->ks_next;
	kmemcount--;
	mtx_unlock(&malloc_mtx);

	/*
	 * Look for memory leaks.
	 */
	temp_allocs = temp_bytes = 0;
	for (i = 0; i < MAXCPU; i++) {
		mtsp = &mtip->mti_stats[i];
		temp_allocs += mtsp->mts_numallocs;
		temp_allocs -= mtsp->mts_numfrees;
		temp_bytes += mtsp->mts_memalloced;
		temp_bytes -= mtsp->mts_memfreed;
	}
	if (temp_allocs > 0 || temp_bytes > 0) {
		printf("Warning: memory type %s leaked memory on destroy "
		    "(%ld allocations, %ld bytes leaked).\n", mtp->ks_shortdesc,
		    temp_allocs, temp_bytes);
	}

	slab = vtoslab((vm_offset_t) mtip & (~UMA_SLAB_MASK));
	uma_zfree_arg(mt_zone, mtip, slab);
}

struct malloc_type *
malloc_desc2type(const char *desc)
{
	struct malloc_type *mtp;

	mtx_assert(&malloc_mtx, MA_OWNED);
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		if (strcmp(mtp->ks_shortdesc, desc) == 0)
			return (mtp);
	}
	return (NULL);
}

static int
sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS)
{
	struct malloc_type_stream_header mtsh;
	struct malloc_type_internal *mtip;
	struct malloc_type_header mth;
	struct malloc_type *mtp;
	int error, i;
	struct sbuf sbuf;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
	mtx_lock(&malloc_mtx);

	/*
	 * Insert stream header.
	 */
	bzero(&mtsh, sizeof(mtsh));
	mtsh.mtsh_version = MALLOC_TYPE_STREAM_VERSION;
	mtsh.mtsh_maxcpus = MAXCPU;
	mtsh.mtsh_count = kmemcount;
	(void)sbuf_bcat(&sbuf, &mtsh, sizeof(mtsh));

	/*
	 * Insert alternating sequence of type headers and type statistics.
	 */
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = (struct malloc_type_internal *)mtp->ks_handle;

		/*
		 * Insert type header.
		 */
		bzero(&mth, sizeof(mth));
		strlcpy(mth.mth_name, mtp->ks_shortdesc, MALLOC_MAX_NAME);
		(void)sbuf_bcat(&sbuf, &mth, sizeof(mth));

		/*
		 * Insert type statistics for each CPU.
		 */
		for (i = 0; i < MAXCPU; i++) {
			(void)sbuf_bcat(&sbuf, &mtip->mti_stats[i],
			    sizeof(mtip->mti_stats[i]));
		}
	}
	mtx_unlock(&malloc_mtx);
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, malloc_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
    0, 0, sysctl_kern_malloc_stats, "s,malloc_type_ustats",
    "Return malloc types");

SYSCTL_INT(_kern, OID_AUTO, malloc_count, CTLFLAG_RD, &kmemcount, 0,
    "Count of kernel malloc types");

void
malloc_type_list(malloc_type_list_func_t *func, void *arg)
{
	struct malloc_type *mtp, **bufmtp;
	int count, i;
	size_t buflen;

	mtx_lock(&malloc_mtx);
restart:
	mtx_assert(&malloc_mtx, MA_OWNED);
	count = kmemcount;
	mtx_unlock(&malloc_mtx);

	buflen = sizeof(struct malloc_type *) * count;
	bufmtp = malloc(buflen, M_TEMP, M_WAITOK);

	mtx_lock(&malloc_mtx);

	if (count < kmemcount) {
		free(bufmtp, M_TEMP);
		goto restart;
	}

	for (mtp = kmemstatistics, i = 0; mtp != NULL; mtp = mtp->ks_next, i++)
		bufmtp[i] = mtp;

	mtx_unlock(&malloc_mtx);

	for (i = 0; i < count; i++)
		(func)(bufmtp[i], arg);

	free(bufmtp, M_TEMP);
}

#ifdef DDB
DB_SHOW_COMMAND(malloc, db_show_malloc)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;
	uint64_t allocs, frees;
	uint64_t alloced, freed;
	int i;

	db_printf("%18s %12s %12s %12s\n", "Type", "InUse", "MemUse",
	    "Requests");
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = (struct malloc_type_internal *)mtp->ks_handle;
		allocs = 0;
		frees = 0;
		alloced = 0;
		freed = 0;
		for (i = 0; i < MAXCPU; i++) {
			allocs += mtip->mti_stats[i].mts_numallocs;
			frees += mtip->mti_stats[i].mts_numfrees;
			alloced += mtip->mti_stats[i].mts_memalloced;
			freed += mtip->mti_stats[i].mts_memfreed;
		}
		db_printf("%18s %12ju %12juK %12ju\n",
		    mtp->ks_shortdesc, allocs - frees,
		    (alloced - freed + 1023) / 1024, allocs);
		if (db_pager_quit)
			break;
	}
}

#if MALLOC_DEBUG_MAXZONES > 1
DB_SHOW_COMMAND(multizone_matches, db_show_multizone_matches)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;
	u_int subzone;

	if (!have_addr) {
		db_printf("Usage: show multizone_matches <malloc type/addr>\n");
		return;
	}
	mtp = (void *)addr;
	if (mtp->ks_magic != M_MAGIC) {
		db_printf("Magic %lx does not match expected %x\n",
		    mtp->ks_magic, M_MAGIC);
		return;
	}

	mtip = mtp->ks_handle;
	subzone = mtip->mti_zone;

	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = mtp->ks_handle;
		if (mtip->mti_zone != subzone)
			continue;
		db_printf("%s\n", mtp->ks_shortdesc);
		if (db_pager_quit)
			break;
	}
}
#endif /* MALLOC_DEBUG_MAXZONES > 1 */
#endif /* DDB */

#ifdef MALLOC_PROFILE

static int
sysctl_kern_mprof(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	uint64_t count;
	uint64_t waste;
	uint64_t mem;
	int error;
	int rsize;
	int size;
	int i;

	waste = 0;
	mem = 0;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	sbuf_printf(&sbuf,
	    "\n  Size                    Requests  Real Size\n");
	for (i = 0; i < KMEM_ZSIZE; i++) {
		size = i << KMEM_ZSHIFT;
		rsize = kmemzones[kmemsize[i]].kz_size;
		count = krequests[i];

		sbuf_printf(&sbuf, "%6d%28llu%11d\n", size,
		    (unsigned long long)count, rsize);

		if ((rsize * count) > (size * count))
			waste += (rsize * count) - (size * count);
		mem += (rsize * count);
	}
	sbuf_printf(&sbuf,
	    "\nTotal memory used:\t%30llu\nTotal Memory wasted:\t%30llu\n",
	    (unsigned long long)mem, (unsigned long long)waste);
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

SYSCTL_OID(_kern, OID_AUTO, mprof, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, sysctl_kern_mprof, "A", "Malloc Profiling");
#endif /* MALLOC_PROFILE */